diff --git a/.dockerignore b/.dockerignore index 3a8e436d515..f24c490e9ad 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,5 +1,11 @@ .git .worktrees + +# Sensitive files – docker-setup.sh writes .env with OPENCLAW_GATEWAY_TOKEN +# into the project root; keep it out of the build context. +.env +.env.* + .bun-cache .bun .tmp diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml deleted file mode 100644 index 082086ea079..00000000000 --- a/.github/FUNDING.yml +++ /dev/null @@ -1 +0,0 @@ -custom: ["https://github.com/sponsors/steipete"] diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index c45885b48b6..3be43c6740a 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -76,6 +76,37 @@ body: label: Install method description: How OpenClaw was installed or launched. placeholder: npm global / pnpm dev / docker / mac app + - type: input + id: model + attributes: + label: Model + description: Effective model under test. + placeholder: minimax/text-01 / openrouter/anthropic/claude-opus-4.1 / anthropic/claude-sonnet-4.5 + validations: + required: true + - type: input + id: provider_chain + attributes: + label: Provider / routing chain + description: Effective request path through gateways, proxies, providers, or model routers. + placeholder: openclaw -> cloudflare-ai-gateway -> minimax + validations: + required: true + - type: input + id: config_location + attributes: + label: Config file / key location + description: Optional. Relevant config source or key path if this bug depends on overrides or custom provider setup. Redact secrets. + placeholder: ~/.openclaw/openclaw.json ; models.providers.cloudflare-ai-gateway.baseUrl ; ~/.openclaw/agents//agent/models.json + - type: textarea + id: provider_setup_details + attributes: + label: Additional provider/model setup details + description: Optional. 
Include redacted routing details, per-agent overrides, auth-profile interactions, env/config context, or anything else needed to explain the effective provider/model setup. Do not include API keys, tokens, or passwords. + placeholder: | + Default route is openclaw -> cloudflare-ai-gateway -> minimax. + Previous setup was openclaw -> cloudflare-ai-gateway -> openrouter -> minimax. + Relevant config lives in ~/.openclaw/openclaw.json under models.providers.minimax and models.providers.cloudflare-ai-gateway. - type: textarea id: logs attributes: diff --git a/.github/actions/setup-node-env/action.yml b/.github/actions/setup-node-env/action.yml index c46387517e4..41ca9eb98b0 100644 --- a/.github/actions/setup-node-env/action.yml +++ b/.github/actions/setup-node-env/action.yml @@ -1,12 +1,16 @@ name: Setup Node environment description: > - Initialize submodules with retry, install Node 22, pnpm, optionally Bun, + Initialize submodules with retry, install Node 24 by default, pnpm, optionally Bun, and optionally run pnpm install. Requires actions/checkout to run first. inputs: node-version: description: Node.js version to install. required: false - default: "22.x" + default: "24.x" + cache-key-suffix: + description: Suffix appended to the pnpm store cache key. + required: false + default: "node24" pnpm-version: description: pnpm version for corepack. required: false @@ -16,7 +20,7 @@ inputs: required: false default: "true" use-sticky-disk: - description: Use Blacksmith sticky disks for pnpm store caching. + description: Request Blacksmith sticky-disk pnpm caching on trusted runs; pull_request runs fall back to actions/cache. 
required: false default: "false" install-deps: @@ -45,7 +49,7 @@ runs: exit 1 - name: Setup Node.js - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 + uses: actions/setup-node@v6 with: node-version: ${{ inputs.node-version }} check-latest: false @@ -54,12 +58,12 @@ runs: uses: ./.github/actions/setup-pnpm-store-cache with: pnpm-version: ${{ inputs.pnpm-version }} - cache-key-suffix: "node22" + cache-key-suffix: ${{ inputs.cache-key-suffix }} use-sticky-disk: ${{ inputs.use-sticky-disk }} - name: Setup Bun if: inputs.install-bun == 'true' - uses: oven-sh/setup-bun@v2 + uses: oven-sh/setup-bun@v2.1.3 with: bun-version: "1.3.9" diff --git a/.github/actions/setup-pnpm-store-cache/action.yml b/.github/actions/setup-pnpm-store-cache/action.yml index e1e5a34abda..2f7c992a978 100644 --- a/.github/actions/setup-pnpm-store-cache/action.yml +++ b/.github/actions/setup-pnpm-store-cache/action.yml @@ -8,9 +8,9 @@ inputs: cache-key-suffix: description: Suffix appended to the cache key. required: false - default: "node22" + default: "node24" use-sticky-disk: - description: Use Blacksmith sticky disks instead of actions/cache for pnpm store. + description: Use Blacksmith sticky disks instead of actions/cache for pnpm store on trusted runs; pull_request runs fall back to actions/cache. required: false default: "false" use-restore-keys: @@ -18,7 +18,7 @@ inputs: required: false default: "true" use-actions-cache: - description: Whether to restore/save pnpm store with actions/cache. + description: Whether to restore/save pnpm store with actions/cache, including pull_request fallback when sticky disks are disabled. required: false default: "true" runs: @@ -51,22 +51,24 @@ runs: run: echo "path=$(pnpm store path --silent)" >> "$GITHUB_OUTPUT" - name: Mount pnpm store sticky disk - if: inputs.use-sticky-disk == 'true' + # Keep persistent sticky-disk state off untrusted PR runs. 
+ if: inputs.use-sticky-disk == 'true' && github.event_name != 'pull_request' uses: useblacksmith/stickydisk@v1 with: - key: ${{ github.repository }}-pnpm-store-${{ runner.os }}-${{ inputs.cache-key-suffix }} + key: ${{ github.repository }}-pnpm-store-${{ runner.os }}-${{ github.ref_name }}-${{ inputs.cache-key-suffix }}-${{ hashFiles('pnpm-lock.yaml') }} path: ${{ steps.pnpm-store.outputs.path }} - name: Restore pnpm store cache (exact key only) - if: inputs.use-actions-cache == 'true' && inputs.use-sticky-disk != 'true' && inputs.use-restore-keys != 'true' - uses: actions/cache@v4 + # PRs that request sticky disks still need a safe cache restore path. + if: inputs.use-actions-cache == 'true' && (inputs.use-sticky-disk != 'true' || github.event_name == 'pull_request') && inputs.use-restore-keys != 'true' + uses: actions/cache@v5 with: path: ${{ steps.pnpm-store.outputs.path }} key: ${{ runner.os }}-pnpm-store-${{ inputs.cache-key-suffix }}-${{ hashFiles('pnpm-lock.yaml') }} - name: Restore pnpm store cache (with fallback keys) - if: inputs.use-actions-cache == 'true' && inputs.use-sticky-disk != 'true' && inputs.use-restore-keys == 'true' - uses: actions/cache@v4 + if: inputs.use-actions-cache == 'true' && (inputs.use-sticky-disk != 'true' || github.event_name == 'pull_request') && inputs.use-restore-keys == 'true' + uses: actions/cache@v5 with: path: ${{ steps.pnpm-store.outputs.path }} key: ${{ runner.os }}-pnpm-store-${{ inputs.cache-key-suffix }}-${{ hashFiles('pnpm-lock.yaml') }} diff --git a/.github/workflows/auto-response.yml b/.github/workflows/auto-response.yml index a40149b7ccb..69dff002c7b 100644 --- a/.github/workflows/auto-response.yml +++ b/.github/workflows/auto-response.yml @@ -5,9 +5,12 @@ on: types: [opened, edited, labeled] issue_comment: types: [created] - pull_request_target: + pull_request_target: # zizmor: ignore[dangerous-triggers] maintainer-owned label automation; no untrusted checkout or code execution types: [labeled] +env: + 
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + permissions: {} jobs: @@ -17,20 +20,20 @@ jobs: pull-requests: write runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - - uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1 + - uses: actions/create-github-app-token@v2 id: app-token continue-on-error: true with: app-id: "2729701" private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} - - uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1 + - uses: actions/create-github-app-token@v2 id: app-token-fallback if: steps.app-token.outcome == 'failure' with: app-id: "2971289" private-key: ${{ secrets.GH_APP_PRIVATE_KEY_FALLBACK }} - name: Handle labeled items - uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7 + uses: actions/github-script@v8 with: github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }} script: | @@ -51,6 +54,7 @@ jobs: }, { label: "r: no-ci-pr", + close: true, message: "Please don't make PRs for test failures on main.\n\n" + "The team is aware of those and will handle them directly on the codebase, not only fixing the tests but also investigating what the root cause is. Having to sift through test-fix-PRs (including some that have been out of date for weeks...) on top of that doesn't help. There are already way too many PRs for humans to manage; please don't make the flood worse.\n\n" + @@ -392,6 +396,7 @@ jobs: } const invalidLabel = "invalid"; + const spamLabel = "r: spam"; const dirtyLabel = "dirty"; const noisyPrMessage = "Closing this PR because it looks dirty (too many unrelated or unexpected changes). This usually happens when a branch picks up unrelated commits or a merge went sideways. 
Please recreate the PR from a clean branch."; @@ -428,6 +433,21 @@ jobs: }); return; } + if (labelSet.has(spamLabel)) { + await github.rest.issues.update({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: pullRequest.number, + state: "closed", + }); + await github.rest.issues.lock({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: pullRequest.number, + lock_reason: "spam", + }); + return; + } if (labelSet.has(invalidLabel)) { await github.rest.issues.update({ owner: context.repo.owner, @@ -439,6 +459,23 @@ jobs: } } + if (issue && labelSet.has(spamLabel)) { + await github.rest.issues.update({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issue.number, + state: "closed", + state_reason: "not_planned", + }); + await github.rest.issues.lock({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: issue.number, + lock_reason: "spam", + }); + return; + } + if (issue && labelSet.has(invalidLabel)) { await github.rest.issues.update({ owner: context.repo.owner, diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1d248d5c804..00670107d00 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,7 +7,10 @@ on: concurrency: group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: ${{ github.event_name == 'pull_request' }} + cancel-in-progress: true + +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" jobs: # Detect docs-only changes to skip heavy jobs (test, build, Windows, macOS, Android). @@ -19,7 +22,7 @@ jobs: docs_changed: ${{ steps.check.outputs.docs_changed }} steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 1 fetch-tags: false @@ -35,9 +38,8 @@ jobs: id: check uses: ./.github/actions/detect-docs-changes - # Detect which heavy areas are touched so PRs can skip unrelated expensive jobs. 
- # Push to main keeps broad coverage, but this job still needs to run so - # downstream jobs that list it in `needs` are not skipped. + # Detect which heavy areas are touched so CI can skip unrelated expensive jobs. + # Fail-safe: if detection fails, downstream jobs run. changed-scope: needs: [docs-scope] if: needs.docs-scope.outputs.docs_only != 'true' @@ -50,7 +52,7 @@ jobs: run_windows: ${{ steps.scope.outputs.run_windows }} steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 1 fetch-tags: false @@ -79,11 +81,11 @@ jobs: # Build dist once for Node-relevant changes and share it with downstream jobs. build-artifacts: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') + if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: submodules: false @@ -98,13 +100,13 @@ jobs: uses: ./.github/actions/setup-node-env with: install-bun: "false" - use-sticky-disk: "true" + use-sticky-disk: "false" - name: Build dist run: pnpm build - name: Upload dist artifact - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v7 with: name: dist-build path: dist/ @@ -117,7 +119,7 @@ jobs: runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: submodules: false @@ -125,10 +127,10 @@ jobs: uses: ./.github/actions/setup-node-env with: install-bun: "false" - use-sticky-disk: "true" + use-sticky-disk: "false" - name: Download dist artifact - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v8 with: name: dist-build path: dist/ @@ -138,7 +140,7 @@ jobs: checks: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only 
!= 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 strategy: fail-fast: false @@ -146,6 +148,13 @@ jobs: include: - runtime: node task: test + shard_index: 1 + shard_count: 2 + command: pnpm canvas:a2ui:bundle && pnpm test + - runtime: node + task: test + shard_index: 2 + shard_count: 2 command: pnpm canvas:a2ui:bundle && pnpm test - runtime: node task: extensions @@ -157,44 +166,51 @@ jobs: task: test command: pnpm canvas:a2ui:bundle && bunx vitest run --config vitest.unit.config.ts steps: - - name: Skip bun lane on push - if: github.event_name == 'push' && matrix.runtime == 'bun' - run: echo "Skipping bun test lane on push events." + - name: Skip bun lane on pull requests + if: github.event_name == 'pull_request' && matrix.runtime == 'bun' + run: echo "Skipping Bun compatibility lane on pull requests." - name: Checkout - if: github.event_name != 'push' || matrix.runtime != 'bun' - uses: actions/checkout@v4 + if: github.event_name != 'pull_request' || matrix.runtime != 'bun' + uses: actions/checkout@v6 with: submodules: false - name: Setup Node environment - if: matrix.runtime != 'bun' || github.event_name != 'push' + if: matrix.runtime != 'bun' || github.event_name != 'pull_request' uses: ./.github/actions/setup-node-env with: install-bun: "${{ matrix.runtime == 'bun' }}" - use-sticky-disk: "true" + use-sticky-disk: "false" - name: Configure Node test resources - if: (github.event_name != 'push' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node' + if: (github.event_name != 'pull_request' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node' + env: + SHARD_COUNT: ${{ matrix.shard_count || '' }} + SHARD_INDEX: ${{ matrix.shard_index || '' }} run: | # `pnpm test` runs `scripts/test-parallel.mjs`, which spawns multiple Node 
processes. # Default heap limits have been too low on Linux CI (V8 OOM near 4GB). echo "OPENCLAW_TEST_WORKERS=2" >> "$GITHUB_ENV" echo "OPENCLAW_TEST_MAX_OLD_SPACE_SIZE_MB=6144" >> "$GITHUB_ENV" + if [ -n "$SHARD_COUNT" ] && [ -n "$SHARD_INDEX" ]; then + echo "OPENCLAW_TEST_SHARDS=$SHARD_COUNT" >> "$GITHUB_ENV" + echo "OPENCLAW_TEST_SHARD_INDEX=$SHARD_INDEX" >> "$GITHUB_ENV" + fi - name: Run ${{ matrix.task }} (${{ matrix.runtime }}) - if: matrix.runtime != 'bun' || github.event_name != 'push' + if: matrix.runtime != 'bun' || github.event_name != 'pull_request' run: ${{ matrix.command }} # Types, lint, and format check. check: name: "check" needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: submodules: false @@ -202,7 +218,7 @@ jobs: uses: ./.github/actions/setup-node-env with: install-bun: "false" - use-sticky-disk: "true" + use-sticky-disk: "false" - name: Check types and lint and oxfmt run: pnpm check @@ -220,7 +236,7 @@ jobs: runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: submodules: false @@ -228,23 +244,57 @@ jobs: uses: ./.github/actions/setup-node-env with: install-bun: "false" - use-sticky-disk: "true" + use-sticky-disk: "false" - name: Check docs run: pnpm check:docs - skills-python: + compat-node22: + name: "compat-node22" needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true' || needs.changed-scope.outputs.run_skills_python == 'true') + if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && 
needs.changed-scope.outputs.run_node == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 + with: + submodules: false + + - name: Setup Node 22 compatibility environment + uses: ./.github/actions/setup-node-env + with: + node-version: "22.x" + cache-key-suffix: "node22" + install-bun: "false" + use-sticky-disk: "false" + + - name: Configure Node 22 test resources + run: | + # Keep the compatibility lane aligned with the default Node test lane. + echo "OPENCLAW_TEST_WORKERS=2" >> "$GITHUB_ENV" + echo "OPENCLAW_TEST_MAX_OLD_SPACE_SIZE_MB=6144" >> "$GITHUB_ENV" + + - name: Build under Node 22 + run: pnpm build + + - name: Run tests under Node 22 + run: pnpm test + + - name: Verify npm pack under Node 22 + run: pnpm release:check + + skills-python: + needs: [docs-scope, changed-scope] + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_skills_python == 'true' + runs-on: blacksmith-16vcpu-ubuntu-2404 + steps: + - name: Checkout + uses: actions/checkout@v6 with: submodules: false - name: Setup Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: "3.12" @@ -263,7 +313,7 @@ jobs: runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: submodules: false @@ -282,7 +332,7 @@ jobs: - name: Setup Python id: setup-python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: "3.12" cache: "pip" @@ -292,7 +342,7 @@ jobs: .github/workflows/ci.yml - name: Restore pre-commit cache - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ~/.cache/pre-commit key: pre-commit-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('.pre-commit-config.yaml') }} @@ -302,34 +352,6 @@ jobs: python -m pip install --upgrade pip python -m pip install pre-commit - - name: Detect secrets - run: | - set -euo pipefail - - 
if [ "${{ github.event_name }}" = "push" ]; then - echo "Running full detect-secrets scan on push." - pre-commit run --all-files detect-secrets - exit 0 - fi - - BASE="${{ github.event.pull_request.base.sha }}" - changed_files=() - if git rev-parse --verify "$BASE^{commit}" >/dev/null 2>&1; then - while IFS= read -r path; do - [ -n "$path" ] || continue - [ -f "$path" ] || continue - changed_files+=("$path") - done < <(git diff --name-only --diff-filter=ACMR "$BASE" HEAD) - fi - - if [ "${#changed_files[@]}" -gt 0 ]; then - echo "Running detect-secrets on ${#changed_files[@]} changed file(s)." - pre-commit run detect-secrets --files "${changed_files[@]}" - else - echo "Falling back to full detect-secrets scan." - pre-commit run --all-files detect-secrets - fi - - name: Detect committed private keys run: pre-commit run --all-files detect-private-key @@ -356,7 +378,7 @@ jobs: checks-windows: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_windows == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_windows == 'true' runs-on: blacksmith-32vcpu-windows-2025 timeout-minutes: 45 env: @@ -403,7 +425,7 @@ jobs: command: pnpm test steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: submodules: false @@ -427,16 +449,16 @@ jobs: } - name: Setup Node.js - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 + uses: actions/setup-node@v6 with: - node-version: 22.x + node-version: 24.x check-latest: false - name: Setup pnpm + cache store uses: ./.github/actions/setup-pnpm-store-cache with: pnpm-version: "10.23.0" - cache-key-suffix: "node22" + cache-key-suffix: "node24" # Sticky disk mount currently retries/fails on every shard and adds ~50s # before install while still yielding zero pnpm store reuse. 
# Try exact-key actions/cache restores instead to recover store reuse @@ -489,7 +511,7 @@ jobs: runs-on: macos-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: submodules: false @@ -525,7 +547,7 @@ jobs: swiftformat --lint apps/macos/Sources --config .swiftformat - name: Cache SwiftPM - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ~/Library/Caches/org.swift.swiftpm key: ${{ runner.os }}-swiftpm-${{ hashFiles('apps/macos/Package.resolved') }} @@ -561,7 +583,7 @@ jobs: runs-on: macos-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: submodules: false @@ -718,7 +740,7 @@ jobs: android: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_android == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_android == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 strategy: fail-fast: false @@ -730,31 +752,45 @@ jobs: command: ./gradlew --no-daemon :app:assembleDebug steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: submodules: false - name: Setup Java - uses: actions/setup-java@v4 + uses: actions/setup-java@v5 with: distribution: temurin - # setup-android's sdkmanager currently crashes on JDK 21 in CI. + # Keep sdkmanager on the stable JDK path for Linux CI runners. 
java-version: 17 - - name: Setup Android SDK - uses: android-actions/setup-android@v3 - with: - accept-android-sdk-licenses: false + - name: Setup Android SDK cmdline-tools + run: | + set -euo pipefail + ANDROID_SDK_ROOT="$HOME/.android-sdk" + CMDLINE_TOOLS_VERSION="12266719" + ARCHIVE="commandlinetools-linux-${CMDLINE_TOOLS_VERSION}_latest.zip" + URL="https://dl.google.com/android/repository/${ARCHIVE}" + + mkdir -p "$ANDROID_SDK_ROOT/cmdline-tools" + curl -fsSL "$URL" -o "/tmp/${ARCHIVE}" + rm -rf "$ANDROID_SDK_ROOT/cmdline-tools/latest" + unzip -q "/tmp/${ARCHIVE}" -d "$ANDROID_SDK_ROOT/cmdline-tools" + mv "$ANDROID_SDK_ROOT/cmdline-tools/cmdline-tools" "$ANDROID_SDK_ROOT/cmdline-tools/latest" + + echo "ANDROID_SDK_ROOT=$ANDROID_SDK_ROOT" >> "$GITHUB_ENV" + echo "ANDROID_HOME=$ANDROID_SDK_ROOT" >> "$GITHUB_ENV" + echo "$ANDROID_SDK_ROOT/cmdline-tools/latest/bin" >> "$GITHUB_PATH" + echo "$ANDROID_SDK_ROOT/platform-tools" >> "$GITHUB_PATH" - name: Setup Gradle - uses: gradle/actions/setup-gradle@v4 + uses: gradle/actions/setup-gradle@v5 with: gradle-version: 8.11.1 - name: Install Android SDK packages run: | - yes | sdkmanager --licenses >/dev/null - sdkmanager --install \ + yes | sdkmanager --sdk_root="${ANDROID_SDK_ROOT}" --licenses >/dev/null + sdkmanager --sdk_root="${ANDROID_SDK_ROOT}" --install \ "platform-tools" \ "platforms;android-36" \ "build-tools;36.0.0" diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 9b78a3c6172..79c041ef727 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -7,6 +7,9 @@ concurrency: group: codeql-${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} cancel-in-progress: ${{ github.event_name == 'pull_request' }} +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + permissions: actions: read contents: read @@ -67,7 +70,7 @@ jobs: config_file: "" steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: submodules: false @@ 
-76,24 +79,28 @@ jobs: uses: ./.github/actions/setup-node-env with: install-bun: "false" - use-sticky-disk: "true" + use-sticky-disk: "false" - name: Setup Python if: matrix.needs_python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: "3.12" - name: Setup Java if: matrix.needs_java - uses: actions/setup-java@v4 + uses: actions/setup-java@v5 with: distribution: temurin java-version: "21" - name: Setup Swift build tools if: matrix.needs_swift_tools - run: brew install xcodegen swiftlint swiftformat + run: | + sudo xcode-select -s /Applications/Xcode_26.1.app + xcodebuild -version + brew install xcodegen swiftlint swiftformat + swift --version - name: Initialize CodeQL uses: github/codeql-action/init@v4 diff --git a/.github/workflows/docker-release.yml b/.github/workflows/docker-release.yml index f991b7f8653..f4128cddc88 100644 --- a/.github/workflows/docker-release.yml +++ b/.github/workflows/docker-release.yml @@ -18,6 +18,7 @@ concurrency: cancel-in-progress: false env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" REGISTRY: ghcr.io IMAGE_NAME: ${{ github.repository }} @@ -33,13 +34,13 @@ jobs: slim-digest: ${{ steps.build-slim.outputs.digest }} steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set up Docker Builder - uses: useblacksmith/setup-docker-builder@v1 + uses: docker/setup-buildx-action@v4 - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@v4 with: registry: ${{ env.REGISTRY }} username: ${{ github.repository_owner }} @@ -109,8 +110,6 @@ jobs: labels: ${{ steps.labels.outputs.value }} provenance: false push: true - cache-from: type=gha,scope=docker-release-amd64 - cache-to: type=gha,mode=max,scope=docker-release-amd64 - name: Build and push amd64 slim image id: build-slim @@ -124,8 +123,6 @@ jobs: labels: ${{ steps.labels.outputs.value }} provenance: false push: true - cache-from: type=gha,scope=docker-release-amd64 - cache-to: 
type=gha,mode=max,scope=docker-release-amd64 # Build arm64 images (default + slim share the build stage cache) build-arm64: @@ -138,13 +135,13 @@ jobs: slim-digest: ${{ steps.build-slim.outputs.digest }} steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set up Docker Builder - uses: useblacksmith/setup-docker-builder@v1 + uses: docker/setup-buildx-action@v4 - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@v4 with: registry: ${{ env.REGISTRY }} username: ${{ github.repository_owner }} @@ -214,8 +211,6 @@ jobs: labels: ${{ steps.labels.outputs.value }} provenance: false push: true - cache-from: type=gha,scope=docker-release-arm64 - cache-to: type=gha,mode=max,scope=docker-release-arm64 - name: Build and push arm64 slim image id: build-slim @@ -229,8 +224,6 @@ jobs: labels: ${{ steps.labels.outputs.value }} provenance: false push: true - cache-from: type=gha,scope=docker-release-arm64 - cache-to: type=gha,mode=max,scope=docker-release-arm64 # Create multi-platform manifests create-manifest: @@ -241,10 +234,10 @@ jobs: needs: [build-amd64, build-arm64] steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@v4 with: registry: ${{ env.REGISTRY }} username: ${{ github.repository_owner }} diff --git a/.github/workflows/install-smoke.yml b/.github/workflows/install-smoke.yml index 36f64d2d6ad..f48c794b668 100644 --- a/.github/workflows/install-smoke.yml +++ b/.github/workflows/install-smoke.yml @@ -10,6 +10,9 @@ concurrency: group: install-smoke-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: ${{ github.event_name == 'pull_request' }} +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + jobs: docs-scope: runs-on: blacksmith-16vcpu-ubuntu-2404 @@ -17,7 +20,7 @@ jobs: docs_only: ${{ steps.check.outputs.docs_only 
}} steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 1 fetch-tags: false @@ -38,11 +41,13 @@ jobs: runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout CLI - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set up Docker Builder - uses: useblacksmith/setup-docker-builder@v1 + uses: docker/setup-buildx-action@v4 + # Blacksmith can fall back to the local docker driver, which rejects gha + # cache export/import. Keep smoke builds driver-agnostic. - name: Build root Dockerfile smoke image uses: useblacksmith/build-push-action@v2 with: @@ -52,8 +57,6 @@ jobs: load: true push: false provenance: false - cache-from: type=gha,scope=install-smoke-root-dockerfile - cache-to: type=gha,mode=max,scope=install-smoke-root-dockerfile - name: Run root Dockerfile CLI smoke run: | @@ -73,8 +76,6 @@ jobs: load: true push: false provenance: false - cache-from: type=gha,scope=install-smoke-root-dockerfile-ext - cache-to: type=gha,mode=max,scope=install-smoke-root-dockerfile-ext - name: Smoke test Dockerfile with extension build arg run: | @@ -89,8 +90,6 @@ jobs: load: true push: false provenance: false - cache-from: type=gha,scope=install-smoke-installer-root - cache-to: type=gha,mode=max,scope=install-smoke-installer-root - name: Build installer non-root image if: github.event_name != 'pull_request' @@ -102,8 +101,6 @@ jobs: load: true push: false provenance: false - cache-from: type=gha,scope=install-smoke-installer-nonroot - cache-to: type=gha,mode=max,scope=install-smoke-installer-nonroot - name: Run installer docker tests env: diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 8de54a416f8..3a38e5213c3 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -1,7 +1,7 @@ name: Labeler on: - pull_request_target: + pull_request_target: # zizmor: ignore[dangerous-triggers] maintainer-owned triage workflow; no untrusted checkout or PR code execution types: 
[opened, synchronize, reopened] issues: types: [opened] @@ -16,6 +16,9 @@ on: required: false default: "50" +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + permissions: {} jobs: @@ -25,25 +28,25 @@ jobs: pull-requests: write runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - - uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1 + - uses: actions/create-github-app-token@v2 id: app-token continue-on-error: true with: app-id: "2729701" private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} - - uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1 + - uses: actions/create-github-app-token@v2 id: app-token-fallback if: steps.app-token.outcome == 'failure' with: app-id: "2971289" private-key: ${{ secrets.GH_APP_PRIVATE_KEY_FALLBACK }} - - uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5 + - uses: actions/labeler@v6 with: configuration-path: .github/labeler.yml repo-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }} sync-labels: true - name: Apply PR size label - uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7 + uses: actions/github-script@v8 with: github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }} script: | @@ -132,7 +135,7 @@ jobs: labels: [targetSizeLabel], }); - name: Apply maintainer or trusted-contributor label - uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7 + uses: actions/github-script@v8 with: github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }} script: | @@ -203,7 +206,7 @@ jobs: // }); // } - name: Apply too-many-prs label - uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7 + uses: actions/github-script@v8 with: github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }} script: | @@ -381,20 +384,20 @@ jobs: pull-requests: write runs-on: 
blacksmith-16vcpu-ubuntu-2404 steps: - - uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1 + - uses: actions/create-github-app-token@v2 id: app-token continue-on-error: true with: app-id: "2729701" private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} - - uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1 + - uses: actions/create-github-app-token@v2 id: app-token-fallback if: steps.app-token.outcome == 'failure' with: app-id: "2971289" private-key: ${{ secrets.GH_APP_PRIVATE_KEY_FALLBACK }} - name: Backfill PR labels - uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7 + uses: actions/github-script@v8 with: github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }} script: | @@ -629,20 +632,20 @@ jobs: issues: write runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - - uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1 + - uses: actions/create-github-app-token@v2 id: app-token continue-on-error: true with: app-id: "2729701" private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} - - uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1 + - uses: actions/create-github-app-token@v2 id: app-token-fallback if: steps.app-token.outcome == 'failure' with: app-id: "2971289" private-key: ${{ secrets.GH_APP_PRIVATE_KEY_FALLBACK }} - name: Apply maintainer or trusted-contributor label - uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7 + uses: actions/github-script@v8 with: github-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }} script: | diff --git a/.github/workflows/openclaw-npm-release.yml b/.github/workflows/openclaw-npm-release.yml new file mode 100644 index 00000000000..903bba74706 --- /dev/null +++ b/.github/workflows/openclaw-npm-release.yml @@ -0,0 +1,85 @@ +name: OpenClaw NPM Release + +on: + push: + tags: + - "v*" + +concurrency: + group: 
openclaw-npm-release-${{ github.ref }} + cancel-in-progress: false + +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + NODE_VERSION: "24.x" + PNPM_VERSION: "10.23.0" + +jobs: + publish_openclaw_npm: + # npm trusted publishing + provenance requires a GitHub-hosted runner. + runs-on: ubuntu-latest + permissions: + contents: read + id-token: write + steps: + - name: Checkout + uses: actions/checkout@v6 + with: + fetch-depth: 0 + + - name: Setup Node environment + uses: ./.github/actions/setup-node-env + with: + node-version: ${{ env.NODE_VERSION }} + pnpm-version: ${{ env.PNPM_VERSION }} + install-bun: "false" + use-sticky-disk: "false" + + - name: Validate release tag and package metadata + env: + RELEASE_SHA: ${{ github.sha }} + RELEASE_TAG: ${{ github.ref_name }} + RELEASE_MAIN_REF: origin/main + run: | + set -euo pipefail + # Fetch the full main ref so merge-base ancestry checks keep working + # for older tagged commits that are still contained in main. + git fetch --no-tags origin +refs/heads/main:refs/remotes/origin/main + pnpm release:openclaw:npm:check + + - name: Ensure version is not already published + run: | + set -euo pipefail + PACKAGE_VERSION=$(node -p "require('./package.json').version") + + if npm view "openclaw@${PACKAGE_VERSION}" version >/dev/null 2>&1; then + echo "openclaw@${PACKAGE_VERSION} is already published on npm." 
+ exit 1 + fi + + echo "Publishing openclaw@${PACKAGE_VERSION}" + + - name: Check + run: pnpm check + + - name: Build + run: pnpm build + + - name: Verify release contents + run: pnpm release:check + + - name: Publish + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} + run: | + set -euo pipefail + if [[ -n "${NODE_AUTH_TOKEN:-}" ]]; then + printf '//registry.npmjs.org/:_authToken=%s\n' "$NODE_AUTH_TOKEN" > "$HOME/.npmrc" + fi + PACKAGE_VERSION=$(node -p "require('./package.json').version") + + if [[ "$PACKAGE_VERSION" == *-beta.* ]]; then + npm publish --access public --tag beta --provenance + else + npm publish --access public --provenance + fi diff --git a/.github/workflows/sandbox-common-smoke.yml b/.github/workflows/sandbox-common-smoke.yml index 13688bd0f25..4a839b4d878 100644 --- a/.github/workflows/sandbox-common-smoke.yml +++ b/.github/workflows/sandbox-common-smoke.yml @@ -17,17 +17,20 @@ concurrency: group: sandbox-common-smoke-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: ${{ github.event_name == 'pull_request' }} +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + jobs: sandbox-common-smoke: runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: submodules: false - name: Set up Docker Builder - uses: useblacksmith/setup-docker-builder@v1 + uses: docker/setup-buildx-action@v4 - name: Build minimal sandbox base (USER sandbox) shell: bash diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index e6feef90e6b..95dc406da45 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -5,6 +5,9 @@ on: - cron: "17 3 * * *" workflow_dispatch: +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + permissions: {} jobs: @@ -14,13 +17,13 @@ jobs: pull-requests: write runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - - uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1 + - uses: 
actions/create-github-app-token@v2 id: app-token continue-on-error: true with: app-id: "2729701" private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} - - uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1 + - uses: actions/create-github-app-token@v2 id: app-token-fallback continue-on-error: true with: @@ -29,7 +32,7 @@ jobs: - name: Mark stale issues and pull requests (primary) id: stale-primary continue-on-error: true - uses: actions/stale@v9 + uses: actions/stale@v10 with: repo-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }} days-before-issue-stale: 7 @@ -62,7 +65,7 @@ jobs: - name: Check stale state cache id: stale-state if: always() - uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7 + uses: actions/github-script@v8 with: github-token: ${{ steps.app-token-fallback.outputs.token || steps.app-token.outputs.token }} script: | @@ -85,7 +88,7 @@ jobs: } - name: Mark stale issues and pull requests (fallback) if: (steps.stale-primary.outcome == 'failure' || steps.stale-state.outputs.has_state == 'true') && steps.app-token-fallback.outputs.token != '' - uses: actions/stale@v9 + uses: actions/stale@v10 with: repo-token: ${{ steps.app-token-fallback.outputs.token }} days-before-issue-stale: 7 @@ -121,13 +124,13 @@ jobs: issues: write runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - - uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1 + - uses: actions/create-github-app-token@v2 id: app-token with: app-id: "2729701" private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} - name: Lock closed issues after 48h of no comments - uses: actions/github-script@f28e40c7f34bde8b3046d885e986cb6290c5673b # v7 + uses: actions/github-script@v8 with: github-token: ${{ steps.app-token.outputs.token }} script: | diff --git a/.github/workflows/workflow-sanity.yml b/.github/workflows/workflow-sanity.yml index 19668e697ad..9426f678926 100644 --- 
a/.github/workflows/workflow-sanity.yml +++ b/.github/workflows/workflow-sanity.yml @@ -9,12 +9,15 @@ concurrency: group: workflow-sanity-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: ${{ github.event_name == 'pull_request' }} +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + jobs: no-tabs: runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Fail on tabs in workflow files run: | @@ -45,7 +48,7 @@ jobs: runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Install actionlint shell: bash diff --git a/.gitignore b/.gitignore index 29afb5e1261..0eabcb6843c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ node_modules **/node_modules/ .env +docker-compose.override.yml docker-compose.extra.yml dist pnpm-lock.yaml @@ -81,6 +82,7 @@ apps/ios/*.mobileprovision # Local untracked files .local/ docs/.local/ +tmp/ IDENTITY.md USER.md .tgz @@ -121,3 +123,13 @@ dist/protocol.schema.json # Synthing **/.stfolder/ +.dev-state +docs/superpowers/plans/2026-03-10-collapsed-side-nav.md +docs/superpowers/specs/2026-03-10-collapsed-side-nav-design.md +.gitignore +test/config-form.analyze.telegram.test.ts +ui/src/ui/theme-variants.browser.test.ts +ui/src/ui/__screenshots__ +ui/src/ui/views/__screenshots__ +ui/.vitest-attachments +docs/superpowers diff --git a/.jscpd.json b/.jscpd.json new file mode 100644 index 00000000000..777b025b0c8 --- /dev/null +++ b/.jscpd.json @@ -0,0 +1,16 @@ +{ + "gitignore": true, + "noSymlinks": true, + "ignore": [ + "**/node_modules/**", + "**/dist/**", + "dist/**", + "**/.git/**", + "**/coverage/**", + "**/build/**", + "**/.build/**", + "**/.artifacts/**", + "docs/zh-CN/**", + "**/CHANGELOG.md" + ] +} diff --git a/.npmignore b/.npmignore new file mode 100644 index 00000000000..7cd53fdbc08 --- /dev/null +++ b/.npmignore @@ -0,0 +1 @@ +**/node_modules/ 
diff --git a/.pi/prompts/reviewpr.md b/.pi/prompts/reviewpr.md index 835be806dd5..1b8a20dda90 100644 --- a/.pi/prompts/reviewpr.md +++ b/.pi/prompts/reviewpr.md @@ -9,7 +9,19 @@ Input - If ambiguous: ask. Do (review-only) -Goal: produce a thorough review and a clear recommendation (READY for /landpr vs NEEDS WORK). Do NOT merge, do NOT push, do NOT make changes in the repo as part of this command. +Goal: produce a thorough review and a clear recommendation (READY FOR /landpr vs NEEDS WORK vs INVALID CLAIM). Do NOT merge, do NOT push, do NOT make changes in the repo as part of this command. + +0. Truthfulness + reality gate (required for bug-fix claims) + - Do not trust the issue text or PR summary by default; verify in code and evidence. + - If the PR claims to fix a bug linked to an issue, confirm the bug exists now (repro steps, logs, failing test, or clear code-path proof). + - Prove root cause with exact location (`path/file.ts:line` + explanation of why behavior is wrong). + - Verify fix targets the same code path as the root cause. + - Require a regression test when feasible (fails before fix, passes after fix). If not feasible, require explicit justification + manual verification evidence. + - Hallucination/BS red flags (treat as BLOCKER until disproven): + - claimed behavior not present in repo, + - issue/PR says "fixes #..." but changed files do not touch implicated path, + - only docs/comments changed for a runtime bug claim, + - vague AI-generated rationale without concrete evidence. 1. Identify PR meta + context @@ -56,6 +68,7 @@ Goal: produce a thorough review and a clear recommendation (READY for /landpr vs - Any deprecations, docs, types, or lint rules we should adjust? 8. Key questions to answer explicitly + - Is the core claim substantiated by evidence, or is it likely invalid/hallucinated? - Can we fix everything ourselves in a follow-up, or does the contributor need to update this PR? - Any blocking concerns (must-fix before merge)? 
- Is this PR ready to land, or does it need work? @@ -65,18 +78,32 @@ Goal: produce a thorough review and a clear recommendation (READY for /landpr vs A) TL;DR recommendation -- One of: READY FOR /landpr | NEEDS WORK | NEEDS DISCUSSION +- One of: READY FOR /landpr | NEEDS WORK | INVALID CLAIM (issue/bug not substantiated) | NEEDS DISCUSSION - 1–3 sentence rationale. -B) What changed +B) Claim verification matrix (required) + +- Fill this table: + + | Field | Evidence | + | ----------------------------------------------- | -------- | + | Claimed problem | ... | + | Evidence observed (repro/log/test/code) | ... | + | Root cause location (`path:line`) | ... | + | Why this fix addresses that root cause | ... | + | Regression coverage (test name or manual proof) | ... | + +- If any row is missing/weak, default to `NEEDS WORK` or `INVALID CLAIM`. + +C) What changed - Brief bullet summary of the diff/behavioral changes. -C) What's good +D) What's good - Bullets: correctness, simplicity, tests, docs, ergonomics, etc. -D) Concerns / questions (actionable) +E) Concerns / questions (actionable) - Numbered list. - Mark each item as: @@ -84,17 +111,19 @@ D) Concerns / questions (actionable) - IMPORTANT (should fix before merge) - NIT (optional) - For each: point to the file/area and propose a concrete fix or alternative. +- If evidence for the core bug claim is missing, add a `BLOCKER` explicitly. -E) Tests +F) Tests - What exists. - What's missing (specific scenarios). +- State clearly whether there is a regression test for the claimed bug. -F) Follow-ups (optional) +G) Follow-ups (optional) - Non-blocking refactors/tickets to open later. -G) Suggested PR comment (optional) +H) Suggested PR comment (optional) - Offer: "Want me to draft a PR comment to the author?" - If yes, provide a ready-to-paste comment summarizing the above, with clear asks. 
diff --git a/.secrets.baseline b/.secrets.baseline index b1f909e6ca4..056b2dd8778 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -205,7 +205,7 @@ "filename": "apps/macos/Sources/OpenClawProtocol/GatewayModels.swift", "hashed_secret": "7990585255d25249fb1e6eac3d2bd6c37429b2cd", "is_verified": false, - "line_number": 1763 + "line_number": 1859 } ], "apps/macos/Tests/OpenClawIPCTests/AnthropicAuthResolverTests.swift": [ @@ -266,7 +266,7 @@ "filename": "apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift", "hashed_secret": "7990585255d25249fb1e6eac3d2bd6c37429b2cd", "is_verified": false, - "line_number": 1763 + "line_number": 1859 } ], "docs/.i18n/zh-CN.tm.jsonl": [ @@ -11659,7 +11659,7 @@ "filename": "src/agents/tools/web-search.ts", "hashed_secret": "dfba7aade0868074c2861c98e2a9a92f3178a51b", "is_verified": false, - "line_number": 292 + "line_number": 291 } ], "src/agents/tools/web-tools.enabled-defaults.e2e.test.ts": [ @@ -12991,7 +12991,7 @@ "filename": "ui/src/i18n/locales/en.ts", "hashed_secret": "de0ff6b974d6910aca8d6b830e1b761f076d8fe6", "is_verified": false, - "line_number": 61 + "line_number": 74 } ], "ui/src/i18n/locales/pt-BR.ts": [ @@ -13000,7 +13000,7 @@ "filename": "ui/src/i18n/locales/pt-BR.ts", "hashed_secret": "ef7b6f95faca2d7d3a5aa5a6434c89530c6dd243", "is_verified": false, - "line_number": 61 + "line_number": 73 } ], "vendor/a2ui/README.md": [ @@ -13013,5 +13013,5 @@ } ] }, - "generated_at": "2026-03-09T08:37:13Z" + "generated_at": "2026-03-10T03:11:06Z" } diff --git a/.swiftformat b/.swiftformat index ab608a90178..a5f551b9e35 100644 --- a/.swiftformat +++ b/.swiftformat @@ -48,4 +48,4 @@ --allman false # Exclusions ---exclude .build,.swiftpm,DerivedData,node_modules,dist,coverage,xcuserdata,Peekaboo,Swabble,apps/android,apps/ios,apps/shared,apps/macos/Sources/MoltbotProtocol,apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift +--exclude 
.build,.swiftpm,DerivedData,node_modules,dist,coverage,xcuserdata,Peekaboo,Swabble,apps/android,apps/ios,apps/shared,apps/macos/Sources/OpenClawProtocol,apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift diff --git a/.swiftlint.yml b/.swiftlint.yml index e4f925fdf20..567b1a1683a 100644 --- a/.swiftlint.yml +++ b/.swiftlint.yml @@ -18,7 +18,7 @@ excluded: - coverage - "*.playground" # Generated (protocol-gen-swift.ts) - - apps/macos/Sources/MoltbotProtocol/GatewayModels.swift + - apps/macos/Sources/OpenClawProtocol/GatewayModels.swift # Generated (generate-host-env-security-policy-swift.mjs) - apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift diff --git a/AGENTS.md b/AGENTS.md index b70210cf8e3..0b1e17c8b3e 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -10,6 +10,36 @@ - GitHub searching footgun: don't limit yourself to the first 500 issues or PRs when wanting to search all. Unless you're supposed to look at the most recent, keep going until you've reached the last page in the search - Security advisory analysis: before triage/severity decisions, read `SECURITY.md` to align with OpenClaw's trust model and design boundaries. +## Auto-close labels (issues and PRs) + +- If an issue/PR matches one of the reasons below, apply the label and let `.github/workflows/auto-response.yml` handle comment/close/lock. +- Do not manually close + manually comment for these reasons. +- Why: keeps wording consistent, preserves automation behavior (`state_reason`, locking), and keeps triage/reporting searchable by label. +- `r:*` labels can be used on both issues and PRs. + +- `r: skill`: close with guidance to publish skills on Clawhub. +- `r: support`: close with redirect to Discord support + stuck FAQ. +- `r: no-ci-pr`: close test-fix-only PRs for failing `main` CI and post the standard explanation. +- `r: too-many-prs`: close when author exceeds active PR limit. +- `r: testflight`: close requests asking for TestFlight access/builds. 
OpenClaw does not provide TestFlight distribution yet, so use the standard response (“Not available, build from source.”) instead of ad-hoc replies. +- `r: third-party-extension`: close with guidance to ship as third-party plugin. +- `r: moltbook`: close + lock as off-topic (not affiliated). +- `r: spam`: close + lock as spam (`lock_reason: spam`). +- `invalid`: close invalid items (issues are closed as `not_planned`; PRs are closed). +- `dirty`: close PRs with too many unrelated/unexpected changes (PR-only label). + +## PR truthfulness and bug-fix validation + +- Never merge a bug-fix PR based only on issue text, PR text, or AI rationale. +- Before `/landpr`, run `/reviewpr` and require explicit evidence for bug-fix claims. +- Minimum merge gate for bug-fix PRs: + 1. symptom evidence (repro/log/failing test), + 2. verified root cause in code with file/line, + 3. fix touches the implicated code path, + 4. regression test (fail before/pass after) when feasible; if not feasible, include manual verification proof and why no test was added. +- If claim is unsubstantiated or likely hallucinated/BS: do not merge. Request evidence/changes, or close with `invalid` when appropriate. +- If linked issue appears wrong/outdated, correct triage first; do not merge speculative fixes. + ## Project Structure & Module Organization - Source code: `src/` (CLI wiring in `src/cli`, commands in `src/commands`, web provider in `src/provider-web.ts`, infra in `src/infra`, media pipeline in `src/media`). @@ -88,6 +118,7 @@ - Keep files concise; extract helpers instead of “V2” copies. Use existing patterns for CLI options and dependency injection via `createDefaultDeps`. - Aim to keep files under ~700 LOC; guideline only (not a hard guardrail). Split/refactor when it improves clarity or testability. - Naming: use **OpenClaw** for product/app/docs headings; use `openclaw` for CLI command, package/binary, paths, and config keys. 
+- Written English: use American spelling and grammar in code, comments, docs, and UI strings (e.g. "color" not "colour", "behavior" not "behaviour", "analyze" not "analyse"). ## Release Channels (Naming) @@ -101,6 +132,7 @@ - Framework: Vitest with V8 coverage thresholds (70% lines/branches/functions/statements). - Naming: match source names with `*.test.ts`; e2e in `*.e2e.test.ts`. - Run `pnpm test` (or `pnpm test:coverage`) before pushing when you touch logic. +- For targeted/local debugging, keep using the wrapper: `pnpm test -- [vitest args...]` (for example `pnpm test -- src/commands/onboard-search.test.ts -t "shows registered plugin providers"`); do not default to raw `pnpm vitest run ...` because it bypasses wrapper config/profile/pool routing. - Do not set test workers above 16; tried already. - If local Vitest runs cause memory pressure (common on non-Mac-Studio hosts), use `OPENCLAW_TEST_PROFILE=low OPENCLAW_TEST_SERIAL_GATEWAY=1 pnpm test` for land/gate runs. - Live tests (real keys): `CLAWDBOT_LIVE_TEST=1 pnpm test:live` (OpenClaw-only) or `LIVE=1 pnpm test:live` (includes provider live tests). Docker: `pnpm test:docker:live-models`, `pnpm test:docker:live-gateway`. Onboarding Docker E2E: `pnpm test:docker:onboard`. @@ -170,6 +202,44 @@ ## Agent-Specific Notes - Vocabulary: "makeup" = "mac app". +- Parallels macOS retests: use the snapshot most closely named like `macOS 26.3.1 fresh` when the user asks for a clean/fresh macOS rerun; avoid older Tahoe snapshots unless explicitly requested. +- Parallels beta smoke: use `--target-package-spec openclaw@` for the beta artifact, and pin the stable side with both `--install-version ` and `--latest-version ` for upgrade runs. npm dist-tags can move mid-run. +- Parallels beta smoke, Windows nuance: old stable `2026.3.12` still prints the Unicode Windows onboarding banner, so mojibake during the stable precheck log is expected there. Judge the beta package by the post-upgrade lane. 
+- Parallels macOS smoke playbook: + - `prlctl exec` is fine for deterministic repo commands, but it can misrepresent interactive shell behavior (`PATH`, `HOME`, `curl | bash`, shebang resolution). For installer parity or shell-sensitive repros, prefer the guest Terminal or `prlctl enter`. + - Fresh Tahoe snapshot current reality: `brew` exists, `node` may not be on `PATH` in noninteractive guest exec. Use absolute `/opt/homebrew/bin/node` for repo/CLI runs when needed. + - Preferred automation entrypoint: `pnpm test:parallels:macos`. It restores the snapshot most closely matching `macOS 26.3.1 fresh`, serves the current `main` tarball from the host, then runs fresh-install and latest-release-to-main smoke lanes. + - Gateway verification in smoke runs should use `openclaw gateway status --deep --require-rpc`, not plain `--deep`, so probe failures go non-zero. + - Latest-release pre-upgrade diagnostics still need compatibility fallback: stable `2026.3.12` does not know `--require-rpc`, so precheck status dumps should fall back to plain `gateway status --deep` until the guest is upgraded. + - Harness output: pass `--json` for machine-readable summary; per-phase logs land under `/tmp/openclaw-parallels-smoke.*`. + - All-OS parallel runs should share the host `dist` build via `/tmp/openclaw-parallels-build.lock` instead of rebuilding three times. + - Current expected outcome on latest stable pre-upgrade: `precheck=latest-ref-fail` is normal on `2026.3.12`; treat it as a baseline signal, not a regression, unless the post-upgrade `main` lane also fails. + - Fresh host-served tgz install: restore fresh snapshot, install tgz as guest root with `HOME=/var/root`, then run onboarding as the desktop user via `prlctl exec --current-user`. 
+ - For `openclaw onboard --non-interactive --secret-input-mode ref --install-daemon`, expect env-backed auth-profile refs (for example `OPENAI_API_KEY`) to be copied into the service env at install time; this path was fixed and should stay green. + - Don’t run local + gateway agent turns in parallel on the same fresh workspace/session; they can collide on the session lock. Run sequentially. + - Root-installed tarball smoke on Tahoe can still log plugin blocks for world-writable `extensions/*` under `/opt/homebrew/lib/node_modules/openclaw`; treat that as separate from onboarding/gateway health unless the task is plugin loading. +- Parallels Windows smoke playbook: + - Preferred automation entrypoint: `pnpm test:parallels:windows`. It restores the snapshot most closely matching `pre-openclaw-native-e2e-2026-03-12`, serves the current `main` tarball from the host, then runs fresh-install and latest-release-to-main smoke lanes. + - Gateway verification in smoke runs should use `openclaw gateway status --deep --require-rpc`, not plain `--deep`, so probe failures go non-zero. + - Latest-release pre-upgrade diagnostics still need compatibility fallback: stable `2026.3.12` does not know `--require-rpc`, so precheck status dumps should fall back to plain `gateway status --deep` until the guest is upgraded. + - Always use `prlctl exec --current-user` for Windows guest runs; plain `prlctl exec` lands in `NT AUTHORITY\SYSTEM` and does not match the real desktop-user install path. + - Prefer explicit `npm.cmd` / `openclaw.cmd`. Bare `npm` / `openclaw` in PowerShell can hit the `.ps1` shim and fail under restrictive execution policy. + - Use PowerShell only as the transport (`powershell.exe -NoProfile -ExecutionPolicy Bypass`) and call the `.cmd` shims explicitly from inside it. + - Harness output: pass `--json` for machine-readable summary; per-phase logs land under `/tmp/openclaw-parallels-windows.*`. 
+ - Current expected outcome on latest stable pre-upgrade: `precheck=latest-ref-fail` is normal on `2026.3.12`; treat it as a baseline signal, not a regression, unless the post-upgrade `main` lane also fails. + - Keep Windows onboarding/status text ASCII-clean in logs. Fancy punctuation in banners shows up as mojibake through the current guest PowerShell capture path. +- Parallels Linux smoke playbook: + - Preferred automation entrypoint: `pnpm test:parallels:linux`. It restores the snapshot most closely matching `fresh` on `Ubuntu 24.04.3 ARM64`, serves the current `main` tarball from the host, then runs fresh-install and latest-release-to-main smoke lanes. + - Use plain `prlctl exec` on this snapshot. `--current-user` is not the right transport there. + - Fresh snapshot reality: `curl` is missing and `apt-get update` can fail on clock skew. Bootstrap with `apt-get -o Acquire::Check-Date=false update` and install `curl ca-certificates` before testing installer paths. + - Fresh `main` tgz smoke on Linux still needs the latest-release installer first, because this snapshot has no Node/npm before bootstrap. The harness does stable bootstrap first, then overlays current `main`. + - This snapshot does not have a usable `systemd --user` session. Treat managed daemon install as unsupported here; use `--skip-health`, then verify with direct `openclaw gateway run --bind loopback --port 18789 --force`. + - Env-backed auth refs are still fine, but any direct shell launch (`openclaw gateway run`, `openclaw agent --local`, Linux `gateway status --deep` against that direct run) must inherit the referenced env vars in the same shell. + - `prlctl exec` reaps detached Linux child processes on this snapshot, so a background `openclaw gateway run` launched from automation is not a trustworthy smoke path. The harness verifies installer + `agent --local`; do direct gateway checks only from an interactive guest shell when needed. 
+ - When you do run Linux gateway checks manually from an interactive guest shell, use `openclaw gateway status --deep --require-rpc` so an RPC miss is a hard failure. + - Prefer direct argv guest commands for fetch/install steps (`curl`, `npm install -g`, `openclaw ...`) over nested `bash -lc` quoting; Linux guest quoting through Parallels was the flaky part. + - Harness output: pass `--json` for machine-readable summary; per-phase logs land under `/tmp/openclaw-parallels-linux.*`. + - Current expected outcome on Linux smoke: fresh + upgrade should pass installer and `agent --local`; gateway remains `skipped-no-detached-linux-gateway` on this snapshot and should not be treated as a regression by itself. - Never edit `node_modules` (global/Homebrew/npm/git installs too). Updates overwrite. Skill notes go in `tools.md` or `AGENTS.md`. - When adding a new `AGENTS.md` anywhere in the repo, also add a `CLAUDE.md` symlink pointing to it (example: `ln -s AGENTS.md CLAUDE.md`). - Signal: "update fly" => `fly ssh console -a flawd-bot -C "bash -lc 'cd /data/clawd/openclaw && git pull --rebase origin main'"` then `fly machines restart e825232f34d058 -a flawd-bot`. diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e51ea3a0a1..25bad54390e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,18 +6,308 @@ Docs: https://docs.openclaw.ai ### Changes -### Breaking +- Placeholder: replace with the first 2026.3.14 user-facing change. + +## 2026.3.13 + +### Changes + +- Android/chat settings: redesign the chat settings sheet with grouped device and media sections, refresh the Connect and Voice tabs, and tighten the chat composer/session header for a denser mobile layout. (#44894) Thanks @obviyus. +- iOS/onboarding: add a first-run welcome pager before gateway setup, stop auto-opening the QR scanner, and show `/pair qr` instructions on the connect step. (#45054) Thanks @ngutman. 
+- Browser/existing-session: add an official Chrome DevTools MCP attach mode for signed-in live Chrome sessions, with docs for `chrome://inspect/#remote-debugging` enablement and direct backlinks to Chrome’s own setup guides. +- Browser/agents: add built-in `profile="user"` for the logged-in host browser and `profile="chrome-relay"` for the extension relay, so agent browser calls can prefer the real signed-in browser without the extra `browserSession` selector. +- Browser/act automation: add batched actions, selector targeting, and delayed clicks for browser act requests with normalized batch dispatch. Thanks @vincentkoc. +- Docker/timezone override: add `OPENCLAW_TZ` so `docker-setup.sh` can pin gateway and CLI containers to a chosen IANA timezone instead of inheriting the daemon default. (#34119) Thanks @Lanfei. +- Dependencies/pi: bump `@mariozechner/pi-agent-core`, `@mariozechner/pi-ai`, `@mariozechner/pi-coding-agent`, and `@mariozechner/pi-tui` to `0.58.0`. +- Cron/sessions: add `sessionTarget: "current"` and `session:` support so cron jobs can bind to the creating session or a persistent named session instead of only `main` or `isolated`. Thanks @kkhomej33-netizen and @ImLukeF. ### Fixes -- macOS/LaunchAgent install: tighten LaunchAgent directory and plist permissions during install so launchd bootstrap does not fail when the target home path or generated plist inherited group/world-writable modes. -- Gateway/Control UI: keep dashboard auth tokens in session-scoped browser storage so same-tab refreshes preserve remote token auth without restoring long-lived localStorage token persistence, while scoping tokens to the selected gateway URL and fragment-only bootstrap flow. (#40892) thanks @velvet-shark. +- Dashboard/chat UI: stop reloading full chat history on every live tool result in dashboard v2 so tool-heavy runs no longer trigger UI freeze/re-render storms while the final event still refreshes persisted history. (#45541) Thanks @BunsDev. 
+- Gateway/client requests: reject unanswered gateway RPC calls after a bounded timeout and clear their pending state, so stalled connections no longer leak hanging `GatewayClient.request()` promises indefinitely. +- Build/plugin-sdk bundling: bundle plugin-sdk subpath entries in one shared build pass so published packages stop duplicating shared chunks and avoid the recent plugin-sdk memory blow-up. (#45426) Thanks @TarasShyn. +- Ollama/reasoning visibility: stop promoting native `thinking` and `reasoning` fields into final assistant text so local reasoning models no longer leak internal thoughts in normal replies. (#45330) Thanks @xi7ang. +- Android/onboarding QR scan: switch setup QR scanning to Google Code Scanner so onboarding uses a more reliable scanner instead of the legacy embedded ZXing flow. (#45021) Thanks @obviyus. +- Browser/existing-session: harden driver validation and session lifecycle so transport errors trigger reconnects while tool-level errors preserve the session, and extract shared ARIA role sets to deduplicate Playwright and Chrome MCP snapshot paths. (#45682) Thanks @odysseus0. +- Browser/existing-session: accept text-only `list_pages` and `new_page` responses from Chrome DevTools MCP so live-session tab discovery and new-tab open flows keep working when the server omits structured page metadata. +- Control UI/insecure auth: preserve explicit shared token and password auth on plain-HTTP Control UI connects so LAN and reverse-proxy sessions no longer drop shared auth before the first WebSocket handshake. (#45088) Thanks @velvet-shark. +- Gateway/session reset: preserve `lastAccountId` and `lastThreadId` across gateway session resets so replies keep routing back to the same account and thread after `/reset`. (#44773) Thanks @Lanfei. 
+- macOS/onboarding: avoid self-restarting freshly bootstrapped launchd gateways and give new daemon installs longer to become healthy, so `openclaw onboard --install-daemon` no longer false-fails on slower Macs and fresh VM snapshots. +- Gateway/status: add `openclaw gateway status --require-rpc` and clearer Linux non-interactive daemon-install failure reporting so automation can fail hard on probe misses instead of treating a printed RPC error as green. +- macOS/exec approvals: respect per-agent exec approval settings in the gateway prompter, including allowlist fallback when the native prompt cannot be shown, so gateway-triggered `system.run` requests follow configured policy instead of always prompting or denying unexpectedly. (#13707) Thanks @sliekens. +- Telegram/media downloads: thread the same direct or proxy transport policy into SSRF-guarded file fetches so inbound attachments keep working when Telegram falls back between env-proxy and direct networking. (#44639) Thanks @obviyus. +- Telegram/inbound media IPv4 fallback: retry SSRF-guarded Telegram file downloads once with the same IPv4 fallback policy as Bot API calls so fresh installs on IPv6-broken hosts no longer fail to download inbound images. +- Windows/gateway install: bound `schtasks` calls and fall back to the Startup-folder login item when task creation hangs, so native `openclaw gateway install` fails fast instead of wedging forever on broken Scheduled Task setups. +- Windows/gateway stop: resolve Startup-folder fallback listeners from the installed `gateway.cmd` port, so `openclaw gateway stop` now actually kills fallback-launched gateway processes before restart. +- Windows/gateway status: reuse the installed service command environment when reading runtime status, so startup-fallback gateways keep reporting the configured port and running state in `gateway status --json` instead of falling back to `gateway port unknown`. 
+- Windows/gateway auth: stop attaching device identity on local loopback shared-token and password gateway calls, so native Windows agent replies no longer log stale `device signature expired` fallback noise before succeeding. +- Discord/gateway startup: treat plain-text and transient `/gateway/bot` metadata fetch failures as transient startup errors so Discord gateway boot no longer crashes on unhandled rejections. (#44397) Thanks @jalehman. +- Slack/probe: keep `auth.test()` bot and team metadata mapping stable while simplifying the probe result path. (#44775) Thanks @Cafexss. +- Dashboard/chat UI: render oversized plain-text replies as normal paragraphs instead of capped gray code blocks, so long desktop chat responses stay readable without tab-switching refreshes. +- Dashboard/chat UI: restore the `chat-new-messages` class on the New messages scroll pill so the button uses its existing compact styling instead of rendering as a full-screen SVG overlay. (#44856) Thanks @Astro-Han. +- Gateway/Control UI: restore the operator-only device-auth bypass and classify browser connect failures so origin and device-identity problems no longer show up as auth errors in the Control UI and web chat. (#45512) thanks @sallyom. +- macOS/voice wake: stop crashing wake-word command extraction when speech segment ranges come from a different transcript instance. +- Discord/allowlists: honor raw `guild_id` when hydrated guild objects are missing so allowlisted channels and threads like `#maintainers` no longer get false-dropped before channel allowlist checks. +- macOS/runtime locator: require Node >=22.16.0 during macOS runtime discovery so the app no longer accepts Node versions that the main runtime guard rejects later. Thanks @sumleo. 
+- Agents/custom providers: preserve blank API keys for loopback OpenAI-compatible custom providers by clearing the synthetic Authorization header at runtime, while keeping explicit apiKey and oauth/token config from silently downgrading into fake bearer auth. (#45631) Thanks @xinhuagu. +- Models/google-vertex Gemini flash-lite normalization: apply existing bare-ID preview normalization to `google-vertex` model refs and provider configs so `google-vertex/gemini-3.1-flash-lite` resolves as `gemini-3.1-flash-lite-preview`. (#42435) thanks @scoootscooob. +- iMessage/remote attachments: reject unsafe remote attachment paths before spawning SCP, so sender-controlled filenames can no longer inject shell metacharacters into remote media staging. Thanks @lintsinghua. +- Telegram/webhook auth: validate the Telegram webhook secret before reading or parsing request bodies, so unauthenticated requests are rejected immediately instead of consuming up to 1 MB first. Thanks @space08. +- Security/device pairing: make bootstrap setup codes single-use so pending device pairing requests cannot be silently replayed and widened to admin before approval. Thanks @tdjackey. +- Security/external content: strip zero-width and soft-hyphen marker-splitting characters during boundary sanitization so spoofed `EXTERNAL_UNTRUSTED_CONTENT` markers fall back to the existing hardening path instead of bypassing marker normalization. +- Security/exec approvals: unwrap more `pnpm` runtime forms during approval binding, including `pnpm --reporter ... exec` and direct `pnpm node` file runs, with matching regression coverage and docs updates. +- Security/exec approvals: fail closed for Perl `-M` and `-I` approval flows so preload and load-path module resolution stays outside approval-backed runtime execution unless the operator uses a broader explicit trust path. 
+- Security/exec approvals: recognize PowerShell `-File` and `-f` wrapper forms during inline-command extraction so approval and command-analysis paths treat file-based PowerShell launches like the existing `-Command` variants. +- Security/exec approvals: unwrap `env` dispatch wrappers inside shell-segment allowlist resolution on macOS so `env FOO=bar /path/to/bin` resolves against the effective executable instead of the wrapper token. +- Security/exec approvals: treat backslash-newline as shell line continuation during macOS shell-chain parsing so line-continued `$(` substitutions fail closed instead of slipping past command-substitution checks. +- Security/exec approvals: bind macOS skill auto-allow trust to both executable name and resolved path so same-basename binaries no longer inherit trust from unrelated skill bins. +- Cron/isolated sessions: route nested cron-triggered embedded runner work onto the nested lane so isolated cron jobs no longer deadlock when compaction or other queued inner work runs. Thanks @vincentkoc. +- Agents/OpenAI-compatible compat overrides: respect explicit user `models[].compat` opt-ins for non-native `openai-completions` endpoints so usage-in-streaming capability overrides no longer get forced off when the endpoint actually supports them. (#44432) Thanks @cheapestinference. +- Agents/Azure OpenAI startup prompts: rephrase the built-in `/new`, `/reset`, and post-compaction startup instruction so Azure OpenAI deployments no longer hit HTTP 400 false positives from the content filter. (#43403) Thanks @xingsy97. +- Agents/memory bootstrap: load only one root memory file, preferring `MEMORY.md` and using `memory.md` as a fallback, so case-insensitive Docker mounts no longer inject duplicate memory context.
(#26054) Thanks @Lanfei. +- Agents/compaction: compare post-compaction token sanity checks against full-session pre-compaction totals and skip the check when token estimation fails, so sessions with large bootstrap context keep real token counts instead of falling back to unknown. (#28347) thanks @efe-arv. +- Agents/compaction: preserve safeguard compaction summary language continuity via default and configurable custom instructions so persona drift is reduced after auto-compaction. (#10456) Thanks @keepitmello. +- Agents/tool warnings: distinguish gated core tools like `apply_patch` from plugin-only unknown entries in `tools.profile` warnings, so unavailable core tools now report current runtime/provider/model/config gating instead of suggesting a missing plugin. +- Config/validation: accept documented `agents.list[].params` per-agent overrides in strict config validation so `openclaw config validate` no longer rejects runtime-supported `cacheRetention`, `temperature`, and `maxTokens` settings. (#41171) Thanks @atian8179. +- Config/web fetch: restore runtime validation for documented `tools.web.fetch.readability` and `tools.web.fetch.firecrawl` settings so valid web fetch configs no longer fail with unrecognized-key errors. (#42583) Thanks @stim64045-spec. +- Signal/config validation: add `channels.signal.groups` schema support so per-group `requireMention`, `tools`, and `toolsBySender` overrides no longer get rejected during config validation. (#27199) Thanks @unisone. +- Config/discovery: accept `discovery.wideArea.domain` in strict config validation so unicast DNS-SD gateway configs no longer fail with an unrecognized-key error. (#35615) Thanks @ingyukoh. +- Telegram/media errors: redact Telegram file URLs before building media fetch errors so failed inbound downloads do not leak bot tokens into logs. Thanks @space08. 
+- Agents/failover: normalize abort-wrapped `429 RESOURCE_EXHAUSTED` provider failures before abort short-circuiting so wrapped Google/Vertex rate limits continue across configured fallback models, including the embedded runner prompt-error path. (#39820) Thanks @lupuletic. +- Mattermost/thread routing: non-inbound reply paths (TUI/WebUI turns, tool-call callbacks, subagent responses) now correctly route to the originating Mattermost thread when `replyToMode: "all"` is active; also prevents stale `origin.threadId` metadata from resurrecting cleared thread routes. (#44283) Thanks @teconomix. + +## 2026.3.12 + +### Changes + +- Control UI/dashboard-v2: refresh the gateway dashboard with modular overview, chat, config, agent, and session views, plus a command palette, mobile bottom tabs, and richer chat tools like slash commands, search, export, and pinned messages. (#41503) Thanks @BunsDev. +- OpenAI/GPT-5.4 fast mode: add configurable session-level fast toggles across `/fast`, TUI, Control UI, and ACP, with per-model config defaults and OpenAI/Codex request shaping. +- Anthropic/Claude fast mode: map the shared `/fast` toggle and `params.fastMode` to direct Anthropic API-key `service_tier` requests, with live verification for both Anthropic and OpenAI fast-mode tiers. +- Models/plugins: move Ollama, vLLM, and SGLang onto the provider-plugin architecture, with provider-owned onboarding, discovery, model-picker setup, and post-selection hooks so core provider wiring is more modular. +- Docs/Kubernetes: add a starter K8s install path with raw manifests, Kind setup, and deployment docs. Thanks @sallyom, @dzianisv, and @egkristi. +- Agents/subagents: add `sessions_yield` so orchestrators can end the current turn immediately, skip queued tool work, and carry a hidden follow-up payload into the next session turn.
(#36537) Thanks @jriff. +- Slack/agent replies: support `channelData.slack.blocks` in the shared reply delivery path so agents can send Block Kit messages through standard Slack outbound delivery. (#44592) Thanks @vincentkoc. +- Slack/interactive replies: add opt-in Slack button and select reply directives behind `channels.slack.capabilities.interactiveReplies`, disabled by default unless explicitly enabled. (#44607) Thanks @vincentkoc. + +### Fixes + +- Security/device pairing: switch `/pair` and `openclaw qr` setup codes to short-lived bootstrap tokens so the next release no longer embeds shared gateway credentials in chat or QR pairing payloads. Thanks @lintsinghua. +- Security/plugins: disable implicit workspace plugin auto-load so cloned repositories cannot execute workspace plugin code without an explicit trust decision. (`GHSA-99qw-6mr3-36qr`)(#44174) Thanks @lintsinghua and @vincentkoc. - Models/Kimi Coding: send `anthropic-messages` tools in native Anthropic format again so `kimi-coding` stops degrading tool calls into XML/plain-text pseudo invocations instead of real `tool_use` blocks. (#38669, #39907, #40552) Thanks @opriz. -- Context engine/tests: add bundled-registry regression coverage for cross-chunk resolution, plugin-sdk re-exports, and concurrent chunk registration. (#40460) thanks @dsantoreis. -- Agents/embedded runner: bound compaction retry waiting and drain embedded runs during SIGUSR1 restart so session lanes recover instead of staying blocked behind compaction. (#40324) thanks @cgdusek. +- TUI/chat log: reuse the active assistant message component for the same streaming run so `openclaw tui` no longer renders duplicate assistant replies. (#35364) Thanks @lisitan. +- Telegram/model picker: make inline model button selections persist the chosen session model correctly, clear overrides when selecting the configured default, and include effective fallback models in `/models` button validation. (#40105) Thanks @avirweb.
+- Cron/proactive delivery: keep isolated direct cron sends out of the write-ahead resend queue so transient-send retries do not replay duplicate proactive messages after restart. (#40646) Thanks @openperf and @vincentkoc. +- Models/Kimi Coding: send the built-in `User-Agent: claude-code/0.1.0` header by default for `kimi-coding` while still allowing explicit provider headers to override it, so Kimi Code subscription auth can work without a local header-injection proxy. (#30099) Thanks @Amineelfarssi and @vincentkoc. +- Models/OpenAI Codex Spark: keep `gpt-5.3-codex-spark` working on the `openai-codex/*` path via resolver fallbacks and clearer Codex-only handling, while continuing to suppress the stale direct `openai/*` Spark row that OpenAI rejects live. +- Ollama/Kimi Cloud: apply the Moonshot Kimi payload compatibility wrapper to Ollama-hosted Kimi models like `kimi-k2.5:cloud`, so tool routing no longer breaks when thinking is enabled. (#41519) Thanks @vincentkoc. +- Moonshot CN API: respect explicit `baseUrl` (api.moonshot.cn) in implicit provider resolution so platform.moonshot.cn API keys authenticate correctly instead of returning HTTP 401. (#33637) Thanks @chengzhichao-xydt. +- Kimi Coding/provider config: respect explicit `models.providers["kimi-coding"].baseUrl` when resolving the implicit provider so custom Kimi Coding endpoints no longer get overwritten by the built-in default. (#36353) Thanks @2233admin. +- Gateway/main-session routing: keep TUI and other `mode:UI` main-session sends on the internal surface when `deliver` is enabled, so replies no longer inherit the session's persisted Telegram/WhatsApp route. (#43918) Thanks @obviyus. +- BlueBubbles/self-chat echo dedupe: drop reflected duplicate webhook copies only when a matching `fromMe` event was just seen for the same chat, body, and timestamp, preventing self-chat loops without broad webhook suppression. Related to #32166. (#38442) Thanks @vincentkoc. 
+- iMessage/self-chat echo dedupe: drop reflected duplicate copies only when a matching `is_from_me` event was just seen for the same chat, text, and `created_at`, preventing self-chat loops without broad text-only suppression. Related to #32166. (#38440) Thanks @vincentkoc. +- Subagents/completion announce retries: raise the default announce timeout to 90 seconds and stop retrying gateway-timeout failures for externally delivered completion announces, preventing duplicate user-facing completion messages after slow gateway responses. Fixes #41235. Thanks @vasujain00 and @vincentkoc. +- Mattermost/block streaming: fix duplicate message delivery (one threaded, one top-level) when block streaming is active by excluding `replyToId` from the block reply dedup key and adding an explicit `threading` dock to the Mattermost plugin. (#41362) Thanks @mathiasnagler and @vincentkoc. +- Mattermost/reply media delivery: pass agent-scoped `mediaLocalRoots` through shared reply delivery so allowed local files upload correctly from button, slash-command, and model-picker replies. (#44021) Thanks @LyleLiu666. +- macOS/Reminders: add the missing `NSRemindersUsageDescription` to the bundled app so `apple-reminders` can trigger the system permission prompt from OpenClaw.app. (#8559) Thanks @dinakars777. +- Gateway/session discovery: discover disk-only and retired ACP session stores under custom templated `session.store` roots so ACP reconciliation, session-id/session-label targeting, and run-id fallback keep working after restart. (#44176) thanks @gumadeiras. +- Plugins/env-scoped roots: fix plugin discovery/load caches and provenance tracking so same-process `HOME`/`OPENCLAW_HOME` changes no longer reuse stale plugin state or misreport `~/...` plugins as untracked. (#44046) thanks @gumadeiras. 
+- Models/OpenRouter native ids: canonicalize native OpenRouter model keys across config writes, runtime lookups, fallback management, and `models list --plain`, and migrate legacy duplicated `openrouter/openrouter/...` config entries forward on write. +- Windows/native update: make package installs use the npm update path instead of the git path, carry portable Git into native Windows updates, and mirror the installer's Windows npm env so `openclaw update` no longer dies early on missing `git` or `node-llama-cpp` download setup. +- Sandbox/write: preserve pinned mutation-helper payload stdin so sandboxed `write` no longer reports success while creating empty files. (#43876) Thanks @glitch418x. +- Security/exec approvals: escape invisible Unicode format characters in approval prompts so zero-width command text renders as visible `\u{...}` escapes instead of spoofing the reviewed command. (`GHSA-pcqg-f7rg-xfvv`)(#43687) Thanks @EkiXu and @vincentkoc. +- Hooks/loader: fail closed when workspace hook paths cannot be resolved with `realpath`, so unreadable or broken internal hook paths are skipped instead of falling back to unresolved imports. (#44437) Thanks @vincentkoc. +- Hooks/agent deliveries: dedupe repeated hook requests by optional idempotency key so webhook retries can reuse the first run instead of launching duplicate agent executions. (#44438) Thanks @vincentkoc. +- Security/exec detection: normalize compatibility Unicode and strip invisible formatting code points before obfuscation checks so zero-width and fullwidth command tricks no longer suppress heuristic detection. (`GHSA-9r3v-37xh-2cf6`)(#44091) Thanks @wooluo and @vincentkoc. +- Security/exec allowlist: preserve POSIX case sensitivity and keep `?` within a single path segment so exact-looking allowlist patterns no longer overmatch executables across case or directory boundaries. (`GHSA-f8r2-vg7x-gh8m`)(#43798) Thanks @zpbrent and @vincentkoc. 
+- Security/commands: require sender ownership for `/config` and `/debug` so authorized non-owner senders can no longer reach owner-only config and runtime debug surfaces. (`GHSA-r7vr-gr74-94p8`)(#44305) Thanks @tdjackey and @vincentkoc. +- Security/gateway auth: clear unbound client-declared scopes on shared-token WebSocket connects so device-less shared-token operators cannot self-declare elevated scopes. (`GHSA-rqpp-rjj8-7wv8`)(#44306) Thanks @LUOYEcode and @vincentkoc. +- Security/browser.request: block persistent browser profile create/delete routes from write-scoped `browser.request` so callers can no longer persist admin-only browser profile changes through the browser control surface. (`GHSA-vmhq-cqm9-6p7q`)(#43800) Thanks @tdjackey and @vincentkoc. +- Security/agent: reject public spawned-run lineage fields and keep workspace inheritance on the internal spawned-session path so external `agent` callers can no longer override the gateway workspace boundary. (`GHSA-2rqg-gjgv-84jm`)(#43801) Thanks @tdjackey and @vincentkoc. +- Security/session_status: enforce sandbox session-tree visibility and shared agent-to-agent access guards before reading or mutating target session state, so sandboxed subagents can no longer inspect parent session metadata or write parent model overrides via `session_status`. (`GHSA-wcxr-59v9-rxr8`)(#43754) Thanks @tdjackey and @vincentkoc. +- Security/agent tools: mark `nodes` as explicitly owner-only and document/test that `canvas` remains a shared trusted-operator surface unless a real boundary bypass exists. +- Security/exec approvals: fail closed for Ruby approval flows that use `-r`, `--require`, or `-I` so approval-backed commands no longer bind only the main script while extra local code-loading flags remain outside the reviewed file snapshot. +- Security/device pairing: cap issued and verified device-token scopes to each paired device's approved scope baseline so stale or overbroad tokens cannot exceed approved access. 
(`GHSA-2pwv-x786-56f8`)(#43686) Thanks @tdjackey and @vincentkoc. +- Docs/onboarding: align the legacy wizard reference and `openclaw onboard` command docs with the Ollama onboarding flow so all onboarding reference paths now document `--auth-choice ollama`, Cloud + Local mode, and non-interactive usage. (#43473) Thanks @BruceMacD. +- Models/secrets: enforce source-managed SecretRef markers in generated `models.json` so runtime-resolved provider secrets are not persisted when runtime projection is skipped. (#43759) Thanks @joshavant. +- Security/WebSocket preauth: shorten unauthenticated handshake retention and reject oversized pre-auth frames before application-layer parsing to reduce pre-pairing exposure on unsupported public deployments. (`GHSA-jv4g-m82p-2j93`)(#44089) (`GHSA-xwx2-ppv2-wx98`)(#44089) Thanks @ez-lbz and @vincentkoc. +- Security/proxy attachments: restore the shared media-store size cap for persisted browser proxy files so oversized payloads are rejected instead of overriding the intended 5 MB limit. (`GHSA-6rph-mmhp-h7h9`)(#43684) Thanks @tdjackey and @vincentkoc. +- Security/host env: block inherited `GIT_EXEC_PATH` from sanitized host exec environments so Git helper resolution cannot be steered by host environment state. (`GHSA-jf5v-pqgw-gm5m`)(#43685) Thanks @zpbrent and @vincentkoc. +- Security/Feishu webhook: require `encryptKey` alongside `verificationToken` in webhook mode so unsigned forged events are rejected instead of being processed with token-only configuration. (`GHSA-g353-mgv3-8pcj`)(#44087) Thanks @lintsinghua and @vincentkoc. +- Security/Feishu reactions: preserve looked-up group chat typing and fail closed on ambiguous reaction context so group authorization and mention gating cannot be bypassed through synthetic `p2p` reactions. (`GHSA-m69h-jm2f-2pv8`)(#44088) Thanks @zpbrent and @vincentkoc. 
+- Security/LINE webhook: require signatures for empty-event POST probes too so unsigned requests no longer confirm webhook reachability with a `200` response. (`GHSA-mhxh-9pjm-w7q5`)(#44090) Thanks @TerminalsandCoffee and @vincentkoc. +- Security/Zalo webhook: rate limit invalid secret guesses before auth so weak webhook secrets cannot be brute-forced through unauthenticated churned requests without pre-auth `429` responses. (`GHSA-5m9r-p9g7-679c`)(#44173) Thanks @zpbrent and @vincentkoc. +- Security/Zalouser groups: require stable group IDs for allowlist auth by default and gate mutable group-name matching behind `channels.zalouser.dangerouslyAllowNameMatching`. Thanks @zpbrent. +- Security/Slack and Teams routing: require stable channel and team IDs for allowlist routing by default, with mutable name matching only via each channel's `dangerouslyAllowNameMatching` break-glass flag. +- Security/exec approvals: fail closed for ambiguous inline loader and shell-payload script execution, bind the real script after POSIX shell value-taking flags, and unwrap `pnpm`/`npm exec`/`npx` script runners before approval binding. (`GHSA-57jw-9722-6rf2`)(`GHSA-jvqh-rfmh-jh27`)(`GHSA-x7pp-23xv-mmr4`)(`GHSA-jc5j-vg4r-j5jx`)(#44247) Thanks @tdjackey and @vincentkoc. +- Doctor/gateway service audit: canonicalize service entrypoint paths before comparing them so symlink-vs-realpath installs no longer trigger false "entrypoint does not match the current install" repair prompts. (#43882) Thanks @ngutman. +- Doctor/gateway service audit: earlier groundwork for this fix landed in the superseded #28338 branch. Thanks @realriphub. +- Gateway/session stores: regenerate the Swift push-test protocol models and align Windows native session-store realpath handling so protocol checks and sync session discovery stop drifting on Windows. (#44266) thanks @jalehman. 
+- Context engine/session routing: forward optional `sessionKey` through context-engine lifecycle calls so plugins can see structured routing metadata during bootstrap, assembly, post-turn ingestion, and compaction. (#44157) Thanks @jalehman. +- Agents/failover: classify z.ai `network_error` stop reasons as retryable timeouts so provider connectivity failures trigger fallback instead of surfacing raw unhandled-stop-reason errors. (#43884) Thanks @hougangdev. +- Config/Anthropic startup: inline Anthropic alias normalization during config load so gateway startup no longer crashes on dated Anthropic model refs like `anthropic/claude-sonnet-4-20250514`. (#45520) Thanks @BunsDev. +- Memory/session sync: add mode-aware post-compaction session reindexing with `agents.defaults.compaction.postIndexSync` plus `agents.defaults.memorySearch.sync.sessions.postCompactionForce`, so compacted session memory can refresh immediately without forcing every deployment into synchronous reindexing. (#25561) Thanks @rodrigouroz. +- Telegram/native command sync: suppress expected `BOT_COMMANDS_TOO_MUCH` retry error noise, add a final fallback summary log, and document the difference between command-menu overflow and real Telegram network failures.
+- Browser/existing-session: stop reporting fake CDP ports/URLs for live attached Chrome sessions, render `transport: chrome-mcp` in CLI/status output instead of `port: 0`, and keep timeout diagnostics transport-aware when no direct CDP URL exists. +- Feishu/event dedupe: keep early duplicate suppression aligned with the shared Feishu message-id contract and release the pre-queue dedupe marker after failed dispatch so retried events can recover instead of being dropped until the short TTL expires. (#43762) Thanks @yunweibang. +- Gateway/hooks: bucket hook auth failures by forwarded client IP behind trusted proxies and warn when `hooks.allowedAgentIds` leaves hook routing unrestricted. +- Agents/compaction: skip the post-compaction `cache-ttl` marker write when a compaction completed in the same attempt, preventing the next turn from immediately triggering a second tiny compaction. (#28548) Thanks @MoerAI. +- Native chat/macOS: add `/new`, `/reset`, and `/clear` reset triggers, keep shared main-session aliases aligned, and ignore stale model-selection completions so native chat state stays in sync across reset and fast model changes. (#10898) Thanks @Nachx639. +- Agents/compaction safeguard: route missing-model and missing-API-key cancellation warnings through the shared subsystem logger so they land in structured and file logs. (#9974) Thanks @dinakars777.
+- Cron/doctor: stop flagging canonical `agentTurn` and `systemEvent` payload kinds as legacy cron storage, while still normalizing whitespace-padded and non-canonical variants. (#44012) Thanks @shuicici. +- ACP/client final-message delivery: preserve terminal assistant text snapshots before resolving `end_turn`, so ACP clients no longer drop the last visible reply when the gateway sends the final message body on the terminal chat event. (#17615) Thanks @pjeby. +- Telegram/Discord status reactions: show a temporary compacting reaction during auto-compaction pauses and restore thinking afterward so the bot no longer appears frozen while context is being compacted. (#35474) Thanks @Cypherm. +- Delivery/dedupe: trim completed direct-cron delivery cache correctly and keep mirrored transcript dedupe active even when transcript files contain malformed lines. (#44666) Thanks @frankekn. +- CLI/thinking help: add the missing `xhigh` level hints to `openclaw cron add`, `openclaw cron edit`, and `openclaw agent` so the help text matches the levels already accepted at runtime. (#44819) Thanks @kiki830621. +- Agents/Anthropic replay: drop replayed assistant thinking blocks for native Anthropic and Bedrock Claude providers so persisted follow-up turns no longer fail on stored thinking blocks. (#44843) Thanks @jmcte. +- Docs/Brave pricing: escape literal dollar signs in Brave Search cost text so the docs render the free credit and per-request pricing correctly. (#44989) Thanks @keelanfh. +- Feishu/file uploads: preserve literal UTF-8 filenames in `im.file.create` so Chinese and other non-ASCII filenames no longer appear percent-encoded in chat. (#34262) Thanks @fabiaodemianyang and @KangShuaiFu.
+ +## 2026.3.11 + +### Security + +- Gateway/WebSocket: enforce browser origin validation for all browser-originated connections regardless of whether proxy headers are present, closing a cross-site WebSocket hijacking path in `trusted-proxy` mode that could grant untrusted origins `operator.admin` access. (GHSA-5wcw-8jjv-m286) + +### Changes + +- OpenRouter/models: add temporary Hunter Alpha and Healer Alpha entries to the built-in catalog so OpenRouter users can try the new free stealth models during their roughly one-week availability window. (#43642) Thanks @ping-Toven. +- iOS/Home canvas: add a bundled welcome screen with a live agent overview that refreshes on connect, reconnect, and foreground return, and move the compact connection pill off the top-left canvas overlay. (#42456) Thanks @ngutman. +- iOS/Home canvas: replace floating controls with a docked toolbar, make the bundled home scaffold adapt to smaller phones, and open chat in the resolved main session instead of a synthetic `ios` session. (#42456) Thanks @ngutman. +- macOS/chat UI: add a chat model picker, persist explicit thinking-level selections across relaunch, and harden provider-aware session model sync for the shared chat composer. (#42314) Thanks @ImLukeF. +- Onboarding/Ollama: add first-class Ollama setup with Local or Cloud + Local modes, browser-based cloud sign-in, curated model suggestions, and cloud-model handling that skips unnecessary local pulls. (#41529) Thanks @BruceMacD. +- OpenCode/onboarding: add new OpenCode Go provider, treat Zen and Go as one OpenCode setup in the wizard/docs while keeping the runtime providers split, store one shared OpenCode key for both profiles, and stop overriding the built-in `opencode-go` catalog routing. (#42313) Thanks @ImLukeF and @vincentkoc. +- Memory: add opt-in multimodal image and audio indexing for `memorySearch.extraPaths` with Gemini `gemini-embedding-2-preview`, strict fallback gating, and scope-based reindexing. 
(#43460) Thanks @gumadeiras. +- Memory/Gemini: add `gemini-embedding-2-preview` memory-search support with configurable output dimensions and automatic reindexing when the configured dimensions change. (#42501) Thanks @BillChirico and @gumadeiras. +- macOS/onboarding: detect when remote gateways need a shared auth token, explain where to find it on the gateway host, and clarify when a successful check used paired-device auth instead. (#43100) Thanks @ngutman. +- Discord/auto threads: add `autoArchiveDuration` channel config for auto-created threads so Discord thread archiving can stay at 1 hour, 1 day, 3 days, or 1 week instead of always using the 1-hour default. (#35065) Thanks @davidguttman. +- iOS/TestFlight: add a local beta release flow with Fastlane prepare/archive/upload support, canonical beta bundle IDs, and watch-app archive fixes. (#42991) Thanks @ngutman. +- ACP/sessions_spawn: add optional `resumeSessionId` for `runtime: "acp"` so spawned ACP sessions can resume an existing ACPX/Codex conversation instead of always starting fresh. (#41847) Thanks @pejmanjohn. +- Gateway/node pending work: add narrow in-memory pending-work queue primitives (`node.pending.enqueue` / `node.pending.drain`) and wake-helper reuse as a foundation for dormant-node work delivery. (#41409) Thanks @mbelinky. +- Git/runtime state: ignore the gateway-generated `.dev-state` file so local runtime state does not show up as untracked repo noise. (#41848) Thanks @smysle. +- Exec/child commands: mark child command environments with `OPENCLAW_CLI` so subprocesses can detect when they were launched from the OpenClaw CLI. (#41411) Thanks @vincentkoc. +- LLM Task/Lobster: add an optional `thinking` override so workflow calls can explicitly set embedded reasoning level with shared validation for invalid values and unsupported `xhigh` modes. (#15606) Thanks @xadenryan and @ImLukeF. 
+- Mattermost/reply threading: add `channels.mattermost.replyToMode` for channel and group messages so top-level posts can start thread-scoped sessions without the manual reply-then-thread workaround. (#29587) Thanks @teconomix. +- iOS/push relay: add relay-backed official-build push delivery with App Attest + receipt verification, gateway-bound send delegation, and config-based relay URL setup on the gateway. (#43369) Thanks @ngutman. + +### Breaking + +- Cron/doctor: tighten isolated cron delivery so cron jobs can no longer notify through ad hoc agent sends or fallback main-session summaries, and add `openclaw doctor --fix` migration for legacy cron storage and legacy notify/webhook delivery metadata. (#40998) Thanks @mbelinky. + +### Fixes + +- Windows/install: stop auto-installing `node-llama-cpp` during normal npm CLI installs so `openclaw@latest` no longer fails on Windows while building optional local-embedding dependencies. +- Windows/update: mirror the native installer environment during global npm updates, including portable Git fallback and Windows-safe npm shell settings, so `openclaw update` works again on native Windows installs. +- Gateway/status: expose `runtimeVersion` in gateway status output so install/update smoke tests can verify the running version before and after updates. +- Windows/onboarding: explain when non-interactive local onboarding is waiting for an already-running gateway, and surface native Scheduled Task admin requirements more clearly instead of failing with an opaque gateway timeout. +- Windows/gateway install: fall back from denied Scheduled Task creation to a per-user Startup-folder login item, so native `openclaw gateway install` and `--install-daemon` keep working without an elevated PowerShell shell. +- Agents/text sanitization: strip leaked model control tokens (`<|...|>` and full-width `<|...|>` variants) from user-facing assistant text, preventing GLM-5 and DeepSeek internal delimiters from reaching end users. 
(#42173) Thanks @imwyvern. +- iOS/gateway foreground recovery: reconnect immediately on foreground return after stale background sockets are torn down, so the app no longer stays disconnected until a later wake path happens. (#41384) Thanks @mbelinky. +- Gateway/Control UI: keep dashboard auth tokens in session-scoped browser storage so same-tab refreshes preserve remote token auth without restoring long-lived localStorage token persistence, while scoping tokens to the selected gateway URL and fragment-only bootstrap flow. (#40892) thanks @velvet-shark. +- Gateway/macOS launchd restarts: keep the LaunchAgent registered during explicit restarts, hand off self-restarts through a detached launchd helper, and recover config/hot reload restart paths without unloading the service. Fixes #43311, #43406, #43035, and #43049. +- macOS/LaunchAgent install: tighten LaunchAgent directory and plist permissions during install so launchd bootstrap does not fail when the target home path or generated plist inherited group/world-writable modes. +- Discord/reply chunking: resolve the effective `maxLinesPerMessage` config across live reply paths and preserve `chunkMode` in the fast send path so long Discord replies no longer split unexpectedly at the default 17-line limit. (#40133) thanks @rbutera. +- Feishu/local image auto-convert: pass `mediaLocalRoots` through the `sendText` local-image shim so allowed local image paths upload as Feishu images again instead of falling back to raw path text. (#40623) Thanks @ayanesakura. +- Models/Kimi Coding: send `anthropic-messages` tools in native Anthropic format again so `kimi-coding` stops degrading tool calls into XML/plain-text pseudo invocations instead of real `tool_use` blocks. (#38669, #39907, #40552) Thanks @opriz. 
+- Telegram/outbound HTML sends: chunk long HTML-mode messages, preserve plain-text fallback and silent-delivery params across retries, and cut over to plain text when HTML chunk planning cannot safely preserve the full message. (#42240) thanks @obviyus.
+- Telegram/final preview delivery: split active preview lifecycle from cleanup retention so missing archived preview edits avoid duplicate fallback sends without clearing the live preview or blocking later in-place finalization. (#41662) thanks @hougangdev.
+- Telegram/final preview delivery followup: keep ambiguous missing-`message_id` finals only when a preview was already visible, while first-preview/no-id cases still fall back so Telegram users do not lose the final reply. (#41932) thanks @hougangdev.
+- Telegram/final preview cleanup follow-up: clear stale cleanup-retain state only for transient preview finals so archived-preview retains no longer leave a stale partial bubble beside a later fallback-sent final. (#41763) Thanks @obviyus.
+- Telegram/poll restarts: scope process-level polling restarts to real Telegram `getUpdates` failures so unrelated network errors, such as Slack DNS misses, no longer bounce Telegram polling. (#43799) Thanks @obviyus.
+- Gateway/auth: allow one trusted device-token retry on shared-token mismatch with recovery hints to prevent reconnect churn during token drift. (#42507) Thanks @joshavant.
+- Agents/Azure OpenAI Responses: include the `azure-openai` provider in the Responses API store override so Azure OpenAI multi-turn cron jobs and embedded agent runs no longer fail with HTTP 400 "store is set to false". (#42934, fixes #42800) Thanks @ademczuk. 
+- Agents/error rendering: ignore stale assistant `errorMessage` fields on successful turns so background/tool-side failures no longer prepend synthetic billing errors over valid replies. (#40616) Thanks @ingyukoh. +- Agents/billing recovery: probe single-provider billing cooldowns on the existing throttle so topping up credits can recover without a manual gateway restart. (#41422) thanks @altaywtf. +- Agents/fallback: treat HTTP 499 responses as transient in both raw-text and structured failover paths so Anthropic-style client-closed overload responses trigger model fallback reliably. (#41468) thanks @zeroasterisk. +- Agents/fallback: recognize Venice `402 Insufficient USD or Diem balance` billing errors so configured model fallbacks trigger instead of surfacing the raw provider error. (#43205) Thanks @Squabble9. +- Agents/fallback: recognize Poe `402 You've used up your points!` billing errors so configured model fallbacks trigger instead of surfacing the raw provider error. (#42278) Thanks @CryUshio. +- Agents/failover: treat Gemini `MALFORMED_RESPONSE` stop reasons as retryable timeouts so preview-model enum drift falls back cleanly instead of crashing the run, without also reclassifying malformed function-call errors. (#42292) Thanks @jnMetaCode. +- Agents/cooldowns: default cooldown windows with no recorded failure history to `unknown` instead of `rate_limit`, avoiding false API rate-limit warnings while preserving cooldown recovery probes. (#42911) Thanks @VibhorGautam. +- Auth/cooldowns: reset expired auth-profile cooldown error counters before computing the next backoff so stale on-disk counters do not re-escalate into long cooldown loops after expiry. (#41028) thanks @zerone0x. +- Agents/memory flush: forward `memoryFlushWritePath` through `runEmbeddedPiAgent` so memory-triggered flush turns keep the append-only write guard without aborting before tool setup. Follows up on #38574. (#41761) Thanks @frankekn. 
+- Agents/context pruning: prune image-only tool results during soft-trim, align context-pruning coverage with the new tool-result contract, and extend historical image cleanup to the same screenshot-heavy session path. (#43045) Thanks @MoerAI. +- Sessions/reset model recompute: clear stale runtime model, context-token, and system-prompt metadata before session resets recompute the replacement session, so resets pick up current defaults and explicit overrides instead of reusing old runtime model state. (#41173) thanks @PonyX-lab. +- Channels/allowlists: remove stale matcher caching so same-array allowlist edits and wildcard replacements take effect immediately, with regression coverage for in-place mutation cases. +- Discord/Telegram outbound runtime config: thread runtime-resolved config through Discord and Telegram send paths so SecretRef-based credentials stay resolved during message delivery. (#42352) Thanks @joshavant. +- Tools/web search: treat Brave `llm-context` grounding snippets as plain strings so `web_search` no longer returns empty snippet arrays in LLM Context mode. (#41387) thanks @zheliu2. +- Tools/web search: recover OpenRouter Perplexity citation extraction from `message.annotations` when chat-completions responses omit top-level citations. (#40881) Thanks @laurieluo. +- CLI/skills JSON: strip ANSI and C1 control bytes from `skills list --json`, `skills info --json`, and `skills check --json` so machine-readable output stays valid for terminals and skill metadata with embedded control characters. Fixes #27530. Related #27557. Thanks @Jimmy-xuzimo and @vincentkoc. +- CLI/tables: default shared tables to ASCII borders on legacy Windows consoles while keeping Unicode borders on modern Windows terminals, so commands like `openclaw skills` stop rendering mojibake under GBK/936 consoles. Fixes #40853. Related #41015. Thanks @ApacheBin and @vincentkoc. 
+- CLI/memory teardown: close cached memory search/index managers in the one-shot CLI shutdown path so watcher-backed memory caches no longer keep completed CLI runs alive after output finishes. (#40389) thanks @Julbarth. +- Control UI/Sessions: restore single-column session table collapse on narrow viewport or container widths by moving the responsive table override next to the base grid rule and enabling inline-size container queries. (#12175) Thanks @benjipeng. +- Telegram/network env-proxy: apply configured transport policy to proxied HTTPS dispatchers as well as direct `NO_PROXY` bypasses, so resolver-scoped IPv4 fallback and network settings work consistently for env-proxied Telegram traffic. (#40740) Thanks @sircrumpet. +- Mattermost/Markdown formatting: preserve first-line indentation when stripping bot mentions so nested list items and indented code blocks keep their structure, and render Mattermost tables natively by default instead of fenced-code fallback. (#18655) thanks @echo931. +- Mattermost/plugin send actions: normalize direct `replyTo` fallback handling so threaded plugin sends trim blank IDs and reuse the correct reply target again. (#41176) Thanks @hnykda. +- MS Teams/allowlist resolution: use the General channel conversation ID as the resolved team key (with Graph GUID fallback) so Bot Framework runtime `channelData.team.id` matching works for team and team/channel allowlist entries. (#41838) Thanks @BradGroux. +- Signal/config schema: accept `channels.signal.accountUuid` in strict config validation so loop-protection configs no longer fail with an unrecognized-key error. (#35578) Thanks @ingyukoh. +- Telegram/config schema: accept `channels.telegram.actions.editMessage` and `createForumTopic` in strict config validation so existing Telegram action toggles no longer fail as unrecognized keys. (#35498) Thanks @ingyukoh. 
+- Telegram/docs: clarify that `channels.telegram.groups` allowlists chats while `groupAllowFrom` allowlists users inside those chats, and point invalid negative chat IDs at the right config key. (#42451) Thanks @altaywtf.
+- Discord/config typing: expose channel-level `autoThread` on the canonical guild-channel config type so strict config loading matches the existing Discord schema and runtime behavior. (#35608) Thanks @ingyukoh.
+- Models: guard optional `model.input` capability checks. (#42096) Thanks @andyliu.
+- Models/Alibaba Cloud Model Studio: wire `MODELSTUDIO_API_KEY` through shared env auth, implicit provider discovery, and shell-env fallback so onboarding works outside the wizard too. (#40634) Thanks @pomelo-nwu.
+- Tools/web: resolve web tool SecretRefs atomically at runtime. (#41599) Thanks @joshavant.
+- Secret files: harden CLI and channel credential file reads against path-swap races by requiring direct regular files for `*File` secret inputs and rejecting symlink-backed secret files.
+- Archive extraction: harden TAR and external `tar.bz2` installs against destination symlink and pre-existing child-symlink escapes by extracting into staging first and merging into the canonical destination with safe file opens.
+- Secrets/SecretRef: reject exec SecretRef traversal ids across schema, runtime, and gateway. (#42370) Thanks @joshavant.
+- Sandbox/fs bridge: pin staged writes to verified parent directories so temporary write files cannot materialize outside the allowed mount before atomic replace. Thanks @tdjackey.
+- Gateway/auth: fail closed when local `gateway.auth.*` SecretRefs are configured but unavailable, instead of silently falling back to `gateway.remote.*` credentials in local mode. (#42672) Thanks @joshavant. 
+- Commands/config writes: enforce `configWrites` against both the originating account and the targeted account scope for `/config` and config-backed `/allowlist` edits, blocking sibling-account mutations while preserving gateway `operator.admin` flows. Thanks @tdjackey for reporting. +- Security/system.run: fail closed for approval-backed interpreter/runtime commands when OpenClaw cannot bind exactly one concrete local file operand, while extending best-effort direct-file binding to additional runtime forms. Thanks @tdjackey for reporting. +- Gateway/session reset auth: split conversation `/new` and `/reset` handling away from the admin-only `sessions.reset` control-plane RPC so write-scoped gateway callers can no longer reach the privileged reset path through `agent`. Thanks @tdjackey for reporting. +- Security/plugin runtime: stop unauthenticated plugin HTTP routes from inheriting synthetic admin gateway scopes when they call `runtime.subagent.*`, so admin-only methods like `sessions.delete` stay blocked without gateway auth. +- Security/nodes: treat the `nodes` agent tool as owner-only fallback policy so non-owner senders cannot reach paired-node approval or invoke paths through the shared tool set. +- Sandbox/sessions_spawn: restore real workspace handoff for read-only sandboxed sessions so spawned subagents mount the configured workspace at `/agent` instead of inheriting the sandbox copy. Related #40582. +- Security/external content: treat whitespace-delimited `EXTERNAL UNTRUSTED CONTENT` boundary markers like underscore-delimited variants so prompt wrappers cannot bypass marker sanitization. (#35983) Thanks @urianpaul94. +- Telegram/exec approvals: reject `/approve` commands aimed at other bots, keep deterministic approval prompts visible when tool-result delivery fails, and stop resolved exact IDs from matching other pending approvals by prefix. (#37233) Thanks @huntharo. 
+- Subagents/authority: persist leaf vs orchestrator control scope at spawn time and route tool plus slash-command control through shared ownership checks, so leaf sessions cannot regain orchestration privileges after restore or flat-key lookups. Thanks @tdjackey. +- ACP/ACPX plugin: bump the bundled `acpx` pin to `0.1.16` so plugin-local installs and strict version checks match the latest published CLI. (#41975) Thanks @dutifulbob. - ACP/sessions.patch: allow `spawnedBy` and `spawnDepth` lineage fields on ACP session keys so `sessions_spawn` with `runtime: "acp"` no longer fails during child-session setup. Fixes #40971. (#40995) thanks @xaeon2026. - ACP/stop reason mapping: resolve gateway chat `state: "error"` completions as ACP `end_turn` instead of `refusal` so transient backend failures are not surfaced as deliberate refusals. (#41187) thanks @pejmanjohn. - ACP/setSessionMode: propagate gateway `sessions.patch` failures back to ACP clients so rejected mode changes no longer return silent success. (#41185) thanks @pejmanjohn. +- ACP/bridge mode: reject unsupported per-session MCP server setup and propagate rejected session-mode changes so IDE clients see explicit bridge limitations instead of silent success. (#41424) Thanks @mbelinky. +- ACP/session UX: replay stored user and assistant text on `loadSession`, expose Gateway-backed session controls and metadata, and emit approximate session usage updates so IDE clients restore context more faithfully. (#41425) Thanks @mbelinky. +- ACP/tool streaming: enrich `tool_call` and `tool_call_update` events with best-effort text content and file-location hints so IDE clients can follow bridge tool activity more naturally. (#41442) Thanks @mbelinky. +- ACP/runtime attachments: forward normalized inbound image attachments into ACP runtime turns so ACPX sessions can preserve image prompt content on the runtime path. (#41427) Thanks @mbelinky. 
+- ACP/regressions: add gateway RPC coverage for ACP lineage patching, ACPX runtime coverage for image prompt serialization, and an operator smoke-test procedure for live ACP spawn verification. (#41456) Thanks @mbelinky.
+- ACP/follow-up hardening: make session restore and prompt completion degrade gracefully on transcript/update failures, enforce bounded tool-location traversal, and skip non-image ACPX turns the runtime cannot serialize. (#41464) Thanks @mbelinky.
+- ACP/sessions_spawn: implicitly stream `mode="run"` ACP spawns to parent only for eligible subagent orchestrator sessions (heartbeat `target: "last"` with a usable session-local route), restoring parent progress relays without thread binding. (#42404) Thanks @davidguttman.
+- Plugins/context-engine model auth: expose `runtime.modelAuth` and plugin-sdk auth helpers so plugins can resolve provider/model API keys through the normal auth pipeline. (#41090) thanks @xinhuagu.
+- Plugins/global hook runner: harden singleton state handling so shared global hook runner reuse does not leak or corrupt runner state across executions. (#40184) Thanks @vincentkoc.
+- Context engine/tests: add bundled-registry regression coverage for cross-chunk resolution, plugin-sdk re-exports, and concurrent chunk registration. (#40460) thanks @dsantoreis.
+- Agents/embedded runner: bound compaction retry waiting and drain embedded runs during SIGUSR1 restart so session lanes recover instead of staying blocked behind compaction. (#40324) thanks @cgdusek. 
+- Agents/embedded logs: add structured, sanitized lifecycle and failover observation events so overload and provider failures are easier to tail and filter. (#41336) thanks @altaywtf. +- Agents/embedded overload logs: include the failing model and provider in error-path console output, with lifecycle regression coverage for the rendered and sanitized `consoleMessage`. (#41236) thanks @jiarung. +- Agents/fallback observability: add structured, sanitized model-fallback decision and auth-profile failure-state events with correlated run IDs so cooldown probes and failover paths are easier to trace in logs. (#41337) thanks @altaywtf. +- Logging/probe observations: suppress structured embedded and model-fallback probe warnings on the console without hiding error or fatal output. (#41338) thanks @altaywtf. +- Agents/context-engine compaction: guard thrown engine-owned overflow compaction attempts and fire compaction hooks for `ownsCompaction` engines so overflow recovery no longer crashes and plugin subscribers still observe compact runs. (#41361) thanks @davidrudduck. +- Gateway/node pending drain followup: keep `hasMore` true when the deferred baseline status item still needs delivery, and avoid allocating empty pending-work state for drain-only nodes with no queued work. (#41429) Thanks @mbelinky. +- Protocol/Swift model sync: regenerate pending node work Swift bindings after the landed `node.pending.*` schema additions so generated protocol artifacts are consistent again. (#41477) Thanks @mbelinky. +- Cron/subagent followup: do not misclassify empty or `NO_REPLY` cron responses as interim acknowledgements that need a rerun, so deliberately silent cron jobs are no longer retried. (#41383) thanks @jackal092927. +- Cron/state errors: record `lastErrorReason` in cron job state and keep the gateway schema aligned with the full failover-reason set, including regression coverage for protocol conformance. (#14382) thanks @futuremind2026. 
+- Browser/Browserbase 429 handling: surface stable no-retry rate-limit guidance without buffering discarded HTTP 429 response bodies from remote browser services. (#40491) thanks @mvanhorn. +- CI/CodeQL Swift toolchain: select Xcode 26.1 before installing Swift build tools so the CodeQL Swift job uses Swift tools 6.2 on `macos-latest`. (#41787) thanks @BunsDev. +- Sandbox/subagents: pass the real configured workspace through `sessions_spawn` inheritance when a parent agent runs in a copied-workspace sandbox, so child `/agent` mounts point at the configured workspace instead of the parent sandbox copy. (#40757) Thanks @dsantoreis. +- Agents/fallback cooldown probing: cap cooldown-bypass probing to one attempt per provider per fallback run so multi-model same-provider cooldown chains can continue to cross-provider fallbacks instead of repeatedly stalling on duplicate cooldown probes. (#41711) Thanks @cgdusek. +- Telegram/direct delivery: bridge direct delivery sends to internal `message:sent` hooks so internal hook listeners observe successful Telegram deliveries. (#40185) Thanks @vincentkoc. +- Dependencies: refresh workspace dependencies except the pinned Carbon package, and harden ACP session-config writes against non-string SDK values so newer ACP clients fail fast instead of tripping type/runtime mismatches. +- Telegram/polling restarts: clear bounded cleanup timeout handles after `runner.stop()` and `bot.stop()` settle so stall recovery no longer leaves stray 15-second timers behind on clean shutdown. (#43188) thanks @kyohwang. +- Gateway/config errors: surface up to three validation issues in top-level `config.set`, `config.patch`, and `config.apply` error messages while preserving structured issue details. (#42664) Thanks @huntharo. +- Hooks/plugin context parity followup: pass `trigger` and `channelId` through embedded `llm_input`, `agent_end`, and `llm_output` hook contexts so plugins receive the same agent metadata across hook phases. 
(#42362) Thanks @zhoulf1006. +- Status/context windows: normalize provider-qualified override cache keys so `/status` resolves the active provider's configured context window even when `models.providers` keys use mixed case or surrounding whitespace. (#36389) Thanks @haoruilee. +- ACP/main session aliases: canonicalize `main` before ACP session lookup so restarted ACP main sessions rehydrate instead of failing closed with `Session is not ACP-enabled: main`. (#43285, fixes #25692) +- Agents/embedded runner: recover canonical allowlisted tool names from malformed `toolCallId` and malformed non-blank tool-name variants before dispatch, while failing closed on ambiguous matches. (#34485) thanks @yuweuii. +- Agents/failover: classify ZenMux quota-refresh `402` responses as `rate_limit` so model fallback retries continue instead of stopping on a temporary subscription window. (#43917) thanks @bwjoke. +- Agents/failover: classify HTTP 422 malformed-request responses as `format` and recognize OpenRouter "requires more credits" billing errors so provider fallback triggers instead of surfacing raw errors. (#43823) thanks @jnMetaCode. +- Memory/QMD Windows: fail closed when `qmd.cmd` or `mcporter.cmd` wrappers cannot be resolved to a direct entrypoint, so memory search no longer falls back to shell execution on Windows. +- macOS/remote gateway: stop PortGuardian from killing Docker Desktop and other external listeners on the gateway port in remote mode, so containerized and tunneled gateway setups no longer lose their port-forward owner on app startup. (#6755) Thanks @teslamint. ## 2026.3.8 @@ -73,6 +363,7 @@ Docs: https://docs.openclaw.ai - Docs/Changelog: correct the contributor credit for the bundled Control UI global-install fix to @LarytheLord. (#40420) Thanks @velvet-shark. - Telegram/media downloads: time out only stalled body reads so polling recovers from hung file downloads without aborting slow downloads that are still streaming data. (#40098) thanks @tysoncung. 
- Docker/runtime image: prune dev dependencies, strip build-only dist metadata for smaller Docker images. (#40307) Thanks @vincentkoc. +- Subagents/sandboxing: restrict leaf subagents to their own spawned runs and remove leaf `subagents` control access so sandboxed leaf workers can no longer steer sibling sessions. Thanks @tdjackey. - Gateway/restart timeout recovery: exit non-zero when restart-triggered shutdown drains time out so launchd/systemd restart the gateway instead of treating the failed restart as a clean stop. Landed from contributor PR #40380 by @dsantoreis. Thanks @dsantoreis. - Gateway/config restart guard: validate config before service start/restart and keep post-SIGUSR1 startup failures from crashing the gateway process, reducing invalid-config restart loops and macOS permission loss. Landed from contributor PR #38699 by @lml2468. Thanks @lml2468. - Gateway/launchd respawn detection: treat `XPC_SERVICE_NAME` as a launchd supervision hint so macOS restarts exit cleanly under launchd instead of attempting detached self-respawn. Landed from contributor PR #20555 by @dimat. Thanks @dimat. @@ -81,8 +372,21 @@ Docs: https://docs.openclaw.ai - Cron/owner-only tools: pass trusted isolated cron runs into the embedded agent with owner context so `cron`/`gateway` tooling remains available after the owner-auth hardening narrowed direct-message ownership inference. - Browser/SSRF: block private-network intermediate redirect hops in strict browser navigation flows and fail closed when remote tab-open paths cannot inspect redirect chains. Thanks @zpbrent. - MS Teams/authz: keep `groupPolicy: "allowlist"` enforcing sender allowlists even when a team/channel route allowlist is configured, so route matches no longer widen group access to every sender in that route. Thanks @zpbrent. 
+- Security/Gateway: block `device.token.rotate` from minting operator scopes broader than the caller session already holds, closing the critical paired-device token privilege escalation reported as GHSA-4jpw-hj22-2xmc. - Security/system.run: bind approved `bun` and `deno run` script operands to on-disk file snapshots so post-approval script rewrites are denied before execution. - Skills/download installs: pin the validated per-skill tools root before writing downloaded archives, so rebinding the lexical tools path cannot redirect download writes outside the intended tools directory. Thanks @tdjackey. +- Control UI/Debug: replace the Manual RPC free-text method field with a sorted dropdown sourced from gateway-advertised methods, and stack the form vertically for narrower layouts. (#14967) thanks @rixau. +- Auth/profile resolution: log debug details when auto-discovered auth profiles fail during provider API-key resolution, so `--debug` output surfaces the real refresh/keychain/credential-store failure instead of only the generic missing-key message. (#41271) thanks @he-yufeng. +- ACP/cancel scoping: scope `chat.abort` and shared-session ACP event routing by `runId` so one session cannot cancel or consume another session's run when they share the same gateway session key. (#41331) Thanks @pejmanjohn. +- SecretRef/models: harden custom/provider secret persistence and reuse across models.json snapshots, merge behavior, runtime headers, and secret audits. (#42554) Thanks @joshavant. +- macOS/browser proxy: serialize non-GET browser proxy request bodies through `AnyCodable.foundationValue` so nested JSON bodies no longer crash the macOS app with `Invalid type in JSON write (__SwiftValue)`. (#43069) Thanks @Effet. +- CLI/skills tables: keep terminal table borders aligned for wide graphemes, use full reported terminal width, and switch a few ambiguous skill icons to Terminal-safe emoji so `openclaw skills` renders more consistently in Terminal.app and iTerm. 
Thanks @vincentkoc. +- Memory/Gemini: normalize returned Gemini embeddings across direct query, direct batch, and async batch paths so memory search uses consistent vector handling for Gemini too. (#43409) Thanks @gumadeiras. +- Agents/failover: recognize additional serialized network errno strings plus `EHOSTDOWN` and `EPIPE` structured codes so transient transport failures trigger timeout failover more reliably. (#42830) Thanks @jnMetaCode. +- Telegram/model picker: make inline model button selections persist the chosen session model correctly, clear overrides when selecting the configured default, and include effective fallback models in `/models` button validation. (#40105) Thanks @avirweb. +- Agents/embedded runner: carry provider-observed overflow token counts into compaction so overflow retries and diagnostics use the rejected live prompt size instead of only transcript estimates. (#40357) thanks @rabsef-bicrym. +- Agents/compaction transcript updates: emit a transcript-update event immediately after successful embedded compaction so downstream listeners observe the post-compact transcript without waiting for a later write. (#25558) thanks @rodrigouroz. +- Agents/sessions_spawn: use the target agent workspace for cross-agent spawned runs instead of inheriting the caller workspace, so child sessions load the correct workspace-scoped instructions and persona files. (#40176) Thanks @moshehbenavraham. ## 2026.3.7 @@ -149,6 +453,7 @@ Docs: https://docs.openclaw.ai - Onboarding/API key input hardening: strip non-Latin1 Unicode artifacts from normalized secret input (while preserving Latin-1 content and internal spaces) so malformed copied API keys cannot trigger HTTP header `ByteString` construction crashes; adds regression coverage for shared normalization and MiniMax auth header usage. (#24496) Thanks @fa6maalassaf. 
- Kimi Coding/Anthropic tools compatibility: normalize `anthropic-messages` tool payloads to OpenAI-style `tools[].function` + compatible `tool_choice` when targeting Kimi Coding endpoints, restoring tool-call workflows that regressed after v2026.3.2. (#37038) Thanks @mochimochimochi-hub. - Heartbeat/workspace-path guardrails: append explicit workspace `HEARTBEAT.md` path guidance (and `docs/heartbeat.md` avoidance) to heartbeat prompts so heartbeat runs target workspace checklists reliably across packaged install layouts. (#37037) Thanks @stofancy. +- Node/system.run approvals: bind approval prompts to the exact executed argv text and show shell payload only as a secondary preview, closing basename-spoofed wrapper approval mismatches. Thanks @tdjackey. - Subagents/kill-complete announce race: when a late `subagent-complete` lifecycle event arrives after an earlier kill marker, clear stale kill suppression/cleanup flags and re-run announce cleanup so finished runs no longer get silently swallowed. (#37024) Thanks @cmfinlan. - Agents/tool-result cleanup timeout hardening: on embedded runner teardown idle timeouts, clear pending tool-call state without persisting synthetic `missing tool result` entries, preventing timeout cleanups from poisoning follow-up turns; adds regression coverage for timeout clear-vs-flush behavior. (#37081) Thanks @Coyote-Den. - Agents/openai-completions stream timeout hardening: ensure runtime undici global dispatchers use extended streaming body/header timeouts (including env-proxy dispatcher mode) before embedded runs, reducing forced mid-stream `terminated` failures on long generations; adds regression coverage for dispatcher selection and idempotent reconfiguration. (#9708) Thanks @scottchguard. @@ -440,6 +745,9 @@ Docs: https://docs.openclaw.ai - Control UI/Telegram sender labels: preserve inbound sender labels in sanitized chat history so dashboard user-message groups split correctly and show real group-member names instead of `You`. 
(#39414) Thanks @obviyus. - Agents/failover 402 recovery: keep temporary spend-limit `402` payloads retryable, preserve explicit insufficient-credit billing detection even in long provider payloads, and allow throttled billing-cooldown probes so single-provider setups can recover instead of staying locked out. (#38533) Thanks @xialonglee. - Browser/config schema: accept `browser.profiles.*.driver: "openclaw"` while preserving legacy `"clawd"` compatibility in validated config. (#39374; based on #35621) Thanks @gambletan and @ingyukoh. +- Memory flush/bootstrap file protection: restrict memory-flush runs to append-only `read`/`write` tools and route host-side memory appends through root-enforced safe file handles so flush turns cannot overwrite bootstrap files via `exec` or unsafe raw rewrites. (#38574) Thanks @frankekn. +- Mattermost/DM media uploads: resolve bare 26-character Mattermost IDs user-first for direct messages so media sends no longer fail with `403 Forbidden` when targets are configured as unprefixed user IDs. (#29925) Thanks @teconomix. +- Voice-call/OpenAI TTS config parity: add missing `speed`, `instructions`, and `baseUrl` fields to the OpenAI TTS config schema and gate `instructions` to supported models so voice-call overrides validate and route cleanly through core TTS. (#39226) Thanks @ademczuk. ## 2026.3.2 @@ -947,6 +1255,7 @@ Docs: https://docs.openclaw.ai - Browser/Navigate: resolve the correct `targetId` in navigate responses after renderer swaps. (#25326) Thanks @stone-jin and @vincentkoc. - FS/Sandbox workspace boundaries: add a dedicated `outside-workspace` safe-open error code for root-escape checks, and propagate specific outside-workspace messages across edit/browser/media consumers instead of generic not-found/invalid-path fallbacks. (#29715) Thanks @YuzuruS. 
- Diagnostics/Stuck session signal: add configurable stuck-session warning threshold via `diagnostics.stuckSessionWarnMs` (default 120000ms) to reduce false-positive warnings on long multi-tool turns. (#31032) +- Agents/error classification: check billing errors before context overflow heuristics in the agent runner catch block so spend-limit and quota errors show the billing-specific message instead of being misclassified as "Context overflow: prompt too large". (#40409) Thanks @ademczuk. ## 2026.2.26 @@ -3002,7 +3311,7 @@ Docs: https://docs.openclaw.ai - Agents: add CLI log hint to "agent failed before reply" messages. (#1550) Thanks @sweepies. - Agents: warn and ignore tool allowlists that only reference unknown or unloaded plugin tools. (#1566) - Agents: treat plugin-only tool allowlists as opt-ins; keep core tools enabled. (#1467) -- Agents: honor enqueue overrides for embedded runs to avoid queue deadlocks in tests. (commit 084002998) +- Agents: honor enqueue overrides for embedded runs to avoid queue deadlocks in tests. (#45459) Thanks @LyttonFeng and @vincentkoc. - Slack: honor open groupPolicy for unlisted channels in message + slash gating. (#1563) Thanks @itsjaydesu. - Discord: limit autoThread mention bypass to bot-owned threads; keep ack reactions mention-gated. (#1511) Thanks @pvoo. - Discord: retry rate-limited allowlist resolution + command deploy to avoid gateway crashes. 
(commit f70ac0c7c) @@ -3919,6 +4228,7 @@ Thanks @AlexMikhalev, @CoreyH, @John-Rood, @KrauseFx, @MaudeBot, @Nachx639, @Nic - Gateway/Daemon/Doctor: atomic config writes; repair gateway service entrypoint + install switches; non-interactive legacy migrations; systemd unit alignment + KillMode=process; node bridge keepalive/pings; Launch at Login persistence; bundle MoltbotKit resources + Swift 6.2 compat dylib; relay version check + remove smoke test; regen Swift GatewayModels + keep agent provider string; cron jobId alias + channel alias migration + main session key normalization; heartbeat Telegram accountId resolution; avoid WhatsApp fallback for internal runs; gateway listener error wording; serveBaseUrl param; honor gateway --dev; fix wide-area discovery updates; align agents.defaults schema; provider account metadata in daemon status; refresh Carbon patch for gateway fixes; restore doctor prompter initialValue handling. - Control UI/TUI: persist per-session verbose off + hide tool cards; logs tab opens at bottom; relative asset paths + landing cleanup; session labels lookup/persistence; stop pinning main session in recents; start logs at bottom; TUI status bar refresh + timeout handling + hide reasoning label when off. - Onboarding/Configure: QuickStart single-select provider picker; avoid Codex CLI false-expiry warnings; clarify WhatsApp owner prompt; fix Minimax hosted onboarding (agents.defaults + msteams heartbeat target); remove configure Control UI prompt; honor gateway --dev flag. +- Agent loop: guard overflow compaction throws and restore compaction hooks for engine-owned context engines. (#41361) — thanks @davidrudduck ### Maintenance diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1127d7dc791..87ccbeff4ef 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,7 +23,7 @@ Welcome to the lobster tank! 
🦞 - **Jos** - Telegram, API, Nix mode - GitHub: [@joshp123](https://github.com/joshp123) · X: [@jjpcodes](https://x.com/jjpcodes) -- **Ayaan Zaidi** - Telegram subsystem, iOS app +- **Ayaan Zaidi** - Telegram subsystem, Android app - GitHub: [@obviyus](https://github.com/obviyus) · X: [@0bviyus](https://x.com/0bviyus) - **Tyler Yust** - Agents/subagents, cron, BlueBubbles, macOS app @@ -73,6 +73,9 @@ Welcome to the lobster tank! 🦞 - **Robin Waslander** - Security, PR triage, bug fixes - GitHub: [@hydro13](https://github.com/hydro13) · X: [@Robin_waslander](https://x.com/Robin_waslander) +- **Tengji (George) Zhang** - Chinese model APIs, cloud, pi + - GitHub: [@odysseus0](https://github.com/odysseus0) · X: [@odysseus0z](https://x.com/odysseus0z) + ## How to Contribute 1. **Bugs & small fixes** → Open a PR! @@ -83,11 +86,13 @@ Welcome to the lobster tank! 🦞 - Test locally with your OpenClaw instance - Run tests: `pnpm build && pnpm check && pnpm test` +- If you have access to Codex, run `codex review --base origin/main` locally before opening or updating your PR. Treat this as the current highest standard of AI review, even if GitHub Codex review also runs. 
- Ensure CI checks pass - Keep PRs focused (one thing per PR; do not mix unrelated concerns) - Describe what & why - Reply to or resolve bot review conversations you addressed before asking for review again - **Include screenshots** — one showing the problem/before, one showing the fix/after (for UI or visual changes) +- Use American English spelling and grammar in code, comments, docs, and UI strings ## Review Conversations Are Author-Owned @@ -96,6 +101,8 @@ If a review bot leaves review conversations on your PR, you are expected to hand - Resolve the conversation yourself once the code or explanation fully addresses the bot's concern - Reply and leave it open only when you need maintainer or reviewer judgment - Do not leave "fixed" bot review conversations for maintainers to clean up for you +- If Codex leaves comments, address every relevant one or resolve it with a short explanation when it is not applicable to your change +- If GitHub Codex review does not trigger for some reason, run `codex review --base origin/main` locally anyway and treat that output as required review work This applies to both human-authored and AI-assisted PRs. @@ -124,6 +131,7 @@ Please include in your PR: - [ ] Note the degree of testing (untested / lightly tested / fully tested) - [ ] Include prompts or session logs if possible (super helpful!) - [ ] Confirm you understand what the code does +- [ ] If you have access to Codex, run `codex review --base origin/main` locally and address the findings before asking for review - [ ] Resolve or reply to bot review conversations after you address them AI PRs are first-class citizens here. We just want transparency so reviewers know what to look for. If you are using an LLM coding agent, instruct it to resolve bot review conversations it has addressed instead of leaving them for maintainers. 
diff --git a/Dockerfile b/Dockerfile index d6923365b4b..57a3440f385 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,14 +14,14 @@ # Slim (bookworm-slim): docker build --build-arg OPENCLAW_VARIANT=slim . ARG OPENCLAW_EXTENSIONS="" ARG OPENCLAW_VARIANT=default -ARG OPENCLAW_NODE_BOOKWORM_IMAGE="node:22-bookworm@sha256:b501c082306a4f528bc4038cbf2fbb58095d583d0419a259b2114b5ac53d12e9" -ARG OPENCLAW_NODE_BOOKWORM_DIGEST="sha256:b501c082306a4f528bc4038cbf2fbb58095d583d0419a259b2114b5ac53d12e9" -ARG OPENCLAW_NODE_BOOKWORM_SLIM_IMAGE="node:22-bookworm-slim@sha256:9c2c405e3ff9b9afb2873232d24bb06367d649aa3e6259cbe314da59578e81e9" -ARG OPENCLAW_NODE_BOOKWORM_SLIM_DIGEST="sha256:9c2c405e3ff9b9afb2873232d24bb06367d649aa3e6259cbe314da59578e81e9" +ARG OPENCLAW_NODE_BOOKWORM_IMAGE="node:24-bookworm@sha256:3a09aa6354567619221ef6c45a5051b671f953f0a1924d1f819ffb236e520e6b" +ARG OPENCLAW_NODE_BOOKWORM_DIGEST="sha256:3a09aa6354567619221ef6c45a5051b671f953f0a1924d1f819ffb236e520e6b" +ARG OPENCLAW_NODE_BOOKWORM_SLIM_IMAGE="node:24-bookworm-slim@sha256:e8e2e91b1378f83c5b2dd15f0247f34110e2fe895f6ca7719dbb780f929368eb" +ARG OPENCLAW_NODE_BOOKWORM_SLIM_DIGEST="sha256:e8e2e91b1378f83c5b2dd15f0247f34110e2fe895f6ca7719dbb780f929368eb" # Base images are pinned to SHA256 digests for reproducible builds. # Trade-off: digests must be updated manually when upstream tags move. -# To update, run: docker manifest inspect node:22-bookworm (or podman) +# To update, run: docker buildx imagetools inspect node:24-bookworm (or podman) # and replace the digest below with the current multi-arch manifest list entry. FROM ${OPENCLAW_NODE_BOOKWORM_IMAGE} AS ext-deps @@ -39,8 +39,18 @@ RUN mkdir -p /out && \ # ── Stage 2: Build ────────────────────────────────────────────── FROM ${OPENCLAW_NODE_BOOKWORM_IMAGE} AS build -# Install Bun (required for build scripts) -RUN curl -fsSL https://bun.sh/install | bash +# Install Bun (required for build scripts). 
Retry the whole bootstrap flow to +# tolerate transient 5xx failures from bun.sh/GitHub during CI image builds. +RUN set -eux; \ + for attempt in 1 2 3 4 5; do \ + if curl --retry 5 --retry-all-errors --retry-delay 2 -fsSL https://bun.sh/install | bash; then \ + break; \ + fi; \ + if [ "$attempt" -eq 5 ]; then \ + exit 1; \ + fi; \ + sleep $((attempt * 2)); \ + done ENV PATH="/root/.bun/bin:${PATH}" RUN corepack enable @@ -92,12 +102,12 @@ RUN CI=true pnpm prune --prod && \ # ── Runtime base images ───────────────────────────────────────── FROM ${OPENCLAW_NODE_BOOKWORM_IMAGE} AS base-default ARG OPENCLAW_NODE_BOOKWORM_DIGEST -LABEL org.opencontainers.image.base.name="docker.io/library/node:22-bookworm" \ +LABEL org.opencontainers.image.base.name="docker.io/library/node:24-bookworm" \ org.opencontainers.image.base.digest="${OPENCLAW_NODE_BOOKWORM_DIGEST}" FROM ${OPENCLAW_NODE_BOOKWORM_SLIM_IMAGE} AS base-slim ARG OPENCLAW_NODE_BOOKWORM_SLIM_DIGEST -LABEL org.opencontainers.image.base.name="docker.io/library/node:22-bookworm-slim" \ +LABEL org.opencontainers.image.base.name="docker.io/library/node:24-bookworm-slim" \ org.opencontainers.image.base.digest="${OPENCLAW_NODE_BOOKWORM_SLIM_DIGEST}" # ── Stage 3: Runtime ──────────────────────────────────────────── @@ -122,6 +132,7 @@ WORKDIR /app RUN --mount=type=cache,id=openclaw-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,id=openclaw-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \ apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get upgrade -y --no-install-recommends && \ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ procps hostname curl git openssl @@ -141,7 +152,15 @@ COPY --from=runtime-assets --chown=node:node /app/docs ./docs ENV COREPACK_HOME=/usr/local/share/corepack RUN install -d -m 0755 "$COREPACK_HOME" && \ corepack enable && \ - corepack prepare "$(node -p "require('./package.json').packageManager")" --activate && \ + for attempt 
in 1 2 3 4 5; do \ + if corepack prepare "$(node -p "require('./package.json').packageManager")" --activate; then \ + break; \ + fi; \ + if [ "$attempt" -eq 5 ]; then \ + exit 1; \ + fi; \ + sleep $((attempt * 2)); \ + done && \ chmod -R a+rX "$COREPACK_HOME" # Install additional system packages needed by your skills or extensions. @@ -209,7 +228,7 @@ RUN ln -sf /app/openclaw.mjs /usr/local/bin/openclaw \ ENV NODE_ENV=production # Security hardening: Run as non-root user -# The node:22-bookworm image includes a 'node' user (uid 1000) +# The node:24-bookworm image includes a 'node' user (uid 1000) # This reduces the attack surface by preventing container escape via root privileges USER node diff --git a/Dockerfile.sandbox b/Dockerfile.sandbox index 8b50c7a6745..37cdab5fcd2 100644 --- a/Dockerfile.sandbox +++ b/Dockerfile.sandbox @@ -7,6 +7,7 @@ ENV DEBIAN_FRONTEND=noninteractive RUN --mount=type=cache,id=openclaw-sandbox-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,id=openclaw-sandbox-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \ apt-get update \ + && apt-get upgrade -y --no-install-recommends \ && apt-get install -y --no-install-recommends \ bash \ ca-certificates \ diff --git a/Dockerfile.sandbox-browser b/Dockerfile.sandbox-browser index f04e4a82a62..e8e8bb59f84 100644 --- a/Dockerfile.sandbox-browser +++ b/Dockerfile.sandbox-browser @@ -7,6 +7,7 @@ ENV DEBIAN_FRONTEND=noninteractive RUN --mount=type=cache,id=openclaw-sandbox-bookworm-apt-cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,id=openclaw-sandbox-bookworm-apt-lists,target=/var/lib/apt,sharing=locked \ apt-get update \ + && apt-get upgrade -y --no-install-recommends \ && apt-get install -y --no-install-recommends \ bash \ ca-certificates \ diff --git a/Dockerfile.sandbox-common b/Dockerfile.sandbox-common index 39eaa3692b4..fba29a5df3d 100644 --- a/Dockerfile.sandbox-common +++ b/Dockerfile.sandbox-common @@ -24,6 +24,7 @@ ENV 
PATH=${BUN_INSTALL_DIR}/bin:${BREW_INSTALL_DIR}/bin:${BREW_INSTALL_DIR}/sbin RUN --mount=type=cache,id=openclaw-sandbox-common-apt-cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,id=openclaw-sandbox-common-apt-lists,target=/var/lib/apt,sharing=locked \ apt-get update \ + && apt-get upgrade -y --no-install-recommends \ && apt-get install -y --no-install-recommends ${PACKAGES} RUN if [ "${INSTALL_PNPM}" = "1" ]; then npm install -g pnpm; fi diff --git a/SECURITY.md b/SECURITY.md index 5f1e8f0cb9e..bef814525a5 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -37,6 +37,7 @@ For fastest triage, include all of the following: - Exact vulnerable path (`file`, function, and line range) on a current revision. - Tested version details (OpenClaw version and/or commit SHA). - Reproducible PoC against latest `main` or latest released version. +- If the claim targets a released version, evidence from the shipped tag and published artifact/package for that exact version (not only `main`). - Demonstrated impact tied to OpenClaw's documented trust boundaries. - For exposed-secret reports: proof the credential is OpenClaw-owned (or grants access to OpenClaw-operated infrastructure/services). - Explicit statement that the report does not rely on adversarial operators sharing one gateway host/config. @@ -55,6 +56,7 @@ These are frequently reported but are typically closed with no code change: - Authorized user-triggered local actions presented as privilege escalation. Example: an allowlisted/owner sender running `/export-session /absolute/path.html` to write on the host. In this trust model, authorized user actions are trusted host actions unless you demonstrate an auth/sandbox/boundary bypass. - Reports that only show a malicious plugin executing privileged actions after a trusted operator installs/enables it. - Reports that assume per-user multi-tenant authorization on a shared gateway host/config. 
+- Reports that treat the Gateway HTTP compatibility endpoints (`POST /v1/chat/completions`, `POST /v1/responses`) as if they implemented scoped operator auth (`operator.write` vs `operator.admin`). These endpoints authenticate the shared Gateway bearer secret/password and are documented full operator-access surfaces, not per-user/per-scope boundaries. - Reports that only show differences in heuristic detection/parity (for example obfuscation-pattern detection on one exec path but not another, such as `node.invoke -> system.run` parity gaps) without demonstrating bypass of auth, approvals, allowlist enforcement, sandboxing, or other documented trust boundaries. - ReDoS/DoS claims that require trusted operator configuration input (for example catastrophic regex in `sessionFilter` or `logging.redactPatterns`) without a trust-boundary bypass. - Archive/install extraction claims that require pre-existing local filesystem priming in trusted state (for example planting symlink/hardlink aliases under destination directories such as skills/tools paths) without showing an untrusted path that can create/control that primitive. @@ -65,6 +67,7 @@ These are frequently reported but are typically closed with no code change: - Discord inbound webhook signature findings for paths not used by this repo's Discord integration. - Claims that Microsoft Teams `fileConsent/invoke` `uploadInfo.uploadUrl` is attacker-controlled without demonstrating one of: auth boundary bypass, a real authenticated Teams/Bot Framework event carrying attacker-chosen URL, or compromise of the Microsoft/Bot trust path. - Scanner-only claims against stale/nonexistent paths, or claims without a working repro. +- Reports that restate an already-fixed issue against later released versions without showing the vulnerable path still exists in the shipped tag or published artifact for that later version. 
### Duplicate Report Handling @@ -90,6 +93,7 @@ When patching a GHSA via `gh api`, include `X-GitHub-Api-Version: 2022-11-28` (o OpenClaw does **not** model one gateway as a multi-tenant, adversarial user boundary. - Authenticated Gateway callers are treated as trusted operators for that gateway instance. +- The HTTP compatibility endpoints (`POST /v1/chat/completions`, `POST /v1/responses`) are in that same trusted-operator bucket. Passing Gateway bearer auth there is equivalent to operator access for that gateway; they do not implement a narrower `operator.write` vs `operator.admin` trust split. - Session identifiers (`sessionKey`, session IDs, labels) are routing controls, not per-user authorization boundaries. - If one operator can view data from another operator on the same gateway, that is expected in this trust model. - OpenClaw can technically run multiple gateway instances on one machine, but recommended operations are clean separation by trust boundary. @@ -125,6 +129,7 @@ Plugins/extensions are part of OpenClaw's trusted computing base for a gateway. - Any report whose only claim is that an operator-enabled `dangerous*`/`dangerously*` config option weakens defaults (these are explicit break-glass tradeoffs by design) - Reports that depend on trusted operator-supplied configuration values to trigger availability impact (for example custom regex patterns). These may still be fixed as defense-in-depth hardening, but are not security-boundary bypasses. - Reports whose only claim is heuristic/parity drift in command-risk detection (for example obfuscation-pattern checks) across exec surfaces, without a demonstrated trust-boundary bypass. These are hardening-only findings and are not vulnerabilities; triage may close them as `invalid`/`no-action` or track them separately as low/informational hardening. 
+- Reports whose only claim is that exec approvals do not semantically model every interpreter/runtime loader form, subcommand, flag combination, package script, or transitive module/config import. Exec approvals bind exact request context and best-effort direct local file operands; they are not a complete semantic model of everything a runtime may load. - Exposed secrets that are third-party/user-controlled credentials (not OpenClaw-owned and not granting access to OpenClaw-operated infrastructure/services) without demonstrated OpenClaw impact - Reports whose only claim is host-side exec when sandbox runtime is disabled/unavailable (documented default behavior in the trusted-operator model), without a boundary bypass. - Reports whose only claim is that a platform-provided upload destination URL is untrusted (for example Microsoft Teams `fileConsent/invoke` `uploadInfo.uploadUrl`) without proving attacker control in an authenticated production flow. @@ -144,6 +149,7 @@ OpenClaw security guidance assumes: OpenClaw's security model is "personal assistant" (one trusted operator, potentially many agents), not "shared multi-tenant bus." - If multiple people can message the same tool-enabled agent (for example a shared Slack workspace), they can all steer that agent within its granted permissions. +- Non-owner sender status only affects owner-only tools/commands. If a non-owner can still access a non-owner-only tool on that same agent (for example `canvas`), that is within the granted tool boundary unless the report demonstrates an auth, policy, allowlist, approval, or sandbox bypass. - Session or memory scoping reduces context bleed, but does **not** create per-user host authorization boundaries. - For mixed-trust or adversarial users, isolate by OS user/host/gateway and use separate credentials per boundary. - A company-shared agent can be a valid setup when users are in the same trust boundary and the agent is strictly business-only. 
@@ -165,6 +171,7 @@ OpenClaw separates routing from execution, but both remain inside the same opera - **Gateway** is the control plane. If a caller passes Gateway auth, they are treated as a trusted operator for that Gateway. - **Node** is an execution extension of the Gateway. Pairing a node grants operator-level remote capability on that node. - **Exec approvals** (allowlist/ask UI) are operator guardrails to reduce accidental command execution, not a multi-tenant authorization boundary. +- Exec approvals bind exact command/cwd/env context and, when OpenClaw can identify one concrete local script/file operand, that file snapshot too. This is best-effort integrity hardening, not a complete semantic model of every interpreter/runtime loader path. - Differences in command-risk warning heuristics between exec surfaces (`gateway`, `node`, `sandbox`) do not, by themselves, constitute a security-boundary bypass. - For untrusted-user isolation, split by trust boundary: separate gateways and separate OS users/hosts per boundary. diff --git a/Swabble/Sources/SwabbleKit/WakeWordGate.swift b/Swabble/Sources/SwabbleKit/WakeWordGate.swift index 27c952a8d1b..1a1479b630b 100644 --- a/Swabble/Sources/SwabbleKit/WakeWordGate.swift +++ b/Swabble/Sources/SwabbleKit/WakeWordGate.swift @@ -101,25 +101,19 @@ public enum WakeWordGate { } public static func commandText( - transcript: String, + transcript _: String, segments: [WakeWordSegment], triggerEndTime: TimeInterval) -> String { let threshold = triggerEndTime + 0.001 + var commandWords: [String] = [] + commandWords.reserveCapacity(segments.count) for segment in segments where segment.start >= threshold { - if normalizeToken(segment.text).isEmpty { continue } - if let range = segment.range { - let slice = transcript[range.lowerBound...] 
- return String(slice).trimmingCharacters(in: Self.whitespaceAndPunctuation) - } - break + let normalized = normalizeToken(segment.text) + if normalized.isEmpty { continue } + commandWords.append(segment.text) } - - let text = segments - .filter { $0.start >= threshold && !normalizeToken($0.text).isEmpty } - .map(\.text) - .joined(separator: " ") - return text.trimmingCharacters(in: Self.whitespaceAndPunctuation) + return commandWords.joined(separator: " ").trimmingCharacters(in: Self.whitespaceAndPunctuation) } public static func matchesTextOnly(text: String, triggers: [String]) -> Bool { diff --git a/Swabble/Tests/SwabbleKitTests/WakeWordGateTests.swift b/Swabble/Tests/SwabbleKitTests/WakeWordGateTests.swift index 5cc283c35ae..7e5b4abdd74 100644 --- a/Swabble/Tests/SwabbleKitTests/WakeWordGateTests.swift +++ b/Swabble/Tests/SwabbleKitTests/WakeWordGateTests.swift @@ -46,6 +46,25 @@ import Testing let match = WakeWordGate.match(transcript: transcript, segments: segments, config: config) #expect(match?.command == "do it") } + + @Test func commandTextHandlesForeignRangeIndices() { + let transcript = "hey clawd do thing" + let other = "do thing" + let foreignRange = other.range(of: "do") + let segments = [ + WakeWordSegment(text: "hey", start: 0.0, duration: 0.1, range: transcript.range(of: "hey")), + WakeWordSegment(text: "clawd", start: 0.2, duration: 0.1, range: transcript.range(of: "clawd")), + WakeWordSegment(text: "do", start: 0.9, duration: 0.1, range: foreignRange), + WakeWordSegment(text: "thing", start: 1.1, duration: 0.1, range: nil), + ] + + let command = WakeWordGate.commandText( + transcript: transcript, + segments: segments, + triggerEndTime: 0.3) + + #expect(command == "do thing") + } } private func makeSegments( diff --git a/appcast.xml b/appcast.xml index 4bceb205614..c1919972b22 100644 --- a/appcast.xml +++ b/appcast.xml @@ -2,6 +2,174 @@ OpenClaw + + 2026.3.13 + Sat, 14 Mar 2026 05:19:48 +0000 + 
https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml + 2026031390 + 2026.3.13 + 15.0 + OpenClaw 2026.3.13 +

Changes

+
    +
  • Android/chat settings: redesign the chat settings sheet with grouped device and media sections, refresh the Connect and Voice tabs, and tighten the chat composer/session header for a denser mobile layout. (#44894) Thanks @obviyus.
  • +
  • iOS/onboarding: add a first-run welcome pager before gateway setup, stop auto-opening the QR scanner, and show /pair qr instructions on the connect step. (#45054) Thanks @ngutman.
  • +
  • Browser/existing-session: add an official Chrome DevTools MCP attach mode for signed-in live Chrome sessions, with docs for chrome://inspect/#remote-debugging enablement and direct backlinks to Chrome’s own setup guides.
  • +
  • Browser/agents: add built-in profile="user" for the logged-in host browser and profile="chrome-relay" for the extension relay, so agent browser calls can prefer the real signed-in browser without the extra browserSession selector.
  • +
  • Browser/act automation: add batched actions, selector targeting, and delayed clicks for browser act requests with normalized batch dispatch. Thanks @vincentkoc.
  • +
  • Docker/timezone override: add OPENCLAW_TZ so docker-setup.sh can pin gateway and CLI containers to a chosen IANA timezone instead of inheriting the daemon default. (#34119) Thanks @Lanfei.
  • +
  • Dependencies/pi: bump @mariozechner/pi-agent-core, @mariozechner/pi-ai, @mariozechner/pi-coding-agent, and @mariozechner/pi-tui to 0.58.0.
  • +
+

Fixes

+
    +
  • Dashboard/chat UI: stop reloading full chat history on every live tool result in dashboard v2 so tool-heavy runs no longer trigger UI freeze/re-render storms while the final event still refreshes persisted history. (#45541) Thanks @BunsDev.
  • +
  • Gateway/client requests: reject unanswered gateway RPC calls after a bounded timeout and clear their pending state, so stalled connections no longer leak hanging GatewayClient.request() promises indefinitely.
  • +
  • Build/plugin-sdk bundling: bundle plugin-sdk subpath entries in one shared build pass so published packages stop duplicating shared chunks and avoid the recent plugin-sdk memory blow-up. (#45426) Thanks @TarasShyn.
  • +
  • Ollama/reasoning visibility: stop promoting native thinking and reasoning fields into final assistant text so local reasoning models no longer leak internal thoughts in normal replies. (#45330) Thanks @xi7ang.
  • +
  • Android/onboarding QR scan: switch setup QR scanning to Google Code Scanner so onboarding uses a more reliable scanner instead of the legacy embedded ZXing flow. (#45021) Thanks @obviyus.
  • +
  • Browser/existing-session: harden driver validation and session lifecycle so transport errors trigger reconnects while tool-level errors preserve the session, and extract shared ARIA role sets to deduplicate Playwright and Chrome MCP snapshot paths. (#45682) Thanks @odysseus0.
  • +
  • Browser/existing-session: accept text-only list_pages and new_page responses from Chrome DevTools MCP so live-session tab discovery and new-tab open flows keep working when the server omits structured page metadata.
  • +
  • Control UI/insecure auth: preserve explicit shared token and password auth on plain-HTTP Control UI connects so LAN and reverse-proxy sessions no longer drop shared auth before the first WebSocket handshake. (#45088) Thanks @velvet-shark.
  • +
  • Gateway/session reset: preserve lastAccountId and lastThreadId across gateway session resets so replies keep routing back to the same account and thread after /reset. (#44773) Thanks @Lanfei.
  • +
  • macOS/onboarding: avoid self-restarting freshly bootstrapped launchd gateways and give new daemon installs longer to become healthy, so openclaw onboard --install-daemon no longer false-fails on slower Macs and fresh VM snapshots.
  • +
  • Gateway/status: add openclaw gateway status --require-rpc and clearer Linux non-interactive daemon-install failure reporting so automation can fail hard on probe misses instead of treating a printed RPC error as green.
  • +
  • macOS/exec approvals: respect per-agent exec approval settings in the gateway prompter, including allowlist fallback when the native prompt cannot be shown, so gateway-triggered system.run requests follow configured policy instead of always prompting or denying unexpectedly. (#13707) Thanks @sliekens.
  • +
  • Telegram/media downloads: thread the same direct or proxy transport policy into SSRF-guarded file fetches so inbound attachments keep working when Telegram falls back between env-proxy and direct networking. (#44639) Thanks @obviyus.
  • +
  • Telegram/inbound media IPv4 fallback: retry SSRF-guarded Telegram file downloads once with the same IPv4 fallback policy as Bot API calls so fresh installs on IPv6-broken hosts no longer fail to download inbound images.
  • +
  • Windows/gateway install: bound schtasks calls and fall back to the Startup-folder login item when task creation hangs, so native openclaw gateway install fails fast instead of wedging forever on broken Scheduled Task setups.
  • +
  • Windows/gateway stop: resolve Startup-folder fallback listeners from the installed gateway.cmd port, so openclaw gateway stop now actually kills fallback-launched gateway processes before restart.
  • +
  • Windows/gateway status: reuse the installed service command environment when reading runtime status, so startup-fallback gateways keep reporting the configured port and running state in gateway status --json instead of falling back to gateway port unknown.
  • +
  • Windows/gateway auth: stop attaching device identity on local loopback shared-token and password gateway calls, so native Windows agent replies no longer log stale device signature expired fallback noise before succeeding.
  • +
  • Discord/gateway startup: treat plain-text and transient /gateway/bot metadata fetch failures as transient startup errors so Discord gateway boot no longer crashes on unhandled rejections. (#44397) Thanks @jalehman.
  • +
  • Slack/probe: keep auth.test() bot and team metadata mapping stable while simplifying the probe result path. (#44775) Thanks @Cafexss.
  • +
  • Dashboard/chat UI: render oversized plain-text replies as normal paragraphs instead of capped gray code blocks, so long desktop chat responses stay readable without tab-switching refreshes.
  • +
  • Dashboard/chat UI: restore the chat-new-messages class on the New messages scroll pill so the button uses its existing compact styling instead of rendering as a full-screen SVG overlay. (#44856) Thanks @Astro-Han.
  • +
  • Gateway/Control UI: restore the operator-only device-auth bypass and classify browser connect failures so origin and device-identity problems no longer show up as auth errors in the Control UI and web chat. (#45512) Thanks @sallyom.
  • +
  • macOS/voice wake: stop crashing wake-word command extraction when speech segment ranges come from a different transcript instance.
  • +
  • Discord/allowlists: honor raw guild_id when hydrated guild objects are missing so allowlisted channels and threads like #maintainers no longer get false-dropped before channel allowlist checks.
  • +
  • macOS/runtime locator: require Node >=22.16.0 during macOS runtime discovery so the app no longer accepts Node versions that the main runtime guard rejects later. Thanks @sumleo.
  • +
  • Agents/custom providers: preserve blank API keys for loopback OpenAI-compatible custom providers by clearing the synthetic Authorization header at runtime, while keeping explicit apiKey and oauth/token config from silently downgrading into fake bearer auth. (#45631) Thanks @xinhuagu.
  • +
  • Models/google-vertex Gemini flash-lite normalization: apply existing bare-ID preview normalization to google-vertex model refs and provider configs so google-vertex/gemini-3.1-flash-lite resolves as gemini-3.1-flash-lite-preview. (#42435) Thanks @scoootscooob.
  • +
  • iMessage/remote attachments: reject unsafe remote attachment paths before spawning SCP, so sender-controlled filenames can no longer inject shell metacharacters into remote media staging. Thanks @lintsinghua.
  • +
  • Telegram/webhook auth: validate the Telegram webhook secret before reading or parsing request bodies, so unauthenticated requests are rejected immediately instead of consuming up to 1 MB first. Thanks @space08.
  • +
  • Security/device pairing: make bootstrap setup codes single-use so pending device pairing requests cannot be silently replayed and widened to admin before approval. Thanks @tdjackey.
  • +
  • Security/external content: strip zero-width and soft-hyphen marker-splitting characters during boundary sanitization so spoofed EXTERNAL_UNTRUSTED_CONTENT markers fall back to the existing hardening path instead of bypassing marker normalization.
  • +
  • Security/exec approvals: unwrap more pnpm runtime forms during approval binding, including pnpm --reporter ... exec and direct pnpm node file runs, with matching regression coverage and docs updates.
  • +
  • Security/exec approvals: fail closed for Perl -M and -I approval flows so preload and load-path module resolution stays outside approval-backed runtime execution unless the operator uses a broader explicit trust path.
  • +
  • Security/exec approvals: recognize PowerShell -File and -f wrapper forms during inline-command extraction so approval and command-analysis paths treat file-based PowerShell launches like the existing -Command variants.
  • +
  • Security/exec approvals: unwrap env dispatch wrappers inside shell-segment allowlist resolution on macOS so env FOO=bar /path/to/bin resolves against the effective executable instead of the wrapper token.
  • +
  • Security/exec approvals: treat backslash-newline as shell line continuation during macOS shell-chain parsing so line-continued $( substitutions fail closed instead of slipping past command-substitution checks.
  • +
  • Security/exec approvals: bind macOS skill auto-allow trust to both executable name and resolved path so same-basename binaries no longer inherit trust from unrelated skill bins.
  • +
  • Build/plugin-sdk bundling: bundle plugin-sdk subpath entries in one shared build pass so published packages stop duplicating shared chunks and avoid the recent plugin-sdk memory blow-up. (#45426) Thanks @TarasShyn.
  • +
  • Cron/isolated sessions: route nested cron-triggered embedded runner work onto the nested lane so isolated cron jobs no longer deadlock when compaction or other queued inner work runs. Thanks @vincentkoc.
  • +
  • Agents/OpenAI-compatible compat overrides: respect explicit user models[].compat opt-ins for non-native openai-completions endpoints so usage-in-streaming capability overrides no longer get forced off when the endpoint actually supports them. (#44432) Thanks @cheapestinference.
  • +
  • Agents/Azure OpenAI startup prompts: rephrase the built-in /new, /reset, and post-compaction startup instruction so Azure OpenAI deployments no longer hit HTTP 400 false positives from the content filter. (#43403) Thanks @xingsy97.
  • +
  • Agents/memory bootstrap: load only one root memory file, preferring MEMORY.md and using memory.md as a fallback, so case-insensitive Docker mounts no longer inject duplicate memory context. (#26054) Thanks @Lanfei.
  • +
  • Agents/compaction: compare post-compaction token sanity checks against full-session pre-compaction totals and skip the check when token estimation fails, so sessions with large bootstrap context keep real token counts instead of falling back to unknown. (#28347) Thanks @efe-arv.
  • +
  • Agents/compaction: preserve safeguard compaction summary language continuity via default and configurable custom instructions so persona drift is reduced after auto-compaction. (#10456) Thanks @keepitmello.
  • +
  • Agents/tool warnings: distinguish gated core tools like apply_patch from plugin-only unknown entries in tools.profile warnings, so unavailable core tools now report current runtime/provider/model/config gating instead of suggesting a missing plugin.
  • +
  • Config/validation: accept documented agents.list[].params per-agent overrides in strict config validation so openclaw config validate no longer rejects runtime-supported cacheRetention, temperature, and maxTokens settings. (#41171) Thanks @atian8179.
  • +
  • Config/web fetch: restore runtime validation for documented tools.web.fetch.readability and tools.web.fetch.firecrawl settings so valid web fetch configs no longer fail with unrecognized-key errors. (#42583) Thanks @stim64045-spec.
  • +
  • Signal/config validation: add channels.signal.groups schema support so per-group requireMention, tools, and toolsBySender overrides no longer get rejected during config validation. (#27199) Thanks @unisone.
  • +
  • Config/discovery: accept discovery.wideArea.domain in strict config validation so unicast DNS-SD gateway configs no longer fail with an unrecognized-key error. (#35615) Thanks @ingyukoh.
  • +
  • Telegram/media errors: redact Telegram file URLs before building media fetch errors so failed inbound downloads do not leak bot tokens into logs. Thanks @space08.
  • +
+

View full changelog

+]]>
+ +
+ + 2026.3.12 + Fri, 13 Mar 2026 04:25:50 +0000 + https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml + 2026031290 + 2026.3.12 + 15.0 + OpenClaw 2026.3.12 +

Changes

+
    +
  • Control UI/dashboard-v2: refresh the gateway dashboard with modular overview, chat, config, agent, and session views, plus a command palette, mobile bottom tabs, and richer chat tools like slash commands, search, export, and pinned messages. (#41503) Thanks @BunsDev.
  • +
  • OpenAI/GPT-5.4 fast mode: add configurable session-level fast toggles across /fast, TUI, Control UI, and ACP, with per-model config defaults and OpenAI/Codex request shaping.
  • +
  • Anthropic/Claude fast mode: map the shared /fast toggle and params.fastMode to direct Anthropic API-key service_tier requests, with live verification for both Anthropic and OpenAI fast-mode tiers.
  • +
  • Models/plugins: move Ollama, vLLM, and SGLang onto the provider-plugin architecture, with provider-owned onboarding, discovery, model-picker setup, and post-selection hooks so core provider wiring is more modular.
  • +
  • Docs/Kubernetes: add a starter K8s install path with raw manifests, Kind setup, and deployment docs. Thanks @sallyom, @dzianisv, and @egkristi.
  • +
  • Agents/subagents: add sessions_yield so orchestrators can end the current turn immediately, skip queued tool work, and carry a hidden follow-up payload into the next session turn. (#36537) Thanks @jriff.
  • +
  • Slack/agent replies: support channelData.slack.blocks in the shared reply delivery path so agents can send Block Kit messages through standard Slack outbound delivery. (#44592) Thanks @vincentkoc.
  • +
+

Fixes

+
    +
  • Security/device pairing: switch /pair and openclaw qr setup codes to short-lived bootstrap tokens so the next release no longer embeds shared gateway credentials in chat or QR pairing payloads. Thanks @lintsinghua.
  • +
  • Security/plugins: disable implicit workspace plugin auto-load so cloned repositories cannot execute workspace plugin code without an explicit trust decision. (GHSA-99qw-6mr3-36qr)(#44174) Thanks @lintsinghua and @vincentkoc.
  • +
  • Models/Kimi Coding: send anthropic-messages tools in native Anthropic format again so kimi-coding stops degrading tool calls into XML/plain-text pseudo invocations instead of real tool_use blocks. (#38669, #39907, #40552) Thanks @opriz.
  • +
  • TUI/chat log: reuse the active assistant message component for the same streaming run so openclaw tui no longer renders duplicate assistant replies. (#35364) Thanks @lisitan.
  • +
  • Telegram/model picker: make inline model button selections persist the chosen session model correctly, clear overrides when selecting the configured default, and include effective fallback models in /models button validation. (#40105) Thanks @avirweb.
  • +
  • Cron/proactive delivery: keep isolated direct cron sends out of the write-ahead resend queue so transient-send retries do not replay duplicate proactive messages after restart. (#40646) Thanks @openperf and @vincentkoc.
  • +
  • Models/Kimi Coding: send the built-in User-Agent: claude-code/0.1.0 header by default for kimi-coding while still allowing explicit provider headers to override it, so Kimi Code subscription auth can work without a local header-injection proxy. (#30099) Thanks @Amineelfarssi and @vincentkoc.
  • +
  • Models/OpenAI Codex Spark: keep gpt-5.3-codex-spark working on the openai-codex/* path via resolver fallbacks and clearer Codex-only handling, while continuing to suppress the stale direct openai/* Spark row that OpenAI rejects live.
  • +
  • Ollama/Kimi Cloud: apply the Moonshot Kimi payload compatibility wrapper to Ollama-hosted Kimi models like kimi-k2.5:cloud, so tool routing no longer breaks when thinking is enabled. (#41519) Thanks @vincentkoc.
  • +
  • Moonshot CN API: respect explicit baseUrl (api.moonshot.cn) in implicit provider resolution so platform.moonshot.cn API keys authenticate correctly instead of returning HTTP 401. (#33637) Thanks @chengzhichao-xydt.
  • +
  • Kimi Coding/provider config: respect explicit models.providers["kimi-coding"].baseUrl when resolving the implicit provider so custom Kimi Coding endpoints no longer get overwritten by the built-in default. (#36353) Thanks @2233admin.
  • +
  • Gateway/main-session routing: keep TUI and other mode:UI main-session sends on the internal surface when deliver is enabled, so replies no longer inherit the session's persisted Telegram/WhatsApp route. (#43918) Thanks @obviyus.
  • +
  • BlueBubbles/self-chat echo dedupe: drop reflected duplicate webhook copies only when a matching fromMe event was just seen for the same chat, body, and timestamp, preventing self-chat loops without broad webhook suppression. Related to #32166. (#38442) Thanks @vincentkoc.
  • +
  • iMessage/self-chat echo dedupe: drop reflected duplicate copies only when a matching is_from_me event was just seen for the same chat, text, and created_at, preventing self-chat loops without broad text-only suppression. Related to #32166. (#38440) Thanks @vincentkoc.
  • +
  • Subagents/completion announce retries: raise the default announce timeout to 90 seconds and stop retrying gateway-timeout failures for externally delivered completion announces, preventing duplicate user-facing completion messages after slow gateway responses. Fixes #41235. Thanks @vasujain00 and @vincentkoc.
  • +
  • Mattermost/block streaming: fix duplicate message delivery (one threaded, one top-level) when block streaming is active by excluding replyToId from the block reply dedup key and adding an explicit threading dock to the Mattermost plugin. (#41362) Thanks @mathiasnagler and @vincentkoc.
  • +
  • Mattermost/reply media delivery: pass agent-scoped mediaLocalRoots through shared reply delivery so allowed local files upload correctly from button, slash-command, and model-picker replies. (#44021) Thanks @LyleLiu666.
  • +
  • macOS/Reminders: add the missing NSRemindersUsageDescription to the bundled app so apple-reminders can trigger the system permission prompt from OpenClaw.app. (#8559) Thanks @dinakars777.
  • +
  • Gateway/session discovery: discover disk-only and retired ACP session stores under custom templated session.store roots so ACP reconciliation, session-id/session-label targeting, and run-id fallback keep working after restart. (#44176) Thanks @gumadeiras.
  • +
  • Plugins/env-scoped roots: fix plugin discovery/load caches and provenance tracking so same-process HOME/OPENCLAW_HOME changes no longer reuse stale plugin state or misreport ~/... plugins as untracked. (#44046) Thanks @gumadeiras.
  • +
  • Models/OpenRouter native ids: canonicalize native OpenRouter model keys across config writes, runtime lookups, fallback management, and models list --plain, and migrate legacy duplicated openrouter/openrouter/... config entries forward on write.
  • +
  • Windows/native update: make package installs use the npm update path instead of the git path, carry portable Git into native Windows updates, and mirror the installer's Windows npm env so openclaw update no longer dies early on missing git or node-llama-cpp download setup.
  • +
  • Sandbox/write: preserve pinned mutation-helper payload stdin so sandboxed write no longer reports success while creating empty files. (#43876) Thanks @glitch418x.
  • +
  • Security/exec approvals: escape invisible Unicode format characters in approval prompts so zero-width command text renders as visible \u{...} escapes instead of spoofing the reviewed command. (GHSA-pcqg-f7rg-xfvv)(#43687) Thanks @EkiXu and @vincentkoc.
  • +
  • Hooks/loader: fail closed when workspace hook paths cannot be resolved with realpath, so unreadable or broken internal hook paths are skipped instead of falling back to unresolved imports. (#44437) Thanks @vincentkoc.
  • +
  • Hooks/agent deliveries: dedupe repeated hook requests by optional idempotency key so webhook retries can reuse the first run instead of launching duplicate agent executions. (#44438) Thanks @vincentkoc.
  • +
  • Security/exec detection: normalize compatibility Unicode and strip invisible formatting code points before obfuscation checks so zero-width and fullwidth command tricks no longer suppress heuristic detection. (GHSA-9r3v-37xh-2cf6)(#44091) Thanks @wooluo and @vincentkoc.
  • +
  • Security/exec allowlist: preserve POSIX case sensitivity and keep ? within a single path segment so exact-looking allowlist patterns no longer overmatch executables across case or directory boundaries. (GHSA-f8r2-vg7x-gh8m)(#43798) Thanks @zpbrent and @vincentkoc.
  • +
  • Security/commands: require sender ownership for /config and /debug so authorized non-owner senders can no longer reach owner-only config and runtime debug surfaces. (GHSA-r7vr-gr74-94p8)(#44305) Thanks @tdjackey and @vincentkoc.
  • +
  • Security/gateway auth: clear unbound client-declared scopes on shared-token WebSocket connects so device-less shared-token operators cannot self-declare elevated scopes. (GHSA-rqpp-rjj8-7wv8)(#44306) Thanks @LUOYEcode and @vincentkoc.
  • +
  • Security/browser.request: block persistent browser profile create/delete routes from write-scoped browser.request so callers can no longer persist admin-only browser profile changes through the browser control surface. (GHSA-vmhq-cqm9-6p7q)(#43800) Thanks @tdjackey and @vincentkoc.
  • +
  • Security/agent: reject public spawned-run lineage fields and keep workspace inheritance on the internal spawned-session path so external agent callers can no longer override the gateway workspace boundary. (GHSA-2rqg-gjgv-84jm)(#43801) Thanks @tdjackey and @vincentkoc.
  • +
  • Security/session_status: enforce sandbox session-tree visibility and shared agent-to-agent access guards before reading or mutating target session state, so sandboxed subagents can no longer inspect parent session metadata or write parent model overrides via session_status. (GHSA-wcxr-59v9-rxr8)(#43754) Thanks @tdjackey and @vincentkoc.
  • +
  • Security/agent tools: mark nodes as explicitly owner-only and document/test that canvas remains a shared trusted-operator surface unless a real boundary bypass exists.
  • +
  • Security/exec approvals: fail closed for Ruby approval flows that use -r, --require, or -I so approval-backed commands no longer bind only the main script while extra local code-loading flags remain outside the reviewed file snapshot.
  • +
  • Security/device pairing: cap issued and verified device-token scopes to each paired device's approved scope baseline so stale or overbroad tokens cannot exceed approved access. (GHSA-2pwv-x786-56f8)(#43686) Thanks @tdjackey and @vincentkoc.
  • +
  • Docs/onboarding: align the legacy wizard reference and openclaw onboard command docs with the Ollama onboarding flow so all onboarding reference paths now document --auth-choice ollama, Cloud + Local mode, and non-interactive usage. (#43473) Thanks @BruceMacD.
  • +
  • Models/secrets: enforce source-managed SecretRef markers in generated models.json so runtime-resolved provider secrets are not persisted when runtime projection is skipped. (#43759) Thanks @joshavant.
  • +
  • Security/WebSocket preauth: shorten unauthenticated handshake retention and reject oversized pre-auth frames before application-layer parsing to reduce pre-pairing exposure on unsupported public deployments. (GHSA-jv4g-m82p-2j93)(#44089) (GHSA-xwx2-ppv2-wx98)(#44089) Thanks @ez-lbz and @vincentkoc.
  • +
  • Security/proxy attachments: restore the shared media-store size cap for persisted browser proxy files so oversized payloads are rejected instead of overriding the intended 5 MB limit. (GHSA-6rph-mmhp-h7h9)(#43684) Thanks @tdjackey and @vincentkoc.
  • +
  • Security/host env: block inherited GIT_EXEC_PATH from sanitized host exec environments so Git helper resolution cannot be steered by host environment state. (GHSA-jf5v-pqgw-gm5m)(#43685) Thanks @zpbrent and @vincentkoc.
  • +
  • Security/Feishu webhook: require encryptKey alongside verificationToken in webhook mode so unsigned forged events are rejected instead of being processed with token-only configuration. (GHSA-g353-mgv3-8pcj)(#44087) Thanks @lintsinghua and @vincentkoc.
  • +
  • Security/Feishu reactions: preserve looked-up group chat typing and fail closed on ambiguous reaction context so group authorization and mention gating cannot be bypassed through synthetic p2p reactions. (GHSA-m69h-jm2f-2pv8)(#44088) Thanks @zpbrent and @vincentkoc.
  • +
  • Security/LINE webhook: require signatures for empty-event POST probes too so unsigned requests no longer confirm webhook reachability with a 200 response. (GHSA-mhxh-9pjm-w7q5)(#44090) Thanks @TerminalsandCoffee and @vincentkoc.
  • +
  • Security/Zalo webhook: rate limit invalid secret guesses before auth so weak webhook secrets cannot be brute-forced through unauthenticated churned requests without pre-auth 429 responses. (GHSA-5m9r-p9g7-679c)(#44173) Thanks @zpbrent and @vincentkoc.
  • +
  • Security/Zalouser groups: require stable group IDs for allowlist auth by default and gate mutable group-name matching behind channels.zalouser.dangerouslyAllowNameMatching. Thanks @zpbrent.
  • +
  • Security/Slack and Teams routing: require stable channel and team IDs for allowlist routing by default, with mutable name matching only via each channel's dangerouslyAllowNameMatching break-glass flag.
  • +
  • Security/exec approvals: fail closed for ambiguous inline loader and shell-payload script execution, bind the real script after POSIX shell value-taking flags, and unwrap pnpm/npm exec/npx script runners before approval binding. (GHSA-57jw-9722-6rf2)(GHSA-jvqh-rfmh-jh27)(GHSA-x7pp-23xv-mmr4)(GHSA-jc5j-vg4r-j5jx)(#44247) Thanks @tdjackey and @vincentkoc.
  • +
  • Doctor/gateway service audit: canonicalize service entrypoint paths before comparing them so symlink-vs-realpath installs no longer trigger false "entrypoint does not match the current install" repair prompts. (#43882) Thanks @ngutman.
  • +
  • Doctor/gateway service audit: earlier groundwork for this fix landed in the superseded #28338 branch. Thanks @realriphub.
  • +
  • Gateway/session stores: regenerate the Swift push-test protocol models and align Windows native session-store realpath handling so protocol checks and sync session discovery stop drifting on Windows. (#44266) Thanks @jalehman.
  • +
  • Context engine/session routing: forward optional sessionKey through context-engine lifecycle calls so plugins can see structured routing metadata during bootstrap, assembly, post-turn ingestion, and compaction. (#44157) Thanks @jalehman.
  • +
  • Agents/failover: classify z.ai network_error stop reasons as retryable timeouts so provider connectivity failures trigger fallback instead of surfacing raw unhandled-stop-reason errors. (#43884) Thanks @hougangdev.
  • +
  • Memory/session sync: add mode-aware post-compaction session reindexing with agents.defaults.compaction.postIndexSync plus agents.defaults.memorySearch.sync.sessions.postCompactionForce, so compacted session memory can refresh immediately without forcing every deployment into synchronous reindexing. (#25561) Thanks @rodrigouroz.
  • +
  • Telegram/model picker: make inline model button selections persist the chosen session model correctly, clear overrides when selecting the configured default, and include effective fallback models in /models button validation. (#40105) Thanks @avirweb.
  • +
  • Telegram/native command sync: suppress expected BOT_COMMANDS_TOO_MUCH retry error noise, add a final fallback summary log, and document the difference between command-menu overflow and real Telegram network failures.
  • +
  • Mattermost/reply media delivery: pass agent-scoped mediaLocalRoots through shared reply delivery so allowed local files upload correctly from button, slash-command, and model-picker replies. (#44021) Thanks @LyleLiu666.
  • +
  • Plugins/env-scoped roots: fix plugin discovery/load caches and provenance tracking so same-process HOME/OPENCLAW_HOME changes no longer reuse stale plugin state or misreport ~/... plugins as untracked. (#44046) Thanks @gumadeiras.
  • +
  • Gateway/session discovery: discover disk-only and retired ACP session stores under custom templated session.store roots so ACP reconciliation, session-id/session-label targeting, and run-id fallback keep working after restart. (#44176) Thanks @gumadeiras.
  • +
  • Models/OpenRouter native ids: canonicalize native OpenRouter model keys across config writes, runtime lookups, fallback management, and models list --plain, and migrate legacy duplicated openrouter/openrouter/... config entries forward on write.
  • +
  • Gateway/hooks: bucket hook auth failures by forwarded client IP behind trusted proxies and warn when hooks.allowedAgentIds leaves hook routing unrestricted.
  • +
  • Agents/compaction: skip the post-compaction cache-ttl marker write when a compaction completed in the same attempt, preventing the next turn from immediately triggering a second tiny compaction. (#28548) Thanks @MoerAI.
  • +
  • Native chat/macOS: add /new, /reset, and /clear reset triggers, keep shared main-session aliases aligned, and ignore stale model-selection completions so native chat state stays in sync across reset and fast model changes. (#10898) Thanks @Nachx639.
  • +
  • Agents/compaction safeguard: route missing-model and missing-API-key cancellation warnings through the shared subsystem logger so they land in structured and file logs. (#9974) Thanks @dinakars777.
  • +
  • Cron/doctor: stop flagging canonical agentTurn and systemEvent payload kinds as legacy cron storage, while still normalizing whitespace-padded and non-canonical variants. (#44012) Thanks @shuicici.
  • +
  • ACP/client final-message delivery: preserve terminal assistant text snapshots before resolving end_turn, so ACP clients no longer drop the last visible reply when the gateway sends the final message body on the terminal chat event. (#17615) Thanks @pjeby.
  • +
  • Telegram/Discord status reactions: show a temporary compacting reaction during auto-compaction pauses and restore thinking afterward so the bot no longer appears frozen while context is being compacted. (#35474) Thanks @Cypherm.
  • +
+

View full changelog

+]]>
+ +
2026.3.8-beta.1 Mon, 09 Mar 2026 07:19:57 +0000 @@ -76,587 +244,5 @@ ]]> - - 2026.3.7 - Sun, 08 Mar 2026 04:42:35 +0000 - https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml - 2026030790 - 2026.3.7 - 15.0 - OpenClaw 2026.3.7 -

Changes

-
    -
  • Agents/context engine plugin interface: add ContextEngine plugin slot with full lifecycle hooks (bootstrap, ingest, assemble, compact, afterTurn, prepareSubagentSpawn, onSubagentEnded), slot-based registry with config-driven resolution, LegacyContextEngine wrapper preserving existing compaction behavior, scoped subagent runtime for plugin runtimes via AsyncLocalStorage, and sessions.get gateway method. Enables plugins like lossless-claw to provide alternative context management strategies without modifying core compaction logic. Zero behavior change when no context engine plugin is configured. (#22201) thanks @jalehman.
  • -
  • ACP/persistent channel bindings: add durable Discord channel and Telegram topic binding storage, routing resolution, and CLI/docs support so ACP thread targets survive restarts and can be managed consistently. (#34873) Thanks @dutifulbob.
  • -
  • Telegram/ACP topic bindings: accept Telegram Mac Unicode dash option prefixes in /acp spawn, support Telegram topic thread binding (--thread here|auto), route bound-topic follow-ups to ACP sessions, add actionable Telegram approval buttons with prefixed approval-id resolution, and pin successful bind confirmations in-topic. (#36683) Thanks @huntharo.
  • -
  • Telegram/topic agent routing: support per-topic agentId overrides in forum groups and DM topics so topics can route to dedicated agents with isolated sessions. (#33647; based on #31513) Thanks @kesor and @Sid-Qin.
  • -
  • Web UI/i18n: add Spanish (es) locale support in the Control UI, including locale detection, lazy loading, and language picker labels across supported locales. (#35038) Thanks @DaoPromociones.
  • -
  • Onboarding/web search: add provider selection step and full provider list in configure wizard, with SecretRef ref-mode support during onboarding. (#34009) Thanks @kesku and @thewilloftheshadow.
  • -
  • Tools/Web search: switch Perplexity provider to Search API with structured results plus new language/region/time filters. (#33822) Thanks @kesku.
  • -
  • Gateway: add SecretRef support for gateway.auth.token with auth-mode guardrails. (#35094) Thanks @joshavant.
  • -
  • Docker/Podman extension dependency baking: add OPENCLAW_EXTENSIONS so container builds can preinstall selected bundled extension npm dependencies into the image for faster and more reproducible startup in container deployments. (#32223) Thanks @sallyom.
  • -
  • Plugins/before_prompt_build system-context fields: add prependSystemContext and appendSystemContext so static plugin guidance can be placed in system prompt space for provider caching and lower repeated prompt token cost. (#35177) thanks @maweibin.
  • -
  • Plugins/hook policy: add plugins.entries..hooks.allowPromptInjection, validate unknown typed hook names at runtime, and preserve legacy before_agent_start model/provider overrides while stripping prompt-mutating fields when prompt injection is disabled. (#36567) thanks @gumadeiras.
  • -
  • Hooks/Compaction lifecycle: emit session:compact:before and session:compact:after internal events plus plugin compaction callbacks with session/count metadata, so automations can react to compaction runs consistently. (#16788) thanks @vincentkoc.
  • -
  • Agents/compaction post-context configurability: add agents.defaults.compaction.postCompactionSections so deployments can choose which AGENTS.md sections are re-injected after compaction, while preserving legacy fallback behavior when the documented default pair is configured in any order. (#34556) thanks @efe-arv.
  • -
  • TTS/OpenAI-compatible endpoints: add messages.tts.openai.baseUrl config support with config-over-env precedence, endpoint-aware directive validation, and OpenAI TTS request routing to the resolved base URL. (#34321) thanks @RealKai42.
  • -
  • Slack/DM typing feedback: add channels.slack.typingReaction so Socket Mode DMs can show reaction-based processing status even when Slack native assistant typing is unavailable. (#19816) Thanks @dalefrieswthat.
  • -
  • Discord/allowBots mention gating: add allowBots: "mentions" to only accept bot-authored messages that mention the bot. Thanks @thewilloftheshadow.
  • -
  • Agents/tool-result truncation: preserve important tail diagnostics by using head+tail truncation for oversized tool results while keeping configurable truncation options. (#20076) thanks @jlwestsr.
  • -
  • Cron/job snapshot persistence: skip backup during normalization persistence in ensureLoaded so jobs.json.bak keeps the pre-edit snapshot for recovery, while preserving backup creation on explicit user-driven writes. (#35234) Thanks @0xsline.
  • -
  • CLI: make read-only SecretRef status flows degrade safely (#37023) thanks @joshavant.
  • -
  • Tools/Diffs guidance: restore a short system-prompt hint for enabled diffs while keeping the detailed instructions in the companion skill, so diffs usage guidance stays out of user-prompt space. (#36904) thanks @gumadeiras.
  • -
  • Tools/Diffs guidance loading: move diffs usage guidance from unconditional prompt-hook injection to the plugin companion skill path, reducing unrelated-turn prompt noise while keeping diffs tool behavior unchanged. (#32630) thanks @sircrumpet.
  • -
  • Docs/Web search: remove outdated Brave free-tier wording and replace prescriptive AI ToS guidance with neutral compliance language in Brave setup docs. (#26860) Thanks @HenryLoenwind.
  • -
  • Config/Compaction safeguard tuning: expose agents.defaults.compaction.recentTurnsPreserve and quality-guard retry knobs through the validated config surface and embedded-runner wiring, with regression coverage for real config loading and schema metadata. (#25557) thanks @rodrigouroz.
  • -
  • iOS/App Store Connect release prep: align iOS bundle identifiers under ai.openclaw.client, refresh Watch app icons, add Fastlane metadata/screenshot automation, and support Keychain-backed ASC auth for uploads. (#38936) Thanks @ngutman.
  • -
  • Mattermost/model picker: add Telegram-style interactive provider/model browsing for /oc_model and /oc_models, fix picker callback updates, and emit a normal confirmation reply when a model is selected. (#38767) thanks @mukhtharcm.
  • -
  • Docker/multi-stage build: restructure Dockerfile as a multi-stage build to produce a minimal runtime image without build tools, source code, or Bun; add OPENCLAW_VARIANT=slim build arg for a bookworm-slim variant. (#38479) Thanks @sallyom.
  • -
  • Google/Gemini 3.1 Flash-Lite: add first-class google/gemini-3.1-flash-lite-preview support across model-id normalization, default aliases, media-understanding image lookups, Google Gemini CLI forward-compat fallback, and docs.
  • -
-

Breaking

-
    -
  • BREAKING: Gateway auth now requires explicit gateway.auth.mode when both gateway.auth.token and gateway.auth.password are configured (including SecretRefs). Set gateway.auth.mode to token or password before upgrade to avoid startup/pairing/TUI failures. (#35094) Thanks @joshavant.
  • -
-

Fixes

-
    -
  • Models/MiniMax: stop advertising removed MiniMax-M2.5-Lightning in built-in provider catalogs, onboarding metadata, and docs; keep the supported fast-tier model as MiniMax-M2.5-highspeed.
  • -
  • Security/Config: fail closed when loadConfig() hits validation or read errors so invalid configs cannot silently fall back to permissive runtime defaults. (#9040) Thanks @joetomasone.
  • -
  • Memory/Hybrid search: preserve negative FTS5 BM25 relevance ordering in bm25RankToScore() so stronger keyword matches rank above weaker ones instead of collapsing or reversing scores. (#33757) Thanks @lsdcc01.
  • -
  • LINE/requireMention group gating: align inbound and reply-stage LINE group policy resolution across raw, group:, and room: keys (including account-scoped group config), preserve plugin-backed reply-stage fallback behavior, and add regression coverage for prefixed-only group/room config plus reply-stage policy resolution. (#35847) Thanks @kirisame-wang.
  • -
  • Onboarding/local setup: default unset local tools.profile to coding instead of messaging, restoring file/runtime tools for fresh local installs while preserving explicit user-set profiles. (from #38241, overlap with #34958) Thanks @cgdusek.
  • -
  • Gateway/Telegram stale-socket restart guard: only apply stale-socket restarts to channels that publish event-liveness timestamps, preventing Telegram providers from being misclassified as stale solely due to long uptime and avoiding restart/pairing storms after upgrade. (openclaw#38464)
  • -
  • Onboarding/headless Linux daemon probe hardening: treat systemctl --user is-enabled probe failures as non-fatal during daemon install flow so onboarding no longer crashes on SSH/headless VPS environments before showing install guidance. (#37297) Thanks @acarbajal-web.
  • -
  • Memory/QMD mcporter Windows spawn hardening: when mcporter.cmd launch fails with spawn EINVAL, retry via bare mcporter shell resolution so QMD recall can continue instead of falling back to builtin memory search. (#27402) Thanks @i0ivi0i.
  • -
  • Tools/web_search Brave language-code validation: align search_lang handling with Brave-supported codes (including zh-hans, zh-hant, en-gb, and pt-br), map common alias inputs (zh, ja) to valid Brave values, and reject unsupported codes before upstream requests to prevent 422 failures. (#37260) Thanks @heyanming.
  • -
  • Models/openai-completions streaming compatibility: force compat.supportsUsageInStreaming=false for non-native OpenAI-compatible endpoints during model normalization, preventing usage-only stream chunks from triggering choices[0] parser crashes in provider streams. (#8714) Thanks @nonanon1.
  • -
  • Tools/xAI native web-search collision guard: drop OpenClaw web_search from tool registration when routing to xAI/Grok model providers (including OpenRouter x-ai/*) to avoid duplicate tool-name request failures against provider-native web_search. (#14749) Thanks @realsamrat.
  • -
  • TUI/token copy-safety rendering: treat long credential-like mixed alphanumeric tokens (including quoted forms) as copy-sensitive in render sanitization so formatter hard-wrap guards no longer inject visible spaces into auth-style values before display. (#26710) Thanks @jasonthane.
  • -
  • WhatsApp/self-chat response prefix fallback: stop forcing "[openclaw]" as the implicit outbound response prefix when no identity name or response prefix is configured, so blank/default prefix settings no longer inject branding text unexpectedly in self-chat flows. (#27962) Thanks @ecanmor.
  • -
  • Memory/QMD search result decoding: accept qmd search hits that only include file URIs (for example qmd://collection/path.md) without docid, resolve them through managed collection roots, and keep multi-collection results keyed by file fallback so valid QMD hits no longer collapse to empty memory_search output. (#28181) Thanks @0x76696265.
  • -
  • Memory/QMD collection-name conflict recovery: when qmd collection add fails because another collection already occupies the same path + pattern, detect the conflicting collection from collection list, remove it, and retry add so agent-scoped managed collections are created deterministically instead of being silently skipped; also add warning-only fallback when qmd metadata is unavailable to avoid destructive guesses. (#25496) Thanks @Ramsbaby.
  • -
  • Slack/app_mention race dedupe: when app_mention dispatch wins while same-ts message prepare is still in-flight, suppress the later message dispatch so near-simultaneous Slack deliveries do not produce duplicate replies; keep single-retry behavior and add regression coverage for both dropped and successful message-prepare outcomes. (#37033) Thanks @Takhoffman.
  • -
  • Gateway/chat streaming tool-boundary text retention: merge assistant delta segments into per-run chat buffers so pre-tool text is preserved in live chat deltas/finals when providers emit post-tool assistant segments as non-prefix snapshots. (#36957) Thanks @Datyedyeguy.
  • -
  • TUI/model indicator freshness: prevent stale session snapshots from overwriting freshly patched model selection (and reset per-session freshness when switching session keys) so /model updates reflect immediately instead of lagging by one or more commands. (#21255) Thanks @kowza.
  • -
  • TUI/final-error rendering fallback: when a chat final event has no renderable assistant content but includes envelope errorMessage, render the formatted error text instead of collapsing to "(no output)", preserving actionable failure context in-session. (#14687) Thanks @Mquarmoc.
  • -
  • TUI/session-key alias event matching: treat chat events whose session keys are canonical aliases (for example agent::main vs main) as the same session while preserving cross-agent isolation, so assistant replies no longer disappear or surface in another terminal window due to strict key-form mismatch. (#33937) Thanks @yjh1412.
  • -
  • OpenAI Codex OAuth/login parity: keep openclaw models auth login --provider openai-codex on the built-in path even without provider plugins, preserve Pi-generated authorize URLs without local scope rewriting, and stop validating successful Codex sign-ins against the public OpenAI Responses API after callback. (#37558; follow-up to #36660 and #24720) Thanks @driesvints, @Skippy-Gunboat, and @obviyus.
  • -
  • Agents/config schema lookup: add gateway tool action config.schema.lookup so agents can inspect one config path at a time before edits without loading the full schema into prompt context. (#37266) Thanks @gumadeiras.
  • -
  • Onboarding/API key input hardening: strip non-Latin1 Unicode artifacts from normalized secret input (while preserving Latin-1 content and internal spaces) so malformed copied API keys cannot trigger HTTP header ByteString construction crashes; adds regression coverage for shared normalization and MiniMax auth header usage. (#24496) Thanks @fa6maalassaf.
  • -
  • Kimi Coding/Anthropic tools compatibility: normalize anthropic-messages tool payloads to OpenAI-style tools[].function + compatible tool_choice when targeting Kimi Coding endpoints, restoring tool-call workflows that regressed after v2026.3.2. (#37038) Thanks @mochimochimochi-hub.
  • -
  • Heartbeat/workspace-path guardrails: append explicit workspace HEARTBEAT.md path guidance (and docs/heartbeat.md avoidance) to heartbeat prompts so heartbeat runs target workspace checklists reliably across packaged install layouts. (#37037) Thanks @stofancy.
  • -
  • Subagents/kill-complete announce race: when a late subagent-complete lifecycle event arrives after an earlier kill marker, clear stale kill suppression/cleanup flags and re-run announce cleanup so finished runs no longer get silently swallowed. (#37024) Thanks @cmfinlan.
  • -
  • Agents/tool-result cleanup timeout hardening: on embedded runner teardown idle timeouts, clear pending tool-call state without persisting synthetic missing tool result entries, preventing timeout cleanups from poisoning follow-up turns; adds regression coverage for timeout clear-vs-flush behavior. (#37081) Thanks @Coyote-Den.
  • -
  • Agents/openai-completions stream timeout hardening: ensure runtime undici global dispatchers use extended streaming body/header timeouts (including env-proxy dispatcher mode) before embedded runs, reducing forced mid-stream terminated failures on long generations; adds regression coverage for dispatcher selection and idempotent reconfiguration. (#9708) Thanks @scottchguard.
  • -
  • Agents/fallback cooldown probe execution: thread explicit rate-limit cooldown probe intent from model fallback into embedded runner auth-profile selection so same-provider fallback attempts can actually run when all profiles are cooldowned for rate_limit (instead of failing pre-run as No available auth profile), while preserving default cooldown skip behavior and adding regression tests at both fallback and runner layers. (#13623) Thanks @asfura.
  • -
  • Cron/OpenAI Codex OAuth refresh hardening: when openai-codex token refresh fails specifically on account-id extraction, reuse the cached access token instead of failing the run immediately, with regression coverage to keep non-Codex and unrelated refresh failures unchanged. (#36604) Thanks @laulopezreal.
  • -
  • TUI/session isolation for /new: make /new allocate a unique tui-<id> session key instead of resetting the shared agent session, so multiple TUI clients on the same agent stop receiving each other’s replies; also sanitize /new and /reset failure text before rendering in-terminal. Landed from contributor PR #39238 by @widingmarcus-cyber. Thanks @widingmarcus-cyber.
  • -
  • Synology Chat/rate-limit env parsing: honor SYNOLOGY_RATE_LIMIT=0 as an explicit value while still falling back to the default limit for malformed env values instead of partially parsing them. Landed from contributor PR #39197 by @scoootscooob. Thanks @scoootscooob.
  • -
  • Voice-call/OpenAI Realtime STT config defaults: honor explicit vadThreshold: 0 and silenceDurationMs: 0 instead of silently replacing them with defaults. Landed from contributor PR #39196 by @scoootscooob. Thanks @scoootscooob.
  • -
  • Voice-call/OpenAI TTS speed config: honor explicit speed: 0 instead of silently replacing it with the default speed. Landed from contributor PR #39318 by @ql-wade. Thanks @ql-wade.
  • -
  • launchd/runtime PID parsing: reject pid <= 0 from launchctl print so the daemon state parser no longer treats kernel/non-running sentinel values as real process IDs. Landed from contributor PR #39281 by @mvanhorn. Thanks @mvanhorn.
  • -
  • Cron/file permission hardening: enforce owner-only (0600) cron store/backup/run-log files and harden cron store + run-log directories to 0700, including pre-existing directories from older installs. (#36078) Thanks @aerelune.
  • -
  • Gateway/remote WS break-glass hostname support: honor OPENCLAW_ALLOW_INSECURE_PRIVATE_WS=1 for ws:// hostname URLs (not only private IP literals) across onboarding validation and runtime gateway connection checks, while still rejecting public IP literals and non-unicast IPv6 endpoints. (#36930) Thanks @manju-rn.
  • -
  • Routing/binding lookup scalability: pre-index route bindings by channel/account and avoid full binding-list rescans on channel-account cache rollover, preventing multi-second resolveAgentRoute stalls in large binding configurations. (#36915) Thanks @songchenghao.
  • -
  • Browser/session cleanup: track browser tabs opened by session-scoped browser tool runs and close tracked tabs during sessions.reset/sessions.delete runtime cleanup, preventing orphaned tabs and unbounded browser memory growth after session teardown. (#36666) Thanks @Harnoor6693.
  • -
  • Plugin/hook install rollback hardening: stage installs under the canonical install base, validate and run dependency installs before publish, and restore updates by rename instead of deleting the target path, reducing partial-replace and symlink-rebind risk during install failures.
  • -
  • Slack/local file upload allowlist parity: propagate mediaLocalRoots through the Slack send action pipeline so workspace-rooted attachments pass assertLocalMediaAllowed checks while non-allowlisted paths remain blocked. (synthesis: #36656; overlap considered from #36516, #36496, #36493, #36484, #32648, #30888) Thanks @2233admin.
  • -
  • Agents/compaction safeguard pre-check: skip embedded compaction before entering the Pi SDK when a session has no real conversation messages, avoiding unnecessary LLM API calls on idle sessions. (#36451) thanks @Sid-Qin.
  • -
  • Config/schema cache key stability: build merged schema cache keys with incremental hashing to avoid large single-string serialization and prevent RangeError: Invalid string length on high-cardinality plugin/channel metadata. (#36603) Thanks @powermaster888.
  • -
  • iMessage/cron completion announces: strip leaked inline reply tags (for example [[reply_to:6100]]) from user-visible completion text so announcement deliveries do not expose threading metadata. (#24600) Thanks @vincentkoc.
  • -
  • Control UI/iMessage duplicate reply routing: keep internal webchat turns on dispatcher delivery (instead of origin-channel reroute) so Control UI chats do not duplicate replies into iMessage, while preserving webchat-provider relayed routing for external surfaces. Fixes #33483. Thanks @alicexmolt.
  • -
  • Sessions/daily reset transcript archival: archive prior transcript files during stale-session scheduled/daily resets by capturing the previous session entry before rollover, preventing orphaned transcript files on disk. (#35493) Thanks @byungsker.
  • -
  • Feishu/group slash command detection: normalize group mention wrappers before command-authorization probing so mention-prefixed commands (for example @Bot/model and @Bot /reset) are recognized as gateway commands instead of being forwarded to the agent. (#35994) Thanks @liuxiaopai-ai.
  • -
  • Control UI/auth token separation: keep the shared gateway token in browser auth validation while reserving cached device tokens for signed device payloads, preventing false device token mismatch disconnects after restart/rotation. Landed from contributor PR #37382 by @FradSer. Thanks @FradSer.
  • -
  • Gateway/browser auth reconnect hardening: stop counting missing token/password submissions as auth rate-limit failures, and stop auto-reconnecting Control UI clients on non-recoverable auth errors so misconfigured browser tabs no longer lock out healthy sessions. Landed from contributor PR #38725 by @ademczuk. Thanks @ademczuk.
  • -
  • Gateway/service token drift repair: stop persisting shared auth tokens into installed gateway service units, flag stale embedded service tokens for reinstall, and treat tokenless service env as canonical so token rotation/reboot flows stay aligned with config/env resolution. Landed from contributor PR #28428 by @l0cka. Thanks @l0cka.
  • -
  • Control UI/agents-page selection: keep the edited agent selected after saving agent config changes and reloading the agents list, so /agents no longer snaps back to the default agent. Landed from contributor PR #39301 by @MumuTW. Thanks @MumuTW.
  • -
  • Gateway/auth follow-up hardening: preserve systemd EnvironmentFile= precedence/source provenance in daemon audits and doctor repairs, block shared-password override flows from piggybacking cached device tokens, and fail closed when config-first gateway SecretRefs cannot resolve. Follow-up to #39241.
  • -
  • Agents/context pruning: guard assistant thinking/text char estimation against malformed blocks (missing thinking/text strings or null entries) so pruning no longer crashes with malformed provider content. (openclaw#35146) thanks @Sid-Qin.
  • -
  • Agents/transcript policy: set preserveSignatures to Anthropic-only handling in resolveTranscriptPolicy so Anthropic thinking signatures are preserved while non-Anthropic providers remain unchanged. (#32813) thanks @Sid-Qin.
  • -
  • Agents/schema cleaning: detect Venice + Grok model IDs as xAI-proxied targets so unsupported JSON Schema keywords are stripped before requests, preventing Venice/Grok Invalid arguments failures. (openclaw#35355) thanks @Sid-Qin.
  • -
  • Skills/native command deduplication: centralize skill command dedupe by canonical skillName in listSkillCommandsForAgents so duplicate suffixed variants (for example _2) are no longer surfaced across interfaces outside Discord. (#27521) thanks @shivama205.
  • -
  • Agents/xAI tool-call argument decoding: decode HTML-entity encoded xAI/Grok tool-call argument values (&, ", <, >, numeric entities) before tool execution so commands with shell operators and quotes no longer fail with parse errors. (#35276) Thanks @Sid-Qin.
  • -
  • Linux/WSL2 daemon install hardening: add regression coverage for WSL environment detection, WSL-specific systemd guidance, and systemctl --user is-enabled failure paths so WSL2/headless onboarding keeps treating bus-unavailable probes as non-fatal while preserving real permission errors. Related: #36495. Thanks @vincentkoc.
  • -
  • Linux/systemd status and degraded-session handling: treat degraded-but-reachable systemctl --user status results as available, preserve early errors for truly unavailable user-bus cases, and report externally managed running services as running instead of not installed. Thanks @vincentkoc.
  • -
  • Agents/thinking-tag promotion hardening: guard promoteThinkingTagsToBlocks against malformed assistant content entries (null/undefined) before block.type reads so malformed provider payloads no longer crash session processing while preserving pass-through behavior. (#35143) thanks @Sid-Qin.
  • -
  • Gateway/Control UI version reporting: align runtime and browser client version metadata to avoid dev placeholders, wait for bootstrap version before first UI websocket connect, and only forward bootstrap serverVersion to same-origin gateway targets to prevent cross-target version leakage. (from #35230, #30928, #33928) Thanks @Sid-Qin, @joelnishanth, and @MoerAI.
  • -
  • Control UI/markdown parser crash fallback: catch marked.parse() failures and fall back to escaped plain-text
     rendering so malformed recursive markdown no longer crashes Control UI session rendering on load. (#36445) Thanks @BinHPdev.
  • -
  • Control UI/markdown fallback regression coverage: add explicit regression assertions for parser-error fallback behavior so malformed markdown no longer risks reintroducing hard-crash rendering paths in future markdown/parser upgrades. (#36445) Thanks @BinHPdev.
  • -
  • Web UI/config form: treat additionalProperties: true object schemas as editable map entries instead of unsupported fields so Accounts-style maps stay editable in form mode. (#35380, supersedes #32072) Thanks @stakeswky and @liuxiaopai-ai.
  • -
  • Feishu/streaming card delivery synthesis: unify snapshot and delta streaming merge semantics, apply overlap-aware final merge, suppress duplicate final text delivery (including text+media final packets), prefer topic-thread message.reply routing when a reply target exists, and tune card print cadence to avoid duplicate incremental rendering. (from #33245, #32896, #33840) Thanks @rexl2018, @kcinzgg, and @aerelune.
  • -
  • Feishu/group mention detection: carry startup-probed bot display names through monitor dispatch so requireMention checks compare against current bot identity instead of stale config names, fixing missed @bot handling in groups while preserving multi-bot false-positive guards. (#36317, #34271) Thanks @liuxiaopai-ai.
  • -
  • Security/dependency audit: patch transitive Hono vulnerabilities by pinning hono to 4.12.5 and @hono/node-server to 1.19.10 in production resolution paths. Thanks @shakkernerd.
  • -
  • Security/dependency audit: bump tar to 7.5.10 (from 7.5.9) to address the high-severity hardlink path traversal advisory (GHSA-qffp-2rhf-9h96). Thanks @shakkernerd.
  • -
  • Cron/announce delivery robustness: bypass pending-descendant announce guards for cron completion sends, ensure named-agent announce routes have outbound session entries, and fall back to direct delivery only when an announce send was actually attempted and failed. (from #35185, #32443, #34987) Thanks @Sid-Qin, @scoootscooob, and @bmendonca3.
  • -
  • Cron/announce best-effort fallback: run direct outbound fallback after attempted announce failures even when delivery is configured as best-effort, so Telegram cron sends are not left as attempted-but-undelivered after "cron announce delivery failed" warnings.
  • -
  • Auto-reply/system events: restore runtime system events to the message timeline (System: lines), preserve think-hint parsing with prepended events, and carry events into deferred followup/collect/steer-backlog prompts to keep cache behavior stable without dropping queued metadata. (#34794) Thanks @anisoptera.
  • -
  • Security/audit account handling: avoid prototype-chain account IDs in audit validation by using own-property checks for accounts. (#34982) Thanks @HOYALIM.
  • -
  • Cron/restart catch-up semantics: replay interrupted recurring jobs and missed immediate cron slots on startup without replaying interrupted one-shot jobs, with guarded missed-slot probing to avoid malformed-schedule startup aborts and duplicate-trigger drift after restart. (from #34466, #34896, #34625, #33206) Thanks @dunamismax, @dsantoreis, @Octane0411, and @Sid-Qin.
  • -
  • Venice/provider onboarding hardening: align per-model Venice completion-token limits with discovery metadata, clamp untrusted discovery values to safe bounds, sync the static Venice fallback catalog with current live model metadata, and disable tool wiring for Venice models that do not support function calling so default Venice setups no longer fail with max_completion_tokens or unsupported-tools 400s. Fixes #38168. Thanks @Sid-Qin, @powermaster888 and @vincentkoc.
  • -
  • Agents/session usage tracking: preserve accumulated usage metadata on embedded Pi runner error exits so failed turns still update session totalTokens from real usage instead of stale prior values. (#34275) thanks @RealKai42.
  • -
  • Slack/reaction thread context routing: carry Slack native DM channel IDs through inbound context and threading tool resolution so reaction targets resolve consistently for DM To=user:* sessions (including toolContext.currentChannelId fallback behavior). (from #34831; overlaps #34440, #34502, #34483, #32754) Thanks @dunamismax.
  • -
  • Subagents/announce completion scoping: scope nested direct-child completion aggregation to the current requester run window, harden frozen completion capture for deterministic descendant synthesis, and route completion announce delivery through parent-agent announce turns with provenance-aware internal events. (#35080) Thanks @tyler6204.
  • -
  • Nodes/system.run approval hardening: use explicit argv-mutation signaling when regenerating prepared rawCommand, and cover the system.run.prepare -> system.run handoff so direct PATH-based nodes.run commands no longer fail with rawCommand does not match command. (#33137) thanks @Sid-Qin.
  • -
  • Models/custom provider headers: propagate models.providers.<id>.headers across inline, fallback, and registry-found model resolution so header-authenticated proxies consistently receive configured request headers. (#27490) thanks @Sid-Qin.
  • -
  • Ollama/remote provider auth fallback: synthesize a local runtime auth key for explicitly configured models.providers.ollama entries that omit apiKey, so remote Ollama endpoints run without requiring manual dummy-key setup while preserving env/profile/config key precedence and missing-config failures. (#11283) Thanks @cpreecs.
  • -
  • Ollama/custom provider headers: forward resolved model headers into native Ollama stream requests so header-authenticated Ollama proxies receive configured request headers. (#24337) thanks @echoVic.
  • -
  • Ollama/compaction and summarization: register custom api: "ollama" handling for compaction, branch-style internal summarization, and TTS text summarization on current main, so native Ollama models no longer fail with No API provider registered for api: ollama outside the main run loop. Thanks @JaviLib.
  • -
  • Daemon/systemd install robustness: treat systemctl --user is-enabled exit-code-4 not-found responses as not-enabled by combining stderr/stdout detail parsing, so Ubuntu fresh installs no longer fail with systemctl is-enabled unavailable. (#33634) Thanks @Yuandiaodiaodiao.
  • -
  • Slack/system-event session routing: resolve reaction/member/pin/interaction system-event session keys through channel/account bindings (with sender-aware DM routing) so inbound Slack events target the correct agent session in multi-account setups instead of defaulting to agent:main. (#34045) Thanks @paulomcg, @daht-mad and @vincentkoc.
  • -
  • Slack/native streaming markdown conversion: stop pre-normalizing text passed to Slack native markdown_text in streaming start/append/stop paths to prevent Markdown style corruption from double conversion. (#34931)
  • -
  • Gateway/HTTP tools invoke media compatibility: preserve raw media payload access for direct /tools/invoke clients by allowing media nodes invoke commands only in HTTP tool context, while keeping agent-context media invoke blocking to prevent base64 prompt bloat. (#34365) Thanks @obviyus.
  • -
  • Security/archive ZIP hardening: extract ZIP entries via same-directory temp files plus atomic rename, then re-open and reject post-rename hardlink alias races outside the destination root.
  • -
  • Agents/Nodes media outputs: add dedicated photos_latest action handling, block media-returning nodes invoke commands, keep metadata-only camera.list invoke allowed, and normalize empty photos_latest results to a consistent response shape to prevent base64 context bloat. (#34332) Thanks @obviyus.
  • -
  • TUI/session-key canonicalization: normalize openclaw tui --session values to lowercase so uppercase session names no longer drop real-time streaming updates due to gateway/TUI key mismatches. (#33866, #34013) thanks @lynnzc.
  • -
  • iMessage/echo loop hardening: strip leaked assistant-internal scaffolding from outbound iMessage replies, drop reflected assistant-content messages before they re-enter inbound processing, extend echo-cache text retention for delayed reflections, and suppress repeated loop traffic before it amplifies into queue overflow. (#33295) Thanks @joelnishanth.
  • -
  • Skills/workspace boundary hardening: reject workspace and extra-dir skill roots or SKILL.md files whose realpath escapes the configured source root, and skip syncing those escaped skills into sandbox workspaces.
  • -
  • Outbound/send config threading: pass resolved SecretRef config through outbound adapters and helper send paths so send flows do not reload unresolved runtime config. (#33987) Thanks @joshavant.
  • -
  • gateway: harden shared auth resolution across systemd, discord, and node host (#39241) Thanks @joshavant.
  • -
  • Secrets/models.json persistence hardening: keep SecretRef-managed api keys + headers from persisting in generated models.json, expand audit/apply coverage, and harden marker handling/serialization. (#38955) Thanks @joshavant.
  • -
  • Sessions/subagent attachments: remove attachments[].content.maxLength from sessions_spawn schema to avoid llama.cpp GBNF repetition overflow, and preflight UTF-8 byte size before buffer allocation while keeping runtime file-size enforcement unchanged. (#33648) Thanks @anisoptera.
  • -
  • Runtime/tool-state stability: recover from dangling Anthropic tool_use after compaction, serialize long-running Discord handler runs without blocking new inbound events, and prevent stale busy snapshots from suppressing stuck-channel recovery. (from #33630, #33583) Thanks @kevinWangSheng and @theotarr.
  • -
  • ACP/Discord startup hardening: clean up stuck ACP worker children on gateway restart, unbind stale ACP thread bindings during Discord startup reconciliation, and add per-thread listener watchdog timeouts so wedged turns cannot block later messages. (#33699) Thanks @dutifulbob.
  • -
  • Extensions/media local-root propagation: consistently forward mediaLocalRoots through extension sendMedia adapters (Google Chat, Slack, iMessage, Signal, WhatsApp), preserving non-local media behavior while restoring local attachment resolution from configured roots. Synthesis of #33581, #33545, #33540, #33536, #33528. Thanks @bmendonca3.
  • -
  • Gateway/plugin HTTP auth hardening: require gateway auth when any overlapping matched route needs it, block mixed-auth fallthrough at dispatch, and reject mixed-auth exact/prefix route overlaps during plugin registration.
  • -
  • Feishu/video media send contract: keep mp4-like outbound payloads on msg_type: "media" (including reply and reply-in-thread paths) so videos render as media instead of degrading to file-link behavior, while preserving existing non-video file subtype handling. (from #33720, #33808, #33678) Thanks @polooooo, @dingjianrui, and @kevinWangSheng.
  • -
  • Gateway/security default response headers: add Permissions-Policy: camera=(), microphone=(), geolocation=() to baseline gateway HTTP security headers for all responses. (#30186) thanks @habakan.
  • -
  • Plugins/startup loading: lazily initialize plugin runtime, split startup-critical plugin SDK imports into openclaw/plugin-sdk/core and openclaw/plugin-sdk/telegram, and preserve api.runtime reflection semantics for plugin compatibility. (#28620) thanks @hmemcpy.
  • -
  • Plugins/startup performance: reduce bursty plugin discovery/manifest overhead with short in-process caches, skip importing bundled memory plugins that are disabled by slot selection, and speed legacy root openclaw/plugin-sdk compatibility via runtime root-alias routing while preserving backward compatibility. Thanks @gumadeiras.
  • -
  • Build/lazy runtime boundaries: replace ineffective dynamic import sites with dedicated lazy runtime boundaries across Slack slash handling, Telegram audit, CLI send deps, memory fallback, and outbound delivery paths while preserving behavior. (#33690) thanks @gumadeiras.
  • -
  • Gateway/password CLI hardening: add openclaw gateway run --password-file, warn when inline --password is used because it can leak via process listings, and document env/file-backed password input as the preferred startup path. Fixes #27948. Thanks @vibewrk and @vincentkoc.
  • -
  • Config/heartbeat legacy-path handling: auto-migrate top-level heartbeat into agents.defaults.heartbeat (with merge semantics that preserve explicit defaults), and keep startup failures on non-migratable legacy entries in the detailed invalid-config path instead of generic migration-failed errors. (#32706) thanks @xiwan.
  • -
  • Plugins/SDK subpath parity: expand plugin SDK subpaths across bundled channels/extensions (Discord, Slack, Signal, iMessage, WhatsApp, LINE, and bundled companion plugins), with build/export/type/runtime wiring so scoped imports resolve consistently in source and dist while preserving compatibility. (#33737) thanks @gumadeiras.
  • -
  • Google/Gemini Flash model selection: switch built-in gemini-flash defaults and docs/examples from the nonexistent google/gemini-3.1-flash-preview ID to the working google/gemini-3-flash-preview, while normalizing legacy OpenClaw config that still uses the old Flash 3.1 alias.
  • -
  • Plugins/bundled scoped-import migration: migrate bundled plugins from monolithic openclaw/plugin-sdk imports to scoped subpaths (or openclaw/plugin-sdk/core) across registration and startup-sensitive runtime files, add CI/release guardrails to prevent regressions, and keep root openclaw/plugin-sdk support for external/community plugins. Thanks @gumadeiras.
  • -
  • Routing/session duplicate suppression synthesis: align shared session delivery-context inheritance, channel-paired route-field merges, and reply-surface target matching so dmScope=main turns avoid cross-surface duplicate replies while thread-aware forwarding keeps intended routing semantics. (from #33629, #26889, #17337, #33250) Thanks @Yuandiaodiaodiao, @kevinwildenradt, @Glucksberg, and @bmendonca3.
  • -
  • Routing/legacy session route inheritance: preserve external route metadata inheritance for legacy channel session keys (agent::: and ...:thread:) so chat.send does not incorrectly fall back to webchat when valid delivery context exists. Follow-up to #33786.
  • -
  • Routing/legacy route guard tightening: require legacy session-key channel hints to match the saved delivery channel before inheriting external routing metadata, preventing custom namespaced keys like agent::work: from inheriting stale non-webchat routes.
  • -
  • Gateway/internal client routing continuity: prevent webchat/TUI/UI turns from inheriting stale external reply routes by requiring explicit deliver: true for external delivery, keeping main-session external inheritance scoped to non-Webchat/UI clients, and honoring configured session.mainKey when identifying main-session continuity. (from #35321, #34635, #35356) Thanks @alexyyyander and @Octane0411.
  • -
  • Security/auth labels: remove token and API-key snippets from user-facing auth status labels so /status and /models do not expose credential fragments. (#33262) thanks @cu1ch3n.
  • -
  • Models/MiniMax portal vision routing: add MiniMax-VL-01 to the minimax-portal provider, route portal image understanding through the MiniMax VLM endpoint, and align media auto-selection plus Telegram sticker description with the shared portal image provider path. (#33953) Thanks @tars90percent.
  • -
  • Auth/credential semantics: align profile eligibility + probe diagnostics with SecretRef/expiry rules and harden browser download atomic writes. (#33733) thanks @joshavant.
  • -
  • Security/audit denyCommands guidance: suggest likely exact node command IDs for unknown gateway.nodes.denyCommands entries so ineffective denylist entries are easier to correct. (#29713) thanks @liquidhorizon88-bot.
  • -
  • Agents/overload failover handling: classify overloaded provider failures separately from rate limits/status timeouts, add short overload backoff before retry/failover, record overloaded prompt/assistant failures as transient auth-profile cooldowns (with probeable same-provider fallback) instead of treating them like persistent auth/billing failures, and keep one-shot cron retry classification aligned so overloaded fallback summaries still count as transient retries.
  • -
  • Docs/security hardening guidance: document Docker DOCKER-USER + UFW policy and add cross-linking from Docker install docs for VPS/public-host setups. (#27613) thanks @dorukardahan.
  • -
  • Docs/security threat-model links: replace relative .md links with Mintlify-compatible root-relative routes in security docs to prevent broken internal navigation. (#27698) thanks @clawdoo.
  • -
  • Plugins/Update integrity drift: avoid false integrity drift prompts when updating npm-installed plugins from unpinned specs, while keeping drift checks for exact pinned versions. (#37179) Thanks @vincentkoc.
  • -
  • iOS/Voice timing safety: guard system speech start/finish callbacks to the active utterance to avoid misattributed start events during rapid stop/restart cycles. (#33304) thanks @mbelinky; original implementation direction by @ngutman.
  • -
  • Gateway/chat.send command scopes: require operator.admin for persistent /config set|unset writes routed through gateway chat clients while keeping /config show available to normal write-scoped operator clients, preserving messaging-channel config command behavior without widening RPC write scope into admin config mutation. Thanks @tdjackey for reporting.
  • -
  • iOS/Talk incremental speech pacing: allow long punctuation-free assistant chunks to start speaking at safe whitespace boundaries so voice responses begin sooner instead of waiting for terminal punctuation. (#33305) thanks @mbelinky; original implementation by @ngutman.
  • -
  • iOS/Watch reply reliability: make watch session activation waiters robust under concurrent requests so status/send calls no longer hang intermittently, and align delegate callbacks with Swift 6 actor safety. (#33306) thanks @mbelinky; original implementation by @Rocuts.
  • -
  • Docs/tool-loop detection config keys: align docs/tools/loop-detection.md examples and field names with the current tools.loopDetection schema to prevent copy-paste validation failures from outdated keys. (#33182) Thanks @Mylszd.
  • -
  • Gateway/session agent discovery: include disk-scanned agent IDs in listConfiguredAgentIds even when agents.list is configured, so disk-only/ACP agent sessions remain visible in gateway session aggregation and listings. (#32831) thanks @Sid-Qin.
  • -
  • Discord/inbound debouncer: skip bot-own MESSAGE_CREATE events before they reach the debounce queue to avoid self-triggered slowdowns in busy servers. Thanks @thewilloftheshadow.
  • -
  • Discord/Agent-scoped media roots: pass mediaLocalRoots through Discord monitor reply delivery (message + component interaction paths) so local media attachments honor per-agent workspace roots instead of falling back to default global roots. Thanks @thewilloftheshadow.
  • -
  • Discord/slash command handling: intercept text-based slash commands in channels, register plugin commands as native, and send fallback acknowledgments for empty slash runs so interactions do not hang. Thanks @thewilloftheshadow.
  • -
  • Discord/thread session lifecycle: reset thread-scoped sessions when a thread is archived so reopening a thread starts fresh without deleting transcript history. Thanks @thewilloftheshadow.
  • -
  • Discord/presence defaults: send an online presence update on ready when no custom presence is configured so bots no longer appear offline by default. Thanks @thewilloftheshadow.
  • -
  • Discord/typing cleanup: stop typing indicators after silent/NO_REPLY runs by marking the run complete before dispatch idle cleanup. Thanks @thewilloftheshadow.
  • -
  • ACP/sandbox spawn parity: block /acp spawn from sandboxed requester sessions with the same host-runtime guard already enforced for sessions_spawn({ runtime: "acp" }), preserving non-sandbox ACP flows while closing the command-path policy gap. Thanks @patte.
  • -
  • Discord/config SecretRef typing: align Discord account token config typing with SecretInput so SecretRef tokens typecheck. (#32490) Thanks @scoootscooob.
  • -
  • Discord/voice messages: request upload slots with JSON fetch calls so voice message uploads no longer fail with content-type errors. Thanks @thewilloftheshadow.
  • -
  • Discord/voice decoder fallback: drop the native Opus dependency and use opusscript for voice decoding to avoid native-opus installs. Thanks @thewilloftheshadow.
  • -
  • Discord/auto presence health signal: add runtime availability-driven presence updates plus connected-state reporting to improve health monitoring and operator visibility. (#33277) Thanks @thewilloftheshadow.
  • -
  • HEIC image inputs: accept HEIC/HEIF input_image sources in Gateway HTTP APIs, normalize them to JPEG before provider delivery, and document the expanded default MIME allowlist. Thanks @vincentkoc.
  • -
  • Gateway/HEIC input follow-up: keep non-HEIC input_image MIME handling unchanged, make HEIC tests hermetic, and enforce chat-completions maxTotalImageBytes against post-normalization image payload size. Thanks @vincentkoc.
  • -
  • Telegram/draft-stream boundary stability: materialize DM draft previews at assistant-message/tool boundaries, serialize lane-boundary callbacks before final delivery, and scope preview cleanup to the active preview so multi-step Telegram streams no longer lose, overwrite, or leave stale preview bubbles. (#33842) Thanks @ngutman.
  • -
  • Telegram/DM draft finalization reliability: require verified final-text draft emission before treating preview finalization as delivered, and fall back to normal payload send when final draft delivery is not confirmed (preventing missing final responses and preserving media/button delivery). (#32118) Thanks @OpenCils.
  • -
  • Telegram/DM draft final delivery: materialize text-only sendMessageDraft previews into one permanent final message and skip duplicate final payload sends, while preserving fallback behavior when materialization fails. (#34318) Thanks @Brotherinlaw-13.
  • -
  • Telegram/DM draft duplicate display: clear stale DM draft previews after materializing the real final message, including threadless fallback when DM topic lookup fails, so partial streaming no longer briefly shows duplicate replies. (#36746) Thanks @joelnishanth.
  • -
  • Telegram/draft preview boundary + silent-token reliability: stabilize answer-lane message boundaries across late-partial/message-start races, preserve/reset finalized preview state at the correct boundaries, and suppress NO_REPLY lead-fragment leaks without broad heartbeat-prefix false positives. (#33169) Thanks @obviyus.
  • -
  • Telegram/native commands commands.allowFrom precedence: make native Telegram commands honor commands.allowFrom as the command-specific authorization source, including group chats, instead of falling back to channel sender allowlists. (#28216) Thanks @toolsbybuddy and @vincentkoc.
  • -
  • Telegram/groupAllowFrom sender-ID validation: restore sender-only runtime validation so negative chat/group IDs remain invalid entries instead of appearing accepted while still being unable to authorize group access. (#37134) Thanks @qiuyuemartin-max and @vincentkoc.
  • -
  • Telegram/native group command auth: authorize native commands in groups and forum topics against groupAllowFrom and per-group/topic sender overrides, while keeping auth rejection replies in the originating topic thread. (#39267) Thanks @edwluo.
  • -
  • Telegram/named-account DMs: restore non-default-account DM routing when a named Telegram account falls back to the default agent by keeping groups fail-closed but deriving a per-account session key for DMs, including identity-link canonicalization and regression coverage for account isolation. (from #32426; fixes #32351) Thanks @chengzhichao-xydt.
  • -
  • Discord/audit wildcard warnings: ignore "*" wildcard keys when counting unresolved guild channels so doctor/status no longer warns on allow-all configs. (#33125) Thanks @thewilloftheshadow.
  • -
  • Discord/channel resolution: default bare numeric recipients to channels, harden allowlist numeric ID handling with safe fallbacks, and avoid inbound WS heartbeat stalls. (#33142) Thanks @thewilloftheshadow.
  • -
  • Discord/chunk delivery reliability: preserve chunk ordering when using a REST client and retry chunk sends on 429/5xx using account retry settings. (#33226) Thanks @thewilloftheshadow.
  • -
  • Discord/mention handling: add id-based mention formatting + cached rewrites, resolve inbound mentions to display names, and add optional ignoreOtherMentions gating (excluding @everyone/@here). (#33224) Thanks @thewilloftheshadow.
  • -
  • Discord/media SSRF allowlist: allow Discord CDN hostnames (including wildcard domains) in inbound media SSRF policy to prevent proxy/VPN fake-ip blocks. (#33275) Thanks @thewilloftheshadow.
  • -
  • Telegram/device pairing notifications: auto-arm one-shot notify on /pair qr, auto-ping on new pairing requests, and add manual fallback via /pair approve latest if the ping does not arrive. (#33299) thanks @mbelinky.
  • -
  • Exec heartbeat routing: scope exec-triggered heartbeat wakes to agent session keys so unrelated agents are no longer awakened by exec events, while preserving legacy unscoped behavior for non-canonical session keys. (#32724) thanks @altaywtf.
  • -
  • macOS/Tailscale remote gateway discovery: add a Tailscale Serve fallback peer probe path (wss://<hostname>.ts.net) when Bonjour and wide-area DNS-SD discovery return no gateways, and refresh both discovery paths from macOS onboarding. (#32860) Thanks @ngutman.
  • -
  • iOS/Gateway keychain hardening: move gateway metadata and TLS fingerprints to device keychain storage with safer migration behavior and rollback-safe writes to reduce credential loss risk during upgrades. (#33029) thanks @mbelinky.
  • -
  • iOS/Concurrency stability: replace risky shared-state access in camera and gateway connection paths with lock-protected access patterns to reduce crash risk under load. (#33241) thanks @mbelinky.
  • -
  • iOS/Security guardrails: limit production API-key sourcing to app config and make deep-link confirmation prompts safer by coalescing queued requests instead of silently dropping them. (#33031) thanks @mbelinky.
  • -
  • iOS/TTS playback fallback: keep voice playback resilient by switching from PCM to MP3 when provider format support is unavailable, while avoiding sticky fallback on generic local playback errors. (#33032) thanks @mbelinky.
  • -
  • Plugin outbound/text-only adapter compatibility: allow direct-delivery channel plugins that only implement sendText (without sendMedia) to remain outbound-capable, gracefully fall back to text delivery for media payloads when sendMedia is absent, and fail explicitly for media-only payloads with no text fallback. (#32788) thanks @liuxiaopai-ai.
  • -
  • Telegram/multi-account default routing clarity: warn only for ambiguous (2+) account setups without an explicit default, add openclaw doctor warnings for missing/invalid multi-account defaults across channels, and document explicit-default guidance for channel routing and Telegram config. (#32544) thanks @Sid-Qin.
  • -
  • Telegram/plugin outbound hook parity: run message_sending + message_sent in Telegram reply delivery, include reply-path hook metadata (mediaUrls, threadId), and report message_sent.success=false when hooks blank text and no outbound message is delivered. (#32649) Thanks @KimGLee.
  • -
  • CLI/Coding-agent reliability: switch default claude-cli non-interactive args to --permission-mode bypassPermissions, auto-normalize legacy --dangerously-skip-permissions backend overrides to the modern permission-mode form, align coding-agent + live-test docs with the non-PTY Claude path, and emit session system-event heartbeat notices when CLI watchdog no-output timeouts terminate runs. (#28610, #31149, #34055). Thanks @niceysam, @cryptomaltese and @vincentkoc.
  • -
  • Gateway/OpenAI chat completions: parse active-turn image_url content parts (including parameterized data URIs and guarded URL sources), forward them as multimodal images, accept image-only user turns, enforce per-request image-part/byte budgets, default URL-based image fetches to disabled unless explicitly enabled by config, and redact image base64 data in cache-trace/provider payload diagnostics. (#17685) Thanks @vincentkoc.
  • -
  • ACP/ACPX session bootstrap: retry with sessions new when sessions ensure returns no session identifiers so ACP spawns avoid NO_SESSION/ACP_TURN_FAILED failures on affected agents. (#28786, #31338, #34055). Thanks @Sid-Qin and @vincentkoc.
  • -
  • ACP/sessions_spawn parent stream visibility: add streamTo: "parent" for runtime: "acp" to forward initial child-run progress/no-output/completion updates back into the requester session as system events (instead of direct child delivery), and emit a tail-able session-scoped relay log (.acp-stream.jsonl, returned as streamLogPath when available), improving orchestrator visibility for blocked or long-running harness turns. (#34310, #29909; reopened from #34055). Thanks @vincentkoc.
  • -
  • Agents/bootstrap truncation warning handling: unify bootstrap budget/truncation analysis across embedded + CLI runtime, /context, and openclaw doctor; add agents.defaults.bootstrapPromptTruncationWarning (off|once|always, default once) and persist warning-signature metadata so truncation warnings are consistent and deduped across turns. (#32769) Thanks @gumadeiras.
  • -
  • Agents/Skills runtime loading: propagate run config into embedded attempt and compaction skill-entry loading so explicitly enabled bundled companion skills are discovered consistently when skill snapshots do not already provide resolved entries. Thanks @gumadeiras.
  • -
  • Agents/Session startup date grounding: substitute YYYY-MM-DD placeholders in startup/post-compaction AGENTS context and append runtime current-time lines for /new and /reset prompts so daily-memory references resolve correctly. (#32381) Thanks @chengzhichao-xydt.
  • -
  • Agents/Compaction template heading alignment: update AGENTS template section names to Session Startup/Red Lines and keep legacy Every Session/Safety fallback extraction so post-compaction context remains intact across template versions. (#25098) thanks @echoVic.
  • -
  • Agents/Compaction continuity: expand staged-summary merge instructions to preserve active task status, batch progress, latest user request, and follow-up commitments so compaction handoffs retain in-flight work context. (#8903) thanks @joetomasone.
  • -
  • Agents/Compaction safeguard structure hardening: require exact fallback summary headings, sanitize untrusted compaction instruction text before prompt embedding, and keep structured sections when preserving all turns. (#25555) thanks @rodrigouroz.
  • -
  • Gateway/status self version reporting: make Gateway self version in openclaw status prefer runtime VERSION (while preserving explicit OPENCLAW_VERSION override), preventing stale post-upgrade app version output. (#32655) thanks @liuxiaopai-ai.
  • -
  • Memory/QMD index isolation: set QMD_CONFIG_DIR alongside XDG_CONFIG_HOME so QMD config state stays per-agent despite upstream XDG handling bugs, preventing cross-agent collection indexing and excess disk/CPU usage. (#27028) thanks @HenryLoenwind.
  • -
  • Memory/QMD collection safety: stop destructive collection rebinds when QMD collection list only reports names without path metadata, preventing memory search from dropping existing collections if re-add fails. (#36870) Thanks @Adnannnnnnna.
  • -
  • Memory/QMD duplicate-document recovery: detect UNIQUE constraint failed: documents.collection, documents.path update failures, rebuild managed collections once, and retry update so periodic QMD syncs recover instead of failing every run; includes regression coverage to avoid over-matching unrelated unique constraints. (#27649) Thanks @MiscMich.
  • -
  • Memory/local embedding initialization hardening: add regression coverage for transient initialization retry and mixed embedQuery + embedBatch concurrent startup to lock single-flight initialization behavior. (#15639) thanks @SubtleSpark.
  • -
  • CLI/Coding-agent reliability: switch default claude-cli non-interactive args to --permission-mode bypassPermissions, auto-normalize legacy --dangerously-skip-permissions backend overrides to the modern permission-mode form, align coding-agent + live-test docs with the non-PTY Claude path, and emit session system-event heartbeat notices when CLI watchdog no-output timeouts terminate runs. Related to #28261. Landed from contributor PRs #28610 and #31149. Thanks @niceysam, @cryptomaltese and @vincentkoc.
  • -
  • ACP/ACPX session bootstrap: retry with sessions new when sessions ensure returns no session identifiers so ACP spawns avoid NO_SESSION/ACP_TURN_FAILED failures on affected agents. Related to #28786. Landed from contributor PR #31338. Thanks @Sid-Qin and @vincentkoc.
  • -
  • LINE/auth boundary hardening synthesis: enforce strict LINE webhook authn/z boundary semantics across pairing-store account scoping, DM/group allowlist separation, fail-closed webhook auth/runtime behavior, and replay/duplication controls (including in-flight replay reservation and post-success dedupe marking). (from #26701, #26683, #25978, #17593, #16619, #31990, #26047, #30584, #18777) Thanks @bmendonca3, @davidahmann, @harshang03, @haosenwang1018, @liuxiaopai-ai, @coygeek, and @Takhoffman.
  • -
  • LINE/media download synthesis: fix file-media download handling and M4A audio classification across overlapping LINE regressions. (from #26386, #27761, #27787, #29509, #29755, #29776, #29785, #32240) Thanks @kevinWangSheng, @loiie45e, @carrotRakko, @Sid-Qin, @codeafridi, and @bmendonca3.
  • -
  • LINE/context and routing synthesis: fix group/room peer routing and command-authorization context propagation, and keep processing later events in mixed-success webhook batches. (from #21955, #24475, #27035, #28286) Thanks @lailoo, @mcaxtr, @jervyclaw, @Glucksberg, and @Takhoffman.
  • -
  • LINE/status/config/webhook synthesis: fix status false positives from snapshot/config state and accept LINE webhook HEAD probes for compatibility. (from #10487, #25726, #27537, #27908, #31387) Thanks @BlueBirdBack, @stakeswky, @loiie45e, @puritysb, and @mcaxtr.
  • -
  • LINE cleanup/test follow-ups: fold cleanup/test learnings into the synthesis review path while keeping runtime changes focused on regression fixes. (from #17630, #17289) Thanks @Clawborn and @davidahmann.
  • -
  • Mattermost/interactive buttons: add interactive button send/callback support with directory-based channel/user target resolution, and harden callbacks via account-scoped HMAC verification plus sender-scoped DM routing. (#19957) thanks @tonydehnke.
  • -
  • Feishu/groupPolicy legacy alias compatibility: treat legacy groupPolicy: "allowall" as open in both schema parsing and runtime policy checks so intended open-group configs no longer silently drop group messages when groupAllowFrom is empty. (from #36358) Thanks @Sid-Qin.
  • -
  • Mattermost/plugin SDK import policy: replace remaining monolithic openclaw/plugin-sdk imports in Mattermost mention-gating paths/tests with scoped subpaths (openclaw/plugin-sdk/compat and openclaw/plugin-sdk/mattermost) so pnpm check passes lint:plugins:no-monolithic-plugin-sdk-entry-imports on baseline. (#36480) Thanks @Takhoffman.
  • -
  • Telegram/polls: add Telegram poll action support to channel action discovery and tool/CLI poll flows, with multi-account discoverability gated to accounts that can actually execute polls (sendMessage + poll). (#36547) thanks @gumadeiras.
  • -
  • Agents/failover cooldown classification: stop treating generic cooling down text as provider rate_limit so healthy models no longer show false global cooldown/rate-limit warnings while explicit model_cooldown markers still trigger failover. (#32972) thanks @stakeswky.
  • -
  • Agents/failover service-unavailable handling: stop treating bare proxy/CDN service unavailable errors as provider overload while keeping them retryable via the timeout/failover path, so transient outages no longer show false rate-limit warnings or block fallback. (#36646) thanks @jnMetaCode.
  • -
  • Plugins/HTTP route migration diagnostics: rewrite legacy api.registerHttpHandler(...) loader failures into actionable migration guidance so doctor/plugin diagnostics point operators to api.registerHttpRoute(...) or registerPluginHttpRoute(...). (#36794) Thanks @vincentkoc.
  • -
  • Doctor/Heartbeat upgrade diagnostics: warn when heartbeat delivery is configured with an implicit directPolicy so upgrades pin direct/DM behavior explicitly instead of relying on the current default. (#36789) Thanks @vincentkoc.
  • -
  • Agents/current-time UTC anchor: append a machine-readable UTC suffix alongside local Current time: lines in shared cron-style prompt contexts so agents can compare UTC-stamped workspace timestamps without doing timezone math. (#32423) thanks @jriff.
  • -
  • Ollama/local model handling: preserve explicit lower contextWindow / maxTokens overrides during merge refresh, and keep native Ollama streamed replies from surfacing fallback thinking / reasoning text once real content starts streaming. (#39292) Thanks @vincentkoc.
  • -
  • TUI/webchat command-owner scope alignment: treat internal-channel gateway sessions with operator.admin as owner-authorized in command auth, restoring cron/gateway/connector tool access for affected TUI/webchat sessions while keeping external channels on identity-based owner checks. (from #35666, #35673, #35704) Thanks @Naylenv, @Octane0411, and @Sid-Qin.
  • -
  • Discord/inbound timeout isolation: separate inbound worker timeout tracking from listener timeout budgets so queued Discord replies are no longer dropped when listener watchdog windows expire mid-run. (#36602) Thanks @dutifulbob.
  • -
  • Memory/doctor SecretRef handling: treat SecretRef-backed memory-search API keys as configured, and fail embedding setup with explicit unresolved-secret errors instead of crashing. (#36835) Thanks @joshavant.
  • -
  • Memory/flush default prompt: ban timestamped variant filenames during default memory flush runs so durable notes stay in the canonical daily memory/YYYY-MM-DD.md file. (#34951) thanks @zerone0x.
  • -
  • Agents/reply delivery timing: flush embedded Pi block replies before waiting on compaction retries so already-generated assistant replies reach channels before compaction wait completes. (#35489) thanks @Sid-Qin.
  • -
  • Agents/gateway config guidance: stop exposing config.schema through the agent gateway tool, remove prompt/docs guidance that told agents to call it, and keep agents on config.get plus config.patch/config.apply for config changes. (#7382) thanks @kakuteki.
  • -
  • Provider/KiloCode: Keep duplicate models after malformed discovery rows, and strip legacy reasoning_effort when proxy reasoning injection is skipped. (#32352) Thanks @pandemicsyn and @vincentkoc.
  • -
  • Agents/failover: classify periodic provider limit exhaustion text (for example Weekly/Monthly Limit Exhausted) as rate_limit while keeping explicit 402 Payment Required variants in billing, so failover continues without misclassifying billing-wrapped quota errors. (#33813) thanks @zhouhe-xydt.
  • -
  • Mattermost/interactive button callbacks: allow external callback base URLs and stop requiring loopback-origin requests so button clicks work when Mattermost reaches the gateway over Tailscale, LAN, or a reverse proxy. (#37543) thanks @mukhtharcm.
  • -
  • Gateway/chat.send route inheritance: keep explicit external delivery for channel-scoped sessions while preventing shared-main and other channel-agnostic webchat sessions from inheriting stale external routes, so Control UI replies stay on webchat without breaking selected channel-target sessions. (#34669) Thanks @vincentkoc.
  • -
  • Telegram/Discord media upload caps: make outbound uploads honor channel mediaMaxMb config, raise Telegram's default media cap to 100MB, and remove MIME fallback limits that kept some Telegram uploads at 16MB. Thanks @vincentkoc.
  • -
  • Skills/nano-banana-pro resolution override: respect explicit --resolution values during image editing and only auto-detect output size from input images when the flag is omitted. (#36880) Thanks @shuofengzhang and @vincentkoc.
  • -
  • Skills/openai-image-gen CLI validation: validate --background and --style inputs early, normalize supported values, and warn when those flags are ignored for incompatible models. (#36762) Thanks @shuofengzhang and @vincentkoc.
  • -
  • Skills/openai-image-gen output formats: validate --output-format values early, normalize aliases like jpg -> jpeg, and warn when the flag is ignored for incompatible models. (#36648) Thanks @shuofengzhang and @vincentkoc.
  • -
  • ACP/skill env isolation: strip skill-injected API keys from ACP harness child-process environments so tools like Codex CLI keep their own auth flow instead of inheriting billed provider keys from active skills. (#36316) Thanks @taw0002 and @vincentkoc.
  • -
  • WhatsApp media upload caps: make outbound media sends and auto-replies honor channels.whatsapp.mediaMaxMb with per-account overrides so inbound and outbound limits use the same channel config. Thanks @vincentkoc.
  • -
  • Windows/Plugin install: when OpenClaw runs on Windows via Bun and npm-cli.js is not colocated with the runtime binary, fall back to npm.cmd/npx.cmd through the existing cmd.exe wrapper so openclaw plugins install no longer fails with spawn EINVAL. (#38056) Thanks @0xlin2023.
  • -
  • Telegram/send retry classification: retry grammY Network request ... failed after N attempts envelopes in send flows without reclassifying plain Network request ... failed! wrappers as transient, restoring the intended retry path while keeping broad send-context message matching tight. (#38056) Thanks @0xlin2023.
  • -
  • Gateway/probes: keep /health, /healthz, /ready, and /readyz reachable when the Control UI is mounted at /, preserve plugin-owned route precedence on those paths, and make /ready and /readyz report channel-backed readiness with startup grace plus 503 on disconnected managed channels, while /health and /healthz stay shallow liveness probes. (#18446) Thanks @vibecodooor, @mahsumaktas, and @vincentkoc.
  • -
  • Feishu/media downloads: drop invalid timeout fields from SDK method calls now that client-level httpTimeoutMs applies to requests. (#38267) Thanks @ant1eicher and @thewilloftheshadow.
  • -
  • PI embedded runner/Feishu docs: propagate sender identity into embedded attempts so Feishu doc auto-grant restores requester access for embedded-runner executions. (#32915) thanks @cszhouwei.
  • -
  • Agents/usage normalization: normalize missing or partial assistant usage snapshots before compaction accounting so openclaw agent --json no longer crashes when provider payloads omit totalTokens or related usage fields. (#34977) thanks @sp-hk2ldn.
  • -
  • Venice/default model refresh: switch the built-in Venice default to kimi-k2-5, update onboarding aliasing, and refresh Venice provider docs/recommendations to match the current private and anonymized catalog. (from #12964) Fixes #20156. Thanks @sabrinaaquino and @vincentkoc.
  • -
  • Agents/skill API write pacing: add a global prompt guardrail that treats skill-driven external API writes as rate-limited by default, so runners prefer batched writes, avoid tight request loops, and respect 429/Retry-After. Thanks @vincentkoc.
  • -
  • Google Chat/multi-account webhook auth fallback: when channels.googlechat.accounts.default carries shared webhook audience/path settings (for example after config normalization), inherit those defaults for named accounts while preserving top-level and per-account overrides, so inbound webhook verification no longer fails silently for named accounts missing duplicated audience fields. Fixes #38369.
  • -
  • Models/tool probing: raise the tool-capability probe budget from 32 to 256 tokens so reasoning models that spend tokens on thinking before returning a required tool call are less likely to be misclassified as not supporting tools. (#7521) Thanks @jakobdylanc.
  • -
  • Gateway/transient network classification: treat wrapped ...: fetch failed transport messages as transient while avoiding broad matches like Web fetch failed (404): ..., preventing Discord reconnect wrappers from crashing the gateway without suppressing non-network tool failures. (#38530) Thanks @xinhuagu.
  • -
  • ACP/console silent reply suppression: filter ACP NO_REPLY lead fragments and silent-only finals before openclaw agent logging/delivery so console-backed ACP sessions no longer leak NO/NO_REPLY placeholders. (#38436) Thanks @ql-wade.
  • -
  • Feishu/reply delivery reliability: disable block streaming in Feishu reply options so plain-text auto-render replies are no longer silently dropped before final delivery. (#38258) Thanks @xinhuagu.
  • -
  • Agents/reply MEDIA delivery: normalize local assistant MEDIA: paths before block/final delivery, keep media dedupe aligned with message-tool sends, and contain malformed media normalization failures so generated files send reliably instead of falling back to empty responses. (#38572) Thanks @obviyus.
  • -
  • Sessions/bootstrap cache rollover invalidation: clear cached workspace bootstrap snapshots whenever an existing sessionKey rolls to a new sessionId across auto-reply, command, and isolated cron session resolvers, so AGENTS.md/MEMORY.md/USER.md updates are reloaded after daily, idle, or forced session resets instead of staying stale until gateway restart. (#38494) Thanks @LivingInDrm.
  • -
  • Gateway/Telegram polling health monitor: skip stale-socket restarts for Telegram long-polling channels and thread channel identity through shared health evaluation so polling connections are not restarted on the WebSocket stale-socket heuristic. (#38395) Thanks @ql-wade and @Takhoffman.
  • -
  • Daemon/systemd fresh-install probe: check for OpenClaw's managed user unit before running systemctl --user is-enabled, so first-time Linux installs no longer fail on generic missing-unit probe errors. (#38819) Thanks @adaHubble.
  • -
  • Gateway/container lifecycle: allow openclaw gateway stop to SIGTERM unmanaged gateway listeners and openclaw gateway restart to SIGUSR1 a single unmanaged listener when no service manager is installed, so container and supervisor-based deployments are no longer blocked by service disabled no-op responses. Fixes #36137. Thanks @vincentkoc.
  • -
  • Gateway/Windows restart supervision: relaunch task-managed gateways through Scheduled Task with quoted helper-script command paths, distinguish restart-capable supervisors per platform, and stop orphaned Windows gateway children during self-restart. (#38825) Thanks @obviyus.
  • -
  • Telegram/native topic command routing: resolve forum-topic native commands through the same conversation route as inbound messages so topic agentId overrides and bound topic sessions target the active session instead of the default topic-parent session. (#38871) Thanks @obviyus.
  • -
  • Markdown/assistant image hardening: flatten remote markdown images to plain text across the Control UI, exported HTML, and shared Swift chat while keeping inline data:image/... markdown renderable, so model output no longer triggers automatic remote image fetches. (#38895) Thanks @obviyus.
  • -
  • Config/compaction safeguard settings: regression-test agents.defaults.compaction.recentTurnsPreserve through loadConfig() and cover the new help metadata entry so the exposed preserve knob stays wired through schema validation and config UX. (#25557) Thanks @rodrigouroz.
  • -
  • iOS/Quick Setup presentation: skip automatic Quick Setup when a gateway is already configured (active connect config, last-known connection, preferred gateway, or manual host), so reconnecting installs no longer get prompted to connect again. (#38964) Thanks @ngutman.
  • -
  • CLI/Docs memory help accuracy: clarify openclaw memory status --deep behavior and align memory command examples/docs with the current search options. (#31803) Thanks @JasonOA888 and @Avi974.
  • -
  • Auto-reply/allowlist store account scoping: keep /allowlist ... --store writes scoped to the selected account and clear legacy unscoped entries when removing default-account store access, preventing cross-account default allowlist bleed-through from legacy pairing-store reads. Thanks @tdjackey for reporting and @vincentkoc for the fix.
  • -
  • Security/Nostr: harden profile mutation/import loopback guards by failing closed on non-loopback forwarded client headers (x-forwarded-for / x-real-ip) and rejecting sec-fetch-site: cross-site; adds regression coverage for proxy-forwarded and browser cross-site mutation attempts.
  • -
  • CLI/bootstrap Node version hint maintenance: replace hardcoded nvm 22 instructions in openclaw.mjs with MIN_NODE_MAJOR interpolation so future minimum-Node bumps keep startup guidance in sync automatically. (#39056) Thanks @onstash.
  • -
  • Discord/native slash command auth: honor commands.allowFrom.discord (and commands.allowFrom["*"]) in guild slash-command pre-dispatch authorization so allowlisted senders are no longer incorrectly rejected as unauthorized. (#38794) Thanks @jskoiz and @thewilloftheshadow.
  • -
  • Outbound/message target normalization: ignore empty legacy to/channelId fields when explicit target is provided so valid target-based sends no longer fail legacy-param validation; includes regression coverage. (#38944) Thanks @Narcooo.
  • -
  • Models/auth token prompts: guard cancelled manual token prompts so Symbol(clack:cancel) values cannot be persisted into auth profiles; adds regression coverage for cancelled models auth paste-token. (#38951) Thanks @MumuTW.
  • -
  • Gateway/loopback announce URLs: treat http:// and https:// aliases with the same loopback/private-network policy as websocket URLs so loopback cron announce delivery no longer fails secure URL validation. (#39064) Thanks @Narcooo.
  • -
  • Models/default provider fallback: when the hardcoded default provider is removed from models.providers, resolve defaults from configured providers instead of reporting stale removed-provider defaults in status output. (#38947) Thanks @davidemanuelDEV.
  • -
  • Agents/cache-trace stability: guard stable stringify against circular references in trace payloads so near-limit payloads no longer crash with Maximum call stack size exceeded; adds regression coverage. (#38935) Thanks @MumuTW.
  • -
  • Extensions/diffs CI stability: add headers to the localReq test helper in extensions/diffs/index.test.ts so forwarding-hint checks no longer crash with req.headers undefined. (supersedes #39063) Thanks @Shennng.
  • -
  • Agents/compaction thresholding: apply agents.defaults.contextTokens cap to the model passed into embedded run and /compact session creation so auto-compaction thresholds use the effective context window, not native model max context. (#39099) Thanks @MumuTW.
  • -
  • Models/merge mode provider precedence: when models.mode: "merge" is active and config explicitly sets a provider baseUrl, keep config as source of truth instead of preserving stale runtime models.json baseUrl values; includes normalized provider-key coverage. (#39103) Thanks @BigUncle.
  • -
  • UI/Control chat tool streaming: render tool events live in webchat without requiring refresh by enabling tool-events capability, fixing stream/event correlation, and resetting/reloading stream state around tool results and terminal events. (#39104) Thanks @jakepresent.
  • -
  • Models/provider apiKey persistence hardening: when a provider apiKey value equals a known provider env var value, persist the canonical env var name into models.json instead of resolved plaintext secrets. (#38889) Thanks @gambletan.
  • -
  • Discord/model picker persistence check: add a short post-dispatch settle delay before reading back session model state so picker confirmations stop reporting false mismatch warnings after successful model switches. (#39105) Thanks @akropp.
  • -
  • Agents/OpenAI WS compat store flag: omit store from response.create payloads when model compat sets supportsStore: false, preventing strict OpenAI-compatible providers from rejecting websocket requests with unknown-field errors. (#39113) Thanks @scoootscooob.
  • -
  • Config/validation log sanitization: sanitize config-validation issue paths/messages before logging so control characters and ANSI escape sequences cannot inject misleading terminal output from crafted config content. (#39116) Thanks @powermaster888.
  • -
  • Agents/compaction counter accuracy: count successful overflow-triggered auto-compactions (willRetry=true) in the compaction counter while still excluding aborted/no-result events, so /status reflects actual safeguard compaction activity. (#39123) Thanks @MumuTW.
  • -
  • Gateway/chat delta ordering: flush buffered assistant deltas before emitting tool start events so pre-tool text is delivered to Control UI before tool cards, avoiding transient text/tool ordering artifacts in streaming. (#39128) Thanks @0xtangping.
  • -
  • Voice-call plugin schema parity: add missing manifest configSchema fields (webhookSecurity, streaming.preStartTimeoutMs|maxPendingConnections|maxPendingConnectionsPerIp|maxConnections, staleCallReaperSeconds) so gateway AJV validation accepts already-supported runtime config instead of failing with additionalProperties errors. (#38892) Thanks @giumex.
  • -
  • Agents/OpenAI WS reconnect retry accounting: avoid double retry scheduling when reconnect failures emit both error and close, so retry budgets track actual reconnect attempts instead of exhausting early. (#39133) Thanks @scoootscooob.
  • -
  • Daemon/Windows schtasks runtime detection: use locale-invariant Last Run Result running codes (0x41301/267009) as the primary running signal so openclaw node status no longer misreports active tasks as stopped on non-English Windows locales. (#39076) Thanks @ademczuk.
  • -
  • Usage/token count formatting: round near-million token counts to millions (1.0m) instead of 1000k, with explicit boundary coverage for 999_499 and 999_500. (#39129) Thanks @CurryMessi.
  • -
  • Gateway/session bootstrap cache invalidation ordering: clear bootstrap snapshots only after active embedded-run shutdown wait completes, preventing dying runs from repopulating stale cache between /new/sessions.reset turns. (#38873) Thanks @MumuTW.
  • -
  • Browser/dispatcher error clarity: preserve dispatcher-side failure context in browser fetch errors while still appending operator guidance and explicit no-retry model hints, preventing misleading "Can't reach service" wrapping and avoiding LLM retry loops. (#39090) Thanks @NewdlDewdl.
  • -
  • Telegram/polling offset safety: confirm persisted offsets before polling startup while validating stored lastUpdateId values as non-negative safe integers (with overflow guards) so malformed offset state cannot cause update skipping/dropping. (#39111) Thanks @MumuTW.
  • -
  • Telegram/status SecretRef read-only resolution: resolve env-backed bot-token SecretRefs in config-only/status inspection while respecting provider source/defaults and env allowlists, so status no longer crashes or reports false-ready tokens for disallowed providers. (#39130) Thanks @neocody.
  • -
  • Agents/OpenAI WS max-token zero forwarding: treat maxTokens: 0 as an explicit value in websocket response.create payloads (instead of dropping it as falsy), with regression coverage for zero-token forwarding. (#39148) Thanks @scoootscooob.
  • -
  • Podman/.env gateway bind precedence: evaluate OPENCLAW_GATEWAY_BIND after sourcing .env in run-openclaw-podman.sh so env-file overrides are honored. (#38785) Thanks @majinyu666.
  • -
  • Models/default alias refresh: bump gpt to openai/gpt-5.4 and Gemini defaults to gemini-3.1 preview aliases (including normalization/default wiring) to track current model IDs. (#38638) Thanks @ademczuk.
  • -
  • Config/env substitution degraded mode: convert missing ${VAR} resolution in config reads from hard-fail to warning-backed degraded behavior, while preventing unresolved placeholders from being accepted as gateway credentials. (#39050) Thanks @akz142857.
  • -
  • Discord inbound listener non-blocking dispatch: make MESSAGE_CREATE listener handoff asynchronous (no per-listener queue blocking), so long runs no longer stall unrelated incoming events. (#39154) Thanks @yaseenkadlemakki.
  • -
  • Daemon/Windows PATH freeze fix: stop persisting install-time PATH snapshots into Scheduled Task scripts so runtime tool lookup follows current host PATH updates; also refresh local TUI history on silent local finals. (#39139) Thanks @Narcooo.
  • -
  • Gateway/systemd service restart hardening: clear stale gateway listeners by explicit run-port before service bind, add restart stale-pid port-override support, tune systemd start/stop/exit handling, and disable detached child mode only in service-managed runtime so cgroup stop semantics clean up descendants reliably. (#38463) Thanks @spirittechie.
  • -
  • Discord/plugin native command aliases: let plugins declare provider-specific slash names so native Discord registration can avoid built-in command collisions; the bundled Talk voice plugin now uses /talkvoice natively on Discord while keeping text /voice.
  • -
  • Daemon/Windows schtasks status normalization: derive runtime state from locale-neutral numeric Last Run Result codes only (without language string matching) and surface unknown when numeric result data is unavailable, preventing locale-specific misclassification drift. (#39153) Thanks @scoootscooob.
  • -
  • Telegram/polling conflict recovery: reset the polling webhookCleared latch on getUpdates 409 conflicts so webhook cleanup re-runs on restart cycles and polling avoids infinite conflict loops. (#39205) Thanks @amittell.
  • -
  • Heartbeat/requests-in-flight scheduling: stop advancing nextDueMs and avoid immediate scheduleNext() timer overrides on requests-in-flight skips, so wake-layer retry cooldowns are honored and heartbeat cadence no longer drifts under sustained contention. (#39182) Thanks @MumuTW.
  • -
  • Memory/SQLite contention resilience: re-apply PRAGMA busy_timeout on every sync-store and QMD connection open so process restarts/reopens no longer revert to immediate SQLITE_BUSY failures under lock contention. (#39183) Thanks @MumuTW.
  • -
  • Gateway/webchat route safety: block webchat/control-ui clients from inheriting stored external delivery routes on channel-scoped sessions (while preserving route inheritance for UI/TUI clients), preventing cross-channel leakage from scoped chats. (#39175) Thanks @widingmarcus-cyber.
  • -
  • Telegram error-surface resilience: return a user-visible fallback reply when dispatch/debounce processing fails instead of going silent, while preserving draft-stream cleanup and best-effort thread-scoped fallback delivery. (#39209) Thanks @riftzen-bit.
  • -
  • Gateway/password auth startup diagnostics: detect unresolved provider-reference objects in gateway.auth.password and fail with a specific bootstrap-secrets error message instead of generic misconfiguration output. (#39230) Thanks @ademczuk.
  • -
  • Agents/OpenAI-responses compatibility: strip unsupported store payload fields when supportsStore=false (including OpenAI-compatible non-OpenAI providers) while preserving server-compaction payload behavior. (#39219) Thanks @ademczuk.
  • -
  • Agents/model fallback visibility: warn when configured model IDs cannot be resolved and fallback is applied, with log-safe sanitization of model text to prevent control-sequence injection in warning output. (#39215) Thanks @ademczuk.
  • -
  • Outbound delivery replay safety: use two-phase delivery ACK markers (.json -> .delivered -> unlink) and startup marker cleanup so crash windows between send and cleanup do not replay already-delivered messages. (#38668) Thanks @Gundam98.
  • -
  • Nodes/system.run approval binding: carry prepared approval plans through gateway forwarding and bind interpreter-style script operands across approval to execution, so post-approval script rewrites are denied while unchanged approved script runs keep working. Thanks @tdjackey for reporting.
  • -
  • Nodes/system.run PowerShell wrapper parsing: treat pwsh/powershell -EncodedCommand forms as shell-wrapper payloads so allowlist mode still requires approval instead of falling back to plain argv analysis. Thanks @tdjackey for reporting.
  • -
  • Control UI/auth error reporting: map generic browser Fetch failed websocket close errors back to actionable gateway auth messages (gateway token mismatch, authentication failed, retry later) so dashboard disconnects stop hiding credential problems. Landed from contributor PR #28608 by @KimGLee. Thanks @KimGLee.
  • -
  • Media/mime unknown-kind handling: return undefined (not "unknown") for missing/unrecognized MIME kinds and use document-size fallback caps for unknown remote media, preventing phantom Signal events from being treated as real messages. (#39199) Thanks @nicolasgrasset.
  • -
  • Nodes/system.run allow-always persistence: honor shell comment semantics during allowlist analysis so #-tailed payloads that never execute are not persisted as trusted follow-up commands. Thanks @tdjackey for reporting.
  • -
  • Signal/inbound attachment fan-in: forward all successfully fetched inbound attachments through MediaPaths/MediaUrls/MediaTypes (instead of only the first), and improve multi-attachment placeholder summaries in mention-gated pending history. (#39212) Thanks @joeykrug.
  • -
  • Nodes/system.run dispatch-wrapper boundary: keep shell-wrapper approval classification active at the depth boundary so env wrapper stacks cannot reach /bin/sh -c execution without the expected approval gate. Thanks @tdjackey for reporting.
  • -
  • Docker/token persistence on reconfigure: reuse the existing .env gateway token during docker-setup.sh reruns and align compose token env defaults, so Docker installs stop silently rotating tokens and breaking existing dashboard sessions. Landed from contributor PR #33097 by @chengzhichao-xydt. Thanks @chengzhichao-xydt.
  • -
  • Agents/strict OpenAI turn ordering: apply assistant-first transcript bootstrap sanitization to strict OpenAI-compatible providers (for example vLLM/Gemma via openai-completions) without adding Google-specific session markers, preventing assistant-first history rejections. (#39252) Thanks @scoootscooob.
  • -
  • Discord/exec approvals gateway auth: pass resolved shared gateway credentials into the Discord exec-approvals gateway client so token-auth installs stop failing approvals with gateway token mismatch. Related to #38179. Thanks @0riginal-claw for the adjacent PR #35147 investigation.
  • -
  • Subagents/workspace inheritance: propagate parent workspace directory to spawned subagent runs so child sessions reliably inherit workspace-scoped instructions (AGENTS.md, SOUL.md, etc.) without exposing workspace override through tool-call arguments. (#39247) Thanks @jasonQin6.
  • -
  • Exec approvals/gateway-node policy: honor explicit ask=off from exec-approvals.json even when runtime defaults are stricter, so trusted full/off setups stop re-prompting on gateway and node exec paths. Landed from contributor PR #26789 by @pandego. Thanks @pandego.
  • -
  • Exec approvals/config fallback: inherit ask from exec-approvals.json when tools.exec.ask is unset, so local full/off defaults no longer fall back to on-miss for exec tool and nodes run. Landed from contributor PR #29187 by @Bartok9. Thanks @Bartok9.
  • -
  • Exec approvals/allow-always shell scripts: persist and match script paths for wrapper invocations like bash scripts/foo.sh while still blocking -c/-s wrapper bypasses. Landed from contributor PR #35137 by @yuweuii. Thanks @yuweuii.
  • -
  • Queue/followup dedupe across drain restarts: dedupe queued redelivery message_id values after queue recreation so busy-session followups no longer duplicate on replayed inbound events. Landed from contributor PR #33168 by @rylena. Thanks @rylena.
  • -
  • Telegram/preview-final edit idempotence: treat message is not modified errors during preview finalization as delivered so partial-stream final replies do not fall back to duplicate sends. Landed from contributor PR #34983 by @HOYALIM. Thanks @HOYALIM.
  • -
  • Telegram/DM streaming transport parity: use message preview transport for all DM streaming lanes so final delivery can edit the active preview instead of sending duplicate finals. Landed from contributor PR #38906 by @gambletan. Thanks @gambletan.
  • -
  • Telegram/DM draft streaming restoration: restore native sendMessageDraft preview transport for DM answer streaming while keeping reasoning on message transport, with regression coverage to keep draft finalization from sending duplicate finals. (#39398) Thanks @obviyus.
  • -
  • Telegram/send retry safety: retry non-idempotent send paths only for pre-connect failures and make custom retry predicates strict, preventing ambiguous reconnect retries from sending duplicate messages. Landed from contributor PR #34238 by @hal-crackbot. Thanks @hal-crackbot.
  • -
  • ACP/run spawn delivery bootstrap: stop reusing requester inline delivery targets for one-shot mode: "run" ACP spawns, so fresh run-mode workers bootstrap in isolation instead of inheriting thread-bound session delivery behavior. (#39014) Thanks @lidamao633.
  • -
  • Discord/DM session-key normalization: rewrite legacy discord:dm:* and phantom direct-message discord:channel: session keys to discord:direct:* when the sender matches, so multi-agent Discord DMs stop falling into empty channel-shaped sessions and resume replying correctly.
  • -
  • Discord/native slash session fallback: treat empty configured bound-session keys as missing so /status and other native commands fall back to the routed slash session and routed channel session instead of blanking Discord session keys in normal channel bindings.
  • -
  • Agents/tool-call dispatch normalization: normalize provider-prefixed tool names before dispatch across toolCall, toolUse, and functionCall blocks, while preserving multi-segment tool suffixes when stripping provider wrappers so malformed-but-recoverable tool names no longer fail with Tool not found. (#39328) Thanks @vincentkoc.
  • -
  • Agents/parallel tool-call compatibility: honor parallel_tool_calls / parallelToolCalls extra params only for openai-completions and openai-responses payloads, preserve higher-precedence alias overrides across config and runtime layers, and ignore invalid non-boolean values so single-tool-call providers like NVIDIA-hosted Kimi stop failing on forced parallel tool-call payloads. (#37048) Thanks @vincentkoc.
  • -
  • Config/invalid-load fail-closed: stop converting INVALID_CONFIG into an empty runtime config, keep valid settings available only through explicit best-effort diagnostic reads, and route read-only CLI diagnostics through that path so unknown keys no longer silently drop security-sensitive config. (#28140) Thanks @bobsahur-robot and @vincentkoc.
  • -
  • Agents/codex-cli sandbox defaults: switch the built-in Codex backend from read-only to workspace-write so spawned coding runs can edit files out of the box. Landed from contributor PR #39336 by @0xtangping. Thanks @0xtangping.
  • -
  • Gateway/health-monitor restart reason labeling: report disconnected instead of stuck for clean channel disconnect restarts, so operator logs distinguish socket drops from genuinely stuck channels. (#36436) Thanks @Sid-Qin.
  • -
  • Control UI/agents-page overrides: auto-create minimal per-agent config entries when editing inherited agents, so model/tool/skill changes enable Save and inherited model fallbacks can be cleared by writing a primary-only override. Landed from contributor PR #39326 by @dunamismax. Thanks @dunamismax.
  • -
  • Gateway/Telegram webhook-mode recovery: add webhookCertPath to re-upload self-signed certificates during webhook registration and skip stale-socket detection for webhook-mode channels, so Telegram webhook setups survive health-monitor restarts. Landed from contributor PR #39313 by @fellanH. Thanks @fellanH.
  • -
  • Discord/config schema parity: add channels.discord.agentComponents to the strict Zod config schema so valid agentComponents.enabled settings (root and account-scoped) no longer fail with unrecognized-key validation errors. Landed from contributor PR #39378 by @gambletan. Thanks @gambletan and @thewilloftheshadow.
  • -
  • ACPX/MCP session bootstrap: inject configured MCP servers into ACP session/new and session/load for acpx-backed sessions, restoring Canva and other external MCP tools. Landed from contributor PR #39337. Thanks @goodspeed-apps.
  • -
  • Control UI/Telegram sender labels: preserve inbound sender labels in sanitized chat history so dashboard user-message groups split correctly and show real group-member names instead of You. (#39414) Thanks @obviyus.
  • -
-

View full changelog

-]]>
- -
- - 2026.3.2 - Tue, 03 Mar 2026 04:30:29 +0000 - https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml - 2026030290 - 2026.3.2 - 15.0 - OpenClaw 2026.3.2 -

Changes

-
    -
  • Secrets/SecretRef coverage: expand SecretRef support across the full supported user-supplied credential surface (64 targets total), including runtime collectors, openclaw secrets planning/apply/audit flows, onboarding SecretInput UX, and related docs; unresolved refs now fail fast on active surfaces while inactive surfaces report non-blocking diagnostics. (#29580) Thanks @joshavant.
  • -
  • Tools/PDF analysis: add a first-class pdf tool with native Anthropic and Google PDF provider support, extraction fallback for non-native models, configurable defaults (agents.defaults.pdfModel, pdfMaxBytesMb, pdfMaxPages), and docs/tests covering routing, validation, and registration. (#31319) Thanks @tyler6204.
  • -
  • Outbound adapters/plugins: add shared sendPayload support across direct-text-media, Discord, Slack, WhatsApp, Zalo, and Zalouser with multi-media iteration and chunk-aware text fallback. (#30144) Thanks @nohat.
  • -
  • Models/MiniMax: add first-class MiniMax-M2.5-highspeed support across built-in provider catalogs, onboarding flows, and MiniMax OAuth plugin defaults, while keeping legacy MiniMax-M2.5-Lightning compatibility for existing configs.
  • -
  • Sessions/Attachments: add inline file attachment support for sessions_spawn (subagent runtime only) with base64/utf8 encoding, transcript content redaction, lifecycle cleanup, and configurable limits via tools.sessions_spawn.attachments. (#16761) Thanks @napetrov.
  • -
  • Telegram/Streaming defaults: default channels.telegram.streaming to partial (from off) so new Telegram setups get live preview streaming out of the box, with runtime fallback to message-edit preview when native drafts are unavailable.
  • -
  • Telegram/DM streaming: use sendMessageDraft for private preview streaming, keep reasoning/answer preview lanes separated in DM reasoning-stream mode. (#31824) Thanks @obviyus.
  • -
  • Telegram/voice mention gating: add optional disableAudioPreflight on group/topic config to skip mention-detection preflight transcription for inbound voice notes where operators want text-only mention checks. (#23067) Thanks @yangnim21029.
  • -
  • CLI/Config validation: add openclaw config validate (with --json) to validate config files before gateway startup, and include detailed invalid-key paths in startup invalid-config errors. (#31220) Thanks @Sid-Qin.
  • -
  • Tools/Diffs: add PDF file output support and rendering quality customization controls (fileQuality, fileScale, fileMaxWidth) for generated diff artifacts, and document PDF as the preferred option when messaging channels compress images. (#31342) Thanks @gumadeiras.
  • -
  • Memory/Ollama embeddings: add memorySearch.provider = "ollama" and memorySearch.fallback = "ollama" support, honor models.providers.ollama settings for memory embedding requests, and document Ollama embedding usage. (#26349) Thanks @nico-hoff.
  • -
  • Zalo Personal plugin (@openclaw/zalouser): rebuilt channel runtime to use native zca-js integration in-process, removing external CLI transport usage and keeping QR/login + send/listen flows fully inside OpenClaw.
  • -
  • Plugin SDK/channel extensibility: expose channelRuntime on ChannelGatewayContext so external channel plugins can access shared runtime helpers (reply/routing/session/text/media/commands) without internal imports. (#25462) Thanks @guxiaobo.
  • -
  • Plugin runtime/STT: add api.runtime.stt.transcribeAudioFile(...) so extensions can transcribe local audio files through OpenClaw's configured media-understanding audio providers. (#22402) Thanks @benthecarman.
  • -
  • Plugin hooks/session lifecycle: include sessionKey in session_start/session_end hook events and contexts so plugins can correlate lifecycle callbacks with routing identity. (#26394) Thanks @tempeste.
  • -
  • Hooks/message lifecycle: add internal hook events message:transcribed and message:preprocessed, plus richer outbound message:sent context (isGroup, groupId) for group-conversation correlation and post-transcription automations. (#9859) Thanks @Drickon.
  • -
  • Media understanding/audio echo: add optional tools.media.audio.echoTranscript + echoFormat to send a pre-agent transcript confirmation message to the originating chat, with echo disabled by default. (#32150) Thanks @AytuncYildizli.
  • -
  • Plugin runtime/system: expose runtime.system.requestHeartbeatNow(...) so extensions can wake targeted sessions immediately after enqueueing system events. (#19464) Thanks @AustinEral.
  • -
  • Plugin runtime/events: expose runtime.events.onAgentEvent and runtime.events.onSessionTranscriptUpdate for extension-side subscriptions, and isolate transcript-listener failures so one faulty listener cannot break the entire update fanout. (#16044) Thanks @scifantastic.
  • -
  • CLI/Banner taglines: add cli.banner.taglineMode (random | default | off) to control funny tagline behavior in startup output, with docs + FAQ guidance and regression tests for config override behavior.
  • -
-

Breaking

-
    -
  • BREAKING: Onboarding now defaults tools.profile to messaging for new local installs (interactive + non-interactive). New setups no longer start with broad coding/system tools unless explicitly configured.
  • -
  • BREAKING: ACP dispatch now defaults to enabled unless explicitly disabled (acp.dispatch.enabled=false). If you need to pause ACP turn routing while keeping /acp controls, set acp.dispatch.enabled=false. Docs: https://docs.openclaw.ai/tools/acp-agents
  • -
  • BREAKING: Plugin SDK removed api.registerHttpHandler(...). Plugins must register explicit HTTP routes via api.registerHttpRoute({ path, auth, match, handler }), and dynamic webhook lifecycles should use registerPluginHttpRoute(...).
  • -
  • BREAKING: Zalo Personal plugin (@openclaw/zalouser) no longer depends on external zca-compatible CLI binaries (openzca, zca-cli) for runtime send/listen/login; operators should use openclaw channels login --channel zalouser after upgrade to refresh sessions in the new JS-native path.
  • -
-

Fixes

-
    -
  • Plugin command/runtime hardening: validate and normalize plugin command name/description at registration boundaries, and guard Telegram native menu normalization paths so malformed plugin command specs cannot crash startup (trim on undefined). (#31997) Fixes #31944. Thanks @liuxiaopai-ai.
  • -
  • Telegram: guard duplicate-token checks and gateway startup token normalization when account tokens are missing, preventing token.trim() crashes during status/start flows. (#31973) Thanks @ningding97.
  • -
  • Discord/lifecycle startup status: push an immediate connected status snapshot when the gateway is already connected before lifecycle debug listeners attach, with abort-guarding to avoid contradictory status flips during pre-aborted startup. (#32336) Thanks @mitchmcalister.
  • -
  • Feishu/LINE group system prompts: forward per-group systemPrompt config into inbound context GroupSystemPrompt for Feishu and LINE group/room events so configured group-specific behavior actually applies at dispatch time. (#31713) Thanks @whiskyboy.
  • -
  • Mentions/Slack formatting hardening: add null-safe guards for runtime text normalization paths so malformed/undefined text payloads do not crash mention stripping or mrkdwn conversion. (#31865) Thanks @stone-jin.
  • -
  • Feishu/Plugin sdk compatibility: add safe webhook default fallbacks when loading Feishu monitor state so mixed-version installs no longer crash if older openclaw/plugin-sdk builds omit webhook default constants. (#31606)
  • -
  • Feishu/group broadcast dispatch: add configurable multi-agent group broadcast dispatch with observer-session isolation, cross-account dedupe safeguards, and non-mention history buffering rules that avoid duplicate replay in broadcast/topic workflows. (#29575) Thanks @ohmyskyhigh.
  • -
  • Gateway/Subagent TLS pairing: allow authenticated local gateway-client backend self-connections to skip device pairing while still requiring pairing for non-local/direct-host paths, restoring sessions_spawn with gateway.tls.enabled=true in Docker/LAN setups. Fixes #30740. Thanks @Sid-Qin and @vincentkoc.
  • -
  • Browser/CDP startup diagnostics: include Chrome stderr output and a Linux no-sandbox hint in startup timeout errors so failed launches are easier to diagnose. (#29312) Thanks @veast.
  • -
  • Synology Chat/webhook ingress hardening: enforce bounded body reads (size + timeout) via shared request-body guards to prevent unauthenticated slow-body hangs before token validation. (#25831) Thanks @bmendonca3.
  • -
  • Feishu/Dedup restart resilience: warm persistent dedup state into memory on monitor startup so retry events after gateway restart stay suppressed without requiring initial on-disk probe misses. (#31605)
  • -
  • Voice-call/runtime lifecycle: prevent EADDRINUSE loops by resetting failed runtime promises, making webhook start() idempotent with the actual bound port, and fully cleaning up webhook/tunnel/tailscale resources after startup failures. (#32395) Thanks @scoootscooob.
  • -
  • Gateway/Security hardening: tie loopback-origin dev allowance to actual local socket clients (not Host header claims), add explicit warnings/metrics when gateway.controlUi.dangerouslyAllowHostHeaderOriginFallback accepts websocket origins, harden safe-regex detection for quantified ambiguous alternation patterns (for example (a|aa)+), and bound large regex-evaluation inputs for session-filter and log-redaction paths.
  • -
  • Gateway/Plugin HTTP hardening: require explicit auth for plugin route registration, add route ownership guards for duplicate path+match registrations, centralize plugin path matching/auth logic into dedicated modules, and share webhook target-route lifecycle wiring across channel monitors to avoid stale or conflicting registrations. Thanks @tdjackey for reporting.
  • -
  • Browser/Profile defaults: prefer openclaw profile over chrome in headless/no-sandbox environments unless an explicit defaultProfile is configured. (#14944) Thanks @BenediktSchackenberg.
  • -
  • Gateway/WS security: keep plaintext ws:// loopback-only by default, with explicit break-glass private-network opt-in via OPENCLAW_ALLOW_INSECURE_PRIVATE_WS=1; align onboarding/client/call validation and tests to this strict-default policy. (#28670) Thanks @dashed, @vincentkoc.
  • -
  • OpenAI Codex OAuth/TLS prerequisites: add an OAuth TLS cert-chain preflight with actionable remediation for cert trust failures, and gate doctor TLS prerequisite probing to OpenAI Codex OAuth-configured installs (or explicit doctor --deep) to avoid unconditional outbound probe latency. (#32051) Thanks @alexfilatov.
  • -
  • Security/Webhook request hardening: enforce auth-before-body parsing for BlueBubbles and Google Chat webhook handlers, add strict pre-auth body/time budgets for webhook auth paths (including LINE signature verification), and add shared in-flight/request guardrails plus regression tests/lint checks to prevent reintroducing unauthenticated slow-body DoS patterns. Thanks @GCXWLP for reporting.
  • -
  • CLI/Config validation and routing hardening: dedupe openclaw config validate failures to a single authoritative report, expose allowed-values metadata/hints across core Zod and plugin AJV validation (including --json fields), sanitize terminal-rendered validation text, and make command-path parsing root-option-aware across preaction/route/lazy registration (including routed config get/unset with split root options). Thanks @gumadeiras.
  • -
  • Browser/Extension relay reconnect tolerance: keep /json/version and /cdp reachable during short MV3 worker disconnects when attached targets still exist, and retain clients across reconnect grace windows. (#30232) Thanks @Sid-Qin.
  • -
  • CLI/Browser start timeout: honor openclaw browser --timeout start and stop by removing the fixed 15000ms override so slower Chrome startups can use caller-provided timeouts. (#22412, #23427) Thanks @vincentkoc.
  • -
  • Synology Chat/gateway lifecycle: keep startAccount pending until abort for inactive and active account paths to prevent webhook route restart loops under gateway supervision. (#23074) Thanks @druide67.
  • -
  • Exec approvals/allowlist matching: escape regex metacharacters in path-pattern literals (while preserving glob wildcards), preventing crashes on allowlisted executables like /usr/bin/g++ and correctly matching mixed wildcard/literal token paths. (#32162) Thanks @stakeswky.
  • -
  • Synology Chat/webhook compatibility: accept JSON and alias payload fields, allow token resolution from body/query/header sources, and ACK webhook requests with 204 to avoid persistent Processing... states in Synology Chat clients. (#26635) Thanks @memphislee09-source.
  • -
  • Voice-call/Twilio signature verification: retry signature validation across deterministic URL port variants (with/without port) to handle mixed Twilio signing behavior behind reverse proxies and non-standard ports. (#25140) Thanks @drvoss.
  • -
  • Slack/Bolt startup compatibility: remove invalid message.channels and message.groups event registrations so Slack providers no longer crash on startup with Bolt 4.6+; channel/group traffic continues through the unified message handler (channel_type). (#32033) Thanks @mahopan.
  • -
  • Slack/socket auth failure handling: fail fast on non-recoverable auth errors (account_inactive, invalid_auth, etc.) during startup and reconnect instead of retry-looping indefinitely, including unable_to_socket_mode_start error payload propagation. (#32377) Thanks @scoootscooob.
  • -
  • Gateway/macOS LaunchAgent hardening: write Umask=077 in generated gateway LaunchAgent plists so npm upgrades preserve owner-only default file permissions for gateway-created state files. (#31919) Fixes #31905. Thanks @liuxiaopai-ai. See also the follow-up entry (#32022) below, which encodes the value as decimal 63 (octal 077) in the plist.
  • -
  • macOS/LaunchAgent security defaults: write Umask=63 (octal 077) into generated gateway launchd plists so post-update service reinstalls keep owner-only file permissions by default instead of falling back to system 022. (#32022) Fixes #31905. Thanks @liuxiaopai-ai.
  • -
  • Media understanding/provider HTTP proxy routing: pass a proxy-aware fetch function from HTTPS_PROXY/HTTP_PROXY env vars into audio/video provider calls (with graceful malformed-proxy fallback) so transcription/video requests honor configured outbound proxies. (#27093) Thanks @mcaxtr.
  • -
  • Sandbox/workspace mount permissions: make primary /workspace bind mounts read-only whenever workspaceAccess is not rw (including none) across both core sandbox container and sandbox browser create flows. (#32227) Thanks @guanyu-zhang.
  • -
  • Tools/fsPolicy propagation: honor tools.fs.workspaceOnly for image/pdf local-root allowlists so non-sandbox media paths outside workspace are rejected when workspace-only mode is enabled. (#31882) Thanks @justinhuangcode.
  • -
  • Daemon/Homebrew runtime pinning: resolve Homebrew Cellar Node paths to stable Homebrew-managed symlinks (including versioned formulas like node@22) so gateway installs keep the intended runtime across brew upgrades. (#32185) Thanks @scoootscooob.
  • -
  • Browser/Security output boundary hardening: replace check-then-rename output commits with root-bound fd-verified writes, unify install/skills canonical path-boundary checks, and add regression coverage for symlink-rebind race paths across browser output and shared fs-safe write flows. Thanks @tdjackey for reporting.
  • -
  • Gateway/Security canonicalization hardening: decode plugin route path variants to canonical fixpoint (with bounded depth), fail closed on canonicalization anomalies, and enforce gateway auth for deeply encoded /api/channels/* variants to prevent alternate-path auth bypass through plugin handlers. Thanks @tdjackey for reporting.
  • -
  • Browser/Gateway hardening: preserve env credentials for OPENCLAW_GATEWAY_URL / CLAWDBOT_GATEWAY_URL while treating explicit --url as override-only auth, and make container browser hardening flags optional with safer defaults for Docker/LXC stability. (#31504) Thanks @vincentkoc.
  • -
  • Gateway/Control UI basePath webhook passthrough: let non-read methods under configured controlUiBasePath fall through to plugin routes (instead of returning Control UI 405), restoring webhook handlers behind basePath mounts. (#32311) Thanks @ademczuk.
  • -
  • Control UI/Legacy browser compatibility: replace toSorted-dependent cron suggestion sorting in app-render with a compatibility helper so older browsers without Array.prototype.toSorted no longer white-screen. (#31775) Thanks @liuxiaopai-ai.
  • -
  • macOS/PeekabooBridge: add compatibility socket symlinks for legacy clawdbot, clawdis, and moltbot Application Support socket paths so pre-rename clients can still connect. (#6033) Thanks @lumpinif and @vincentkoc.
  • -
  • Gateway/message tool reliability: avoid false Unknown channel failures when message.* actions receive platform-specific channel ids by falling back to toolContext.currentChannelProvider, and prevent health-monitor restart thrash for channels that just (re)started by adding a per-channel startup-connect grace window. (from #32367) Thanks @MunemHashmi.
  • -
  • Windows/Spawn canonicalization: unify non-core Windows spawn handling across ACP client, QMD/mcporter memory paths, and sandbox Docker execution using the shared wrapper-resolution policy, with targeted regression coverage for .cmd shim unwrapping and shell fallback behavior. (#31750) Thanks @Takhoffman.
  • -
  • Security/ACP sandbox inheritance: enforce fail-closed runtime guardrails for sessions_spawn with runtime="acp" by rejecting ACP spawns from sandboxed requester sessions and rejecting sandbox="require" for ACP runtime, preventing sandbox-boundary bypass via host-side ACP initialization. (#32254) Thanks @tdjackey for reporting, and @dutifulbob for the fix.
  • -
  • Security/Web tools SSRF guard: keep DNS pinning for untrusted web_fetch and citation-redirect URL checks when proxy env vars are set, and require explicit dangerous opt-in before env-proxy routing can bypass pinned dispatch for trusted/operator-controlled endpoints. Thanks @tdjackey for reporting.
  • -
  • Gemini schema sanitization: coerce malformed JSON Schema properties values (null, arrays, primitives) to {} before provider validation, preventing downstream strict-validator crashes on invalid plugin/tool schemas. (#32332) Thanks @webdevtodayjason.
  • -
  • Media understanding/malformed attachment guards: harden attachment selection and decision summary formatting against non-array or malformed attachment payloads to prevent runtime crashes on invalid inbound metadata shapes. (#28024) Thanks @claw9267.
  • -
  • Browser/Extension navigation reattach: preserve debugger re-attachment when relay is temporarily disconnected by deferring relay attach events until reconnect/re-announce, reducing post-navigation tab loss. (#28725) Thanks @stone-jin.
  • -
  • Browser/Extension relay stale tabs: evict stale cached targets from /json/list when extension targets are destroyed/crashed or commands fail with missing target/session errors. (#6175) Thanks @vincentkoc.
  • -
  • Browser/CDP startup readiness: wait for CDP websocket readiness after launching Chrome and cleanly stop/reset when readiness never arrives, reducing follow-up PortInUseError races after browser start/open. (#29538) Thanks @AaronWander.
  • -
  • OpenAI/Responses WebSocket tool-call id hygiene: normalize blank/whitespace streamed tool-call ids before persistence, and block empty function_call_output.call_id payloads in the WS conversion path to avoid OpenAI 400 errors (Invalid 'input[n].call_id': empty string), with regression coverage for both inbound stream normalization and outbound payload guards.
  • -
  • Security/Nodes camera URL downloads: bind node camera.snap/camera.clip URL payload downloads to the resolved node host, enforce fail-closed behavior when node remoteIp is unavailable, and use SSRF-guarded fetch with redirect host/protocol checks to prevent off-node fetch pivots. Thanks @tdjackey for reporting.
  • -
  • Config/backups hardening: enforce owner-only (0600) permissions on rotated config backups and clean orphan .bak.* files outside the managed backup ring, reducing credential leakage risk from stale or permissive backup artifacts. (#31718) Thanks @YUJIE2002.
  • -
  • Telegram/inbound media filenames: preserve original file_name metadata for document/audio/video/animation downloads (with fetch/path fallbacks), so saved inbound attachments keep sender-provided names instead of opaque Telegram file paths. (#31837) Thanks @Kay-051.
  • -
  • Gateway/OpenAI chat completions: honor x-openclaw-message-channel when building agentCommand input for /v1/chat/completions, preserving caller channel identity instead of forcing webchat. (#30462) Thanks @bmendonca3.
  • -
  • Plugin SDK/runtime hardening: add package export verification in CI/release checks to catch missing runtime exports before publish-time regressions. (#28575) Thanks @Glucksberg.
  • -
  • Media/MIME normalization: normalize parameterized/case-variant MIME strings in kindFromMime (for example Audio/Ogg; codecs=opus) so WhatsApp voice notes are classified as audio and routed through transcription correctly. (#32280) Thanks @Lucenx9.
  • -
  • Discord/audio preflight mentions: detect audio attachments via Discord content_type and gate preflight transcription on typed text (not media placeholders), so guild voice-note mentions are transcribed and matched correctly. (#32136) Thanks @jnMetaCode.
  • -
  • Feishu/topic session routing: use thread_id as topic session scope fallback when root_id is absent, keep first-turn topic keys stable across thread creation, and force thread replies when inbound events already carry topic/thread context. (#29788) Thanks @songyaolun.
  • -
  • Gateway/Webchat NO_REPLY streaming: suppress assistant lead-fragment deltas that are prefixes of NO_REPLY and keep final-message buffering in sync, preventing partial NO leaks on silent-response runs while preserving legitimate short replies. (#32073) Thanks @liuxiaopai-ai.
  • -
  • Telegram/models picker callbacks: keep long model buttons selectable by falling back to compact callback payloads and resolving provider ids on selection (with provider re-prompt on ambiguity), avoiding Telegram 64-byte callback truncation failures. (#31857) Thanks @bmendonca3.
  • -
  • Context-window metadata warmup: add exponential config-load retry backoff (1s -> 2s -> 4s, capped at 60s) so transient startup failures recover automatically without hot-loop retries.
  • -
  • Voice-call/Twilio external outbound: auto-register webhook-first outbound-api calls (initiated outside OpenClaw) so media streams are accepted and call direction metadata stays accurate. (#31181) Thanks @scoootscooob.
  • -
  • Feishu/topic root replies: prefer root_id as outbound replyTargetMessageId when present, and parse millisecond message_create_time values correctly so topic replies anchor to the root message in grouped thread flows. (#29968) Thanks @bmendonca3.
  • -
  • Feishu/DM pairing reply target: send pairing challenge replies to chat: instead of user: so Lark/Feishu private chats with user-id-only sender payloads receive pairing messages reliably. (#31403) Thanks @stakeswky.
  • -
  • Feishu/Lark private DM routing: treat inbound chat_type: "private" as direct-message context for pairing/mention-forward/reaction synthetic handling so Lark private chats behave like Feishu p2p DMs. (#31400) Thanks @stakeswky.
  • -
  • Signal/message actions: allow react to fall back to toolContext.currentMessageId when messageId is omitted, matching Telegram behavior and unblocking agent-initiated reactions on inbound turns. (#32217) Thanks @dunamismax.
  • -
  • Discord/message actions: allow react to fall back to toolContext.currentMessageId when messageId is omitted, matching Telegram/Signal reaction ergonomics in inbound turns.
  • -
  • Synology Chat/reply delivery: resolve webhook usernames to Chat API user_id values for outbound chatbot replies, avoiding mismatches between webhook user IDs and method=chatbot recipient IDs in multi-account setups. (#23709) Thanks @druide67.
  • -
  • Slack/thread context payloads: only inject thread starter/history text on first thread turn for new sessions while preserving thread metadata, reducing repeated context-token bloat on long-lived thread sessions. (#32133) Thanks @sourman.
  • -
  • Slack/session routing: keep top-level channel messages in one shared session when replyToMode=off, while preserving thread-scoped keys for true thread replies and non-off modes. (#32193) Thanks @bmendonca3.
  • -
  • Voice-call/webhook routing: require exact webhook path matches (instead of prefix matches) so lookalike paths cannot reach provider verification/dispatch logic. (#31930) Thanks @afurm.
  • -
  • Zalo/Pairing auth tests: add webhook regression coverage asserting DM pairing-store reads/writes remain account-scoped, preventing cross-account authorization bleed in multi-account setups. (#26121) Thanks @bmendonca3.
  • -
  • Zalouser/Pairing auth tests: add account-scoped DM pairing-store regression coverage (monitor.account-scope.test.ts) to prevent cross-account allowlist bleed in multi-account setups. (#26672) Thanks @bmendonca3.
  • -
  • Feishu/Send target prefixes: normalize explicit group:/dm: send targets and preserve explicit receive-id routing hints when resolving outbound Feishu targets. (#31594) Thanks @liuxiaopai-ai.
  • -
  • Webchat/Feishu session continuation: preserve routable OriginatingChannel/OriginatingTo metadata from session delivery context in chat.send, and prefer provider-normalized channel when deciding cross-channel route dispatch so Webchat replies continue on the selected Feishu session instead of falling back to main/internal session routing. (#31573)
  • -
  • Telegram/implicit mention forum handling: exclude Telegram forum system service messages (forum_topic_*, general_forum_topic_*) from reply-chain implicit mention detection so requireMention does not get bypassed inside bot-created topic lifecycle events. (#32262) Thanks @scoootscooob.
  • -
  • Slack/inbound debounce routing: isolate top-level non-DM message debounce keys by message timestamp to avoid cross-thread collisions, preserve DM batching, and flush pending top-level buffers before immediate non-debounce follow-ups to keep ordering stable. (#31951) Thanks @scoootscooob.
  • -
  • Feishu/Duplicate replies: suppress same-target reply dispatch when message-tool sends use generic provider metadata (provider: "message") and normalize lark/feishu provider aliases during duplicate-target checks, preventing double-delivery in Feishu sessions. (#31526)
  • -
  • Webchat/silent token leak: filter assistant NO_REPLY-only transcript entries from chat.history responses and add client-side defense-in-depth guards in the chat controller so internal silent tokens never render as visible chat bubbles. (#32015) Consolidates overlap from #32183, #32082, #32045, #32052, #32172, and #32112. Thanks @ademczuk, @liuxiaopai-ai, @ningding97, @bmendonca3, and @x4v13r1120.
  • -
  • Doctor/local memory provider checks: stop false-positive local-provider warnings when provider=local and no explicit modelPath is set by honoring default local model fallback while still warning when gateway probe reports local embeddings not ready. (#32014) Fixes #31998. Thanks @adhishthite.
  • -
  • Media understanding/parakeet CLI output parsing: read parakeet-mlx transcripts from the .txt file written under --output-dir when txt output is requested (or is the default), with stdout fallback for non-txt formats. (#9177) Thanks @mac-110.
  • -
  • Media understanding/audio transcription guard: skip tiny/empty audio files (<1024 bytes) before provider/CLI transcription to avoid noisy invalid-audio failures and preserve clean fallback behavior. (#8388) Thanks @Glucksberg.
  • -
  • Gateway/Plugin HTTP route precedence: run explicit plugin HTTP routes before the Control UI SPA catch-all so registered plugin webhook/custom paths remain reachable, while unmatched paths still fall through to Control UI handling. (#31885) Thanks @Sid-Qin.
  • -
  • Gateway/Node browser proxy routing: honor profile from browser.request JSON body when query params omit it, while preserving query-profile precedence when both are present. (#28852) Thanks @Sid-Qin.
  • -
  • Gateway/Control UI basePath POST handling: return 405 for POST on exact basePath routes (for example /openclaw) instead of redirecting, and add end-to-end regression coverage that root-mounted webhook POST paths still pass through to plugin handlers. (#31349) Thanks @Sid-Qin.
  • -
  • Browser/default profile selection: default browser.defaultProfile behavior now prefers openclaw (managed standalone CDP) when no explicit default is configured, while still auto-provisioning the chrome relay profile for explicit opt-in use. (#32031) Fixes #31907. Thanks @liuxiaopai-ai.
  • -
  • Sandbox/mkdirp boundary checks: allow existing in-boundary directories to pass mkdirp boundary validation when directory open probes return platform-specific I/O errors, with regression coverage for directory-safe fallback behavior. (#31547) Thanks @stakeswky.
  • -
  • Models/config env propagation: apply config.env.vars before implicit provider discovery in models bootstrap so config-scoped credentials are visible to implicit provider resolution paths. (#32295) Thanks @hsiaoa.
  • -
  • Models/Codex usage labels: infer weekly secondary usage windows from reset cadence when API window seconds are ambiguously reported as 24h, so openclaw models status no longer mislabels weekly limits as daily. (#31938) Thanks @bmendonca3.
  • -
  • Gateway/Heartbeat model reload: treat models.* and agents.defaults.model config updates as heartbeat hot-reload triggers so heartbeat picks up model changes without a full gateway restart. (#32046) Thanks @stakeswky.
  • -
  • Memory/LanceDB embeddings: forward configured embedding.dimensions into OpenAI embeddings requests so vector size and API output dimensions stay aligned when dimensions are explicitly configured. (#32036) Thanks @scotthuang.
  • -
  • Gateway/Control UI method guard: allow POST requests to non-UI routes to fall through when no base path is configured, and add POST regression coverage for fallthrough and base-path 405 behavior. (#23970) Thanks @tyler6204.
  • -
  • Browser/CDP status accuracy: require a successful Browser.getVersion response over the CDP websocket (not just socket-open) before reporting cdpReady, so stale idle command channels are surfaced as unhealthy. (#23427) Thanks @vincentkoc.
  • -
  • Daemon/systemd checks in containers: treat missing systemctl invocations (including spawn systemctl ENOENT/EACCES) as unavailable service state during is-enabled checks, preventing container flows from failing with Gateway service check failed before install/status handling can continue. (#26089) Thanks @sahilsatralkar and @vincentkoc.
  • -
  • Security/Node exec approvals: revalidate approval-bound cwd identity immediately before execution/forwarding and fail closed with an explicit denial when cwd drifts after approval hardening.
  • -
  • Security audit/skills workspace hardening: add skills.workspace.symlink_escape warning in openclaw security audit when workspace skills/**/SKILL.md resolves outside the workspace root (for example symlink-chain drift), plus docs coverage in the security glossary.
  • -
  • Security/Node exec approvals: preserve shell/dispatch-wrapper argv semantics during approval hardening so approved wrapper commands (for example env sh -c ...) cannot drift into a different runtime command shape, and add regression coverage for both approval-plan generation and approved runtime execution paths. Thanks @tdjackey for reporting.
  • -
  • Security/fs-safe write hardening: make writeFileWithinRoot use same-directory temp writes plus atomic rename, add post-write inode/hardlink revalidation with security warnings on boundary drift, and avoid truncating existing targets when final rename fails.
  • -
  • Security/Skills archive extraction: unify tar extraction safety checks across tar.gz and tar.bz2 install flows, enforce tar compressed-size limits, and fail closed if tar.bz2 archives change between preflight and extraction to prevent bypasses of entry-type/size guardrails. Thanks @GCXWLP for reporting.
  • -
  • Security/Prompt spoofing hardening: stop injecting queued runtime events into user-role prompt text, route them through trusted system-prompt context, and neutralize inbound spoof markers like [System Message] and line-leading System: in untrusted message content. (#30448)
  • -
  • Sandbox/Docker setup command parsing: accept agents.*.sandbox.docker.setupCommand as either a string or a string array, and normalize arrays to newline-delimited shell scripts so multi-step setup commands no longer concatenate without separators. (#31953) Thanks @liuxiaopai-ai.
  • -
  • Sandbox/Bootstrap context boundary hardening: reject symlink/hardlink alias bootstrap seed files that resolve outside the source workspace and switch post-compaction AGENTS.md context reads to boundary-verified file opens, preventing host file content from being injected via workspace aliasing. Thanks @tdjackey for reporting.
  • -
  • Agents/Sandbox workdir mapping: map container workdir paths (for example /workspace) back to the host workspace before sandbox path validation so exec requests keep the intended directory in containerized runs instead of falling back to an unavailable host path. (#31841) Thanks @liuxiaopai-ai.
  • -
  • Docker/Sandbox bootstrap hardening: make OPENCLAW_SANDBOX opt-in parsing explicit (1|true|yes|on), support custom Docker socket paths via OPENCLAW_DOCKER_SOCKET, defer docker.sock exposure until sandbox prerequisites pass, and reset/roll back persisted sandbox mode to off when setup is skipped or partially fails to avoid stale broken sandbox state. (#29974) Thanks @jamtujest and @vincentkoc.
  • -
  • Hooks/webhook ACK compatibility: return 200 (instead of 202) for successful /hooks/agent requests so providers that require 200 (for example Forward Email) accept dispatched agent hook deliveries. (#28204) Thanks @Glucksberg.
  • -
  • Feishu/Run channel fallback: prefer Provider over Surface when inferring queued run messageProvider fallback (when OriginatingChannel is missing), preventing Feishu turns from being mislabeled as webchat in mixed relay metadata contexts. (#31880) Fixes #31859. Thanks @liuxiaopai-ai.
  • -
  • Skills/sherpa-onnx-tts: run the sherpa-onnx-tts bin under ESM (replace CommonJS require imports) and add regression coverage to prevent require is not defined in ES module scope startup crashes. (#31965) Thanks @bmendonca3.
  • -
  • Inbound metadata/direct relay context: restore direct-channel conversation metadata blocks for external channels (for example WhatsApp) while preserving webchat-direct suppression, so relay agents recover sender/message identifiers without reintroducing internal webchat metadata noise. (#31969) Fixes #29972. Thanks @Lucenx9.
  • -
  • Slack/Channel message subscriptions: register explicit message.channels and message.groups monitor handlers (alongside generic message) so channel/group event subscriptions are consumed even when Slack dispatches typed message event names. Fixes #31674.
  • -
  • Hooks/session-scoped memory context: expose ephemeral sessionId in embedded plugin tool contexts and before_tool_call/after_tool_call hook contexts (including compaction and client-tool wiring) so plugins can isolate per-conversation state across /new and /reset. Related #31253 and #31304. Thanks @Sid-Qin and @Servo-AIpex.
  • -
  • Voice-call/Twilio inbound greeting: run answered-call initial notify greeting for Twilio instead of skipping the manager speak path, with regression coverage for both Twilio and Plivo notify flows. (#29121) Thanks @xinhuagu.
  • -
  • Voice-call/stale call hydration: verify active calls with the provider before loading persisted in-progress calls so stale locally persisted records do not block or misroute new call handling after restarts. (#4325) Thanks @garnetlyx.
  • -
  • Feishu/File upload filenames: percent-encode non-ASCII/special-character file_name values in Feishu multipart uploads so Chinese/symbol-heavy filenames are sent as proper attachments instead of plain text links. (#31179) Thanks @Kay-051.
  • -
  • Media/MIME channel parity: route Telegram/Signal/iMessage media-kind checks through normalized kindFromMime so mixed-case/parameterized MIME values classify consistently across message channels.
  • -
  • WhatsApp/inbound self-message context: propagate inbound fromMe through the web inbox pipeline and annotate direct self messages as (self) in envelopes so agents can distinguish owner-authored turns from contact turns. (#32167) Thanks @scoootscooob.
  • -
  • Webchat/stream finalization: persist streamed assistant text when final events omit message, while keeping final payload precedence and skipping empty stream buffers to prevent disappearing replies after tool turns. (#31920) Thanks @Sid-Qin.
  • -
  • Feishu/Inbound ordering: serialize message handling per chat while preserving cross-chat concurrency to avoid same-chat race drops under bursty inbound traffic. (#31807)
  • -
  • Feishu/Typing notification suppression: skip typing keepalive reaction re-adds when the indicator is already active, preventing duplicate notification pings from repeated identical emoji adds. (#31580)
  • -
  • Feishu/Probe failure backoff: cache API and timeout probe failures for one minute per account key while preserving abort-aware probe timeouts, reducing repeated health-check retries during transient credential/network outages. (#29970)
  • -
  • Feishu/Streaming block fallback: preserve markdown block stream text as final streaming-card content when final payload text is missing, while still suppressing non-card internal block chunk delivery. (#30663)
  • -
  • Feishu/Bitable API errors: unify Feishu Bitable tool error handling with structured LarkApiError responses and consistent API/context attribution across wiki/base metadata, field, and record operations. (#31450)
  • -
  • Feishu/Missing-scope grant URL fix: rewrite known invalid scope aliases (contact:contact.base:readonly) to valid scope names in permission grant links, so remediation URLs open with correct Feishu consent scopes. (#31943)
  • -
  • BlueBubbles/Message metadata: harden send response ID extraction, include sender identity in DM context, and normalize inbound message_id selection to avoid duplicate ID metadata. (#23970) Thanks @tyler6204.
  • -
  • WebChat/markdown tables: ensure GitHub-flavored markdown table parsing is explicitly enabled at render time and add horizontal overflow handling for wide tables, with regression coverage for table-only and mixed text+table content. (#32365) Thanks @BlueBirdBack.
  • -
  • Feishu/default account resolution: always honor explicit channels.feishu.defaultAccount during outbound account selection (including top-level-credential setups where the preferred id is not present in accounts), instead of silently falling back to another account id. (#32253) Thanks @bmendonca3.
  • -
  • Feishu/Sender lookup permissions: suppress user-facing grant prompts for stale non-existent scope errors (contact:contact.base:readonly) during best-effort sender-name resolution so inbound messages continue without repeated false permission notices. (#31761)
  • -
  • Discord/dispatch + Slack formatting: restore parallel outbound dispatch across Discord channels with per-channel queues while preserving in-channel ordering, and run Slack preview/stream update text through mrkdwn normalization for consistent formatting. (#31927) Thanks @Sid-Qin.
  • -
  • Feishu/Inbound debounce: debounce rapid same-chat sender bursts into one ordered dispatch turn, skip already-processed retries when composing merged text, and preserve bot-mention intent across merged entries to reduce duplicate or late inbound handling. (#31548)
  • -
  • Tests/Sandbox + archive portability: use junction-compatible directory-link setup on Windows and explicit file-symlink platform guards in symlink escape tests where unprivileged file symlinks are unavailable, reducing false Windows CI failures while preserving traversal checks on supported paths. (#28747) Thanks @arosstale.
  • -
  • Browser/Extension re-announce reliability: keep relay state in connecting when re-announce forwarding fails and extend debugger re-attach retries after navigation to reduce false attached states and post-nav disconnect loops. (#27630) Thanks @markmusson.
  • -
  • Browser/Act request compatibility: accept legacy flattened action="act" params (kind/ref/text/...) in addition to request={...} so browser act calls no longer fail with request required. (#15120) Thanks @vincentkoc.
  • -
  • OpenRouter/x-ai compatibility: skip reasoning.effort injection for x-ai/* models (for example Grok) so OpenRouter requests no longer fail with invalid-arguments errors on unsupported reasoning params. (#32054) Thanks @scoootscooob.
  • -
  • Models/openai-completions developer-role compatibility: force supportsDeveloperRole=false for non-native endpoints, treat unparseable baseUrl values as non-native, and add regression coverage for empty/malformed baseUrl plus explicit-true override behavior. (#29479) Thanks @akramcodez.
  • -
  • Browser/Profile attach-only override: support browser.profiles.<profileId>.attachOnly (fallback to global browser.attachOnly) so loopback proxy profiles can skip local launch/port-ownership checks without forcing attach-only mode for every profile. (#20595) Thanks @unblockedgamesstudio and @vincentkoc.
  • -
  • Sessions/Lock recovery: detect recycled Linux PIDs by comparing lock-file starttime with /proc/<pid>/stat starttime, so stale .jsonl.lock files are reclaimed immediately in containerized PID-reuse scenarios while preserving compatibility for older lock files. (#26443) Fixes #27252. Thanks @HirokiKobayashi-R and @vincentkoc.
  • -
  • Cron/isolated delivery target fallback: remove early unresolved-target return so cron delivery can flow through shared outbound target resolution (including per-channel resolveDefaultTo fallback) when delivery.to is omitted. (#32364) Thanks @hclsys.
  • -
  • OpenAI media capabilities: include audio in the OpenAI provider capability list so audio transcription models are eligible in media-understanding provider selection. (#12717) Thanks @openjay.
  • -
  • Browser/Managed tab cap: limit loopback managed openclaw page tabs to 8 via best-effort cleanup after tab opens to reduce long-running renderer buildup while preserving attach-only and remote profile behavior. (#29724) Thanks @pandego.
  • -
  • Docker/Image health checks: add Dockerfile HEALTHCHECK that probes gateway GET /healthz so container runtimes can mark unhealthy instances without requiring auth credentials in the probe command. (#11478) Thanks @U-C4N and @vincentkoc.
  • -
  • Gateway/Node dangerous-command parity: include sms.send in default onboarding node denyCommands, share onboarding deny defaults with the gateway dangerous-command source of truth, and include sms.send in phone-control /phone arm writes handling so SMS follows the same break-glass flow as other dangerous node commands. Thanks @zpbrent.
  • -
  • Pairing/AllowFrom account fallback: handle omitted accountId values in readChannelAllowFromStore and readChannelAllowFromStoreSync as default, while preserving legacy unscoped allowFrom merges for default-account flows. Thanks @Sid-Qin and @vincentkoc.
  • -
  • Browser/Remote CDP ownership checks: skip local-process ownership errors for non-loopback remote CDP profiles when HTTP is reachable but the websocket handshake fails, and surface the remote websocket attach/retry path instead. (#15582) Landed from contributor (#28780) Thanks @stubbi, @bsormagec, @unblockedgamesstudio and @vincentkoc.
  • -
  • Browser/CDP proxy bypass: force direct loopback agent paths and scoped NO_PROXY expansion for localhost CDP HTTP/WS connections when proxy env vars are set, so browser relay/control still works behind global proxy settings. (#31469) Thanks @widingmarcus-cyber.
  • -
  • Sessions/idle reset correctness: preserve existing updatedAt during inbound metadata-only writes so idle-reset boundaries are not unintentionally refreshed before actual user turns. (#32379) Thanks @romeodiaz.
  • -
  • Sessions/lock recovery: reclaim orphan legacy same-PID lock files missing starttime when no in-process lock ownership exists, avoiding false lock timeouts after PID reuse while preserving active lock safety checks. (#32081) Thanks @bmendonca3.
  • -
  • Sessions/store cache invalidation: reload cached session stores when file size changes within the same mtime tick by keying cache validation on a single file-stat snapshot (mtimeMs + sizeBytes), with regression coverage for same-tick rewrites. (#32191) Thanks @jalehman.
  • -
  • Agents/Subagents sessions_spawn: reject malformed agentId inputs before normalization (for example error-message/path-like strings) to prevent unintended synthetic agent IDs and ghost workspace/session paths; includes strict validation regression coverage. (#31381) Thanks @openperf.
  • -
  • CLI/installer Node preflight: enforce Node.js v22.12+ consistently in both openclaw.mjs runtime bootstrap and installer active-shell checks, with actionable nvm recovery guidance for mismatched shell PATH/defaults. (#32356) Thanks @jasonhargrove.
  • -
  • Web UI/config form: support SecretInput string-or-secret-ref unions in map additionalProperties, so provider API key fields stay editable instead of being marked unsupported. (#31866) Thanks @ningding97.
  • -
  • Auto-reply/inline command cleanup: preserve newline structure when stripping inline /status and extracting inline slash commands by collapsing only horizontal whitespace, preventing paragraph flattening in multi-line replies. (#32224) Thanks @scoootscooob.
  • -
  • Config/raw redaction safety: preserve non-sensitive literals during raw redaction round-trips, scope SecretRef redaction to secret IDs (not structural fields like source/provider), and fall back to structured raw redaction when text replacement cannot restore the original config shape. (#32174) Thanks @bmendonca3.
  • -
  • Hooks/runtime stability: keep the internal hook handler registry on a globalThis singleton so hook registration/dispatch remains consistent when bundling emits duplicate module copies. (#32292) Thanks @Drickon.
  • -
  • Hooks/after_tool_call: include embedded session context (sessionKey, agentId) and fire the hook exactly once per tool execution by removing duplicate adapter-path dispatch in embedded runs. (#32201) Thanks @jbeno, @scoootscooob, @vincentkoc.
  • -
  • Hooks/tool-call correlation: include runId and toolCallId in plugin tool hook payloads/context and scope tool start/adjusted-param tracking by run to prevent cross-run collisions in before_tool_call and after_tool_call. (#32360) Thanks @vincentkoc.
  • -
  • Plugins/install diagnostics: reject legacy plugin package shapes without openclaw.extensions and return an explicit upgrade hint with troubleshooting docs for repackaging. (#32055) Thanks @liuxiaopai-ai.
  • -
  • Hooks/plugin context parity: ensure llm_input hooks in embedded attempts receive the same trigger and channelId-aware hookCtx used by the other hook phases, preserving channel/trigger-scoped plugin behavior. (#28623) Thanks @davidrudduck and @vincentkoc.
  • -
  • Plugins/hardlink install compatibility: allow bundled plugin manifests and entry files to load when installed via hardlink-based package managers (pnpm, bun) while keeping hardlink rejection enabled for non-bundled plugin sources. (#32119) Fixes #28175, #28404, #29455. Thanks @markfietje.
  • -
  • Cron/session reaper reliability: move cron session reaper sweeps into onTimer finally and keep pruning active even when timer ticks fail early (for example cron store parse failures), preventing stale isolated run sessions from accumulating indefinitely. (#31996) Fixes #31946. Thanks @scoootscooob.
  • -
  • Cron/HEARTBEAT_OK summary leak: suppress fallback main-session enqueue for heartbeat/internal ack summaries in isolated announce mode so HEARTBEAT_OK noise never appears in user chat while real summaries still forward. (#32093) Thanks @scoootscooob.
  • -
  • Authentication: classify permission_error as auth_permanent for profile fallback. (#31324) Thanks @Sid-Qin.
  • -
  • Agents/host edit reliability: treat host edit-tool throws as success only when on-disk post-check confirms replacement likely happened (newText present and oldText absent), preventing false failure reports while avoiding pre-write false positives. (#32383) Thanks @polooooo.
  • -
  • Plugins/install fallback safety: resolve bare install specs to bundled plugin ids before npm lookup (for example diffs -> bundled @openclaw/diffs), keep npm fallback limited to true package-not-found errors, and continue rejecting non-plugin npm packages that fail manifest validation. (#32096) Thanks @scoootscooob.
  • -
  • Web UI/inline code copy fidelity: disable forced mid-token wraps on inline spans so copied UUID/hash/token strings preserve exact content instead of inserting line-break spaces. (#32346) Thanks @hclsys.
  • -
  • Restart sentinel formatting: avoid duplicate Reason: lines when restart message text already matches stats.reason, keeping restart notifications concise for users and downstream parsers. (#32083) Thanks @velamints2.
  • -
  • Auto-reply/followup queue: avoid stale callback reuse across idle-window restarts by caching the followup runner only when a drain actually starts, preserving enqueue ordering after empty-finalize paths. (#31902) Thanks @Lanfei.
  • -
  • Agents/tool-result guard: always clear pending tool-call state on interruptions even when synthetic tool results are disabled, preventing orphaned tool-use transcripts that cause follow-up provider request failures. (#32120) Thanks @jnMetaCode.
  • -
  • Failover/error classification: treat HTTP 529 (provider overloaded, common with Anthropic-compatible APIs) as rate_limit so model failover can engage instead of misclassifying the error path. (#31854) Thanks @bugkill3r.
  • -
  • Logging: use local time for logged timestamps instead of UTC, aligning log output with documented local timezone behavior and avoiding confusion during local diagnostics. (#28434) Thanks @liuy.
  • -
  • Agents/Subagent announce cleanup: keep completion-message runs pending while descendants settle, add a 30 minute hard-expiry backstop to avoid indefinite pending state, and keep retry bookkeeping resumable across deferred wakes. (#23970) Thanks @tyler6204.
  • -
  • Secrets/exec resolver timeout defaults: use provider timeoutMs as the default inactivity (noOutputTimeoutMs) watchdog for exec secret providers, preventing premature no-output kills for resolvers that start producing output after 2s. (#32235) Thanks @bmendonca3.
  • -
  • Auto-reply/reminder guard note suppression: when a turn makes reminder-like commitments but schedules no new cron jobs, suppress the unscheduled-reminder warning note only if an enabled cron already exists for the same session; keep warnings for unrelated sessions, disabled jobs, or unreadable cron store paths. (#32255) Thanks @scoootscooob.
  • -
  • Cron/isolated announce heartbeat suppression: treat multi-payload runs as skippable when any payload is a heartbeat ack token and no payload has media, preventing internal narration + trailing HEARTBEAT_OK from being delivered to users. (#32131) Thanks @adhishthite.
  • -
  • Cron/store migration: normalize legacy cron jobs with string schedule and top-level command/timeout fields into canonical schedule/payload/session-target shape on load, preventing schedule-error loops on old persisted stores. (#31926) Thanks @bmendonca3.
  • -
  • Tests/Windows backup rotation: skip chmod-only backup permission assertions on Windows while retaining compose/rotation/prune coverage across platforms to avoid false CI failures from Windows non-POSIX mode semantics. (#32286) Thanks @jalehman.
  • -
  • Tests/Subagent announce: set OPENCLAW_TEST_FAST=1 before importing subagent-announce format suites so module-level fast-mode constants are captured deterministically on Windows CI, preventing timeout flakes in nested completion announce coverage. (#31370) Thanks @zwffff.
  • -
-

View full changelog

-]]>
- - -
\ No newline at end of file diff --git a/apps/android/README.md b/apps/android/README.md index 0a92e4c8ec5..9c6baf807c9 100644 --- a/apps/android/README.md +++ b/apps/android/README.md @@ -30,8 +30,12 @@ cd apps/android ./gradlew :app:assembleDebug ./gradlew :app:installDebug ./gradlew :app:testDebugUnitTest +cd ../.. +bun run android:bundle:release ``` +`bun run android:bundle:release` auto-bumps Android `versionName`/`versionCode` in `apps/android/app/build.gradle.kts`, then builds a signed release `.aab`. + ## Kotlin Lint + Format ```bash diff --git a/apps/android/app/build.gradle.kts b/apps/android/app/build.gradle.kts index 3b52bcf50de..46afccbc3bf 100644 --- a/apps/android/app/build.gradle.kts +++ b/apps/android/app/build.gradle.kts @@ -1,5 +1,7 @@ import com.android.build.api.variant.impl.VariantOutputImpl +val dnsjavaInetAddressResolverService = "META-INF/services/java.net.spi.InetAddressResolverProvider" + val androidStoreFile = providers.gradleProperty("OPENCLAW_ANDROID_STORE_FILE").orNull?.takeIf { it.isNotBlank() } val androidStorePassword = providers.gradleProperty("OPENCLAW_ANDROID_STORE_PASSWORD").orNull?.takeIf { it.isNotBlank() } val androidKeyAlias = providers.gradleProperty("OPENCLAW_ANDROID_KEY_ALIAS").orNull?.takeIf { it.isNotBlank() } @@ -63,8 +65,8 @@ android { applicationId = "ai.openclaw.app" minSdk = 31 targetSdk = 36 - versionCode = 202603090 - versionName = "2026.3.9" + versionCode = 2026031400 + versionName = "2026.3.14" ndk { // Support all major ABIs — native libs are tiny (~47 KB per ABI) abiFilters += listOf("armeabi-v7a", "arm64-v8a", "x86", "x86_64") @@ -78,6 +80,9 @@ android { } isMinifyEnabled = true isShrinkResources = true + ndk { + debugSymbolLevel = "SYMBOL_TABLE" + } proguardFiles(getDefaultProguardFile("proguard-android-optimize.txt"), "proguard-rules.pro") } debug { @@ -104,6 +109,10 @@ android { "/META-INF/LICENSE*.txt", "DebugProbesKt.bin", "kotlin-tooling-metadata.json", + 
"org/bouncycastle/pqc/crypto/picnic/lowmcL1.bin.properties", + "org/bouncycastle/pqc/crypto/picnic/lowmcL3.bin.properties", + "org/bouncycastle/pqc/crypto/picnic/lowmcL5.bin.properties", + "org/bouncycastle/x509/CertPathReviewerMessages*.properties", ) } } @@ -168,7 +177,6 @@ dependencies { // material-icons-extended pulled in full icon set (~20 MB DEX). Only ~18 icons used. // R8 will tree-shake unused icons when minify is enabled on release builds. implementation("androidx.compose.material:material-icons-extended") - implementation("androidx.navigation:navigation-compose:2.9.7") debugImplementation("androidx.compose.ui:ui-tooling") @@ -193,8 +201,7 @@ dependencies { implementation("androidx.camera:camera-camera2:1.5.2") implementation("androidx.camera:camera-lifecycle:1.5.2") implementation("androidx.camera:camera-video:1.5.2") - implementation("androidx.camera:camera-view:1.5.2") - implementation("com.journeyapps:zxing-android-embedded:4.3.0") + implementation("com.google.android.gms:play-services-code-scanner:16.1.0") // Unicast DNS-SD (Wide-Area Bonjour) for tailnet discovery domains. 
implementation("dnsjava:dnsjava:3.6.4") @@ -211,3 +218,45 @@ dependencies { tasks.withType().configureEach { useJUnitPlatform() } + +val stripReleaseDnsjavaServiceDescriptor = + tasks.register("stripReleaseDnsjavaServiceDescriptor") { + val mergedJar = + layout.buildDirectory.file( + "intermediates/merged_java_res/release/mergeReleaseJavaResource/base.jar", + ) + + inputs.file(mergedJar) + outputs.file(mergedJar) + + doLast { + val jarFile = mergedJar.get().asFile + if (!jarFile.exists()) { + return@doLast + } + + val unpackDir = temporaryDir.resolve("merged-java-res") + delete(unpackDir) + copy { + from(zipTree(jarFile)) + into(unpackDir) + exclude(dnsjavaInetAddressResolverService) + } + delete(jarFile) + ant.invokeMethod( + "zip", + mapOf( + "destfile" to jarFile.absolutePath, + "basedir" to unpackDir.absolutePath, + ), + ) + } + } + +tasks.matching { it.name == "stripReleaseDnsjavaServiceDescriptor" }.configureEach { + dependsOn("mergeReleaseJavaResource") +} + +tasks.matching { it.name == "minifyReleaseWithR8" }.configureEach { + dependsOn(stripReleaseDnsjavaServiceDescriptor) +} diff --git a/apps/android/app/proguard-rules.pro b/apps/android/app/proguard-rules.pro index 78e4a363919..7c04b96833a 100644 --- a/apps/android/app/proguard-rules.pro +++ b/apps/android/app/proguard-rules.pro @@ -1,26 +1,6 @@ -# ── App classes ─────────────────────────────────────────────────── --keep class ai.openclaw.app.** { *; } - -# ── Bouncy Castle ───────────────────────────────────────────────── --keep class org.bouncycastle.** { *; } -dontwarn org.bouncycastle.** - -# ── CameraX ─────────────────────────────────────────────────────── --keep class androidx.camera.** { *; } - -# ── kotlinx.serialization ──────────────────────────────────────── --keep class kotlinx.serialization.** { *; } --keepclassmembers class * { - @kotlinx.serialization.Serializable *; -} --keepattributes *Annotation*, InnerClasses - -# ── OkHttp ──────────────────────────────────────────────────────── 
-dontwarn okhttp3.** -dontwarn okio.** --keep class okhttp3.internal.platform.** { *; } - -# ── Misc suppressions ──────────────────────────────────────────── -dontwarn com.sun.jna.** -dontwarn javax.naming.** -dontwarn lombok.Generated diff --git a/apps/android/app/src/main/java/ai/openclaw/app/MainViewModel.kt b/apps/android/app/src/main/java/ai/openclaw/app/MainViewModel.kt index a1b6ba3d353..80f42e02843 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/MainViewModel.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/MainViewModel.kt @@ -116,6 +116,10 @@ class MainViewModel(app: Application) : AndroidViewModel(app) { runtime.setGatewayToken(value) } + fun setGatewayBootstrapToken(value: String) { + runtime.setGatewayBootstrapToken(value) + } + fun setGatewayPassword(value: String) { runtime.setGatewayPassword(value) } @@ -172,6 +176,10 @@ class MainViewModel(app: Application) : AndroidViewModel(app) { runtime.requestCanvasRehydrate(source = source, force = true) } + fun refreshHomeCanvasOverviewIfConnected() { + runtime.refreshHomeCanvasOverviewIfConnected() + } + fun loadChat(sessionKey: String) { runtime.loadChat(sessionKey) } diff --git a/apps/android/app/src/main/java/ai/openclaw/app/NodeRuntime.kt b/apps/android/app/src/main/java/ai/openclaw/app/NodeRuntime.kt index c4e5f6a5b1d..dcf1e3bee89 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/NodeRuntime.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/NodeRuntime.kt @@ -33,6 +33,8 @@ import kotlinx.coroutines.flow.asStateFlow import kotlinx.coroutines.flow.combine import kotlinx.coroutines.flow.distinctUntilChanged import kotlinx.coroutines.launch +import kotlinx.serialization.Serializable +import kotlinx.serialization.encodeToString import kotlinx.serialization.json.Json import kotlinx.serialization.json.JsonArray import kotlinx.serialization.json.JsonObject @@ -210,7 +212,8 @@ class NodeRuntime(context: Context) { private val _isForeground = MutableStateFlow(true) val 
isForeground: StateFlow = _isForeground.asStateFlow() - private var lastAutoA2uiUrl: String? = null + private var gatewayDefaultAgentId: String? = null + private var gatewayAgents: List = emptyList() private var didAutoRequestCanvasRehydrate = false private val canvasRehydrateSeq = AtomicLong(0) private var operatorConnected = false @@ -232,7 +235,7 @@ class NodeRuntime(context: Context) { updateStatus() micCapture.onGatewayConnectionChanged(true) scope.launch { - refreshBrandingFromGateway() + refreshHomeCanvasOverviewIfConnected() if (voiceReplySpeakerLazy.isInitialized()) { voiceReplySpeaker.refreshConfig() } @@ -270,7 +273,7 @@ class NodeRuntime(context: Context) { _canvasRehydratePending.value = false _canvasRehydrateErrorText.value = null updateStatus() - maybeNavigateToA2uiOnConnect() + showLocalCanvasOnConnect() }, onDisconnected = { message -> _nodeConnected.value = false @@ -396,6 +399,7 @@ class NodeRuntime(context: Context) { _mainSessionKey.value = trimmed talkMode.setMainSessionKey(trimmed) chat.applyMainSessionKey(trimmed) + updateHomeCanvasState() } private fun updateStatus() { @@ -415,6 +419,7 @@ class NodeRuntime(context: Context) { operator.isNotBlank() && operator != "Offline" -> operator else -> node } + updateHomeCanvasState() } private fun resolveMainSessionKey(): String { @@ -422,23 +427,31 @@ class NodeRuntime(context: Context) { return if (trimmed.isEmpty()) "main" else trimmed } - private fun maybeNavigateToA2uiOnConnect() { - val a2uiUrl = a2uiHandler.resolveA2uiHostUrl() ?: return - val current = canvas.currentUrl()?.trim().orEmpty() - if (current.isEmpty() || current == lastAutoA2uiUrl) { - lastAutoA2uiUrl = a2uiUrl - canvas.navigate(a2uiUrl) - } - } - - private fun showLocalCanvasOnDisconnect() { - lastAutoA2uiUrl = null + private fun showLocalCanvasOnConnect() { _canvasA2uiHydrated.value = false _canvasRehydratePending.value = false _canvasRehydrateErrorText.value = null canvas.navigate("") } + private fun 
showLocalCanvasOnDisconnect() { + _canvasA2uiHydrated.value = false + _canvasRehydratePending.value = false + _canvasRehydrateErrorText.value = null + canvas.navigate("") + } + + fun refreshHomeCanvasOverviewIfConnected() { + if (!operatorConnected) { + updateHomeCanvasState() + return + } + scope.launch { + refreshBrandingFromGateway() + refreshAgentsFromGateway() + } + } + fun requestCanvasRehydrate(source: String = "manual", force: Boolean = true) { scope.launch { if (!_nodeConnected.value) { @@ -503,6 +516,7 @@ class NodeRuntime(context: Context) { val gatewayToken: StateFlow = prefs.gatewayToken val onboardingCompleted: StateFlow = prefs.onboardingCompleted fun setGatewayToken(value: String) = prefs.setGatewayToken(value) + fun setGatewayBootstrapToken(value: String) = prefs.setGatewayBootstrapToken(value) fun setGatewayPassword(value: String) = prefs.setGatewayPassword(value) fun setOnboardingCompleted(value: Boolean) = prefs.setOnboardingCompleted(value) val lastDiscoveredStableId: StateFlow = prefs.lastDiscoveredStableId @@ -601,6 +615,8 @@ class NodeRuntime(context: Context) { canvas.setDebugStatus(status, server ?: remote) } } + + updateHomeCanvasState() } fun setForeground(value: Boolean) { @@ -698,10 +714,25 @@ class NodeRuntime(context: Context) { operatorStatusText = "Connecting…" updateStatus() val token = prefs.loadGatewayToken() + val bootstrapToken = prefs.loadGatewayBootstrapToken() val password = prefs.loadGatewayPassword() val tls = connectionManager.resolveTlsParams(endpoint) - operatorSession.connect(endpoint, token, password, connectionManager.buildOperatorConnectOptions(), tls) - nodeSession.connect(endpoint, token, password, connectionManager.buildNodeConnectOptions(), tls) + operatorSession.connect( + endpoint, + token, + bootstrapToken, + password, + connectionManager.buildOperatorConnectOptions(), + tls, + ) + nodeSession.connect( + endpoint, + token, + bootstrapToken, + password, + connectionManager.buildNodeConnectOptions(), + tls, + 
) operatorSession.reconnect() nodeSession.reconnect() } @@ -726,9 +757,24 @@ class NodeRuntime(context: Context) { nodeStatusText = "Connecting…" updateStatus() val token = prefs.loadGatewayToken() + val bootstrapToken = prefs.loadGatewayBootstrapToken() val password = prefs.loadGatewayPassword() - operatorSession.connect(endpoint, token, password, connectionManager.buildOperatorConnectOptions(), tls) - nodeSession.connect(endpoint, token, password, connectionManager.buildNodeConnectOptions(), tls) + operatorSession.connect( + endpoint, + token, + bootstrapToken, + password, + connectionManager.buildOperatorConnectOptions(), + tls, + ) + nodeSession.connect( + endpoint, + token, + bootstrapToken, + password, + connectionManager.buildNodeConnectOptions(), + tls, + ) } fun acceptGatewayTrustPrompt() { @@ -897,11 +943,177 @@ class NodeRuntime(context: Context) { val parsed = parseHexColorArgb(raw) _seamColorArgb.value = parsed ?: DEFAULT_SEAM_COLOR_ARGB + updateHomeCanvasState() } catch (_: Throwable) { // ignore } } + private suspend fun refreshAgentsFromGateway() { + if (!operatorConnected) return + try { + val res = operatorSession.request("agents.list", "{}") + val root = json.parseToJsonElement(res).asObjectOrNull() ?: return + val defaultAgentId = root["defaultId"].asStringOrNull()?.trim().orEmpty() + val mainKey = normalizeMainKey(root["mainKey"].asStringOrNull()) + val agents = + (root["agents"] as? 
JsonArray)?.mapNotNull { item -> + val obj = item.asObjectOrNull() ?: return@mapNotNull null + val id = obj["id"].asStringOrNull()?.trim().orEmpty() + if (id.isEmpty()) return@mapNotNull null + val name = obj["name"].asStringOrNull()?.trim() + val emoji = obj["identity"].asObjectOrNull()?.get("emoji").asStringOrNull()?.trim() + GatewayAgentSummary( + id = id, + name = name?.takeIf { it.isNotEmpty() }, + emoji = emoji?.takeIf { it.isNotEmpty() }, + ) + } ?: emptyList() + + gatewayDefaultAgentId = defaultAgentId.ifEmpty { null } + gatewayAgents = agents + applyMainSessionKey(mainKey) + updateHomeCanvasState() + } catch (_: Throwable) { + // ignore + } + } + + private fun updateHomeCanvasState() { + val payload = + try { + json.encodeToString(makeHomeCanvasPayload()) + } catch (_: Throwable) { + null + } + canvas.updateHomeCanvasState(payload) + } + + private fun makeHomeCanvasPayload(): HomeCanvasPayload { + val state = resolveHomeCanvasGatewayState() + val gatewayName = normalized(_serverName.value) + val gatewayAddress = normalized(_remoteAddress.value) + val gatewayLabel = gatewayName ?: gatewayAddress ?: "Gateway" + val activeAgentId = resolveActiveAgentId() + val agents = homeCanvasAgents(activeAgentId) + + return when (state) { + HomeCanvasGatewayState.Connected -> + HomeCanvasPayload( + gatewayState = "connected", + eyebrow = "Connected to $gatewayLabel", + title = "Your agents are ready", + subtitle = + "This phone stays dormant until the gateway needs it, then wakes, syncs, and goes back to sleep.", + gatewayLabel = gatewayLabel, + activeAgentName = resolveActiveAgentName(activeAgentId), + activeAgentBadge = agents.firstOrNull { it.isActive }?.badge ?: "OC", + activeAgentCaption = "Selected on this phone", + agentCount = agents.size, + agents = agents.take(6), + footer = "The overview refreshes on reconnect and when this screen opens.", + ) + HomeCanvasGatewayState.Connecting -> + HomeCanvasPayload( + gatewayState = "connecting", + eyebrow = "Reconnecting", 
+ title = "OpenClaw is syncing back up", + subtitle = + "The gateway session is coming back online. Agent shortcuts should settle automatically in a moment.", + gatewayLabel = gatewayLabel, + activeAgentName = resolveActiveAgentName(activeAgentId), + activeAgentBadge = "OC", + activeAgentCaption = "Gateway session in progress", + agentCount = agents.size, + agents = agents.take(4), + footer = "If the gateway is reachable, reconnect should complete without intervention.", + ) + HomeCanvasGatewayState.Error, HomeCanvasGatewayState.Offline -> + HomeCanvasPayload( + gatewayState = if (state == HomeCanvasGatewayState.Error) "error" else "offline", + eyebrow = "Welcome to OpenClaw", + title = "Your phone stays quiet until it is needed", + subtitle = + "Pair this device to your gateway to wake it only for real work, keep a live agent overview handy, and avoid battery-draining background loops.", + gatewayLabel = gatewayLabel, + activeAgentName = "Main", + activeAgentBadge = "OC", + activeAgentCaption = "Connect to load your agents", + agentCount = agents.size, + agents = agents.take(4), + footer = "When connected, the gateway can wake the phone with a silent push instead of holding an always-on session.", + ) + } + } + + private fun resolveHomeCanvasGatewayState(): HomeCanvasGatewayState { + val lower = _statusText.value.trim().lowercase() + return when { + _isConnected.value -> HomeCanvasGatewayState.Connected + lower.contains("connecting") || lower.contains("reconnecting") -> HomeCanvasGatewayState.Connecting + lower.contains("error") || lower.contains("failed") -> HomeCanvasGatewayState.Error + else -> HomeCanvasGatewayState.Offline + } + } + + private fun resolveActiveAgentId(): String { + val mainKey = _mainSessionKey.value.trim() + if (mainKey.startsWith("agent:")) { + val agentId = mainKey.removePrefix("agent:").substringBefore(':').trim() + if (agentId.isNotEmpty()) return agentId + } + return gatewayDefaultAgentId?.trim().orEmpty() + } + + private fun 
resolveActiveAgentName(activeAgentId: String): String { + if (activeAgentId.isNotEmpty()) { + gatewayAgents.firstOrNull { it.id == activeAgentId }?.let { agent -> + return normalized(agent.name) ?: agent.id + } + return activeAgentId + } + return gatewayAgents.firstOrNull()?.let { normalized(it.name) ?: it.id } ?: "Main" + } + + private fun homeCanvasAgents(activeAgentId: String): List { + val defaultAgentId = gatewayDefaultAgentId?.trim().orEmpty() + return gatewayAgents + .map { agent -> + val isActive = activeAgentId.isNotEmpty() && agent.id == activeAgentId + val isDefault = defaultAgentId.isNotEmpty() && agent.id == defaultAgentId + HomeCanvasAgentCard( + id = agent.id, + name = normalized(agent.name) ?: agent.id, + badge = homeCanvasBadge(agent), + caption = + when { + isActive -> "Active on this phone" + isDefault -> "Default agent" + else -> "Ready" + }, + isActive = isActive, + ) + }.sortedWith(compareByDescending { it.isActive }.thenBy { it.name.lowercase() }) + } + + private fun homeCanvasBadge(agent: GatewayAgentSummary): String { + val emoji = normalized(agent.emoji) + if (emoji != null) return emoji + val initials = + (normalized(agent.name) ?: agent.id) + .split(' ', '-', '_') + .filter { it.isNotBlank() } + .take(2) + .mapNotNull { token -> token.firstOrNull()?.uppercaseChar()?.toString() } + .joinToString("") + return if (initials.isNotEmpty()) initials else "OC" + } + + private fun normalized(value: String?): String? { + val trimmed = value?.trim().orEmpty() + return trimmed.ifEmpty { null } + } + private fun triggerCameraFlash() { // Token is used as a pulse trigger; value doesn't matter as long as it changes. 
_cameraFlashToken.value = SystemClock.elapsedRealtimeNanos() @@ -920,3 +1132,40 @@ class NodeRuntime(context: Context) { } } + +private enum class HomeCanvasGatewayState { + Connected, + Connecting, + Error, + Offline, +} + +private data class GatewayAgentSummary( + val id: String, + val name: String?, + val emoji: String?, +) + +@Serializable +private data class HomeCanvasPayload( + val gatewayState: String, + val eyebrow: String, + val title: String, + val subtitle: String, + val gatewayLabel: String, + val activeAgentName: String, + val activeAgentBadge: String, + val activeAgentCaption: String, + val agentCount: Int, + val agents: List, + val footer: String, +) + +@Serializable +private data class HomeCanvasAgentCard( + val id: String, + val name: String, + val badge: String, + val caption: String, + val isActive: Boolean, +) diff --git a/apps/android/app/src/main/java/ai/openclaw/app/SecurePrefs.kt b/apps/android/app/src/main/java/ai/openclaw/app/SecurePrefs.kt index b7e72ee4126..a1aabeb1b3c 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/SecurePrefs.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/SecurePrefs.kt @@ -15,7 +15,10 @@ import kotlinx.serialization.json.JsonNull import kotlinx.serialization.json.JsonPrimitive import java.util.UUID -class SecurePrefs(context: Context) { +class SecurePrefs( + context: Context, + private val securePrefsOverride: SharedPreferences? 
= null, +) { companion object { val defaultWakeWords: List = listOf("openclaw", "claude") private const val displayNameKey = "node.displayName" @@ -35,7 +38,7 @@ class SecurePrefs(context: Context) { .setKeyScheme(MasterKey.KeyScheme.AES256_GCM) .build() } - private val securePrefs: SharedPreferences by lazy { createSecurePrefs(appContext, securePrefsName) } + private val securePrefs: SharedPreferences by lazy { securePrefsOverride ?: createSecurePrefs(appContext, securePrefsName) } private val _instanceId = MutableStateFlow(loadOrCreateInstanceId()) val instanceId: StateFlow = _instanceId @@ -76,6 +79,9 @@ class SecurePrefs(context: Context) { private val _gatewayToken = MutableStateFlow("") val gatewayToken: StateFlow = _gatewayToken + private val _gatewayBootstrapToken = MutableStateFlow("") + val gatewayBootstrapToken: StateFlow = _gatewayBootstrapToken + private val _onboardingCompleted = MutableStateFlow(plainPrefs.getBoolean("onboarding.completed", false)) val onboardingCompleted: StateFlow = _onboardingCompleted @@ -165,6 +171,10 @@ class SecurePrefs(context: Context) { saveGatewayPassword(value) } + fun setGatewayBootstrapToken(value: String) { + saveGatewayBootstrapToken(value) + } + fun setOnboardingCompleted(value: Boolean) { plainPrefs.edit { putBoolean("onboarding.completed", value) } _onboardingCompleted.value = value @@ -193,6 +203,26 @@ class SecurePrefs(context: Context) { securePrefs.edit { putString(key, token.trim()) } } + fun loadGatewayBootstrapToken(): String? 
{ + val key = "gateway.bootstrapToken.${_instanceId.value}" + val stored = + _gatewayBootstrapToken.value.trim().ifEmpty { + val persisted = securePrefs.getString(key, null)?.trim().orEmpty() + if (persisted.isNotEmpty()) { + _gatewayBootstrapToken.value = persisted + } + persisted + } + return stored.takeIf { it.isNotEmpty() } + } + + fun saveGatewayBootstrapToken(token: String) { + val key = "gateway.bootstrapToken.${_instanceId.value}" + val trimmed = token.trim() + securePrefs.edit { putString(key, trimmed) } + _gatewayBootstrapToken.value = trimmed + } + fun loadGatewayPassword(): String? { val key = "gateway.password.${_instanceId.value}" val stored = securePrefs.getString(key, null)?.trim() diff --git a/apps/android/app/src/main/java/ai/openclaw/app/gateway/DeviceAuthStore.kt b/apps/android/app/src/main/java/ai/openclaw/app/gateway/DeviceAuthStore.kt index d1ac63a90ff..202ea4820e1 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/gateway/DeviceAuthStore.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/gateway/DeviceAuthStore.kt @@ -5,6 +5,7 @@ import ai.openclaw.app.SecurePrefs interface DeviceAuthTokenStore { fun loadToken(deviceId: String, role: String): String? 
fun saveToken(deviceId: String, role: String, token: String) + fun clearToken(deviceId: String, role: String) } class DeviceAuthStore(private val prefs: SecurePrefs) : DeviceAuthTokenStore { @@ -18,7 +19,7 @@ class DeviceAuthStore(private val prefs: SecurePrefs) : DeviceAuthTokenStore { prefs.putString(key, token.trim()) } - fun clearToken(deviceId: String, role: String) { + override fun clearToken(deviceId: String, role: String) { val key = tokenKey(deviceId, role) prefs.remove(key) } diff --git a/apps/android/app/src/main/java/ai/openclaw/app/gateway/GatewaySession.kt b/apps/android/app/src/main/java/ai/openclaw/app/gateway/GatewaySession.kt index aee47eaada8..55e371a57c7 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/gateway/GatewaySession.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/gateway/GatewaySession.kt @@ -52,6 +52,33 @@ data class GatewayConnectOptions( val userAgent: String? = null, ) +private enum class GatewayConnectAuthSource { + DEVICE_TOKEN, + SHARED_TOKEN, + BOOTSTRAP_TOKEN, + PASSWORD, + NONE, +} + +data class GatewayConnectErrorDetails( + val code: String?, + val canRetryWithDeviceToken: Boolean, + val recommendedNextStep: String?, +) + +private data class SelectedConnectAuth( + val authToken: String?, + val authBootstrapToken: String?, + val authDeviceToken: String?, + val authPassword: String?, + val signatureToken: String?, + val authSource: GatewayConnectAuthSource, + val attemptedDeviceTokenRetry: Boolean, +) + +private class GatewayConnectFailure(val gatewayError: GatewaySession.ErrorShape) : + IllegalStateException(gatewayError.message) + class GatewaySession( private val scope: CoroutineScope, private val identityStore: DeviceIdentityStore, @@ -83,7 +110,11 @@ class GatewaySession( } } - data class ErrorShape(val code: String, val message: String) + data class ErrorShape( + val code: String, + val message: String, + val details: GatewayConnectErrorDetails? 
= null, + ) private val json = Json { ignoreUnknownKeys = true } private val writeLock = Mutex() @@ -95,6 +126,7 @@ class GatewaySession( private data class DesiredConnection( val endpoint: GatewayEndpoint, val token: String?, + val bootstrapToken: String?, val password: String?, val options: GatewayConnectOptions, val tls: GatewayTlsParams?, @@ -103,15 +135,22 @@ class GatewaySession( private var desired: DesiredConnection? = null private var job: Job? = null @Volatile private var currentConnection: Connection? = null + @Volatile private var pendingDeviceTokenRetry = false + @Volatile private var deviceTokenRetryBudgetUsed = false + @Volatile private var reconnectPausedForAuthFailure = false fun connect( endpoint: GatewayEndpoint, token: String?, + bootstrapToken: String?, password: String?, options: GatewayConnectOptions, tls: GatewayTlsParams? = null, ) { - desired = DesiredConnection(endpoint, token, password, options, tls) + desired = DesiredConnection(endpoint, token, bootstrapToken, password, options, tls) + pendingDeviceTokenRetry = false + deviceTokenRetryBudgetUsed = false + reconnectPausedForAuthFailure = false if (job == null) { job = scope.launch(Dispatchers.IO) { runLoop() } } @@ -119,6 +158,9 @@ class GatewaySession( fun disconnect() { desired = null + pendingDeviceTokenRetry = false + deviceTokenRetryBudgetUsed = false + reconnectPausedForAuthFailure = false currentConnection?.closeQuietly() scope.launch(Dispatchers.IO) { job?.cancelAndJoin() @@ -130,6 +172,7 @@ class GatewaySession( } fun reconnect() { + reconnectPausedForAuthFailure = false currentConnection?.closeQuietly() } @@ -219,6 +262,7 @@ class GatewaySession( private inner class Connection( private val endpoint: GatewayEndpoint, private val token: String?, + private val bootstrapToken: String?, private val password: String?, private val options: GatewayConnectOptions, private val tls: GatewayTlsParams?, @@ -344,15 +388,48 @@ class GatewaySession( private suspend fun 
sendConnect(connectNonce: String) { val identity = identityStore.loadOrCreate() - val storedToken = deviceAuthStore.loadToken(identity.deviceId, options.role) - val trimmedToken = token?.trim().orEmpty() - // QR/setup/manual shared token must take precedence; stale role tokens can survive re-onboarding. - val authToken = if (trimmedToken.isNotBlank()) trimmedToken else storedToken.orEmpty() - val payload = buildConnectParams(identity, connectNonce, authToken, password?.trim()) + val storedToken = deviceAuthStore.loadToken(identity.deviceId, options.role)?.trim() + val selectedAuth = + selectConnectAuth( + endpoint = endpoint, + tls = tls, + role = options.role, + explicitGatewayToken = token?.trim()?.takeIf { it.isNotEmpty() }, + explicitBootstrapToken = bootstrapToken?.trim()?.takeIf { it.isNotEmpty() }, + explicitPassword = password?.trim()?.takeIf { it.isNotEmpty() }, + storedToken = storedToken?.takeIf { it.isNotEmpty() }, + ) + if (selectedAuth.attemptedDeviceTokenRetry) { + pendingDeviceTokenRetry = false + } + val payload = + buildConnectParams( + identity = identity, + connectNonce = connectNonce, + selectedAuth = selectedAuth, + ) val res = request("connect", payload, timeoutMs = CONNECT_RPC_TIMEOUT_MS) if (!res.ok) { - val msg = res.error?.message ?: "connect failed" - throw IllegalStateException(msg) + val error = res.error ?: ErrorShape("UNAVAILABLE", "connect failed") + val shouldRetryWithDeviceToken = + shouldRetryWithStoredDeviceToken( + error = error, + explicitGatewayToken = token?.trim()?.takeIf { it.isNotEmpty() }, + storedToken = storedToken?.takeIf { it.isNotEmpty() }, + attemptedDeviceTokenRetry = selectedAuth.attemptedDeviceTokenRetry, + endpoint = endpoint, + tls = tls, + ) + if (shouldRetryWithDeviceToken) { + pendingDeviceTokenRetry = true + deviceTokenRetryBudgetUsed = true + } else if ( + selectedAuth.attemptedDeviceTokenRetry && + shouldClearStoredDeviceTokenAfterRetry(error) + ) { + deviceAuthStore.clearToken(identity.deviceId, 
options.role) + } + throw GatewayConnectFailure(error) } handleConnectSuccess(res, identity.deviceId) connectDeferred.complete(Unit) @@ -361,6 +438,9 @@ class GatewaySession( private fun handleConnectSuccess(res: RpcResponse, deviceId: String) { val payloadJson = res.payloadJson ?: throw IllegalStateException("connect failed: missing payload") val obj = json.parseToJsonElement(payloadJson).asObjectOrNull() ?: throw IllegalStateException("connect failed") + pendingDeviceTokenRetry = false + deviceTokenRetryBudgetUsed = false + reconnectPausedForAuthFailure = false val serverName = obj["server"].asObjectOrNull()?.get("host").asStringOrNull() val authObj = obj["auth"].asObjectOrNull() val deviceToken = authObj?.get("deviceToken").asStringOrNull() @@ -380,8 +460,7 @@ class GatewaySession( private fun buildConnectParams( identity: DeviceIdentity, connectNonce: String, - authToken: String, - authPassword: String?, + selectedAuth: SelectedConnectAuth, ): JsonObject { val client = options.client val locale = Locale.getDefault().toLanguageTag() @@ -397,16 +476,20 @@ class GatewaySession( client.modelIdentifier?.let { put("modelIdentifier", JsonPrimitive(it)) } } - val password = authPassword?.trim().orEmpty() val authJson = when { - authToken.isNotEmpty() -> + selectedAuth.authToken != null -> buildJsonObject { - put("token", JsonPrimitive(authToken)) + put("token", JsonPrimitive(selectedAuth.authToken)) + selectedAuth.authDeviceToken?.let { put("deviceToken", JsonPrimitive(it)) } } - password.isNotEmpty() -> + selectedAuth.authBootstrapToken != null -> buildJsonObject { - put("password", JsonPrimitive(password)) + put("bootstrapToken", JsonPrimitive(selectedAuth.authBootstrapToken)) + } + selectedAuth.authPassword != null -> + buildJsonObject { + put("password", JsonPrimitive(selectedAuth.authPassword)) } else -> null } @@ -420,7 +503,7 @@ class GatewaySession( role = options.role, scopes = options.scopes, signedAtMs = signedAtMs, - token = if (authToken.isNotEmpty()) 
authToken else null, + token = selectedAuth.signatureToken, nonce = connectNonce, platform = client.platform, deviceFamily = client.deviceFamily, @@ -483,7 +566,16 @@ class GatewaySession( frame["error"]?.asObjectOrNull()?.let { obj -> val code = obj["code"].asStringOrNull() ?: "UNAVAILABLE" val msg = obj["message"].asStringOrNull() ?: "request failed" - ErrorShape(code, msg) + val detailObj = obj["details"].asObjectOrNull() + val details = + detailObj?.let { + GatewayConnectErrorDetails( + code = it["code"].asStringOrNull(), + canRetryWithDeviceToken = it["canRetryWithDeviceToken"].asBooleanOrNull() == true, + recommendedNextStep = it["recommendedNextStep"].asStringOrNull(), + ) + } + ErrorShape(code, msg, details) } pending.remove(id)?.complete(RpcResponse(id, ok, payloadJson, error)) } @@ -607,6 +699,10 @@ class GatewaySession( delay(250) continue } + if (reconnectPausedForAuthFailure) { + delay(250) + continue + } try { onDisconnected(if (attempt == 0) "Connecting…" else "Reconnecting…") @@ -615,6 +711,13 @@ class GatewaySession( } catch (err: Throwable) { attempt += 1 onDisconnected("Gateway error: ${err.message ?: err::class.java.simpleName}") + if ( + err is GatewayConnectFailure && + shouldPauseReconnectAfterAuthFailure(err.gatewayError) + ) { + reconnectPausedForAuthFailure = true + continue + } val sleepMs = minOf(8_000L, (350.0 * Math.pow(1.7, attempt.toDouble())).toLong()) delay(sleepMs) } @@ -622,7 +725,15 @@ class GatewaySession( } private suspend fun connectOnce(target: DesiredConnection) = withContext(Dispatchers.IO) { - val conn = Connection(target.endpoint, target.token, target.password, target.options, target.tls) + val conn = + Connection( + target.endpoint, + target.token, + target.bootstrapToken, + target.password, + target.options, + target.tls, + ) currentConnection = conn try { conn.connect() @@ -698,6 +809,100 @@ class GatewaySession( if (host == "0.0.0.0" || host == "::") return true return host.startsWith("127.") } + + private fun 
selectConnectAuth( + endpoint: GatewayEndpoint, + tls: GatewayTlsParams?, + role: String, + explicitGatewayToken: String?, + explicitBootstrapToken: String?, + explicitPassword: String?, + storedToken: String?, + ): SelectedConnectAuth { + val shouldUseDeviceRetryToken = + pendingDeviceTokenRetry && + explicitGatewayToken != null && + storedToken != null && + isTrustedDeviceRetryEndpoint(endpoint, tls) + val authToken = + explicitGatewayToken + ?: if ( + explicitPassword == null && + (explicitBootstrapToken == null || storedToken != null) + ) { + storedToken + } else { + null + } + val authDeviceToken = if (shouldUseDeviceRetryToken) storedToken else null + val authBootstrapToken = if (authToken == null) explicitBootstrapToken else null + val authSource = + when { + authDeviceToken != null || (explicitGatewayToken == null && authToken != null) -> + GatewayConnectAuthSource.DEVICE_TOKEN + authToken != null -> GatewayConnectAuthSource.SHARED_TOKEN + authBootstrapToken != null -> GatewayConnectAuthSource.BOOTSTRAP_TOKEN + explicitPassword != null -> GatewayConnectAuthSource.PASSWORD + else -> GatewayConnectAuthSource.NONE + } + return SelectedConnectAuth( + authToken = authToken, + authBootstrapToken = authBootstrapToken, + authDeviceToken = authDeviceToken, + authPassword = explicitPassword, + signatureToken = authToken ?: authBootstrapToken, + authSource = authSource, + attemptedDeviceTokenRetry = shouldUseDeviceRetryToken, + ) + } + + private fun shouldRetryWithStoredDeviceToken( + error: ErrorShape, + explicitGatewayToken: String?, + storedToken: String?, + attemptedDeviceTokenRetry: Boolean, + endpoint: GatewayEndpoint, + tls: GatewayTlsParams?, + ): Boolean { + if (deviceTokenRetryBudgetUsed) return false + if (attemptedDeviceTokenRetry) return false + if (explicitGatewayToken == null || storedToken == null) return false + if (!isTrustedDeviceRetryEndpoint(endpoint, tls)) return false + val detailCode = error.details?.code + val recommendedNextStep = 
error.details?.recommendedNextStep + return error.details?.canRetryWithDeviceToken == true || + recommendedNextStep == "retry_with_device_token" || + detailCode == "AUTH_TOKEN_MISMATCH" + } + + private fun shouldPauseReconnectAfterAuthFailure(error: ErrorShape): Boolean { + return when (error.details?.code) { + "AUTH_TOKEN_MISSING", + "AUTH_BOOTSTRAP_TOKEN_INVALID", + "AUTH_PASSWORD_MISSING", + "AUTH_PASSWORD_MISMATCH", + "AUTH_RATE_LIMITED", + "PAIRING_REQUIRED", + "CONTROL_UI_DEVICE_IDENTITY_REQUIRED", + "DEVICE_IDENTITY_REQUIRED" -> true + "AUTH_TOKEN_MISMATCH" -> deviceTokenRetryBudgetUsed && !pendingDeviceTokenRetry + else -> false + } + } + + private fun shouldClearStoredDeviceTokenAfterRetry(error: ErrorShape): Boolean { + return error.details?.code == "AUTH_DEVICE_TOKEN_MISMATCH" + } + + private fun isTrustedDeviceRetryEndpoint( + endpoint: GatewayEndpoint, + tls: GatewayTlsParams?, + ): Boolean { + if (isLoopbackHost(endpoint.host)) { + return true + } + return tls?.expectedFingerprint?.trim()?.isNotEmpty() == true + } } private fun JsonElement?.asObjectOrNull(): JsonObject? = this as? JsonObject diff --git a/apps/android/app/src/main/java/ai/openclaw/app/node/CanvasController.kt b/apps/android/app/src/main/java/ai/openclaw/app/node/CanvasController.kt index 9efb2a924d7..0eab9d75a5b 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/node/CanvasController.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/node/CanvasController.kt @@ -34,6 +34,7 @@ class CanvasController { @Volatile private var debugStatusEnabled: Boolean = false @Volatile private var debugStatusTitle: String? = null @Volatile private var debugStatusSubtitle: String? = null + @Volatile private var homeCanvasStateJson: String? 
= null private val _currentUrl = MutableStateFlow(null) val currentUrl: StateFlow = _currentUrl.asStateFlow() @@ -56,6 +57,7 @@ class CanvasController { this.webView = webView reload() applyDebugStatus() + applyHomeCanvasState() } fun detach(webView: WebView) { @@ -88,6 +90,12 @@ class CanvasController { fun onPageFinished() { applyDebugStatus() + applyHomeCanvasState() + } + + fun updateHomeCanvasState(json: String?) { + homeCanvasStateJson = json + applyHomeCanvasState() } private inline fun withWebViewOnMain(crossinline block: (WebView) -> Unit) { @@ -142,6 +150,22 @@ class CanvasController { } } + private fun applyHomeCanvasState() { + val payload = homeCanvasStateJson ?: "null" + withWebViewOnMain { wv -> + val js = """ + (() => { + try { + const api = globalThis.__openclaw; + if (!api || typeof api.renderHome !== 'function') return; + api.renderHome($payload); + } catch (_) {} + })(); + """.trimIndent() + wv.evaluateJavascript(js, null) + } + } + suspend fun eval(javaScript: String): String = withContext(Dispatchers.Main) { val wv = webView ?: throw IllegalStateException("no webview") diff --git a/apps/android/app/src/main/java/ai/openclaw/app/ui/ConnectTabScreen.kt b/apps/android/app/src/main/java/ai/openclaw/app/ui/ConnectTabScreen.kt index 4b8ac2c8e5d..448336d8e41 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/ui/ConnectTabScreen.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/ui/ConnectTabScreen.kt @@ -8,6 +8,7 @@ import androidx.compose.foundation.layout.Box import androidx.compose.foundation.layout.Column import androidx.compose.foundation.layout.PaddingValues import androidx.compose.foundation.layout.Row +import androidx.compose.foundation.layout.Spacer import androidx.compose.foundation.layout.fillMaxWidth import androidx.compose.foundation.layout.height import androidx.compose.foundation.layout.padding @@ -18,8 +19,11 @@ import androidx.compose.foundation.shape.RoundedCornerShape import 
androidx.compose.foundation.text.KeyboardOptions import androidx.compose.foundation.verticalScroll import androidx.compose.material.icons.Icons +import androidx.compose.material.icons.filled.Cloud import androidx.compose.material.icons.filled.ExpandLess import androidx.compose.material.icons.filled.ExpandMore +import androidx.compose.material.icons.filled.Link +import androidx.compose.material.icons.filled.PowerSettingsNew import androidx.compose.material3.AlertDialog import androidx.compose.material3.Button import androidx.compose.material3.ButtonDefaults @@ -128,93 +132,142 @@ fun ConnectTabScreen(viewModel: MainViewModel) { verticalArrangement = Arrangement.spacedBy(14.dp), ) { Column(verticalArrangement = Arrangement.spacedBy(6.dp)) { - Text("Connection Control", style = mobileCaption1.copy(fontWeight = FontWeight.Bold), color = mobileAccent) Text("Gateway Connection", style = mobileTitle1, color = mobileText) Text( - "One primary action. Open advanced controls only when needed.", + if (isConnected) "Your gateway is active and ready." 
else "Connect to your gateway to get started.", style = mobileCallout, color = mobileTextSecondary, ) } + // Status cards in a unified card group Surface( modifier = Modifier.fillMaxWidth(), shape = RoundedCornerShape(14.dp), - color = mobileSurface, + color = Color.White, border = BorderStroke(1.dp, mobileBorder), ) { - Column(modifier = Modifier.padding(horizontal = 14.dp, vertical = 12.dp), verticalArrangement = Arrangement.spacedBy(4.dp)) { - Text("Active endpoint", style = mobileCaption1.copy(fontWeight = FontWeight.SemiBold), color = mobileTextSecondary) - Text(activeEndpoint, style = mobileBody.copy(fontFamily = FontFamily.Monospace), color = mobileText) + Column { + Row( + modifier = Modifier.fillMaxWidth().padding(horizontal = 14.dp, vertical = 12.dp), + verticalAlignment = Alignment.CenterVertically, + horizontalArrangement = Arrangement.spacedBy(12.dp), + ) { + Surface( + shape = RoundedCornerShape(10.dp), + color = mobileAccentSoft, + ) { + Icon( + imageVector = Icons.Default.Link, + contentDescription = null, + modifier = Modifier.padding(8.dp).size(18.dp), + tint = mobileAccent, + ) + } + Column(verticalArrangement = Arrangement.spacedBy(2.dp)) { + Text("Endpoint", style = mobileCaption1.copy(fontWeight = FontWeight.SemiBold), color = mobileTextSecondary) + Text(activeEndpoint, style = mobileBody.copy(fontFamily = FontFamily.Monospace), color = mobileText) + } + } + HorizontalDivider(color = mobileBorder) + Row( + modifier = Modifier.fillMaxWidth().padding(horizontal = 14.dp, vertical = 12.dp), + verticalAlignment = Alignment.CenterVertically, + horizontalArrangement = Arrangement.spacedBy(12.dp), + ) { + Surface( + shape = RoundedCornerShape(10.dp), + color = if (isConnected) mobileSuccessSoft else mobileSurface, + ) { + Icon( + imageVector = Icons.Default.Cloud, + contentDescription = null, + modifier = Modifier.padding(8.dp).size(18.dp), + tint = if (isConnected) mobileSuccess else mobileTextTertiary, + ) + } + Column(verticalArrangement = 
Arrangement.spacedBy(2.dp)) { + Text("Status", style = mobileCaption1.copy(fontWeight = FontWeight.SemiBold), color = mobileTextSecondary) + Text(statusText, style = mobileBody, color = if (isConnected) mobileSuccess else mobileText) + } + } } } - Surface( - modifier = Modifier.fillMaxWidth(), - shape = RoundedCornerShape(14.dp), - color = mobileSurface, - border = BorderStroke(1.dp, mobileBorder), - ) { - Column(modifier = Modifier.padding(horizontal = 14.dp, vertical = 12.dp), verticalArrangement = Arrangement.spacedBy(4.dp)) { - Text("Gateway state", style = mobileCaption1.copy(fontWeight = FontWeight.SemiBold), color = mobileTextSecondary) - Text(statusText, style = mobileBody, color = mobileText) - } - } - - Button( - onClick = { - if (isConnected) { + if (isConnected) { + // Outlined secondary button when connected — don't scream "danger" + Button( + onClick = { viewModel.disconnect() validationText = null - return@Button - } - if (statusText.contains("operator offline", ignoreCase = true)) { + }, + modifier = Modifier.fillMaxWidth().height(48.dp), + shape = RoundedCornerShape(14.dp), + colors = + ButtonDefaults.buttonColors( + containerColor = Color.White, + contentColor = mobileDanger, + ), + border = BorderStroke(1.dp, mobileDanger.copy(alpha = 0.4f)), + ) { + Icon(Icons.Default.PowerSettingsNew, contentDescription = null, modifier = Modifier.size(18.dp)) + Spacer(modifier = Modifier.width(8.dp)) + Text("Disconnect", style = mobileHeadline.copy(fontWeight = FontWeight.SemiBold)) + } + } else { + Button( + onClick = { + if (statusText.contains("operator offline", ignoreCase = true)) { + validationText = null + viewModel.refreshGatewayConnection() + return@Button + } + + val config = + resolveGatewayConnectConfig( + useSetupCode = inputMode == ConnectInputMode.SetupCode, + setupCode = setupCode, + manualHost = manualHostInput, + manualPort = manualPortInput, + manualTls = manualTlsInput, + fallbackToken = gatewayToken, + fallbackPassword = passwordInput, + ) 
+ + if (config == null) { + validationText = + if (inputMode == ConnectInputMode.SetupCode) { + "Paste a valid setup code to connect." + } else { + "Enter a valid manual host and port to connect." + } + return@Button + } + validationText = null - viewModel.refreshGatewayConnection() - return@Button - } - - val config = - resolveGatewayConnectConfig( - useSetupCode = inputMode == ConnectInputMode.SetupCode, - setupCode = setupCode, - manualHost = manualHostInput, - manualPort = manualPortInput, - manualTls = manualTlsInput, - fallbackToken = gatewayToken, - fallbackPassword = passwordInput, - ) - - if (config == null) { - validationText = - if (inputMode == ConnectInputMode.SetupCode) { - "Paste a valid setup code to connect." - } else { - "Enter a valid manual host and port to connect." - } - return@Button - } - - validationText = null - viewModel.setManualEnabled(true) - viewModel.setManualHost(config.host) - viewModel.setManualPort(config.port) - viewModel.setManualTls(config.tls) - if (config.token.isNotBlank()) { - viewModel.setGatewayToken(config.token) - } - viewModel.setGatewayPassword(config.password) - viewModel.connectManual() - }, - modifier = Modifier.fillMaxWidth().height(52.dp), - shape = RoundedCornerShape(14.dp), - colors = - ButtonDefaults.buttonColors( - containerColor = if (isConnected) mobileDanger else mobileAccent, - contentColor = Color.White, - ), - ) { - Text(primaryLabel, style = mobileHeadline.copy(fontWeight = FontWeight.Bold)) + viewModel.setManualEnabled(true) + viewModel.setManualHost(config.host) + viewModel.setManualPort(config.port) + viewModel.setManualTls(config.tls) + viewModel.setGatewayBootstrapToken(config.bootstrapToken) + if (config.token.isNotBlank()) { + viewModel.setGatewayToken(config.token) + } else if (config.bootstrapToken.isNotBlank()) { + viewModel.setGatewayToken("") + } + viewModel.setGatewayPassword(config.password) + viewModel.connectManual() + }, + modifier = Modifier.fillMaxWidth().height(52.dp), + shape = 
RoundedCornerShape(14.dp), + colors = + ButtonDefaults.buttonColors( + containerColor = mobileAccent, + contentColor = Color.White, + ), + ) { + Text("Connect Gateway", style = mobileHeadline.copy(fontWeight = FontWeight.Bold)) + } } Surface( diff --git a/apps/android/app/src/main/java/ai/openclaw/app/ui/GatewayConfigResolver.kt b/apps/android/app/src/main/java/ai/openclaw/app/ui/GatewayConfigResolver.kt index 93b4fc1bb60..3416900ed5b 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/ui/GatewayConfigResolver.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/ui/GatewayConfigResolver.kt @@ -1,8 +1,8 @@ package ai.openclaw.app.ui -import androidx.core.net.toUri import java.util.Base64 import java.util.Locale +import java.net.URI import kotlinx.serialization.json.Json import kotlinx.serialization.json.JsonObject import kotlinx.serialization.json.JsonPrimitive @@ -18,6 +18,7 @@ internal data class GatewayEndpointConfig( internal data class GatewaySetupCode( val url: String, + val bootstrapToken: String?, val token: String?, val password: String?, ) @@ -26,6 +27,7 @@ internal data class GatewayConnectConfig( val host: String, val port: Int, val tls: Boolean, + val bootstrapToken: String, val token: String, val password: String, ) @@ -44,12 +46,26 @@ internal fun resolveGatewayConnectConfig( if (useSetupCode) { val setup = decodeGatewaySetupCode(setupCode) ?: return null val parsed = parseGatewayEndpoint(setup.url) ?: return null + val setupBootstrapToken = setup.bootstrapToken?.trim().orEmpty() + val sharedToken = + when { + !setup.token.isNullOrBlank() -> setup.token.trim() + setupBootstrapToken.isNotEmpty() -> "" + else -> fallbackToken.trim() + } + val sharedPassword = + when { + !setup.password.isNullOrBlank() -> setup.password.trim() + setupBootstrapToken.isNotEmpty() -> "" + else -> fallbackPassword.trim() + } return GatewayConnectConfig( host = parsed.host, port = parsed.port, tls = parsed.tls, - token = setup.token ?: fallbackToken.trim(), - 
password = setup.password ?: fallbackPassword.trim(), + bootstrapToken = setupBootstrapToken, + token = sharedToken, + password = sharedPassword, ) } @@ -59,6 +75,7 @@ internal fun resolveGatewayConnectConfig( host = parsed.host, port = parsed.port, tls = parsed.tls, + bootstrapToken = "", token = fallbackToken.trim(), password = fallbackPassword.trim(), ) @@ -69,7 +86,7 @@ internal fun parseGatewayEndpoint(rawInput: String): GatewayEndpointConfig? { if (raw.isEmpty()) return null val normalized = if (raw.contains("://")) raw else "https://$raw" - val uri = normalized.toUri() + val uri = runCatching { URI(normalized) }.getOrNull() ?: return null val host = uri.host?.trim().orEmpty() if (host.isEmpty()) return null @@ -80,7 +97,7 @@ internal fun parseGatewayEndpoint(rawInput: String): GatewayEndpointConfig? { "wss", "https" -> true else -> true } - val port = uri.port.takeIf { it in 1..65535 } ?: 18789 + val port = uri.port.takeIf { it in 1..65535 } ?: if (tls) 443 else 18789 val displayUrl = "${if (tls) "https" else "http"}://$host:$port" return GatewayEndpointConfig(host = host, port = port, tls = tls, displayUrl = displayUrl) @@ -104,9 +121,10 @@ internal fun decodeGatewaySetupCode(rawInput: String): GatewaySetupCode? 
{ val obj = parseJsonObject(decoded) ?: return null val url = jsonField(obj, "url").orEmpty() if (url.isEmpty()) return null + val bootstrapToken = jsonField(obj, "bootstrapToken") val token = jsonField(obj, "token") val password = jsonField(obj, "password") - GatewaySetupCode(url = url, token = token, password = password) + GatewaySetupCode(url = url, bootstrapToken = bootstrapToken, token = token, password = password) } catch (_: IllegalArgumentException) { null } diff --git a/apps/android/app/src/main/java/ai/openclaw/app/ui/OnboardingFlow.kt b/apps/android/app/src/main/java/ai/openclaw/app/ui/OnboardingFlow.kt index 8810ea93fcb..db550ded615 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/ui/OnboardingFlow.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/ui/OnboardingFlow.kt @@ -57,8 +57,16 @@ import androidx.compose.material3.Text import androidx.compose.material3.TextButton import androidx.compose.material.icons.Icons import androidx.compose.material.icons.automirrored.filled.ArrowBack +import androidx.compose.material.icons.filled.ChatBubble +import androidx.compose.material.icons.filled.CheckCircle +import androidx.compose.material.icons.filled.Cloud import androidx.compose.material.icons.filled.ExpandLess import androidx.compose.material.icons.filled.ExpandMore +import androidx.compose.material.icons.filled.Link +import androidx.compose.material.icons.filled.Security +import androidx.compose.material.icons.filled.Tune +import androidx.compose.material.icons.filled.Wifi +import androidx.compose.ui.graphics.vector.ImageVector import androidx.compose.runtime.Composable import androidx.compose.runtime.DisposableEffect import androidx.compose.runtime.collectAsState @@ -68,6 +76,7 @@ import androidx.compose.runtime.remember import androidx.compose.runtime.saveable.rememberSaveable import androidx.compose.runtime.setValue import androidx.compose.ui.Alignment +import androidx.compose.ui.draw.clip import androidx.compose.ui.Modifier import 
androidx.compose.ui.graphics.Brush import androidx.compose.ui.graphics.Color @@ -87,8 +96,9 @@ import ai.openclaw.app.LocationMode import ai.openclaw.app.MainViewModel import ai.openclaw.app.R import ai.openclaw.app.node.DeviceNotificationListenerService -import com.journeyapps.barcodescanner.ScanContract -import com.journeyapps.barcodescanner.ScanOptions +import com.google.mlkit.vision.barcode.common.Barcode +import com.google.mlkit.vision.codescanner.GmsBarcodeScannerOptions +import com.google.mlkit.vision.codescanner.GmsBarcodeScanning private enum class OnboardingStep(val index: Int, val label: String) { Welcome(1, "Welcome"), @@ -232,6 +242,13 @@ fun OnboardingFlow(viewModel: MainViewModel, modifier: Modifier = Modifier) { var attemptedConnect by rememberSaveable { mutableStateOf(false) } val lifecycleOwner = LocalLifecycleOwner.current + val qrScannerOptions = + remember { + GmsBarcodeScannerOptions.Builder() + .setBarcodeFormats(Barcode.FORMAT_QR_CODE) + .build() + } + val qrScanner = remember(context, qrScannerOptions) { GmsBarcodeScanning.getClient(context, qrScannerOptions) } val smsAvailable = remember(context) { @@ -451,23 +468,6 @@ fun OnboardingFlow(viewModel: MainViewModel, modifier: Modifier = Modifier) { onDispose { lifecycleOwner.lifecycle.removeObserver(observer) } } - val qrScanLauncher = - rememberLauncherForActivityResult(ScanContract()) { result -> - val contents = result.contents?.trim().orEmpty() - if (contents.isEmpty()) { - return@rememberLauncherForActivityResult - } - val scannedSetupCode = resolveScannedSetupCode(contents) - if (scannedSetupCode == null) { - gatewayError = "QR code did not contain a valid setup code." - return@rememberLauncherForActivityResult - } - setupCode = scannedSetupCode - gatewayInputMode = GatewayInputMode.SetupCode - gatewayError = null - attemptedConnect = false - } - if (pendingTrust != null) { val prompt = pendingTrust!! 
AlertDialog( @@ -513,25 +513,20 @@ fun OnboardingFlow(viewModel: MainViewModel, modifier: Modifier = Modifier) { ) { Column( modifier = Modifier.padding(top = 12.dp), - verticalArrangement = Arrangement.spacedBy(8.dp), + verticalArrangement = Arrangement.spacedBy(4.dp), ) { Text( - "FIRST RUN", - style = onboardingCaption1Style.copy(fontWeight = FontWeight.Bold, letterSpacing = 1.5.sp), - color = onboardingAccent, - ) - Text( - "OpenClaw\nMobile Setup", - style = onboardingDisplayStyle.copy(lineHeight = 38.sp), + "OpenClaw", + style = onboardingDisplayStyle, color = onboardingText, ) Text( - "Step ${step.index} of 4", - style = onboardingCaption1Style, - color = onboardingAccent, + "Mobile Setup", + style = onboardingTitle1Style, + color = onboardingTextSecondary, ) } - StepRailWrap(current = step) + StepRail(current = step) when (step) { OnboardingStep.Welcome -> WelcomeStep() @@ -548,14 +543,28 @@ fun OnboardingFlow(viewModel: MainViewModel, modifier: Modifier = Modifier) { gatewayError = gatewayError, onScanQrClick = { gatewayError = null - qrScanLauncher.launch( - ScanOptions().apply { - setDesiredBarcodeFormats(ScanOptions.QR_CODE) - setPrompt("Scan OpenClaw onboarding QR") - setBeepEnabled(false) - setOrientationLocked(false) - }, - ) + qrScanner.startScan() + .addOnSuccessListener { barcode -> + val contents = barcode.rawValue?.trim().orEmpty() + if (contents.isEmpty()) { + return@addOnSuccessListener + } + val scannedSetupCode = resolveScannedSetupCode(contents) + if (scannedSetupCode == null) { + gatewayError = "QR code did not contain a valid setup code." + return@addOnSuccessListener + } + setupCode = scannedSetupCode + gatewayInputMode = GatewayInputMode.SetupCode + gatewayError = null + attemptedConnect = false + } + .addOnCanceledListener { + // User dismissed the scanner; preserve current form state. 
+ } + .addOnFailureListener { + gatewayError = qrScannerErrorMessage() + } }, onAdvancedOpenChange = { gatewayAdvancedOpen = it }, onInputModeChange = { @@ -772,8 +781,18 @@ fun OnboardingFlow(viewModel: MainViewModel, modifier: Modifier = Modifier) { return@Button } gatewayUrl = parsedSetup.url - parsedSetup.token?.let { viewModel.setGatewayToken(it) } - gatewayPassword = parsedSetup.password.orEmpty() + viewModel.setGatewayBootstrapToken(parsedSetup.bootstrapToken.orEmpty()) + val sharedToken = parsedSetup.token.orEmpty().trim() + val password = parsedSetup.password.orEmpty().trim() + if (sharedToken.isNotEmpty()) { + viewModel.setGatewayToken(sharedToken) + } else if (!parsedSetup.bootstrapToken.isNullOrBlank()) { + viewModel.setGatewayToken("") + } + gatewayPassword = password + if (password.isEmpty() && !parsedSetup.bootstrapToken.isNullOrBlank()) { + viewModel.setGatewayPassword("") + } } else { val manualUrl = composeGatewayManualUrl(manualHost, manualPort, manualTls) val parsedGateway = manualUrl?.let(::parseGatewayEndpoint) @@ -782,6 +801,7 @@ fun OnboardingFlow(viewModel: MainViewModel, modifier: Modifier = Modifier) { return@Button } gatewayUrl = parsedGateway.displayUrl + viewModel.setGatewayBootstrapToken("") } step = OnboardingStep.Permissions }, @@ -850,8 +870,13 @@ fun OnboardingFlow(viewModel: MainViewModel, modifier: Modifier = Modifier) { viewModel.setManualHost(parsed.host) viewModel.setManualPort(parsed.port) viewModel.setManualTls(parsed.tls) + if (gatewayInputMode == GatewayInputMode.Manual) { + viewModel.setGatewayBootstrapToken("") + } if (token.isNotEmpty()) { viewModel.setGatewayToken(token) + } else { + viewModel.setGatewayToken("") } viewModel.setGatewayPassword(password) viewModel.connectManual() @@ -876,15 +901,6 @@ fun OnboardingFlow(viewModel: MainViewModel, modifier: Modifier = Modifier) { } } -@Composable -private fun StepRailWrap(current: OnboardingStep) { - Column(verticalArrangement = Arrangement.spacedBy(10.dp)) { - 
HorizontalDivider(color = onboardingBorder) - StepRail(current = current) - HorizontalDivider(color = onboardingBorder) - } -} - @Composable private fun StepRail(current: OnboardingStep) { val steps = OnboardingStep.entries @@ -926,11 +942,31 @@ private fun StepRail(current: OnboardingStep) { @Composable private fun WelcomeStep() { - StepShell(title = "What You Get") { - Bullet("Control the gateway and operator chat from one mobile surface.") - Bullet("Connect with setup code and recover pairing with CLI commands.") - Bullet("Enable only the permissions and capabilities you want.") - Bullet("Finish with a real connection check before entering the app.") + Column(verticalArrangement = Arrangement.spacedBy(10.dp)) { + FeatureCard( + icon = Icons.Default.Wifi, + title = "Connect to your gateway", + subtitle = "Scan a QR code or enter your host manually", + accentColor = onboardingAccent, + ) + FeatureCard( + icon = Icons.Default.Tune, + title = "Choose your permissions", + subtitle = "Enable only what you need, change anytime", + accentColor = Color(0xFF7C5AC7), + ) + FeatureCard( + icon = Icons.Default.ChatBubble, + title = "Chat, voice, and screen", + subtitle = "Full operator control from your phone", + accentColor = onboardingSuccess, + ) + FeatureCard( + icon = Icons.Default.CheckCircle, + title = "Verify your connection", + subtitle = "Live check before you enter the app", + accentColor = Color(0xFFC8841A), + ) } } @@ -959,11 +995,12 @@ private fun GatewayStep( val manualResolvedEndpoint = remember(manualHost, manualPort, manualTls) { composeGatewayManualUrl(manualHost, manualPort, manualTls)?.let { parseGatewayEndpoint(it)?.displayUrl } } StepShell(title = "Gateway Connection") { - GuideBlock(title = "Scan onboarding QR") { - Text("Run these on the gateway host:", style = onboardingCalloutStyle, color = onboardingTextSecondary) - CommandBlock("openclaw qr") - Text("Then scan with this device.", style = onboardingCalloutStyle, color = onboardingTextSecondary) - 
} + Text( + "Run `openclaw qr` on your gateway host, then scan the code with this device.", + style = onboardingCalloutStyle, + color = onboardingTextSecondary, + ) + CommandBlock("openclaw qr") Button( onClick = onScanQrClick, modifier = Modifier.fillMaxWidth().height(48.dp), @@ -1007,21 +1044,6 @@ private fun GatewayStep( AnimatedVisibility(visible = advancedOpen) { Column(verticalArrangement = Arrangement.spacedBy(12.dp)) { - GuideBlock(title = "Manual setup commands") { - Text("Run these on the gateway host:", style = onboardingCalloutStyle, color = onboardingTextSecondary) - CommandBlock("openclaw qr --setup-code-only") - CommandBlock("openclaw qr --json") - Text( - "`--json` prints `setupCode` and `gatewayUrl`.", - style = onboardingCalloutStyle, - color = onboardingTextSecondary, - ) - Text( - "Auto URL discovery is not wired yet. Android emulator uses `10.0.2.2`; real devices need LAN/Tailscale host.", - style = onboardingCalloutStyle, - color = onboardingTextSecondary, - ) - } GatewayModeToggle(inputMode = inputMode, onInputModeChange = onInputModeChange) if (inputMode == GatewayInputMode.SetupCode) { @@ -1290,13 +1312,9 @@ private fun StepShell( title: String, content: @Composable ColumnScope.() -> Unit, ) { - Column(verticalArrangement = Arrangement.spacedBy(0.dp)) { - HorizontalDivider(color = onboardingBorder) - Column(modifier = Modifier.padding(vertical = 14.dp), verticalArrangement = Arrangement.spacedBy(12.dp)) { - Text(title, style = onboardingTitle1Style, color = onboardingText) - content() - } - HorizontalDivider(color = onboardingBorder) + Column(modifier = Modifier.padding(vertical = 4.dp), verticalArrangement = Arrangement.spacedBy(12.dp)) { + Text(title, style = onboardingTitle1Style, color = onboardingText) + content() } } @@ -1362,13 +1380,15 @@ private fun PermissionsStep( StepShell(title = "Permissions") { Text( - "Enable only what you need now. You can change everything later in Settings.", + "Enable only what you need. 
You can change these anytime in Settings.", style = onboardingCalloutStyle, color = onboardingTextSecondary, ) + + PermissionSectionHeader("System") PermissionToggleRow( title = "Gateway discovery", - subtitle = if (Build.VERSION.SDK_INT >= 33) "Nearby devices" else "Location (for NSD)", + subtitle = "Find gateways on your local network", checked = enableDiscovery, granted = isPermissionGranted(context, discoveryPermission), onCheckedChange = onDiscoveryChange, @@ -1376,7 +1396,7 @@ private fun PermissionsStep( InlineDivider() PermissionToggleRow( title = "Location", - subtitle = "location.get (while app is open)", + subtitle = "Share device location while app is open", checked = enableLocation, granted = locationGranted, onCheckedChange = onLocationChange, @@ -1385,7 +1405,7 @@ private fun PermissionsStep( if (Build.VERSION.SDK_INT >= 33) { PermissionToggleRow( title = "Notifications", - subtitle = "system.notify and foreground alerts", + subtitle = "Alerts and foreground service notices", checked = enableNotifications, granted = isPermissionGranted(context, Manifest.permission.POST_NOTIFICATIONS), onCheckedChange = onNotificationsChange, @@ -1394,15 +1414,16 @@ private fun PermissionsStep( } PermissionToggleRow( title = "Notification listener", - subtitle = "notifications.list and notifications.actions (opens Android Settings)", + subtitle = "Read and act on your notifications", checked = enableNotificationListener, granted = notificationListenerGranted, onCheckedChange = onNotificationListenerChange, ) - InlineDivider() + + PermissionSectionHeader("Media") PermissionToggleRow( title = "Microphone", - subtitle = "Foreground Voice tab transcription", + subtitle = "Voice transcription in the Voice tab", checked = enableMicrophone, granted = isPermissionGranted(context, Manifest.permission.RECORD_AUDIO), onCheckedChange = onMicrophoneChange, @@ -1410,7 +1431,7 @@ private fun PermissionsStep( InlineDivider() PermissionToggleRow( title = "Camera", - subtitle = 
"camera.snap and camera.clip", + subtitle = "Take photos and short video clips", checked = enableCamera, granted = isPermissionGranted(context, Manifest.permission.CAMERA), onCheckedChange = onCameraChange, @@ -1418,15 +1439,16 @@ private fun PermissionsStep( InlineDivider() PermissionToggleRow( title = "Photos", - subtitle = "photos.latest", + subtitle = "Access your recent photos", checked = enablePhotos, granted = isPermissionGranted(context, photosPermission), onCheckedChange = onPhotosChange, ) - InlineDivider() + + PermissionSectionHeader("Personal Data") PermissionToggleRow( title = "Contacts", - subtitle = "contacts.search and contacts.add", + subtitle = "Search and add contacts", checked = enableContacts, granted = contactsGranted, onCheckedChange = onContactsChange, @@ -1434,7 +1456,7 @@ private fun PermissionsStep( InlineDivider() PermissionToggleRow( title = "Calendar", - subtitle = "calendar.events and calendar.add", + subtitle = "Read and create calendar events", checked = enableCalendar, granted = calendarGranted, onCheckedChange = onCalendarChange, @@ -1442,7 +1464,7 @@ private fun PermissionsStep( InlineDivider() PermissionToggleRow( title = "Motion", - subtitle = "motion.activity and motion.pedometer", + subtitle = "Activity and step tracking", checked = enableMotion, granted = motionGranted, onCheckedChange = onMotionChange, @@ -1453,16 +1475,25 @@ private fun PermissionsStep( InlineDivider() PermissionToggleRow( title = "SMS", - subtitle = "Allow gateway-triggered SMS sending", + subtitle = "Send text messages via the gateway", checked = enableSms, granted = isPermissionGranted(context, Manifest.permission.SEND_SMS), onCheckedChange = onSmsChange, ) } - Text("All settings can be changed later in Settings.", style = onboardingCalloutStyle, color = onboardingTextSecondary) } } +@Composable +private fun PermissionSectionHeader(title: String) { + Text( + title.uppercase(), + style = onboardingCaption1Style.copy(fontWeight = FontWeight.Bold, 
letterSpacing = 1.2.sp), + color = onboardingAccent, + modifier = Modifier.padding(top = 8.dp), + ) +} + @Composable private fun PermissionToggleRow( title: String, @@ -1473,6 +1504,12 @@ private fun PermissionToggleRow( statusOverride: String? = null, onCheckedChange: (Boolean) -> Unit, ) { + val statusText = statusOverride ?: if (granted) "Granted" else "Not granted" + val statusColor = when { + statusOverride != null -> onboardingTextTertiary + granted -> onboardingSuccess + else -> onboardingWarning + } Row( modifier = Modifier.fillMaxWidth().heightIn(min = 50.dp), verticalAlignment = Alignment.CenterVertically, @@ -1481,11 +1518,7 @@ private fun PermissionToggleRow( Column(modifier = Modifier.weight(1f), verticalArrangement = Arrangement.spacedBy(2.dp)) { Text(title, style = onboardingHeadlineStyle, color = onboardingText) Text(subtitle, style = onboardingCalloutStyle.copy(lineHeight = 18.sp), color = onboardingTextSecondary) - Text( - statusOverride ?: if (granted) "Granted" else "Not granted", - style = onboardingCaption1Style, - color = if (granted) onboardingSuccess else onboardingTextSecondary, - ) + Text(statusText, style = onboardingCaption1Style, color = statusColor) } Switch( checked = checked, @@ -1513,20 +1546,131 @@ private fun FinalStep( enabledPermissions: String, methodLabel: String, ) { - StepShell(title = "Review") { - SummaryField(label = "Method", value = methodLabel) - SummaryField(label = "Gateway", value = parsedGateway?.displayUrl ?: "Invalid gateway URL") - SummaryField(label = "Enabled Permissions", value = enabledPermissions) + Column(verticalArrangement = Arrangement.spacedBy(10.dp)) { + Text("Review", style = onboardingTitle1Style, color = onboardingText) + + SummaryCard( + icon = Icons.Default.Link, + label = "Method", + value = methodLabel, + accentColor = onboardingAccent, + ) + SummaryCard( + icon = Icons.Default.Cloud, + label = "Gateway", + value = parsedGateway?.displayUrl ?: "Invalid gateway URL", + accentColor = 
Color(0xFF7C5AC7), + ) + SummaryCard( + icon = Icons.Default.Security, + label = "Permissions", + value = enabledPermissions, + accentColor = onboardingSuccess, + ) if (!attemptedConnect) { - Text("Press Connect to verify gateway reachability and auth.", style = onboardingCalloutStyle, color = onboardingTextSecondary) + Surface( + modifier = Modifier.fillMaxWidth(), + shape = RoundedCornerShape(14.dp), + color = onboardingAccentSoft, + border = androidx.compose.foundation.BorderStroke(1.dp, onboardingAccent.copy(alpha = 0.2f)), + ) { + Row( + modifier = Modifier.padding(14.dp), + horizontalArrangement = Arrangement.spacedBy(12.dp), + verticalAlignment = Alignment.CenterVertically, + ) { + Box( + modifier = + Modifier + .size(42.dp) + .background(onboardingAccent.copy(alpha = 0.1f), RoundedCornerShape(11.dp)), + contentAlignment = Alignment.Center, + ) { + Icon( + imageVector = Icons.Default.Wifi, + contentDescription = null, + tint = onboardingAccent, + modifier = Modifier.size(22.dp), + ) + } + Text( + "Tap Connect to verify your gateway is reachable.", + style = onboardingCalloutStyle, + color = onboardingAccent, + ) + } + } + } else if (isConnected) { + Surface( + modifier = Modifier.fillMaxWidth(), + shape = RoundedCornerShape(14.dp), + color = Color(0xFFEEF9F3), + border = androidx.compose.foundation.BorderStroke(1.dp, onboardingSuccess.copy(alpha = 0.2f)), + ) { + Row( + modifier = Modifier.padding(14.dp), + horizontalArrangement = Arrangement.spacedBy(12.dp), + verticalAlignment = Alignment.CenterVertically, + ) { + Box( + modifier = + Modifier + .size(42.dp) + .background(onboardingSuccess.copy(alpha = 0.1f), RoundedCornerShape(11.dp)), + contentAlignment = Alignment.Center, + ) { + Icon( + imageVector = Icons.Default.CheckCircle, + contentDescription = null, + tint = onboardingSuccess, + modifier = Modifier.size(22.dp), + ) + } + Column(verticalArrangement = Arrangement.spacedBy(2.dp)) { + Text("Connected", style = onboardingHeadlineStyle, color = 
onboardingSuccess) + Text( + serverName ?: remoteAddress ?: "gateway", + style = onboardingCalloutStyle, + color = onboardingSuccess.copy(alpha = 0.8f), + ) + } + } + } } else { - Text("Status: $statusText", style = onboardingCalloutStyle, color = if (isConnected) onboardingSuccess else onboardingTextSecondary) - if (isConnected) { - Text("Connected to ${serverName ?: remoteAddress ?: "gateway"}", style = onboardingCalloutStyle, color = onboardingSuccess) - } else { - GuideBlock(title = "Pairing Required") { - Text("Run these on the gateway host:", style = onboardingCalloutStyle, color = onboardingTextSecondary) + Surface( + modifier = Modifier.fillMaxWidth(), + shape = RoundedCornerShape(14.dp), + color = Color(0xFFFFF8EC), + border = androidx.compose.foundation.BorderStroke(1.dp, onboardingWarning.copy(alpha = 0.2f)), + ) { + Column( + modifier = Modifier.padding(14.dp), + verticalArrangement = Arrangement.spacedBy(10.dp), + ) { + Row( + horizontalArrangement = Arrangement.spacedBy(12.dp), + verticalAlignment = Alignment.CenterVertically, + ) { + Box( + modifier = + Modifier + .size(42.dp) + .background(onboardingWarning.copy(alpha = 0.1f), RoundedCornerShape(11.dp)), + contentAlignment = Alignment.Center, + ) { + Icon( + imageVector = Icons.Default.Link, + contentDescription = null, + tint = onboardingWarning, + modifier = Modifier.size(22.dp), + ) + } + Column(verticalArrangement = Arrangement.spacedBy(2.dp)) { + Text("Pairing Required", style = onboardingHeadlineStyle, color = onboardingWarning) + Text("Run these on your gateway host:", style = onboardingCalloutStyle, color = onboardingTextSecondary) + } + } CommandBlock("openclaw devices list") CommandBlock("openclaw devices approve ") Text("Then tap Connect again.", style = onboardingCalloutStyle, color = onboardingTextSecondary) @@ -1537,15 +1681,46 @@ private fun FinalStep( } @Composable -private fun SummaryField(label: String, value: String) { - Column(verticalArrangement = Arrangement.spacedBy(4.dp)) { - 
Text( - label, - style = onboardingCaption2Style.copy(fontWeight = FontWeight.SemiBold, letterSpacing = 0.6.sp), - color = onboardingTextSecondary, - ) - Text(value, style = onboardingHeadlineStyle, color = onboardingText) - HorizontalDivider(color = onboardingBorder) +private fun SummaryCard( + icon: ImageVector, + label: String, + value: String, + accentColor: Color, +) { + Surface( + modifier = Modifier.fillMaxWidth(), + shape = RoundedCornerShape(14.dp), + color = onboardingSurface, + border = androidx.compose.foundation.BorderStroke(1.dp, onboardingBorder), + ) { + Row( + modifier = Modifier.padding(14.dp), + horizontalArrangement = Arrangement.spacedBy(14.dp), + verticalAlignment = Alignment.Top, + ) { + Box( + modifier = + Modifier + .size(42.dp) + .background(accentColor.copy(alpha = 0.1f), RoundedCornerShape(11.dp)), + contentAlignment = Alignment.Center, + ) { + Icon( + imageVector = icon, + contentDescription = null, + tint = accentColor, + modifier = Modifier.size(22.dp), + ) + } + Column(modifier = Modifier.weight(1f), verticalArrangement = Arrangement.spacedBy(2.dp)) { + Text( + label.uppercase(), + style = onboardingCaption1Style.copy(fontWeight = FontWeight.Bold, letterSpacing = 0.8.sp), + color = onboardingTextSecondary, + ) + Text(value, style = onboardingHeadlineStyle, color = onboardingText) + } + } } } @@ -1555,10 +1730,12 @@ private fun CommandBlock(command: String) { modifier = Modifier .fillMaxWidth() - .background(onboardingCommandBg, RoundedCornerShape(12.dp)) + .height(IntrinsicSize.Min) + .clip(RoundedCornerShape(12.dp)) + .background(onboardingCommandBg) .border(width = 1.dp, color = onboardingCommandBorder, shape = RoundedCornerShape(12.dp)), ) { - Box(modifier = Modifier.width(3.dp).height(42.dp).background(onboardingCommandAccent)) + Box(modifier = Modifier.width(3.dp).fillMaxHeight().background(onboardingCommandAccent)) Text( command, modifier = Modifier.padding(horizontal = 12.dp, vertical = 10.dp), @@ -1570,23 +1747,42 @@ private 
fun CommandBlock(command: String) { } @Composable -private fun Bullet(text: String) { - Row(horizontalArrangement = Arrangement.spacedBy(10.dp), verticalAlignment = Alignment.Top) { - Box( - modifier = - Modifier - .padding(top = 7.dp) - .size(8.dp) - .background(onboardingAccentSoft, CircleShape), - ) - Box( - modifier = - Modifier - .padding(top = 9.dp) - .size(4.dp) - .background(onboardingAccent, CircleShape), - ) - Text(text, style = onboardingBodyStyle, color = onboardingTextSecondary, modifier = Modifier.weight(1f)) +private fun FeatureCard( + icon: ImageVector, + title: String, + subtitle: String, + accentColor: Color, +) { + Surface( + modifier = Modifier.fillMaxWidth(), + shape = RoundedCornerShape(14.dp), + color = onboardingSurface, + border = androidx.compose.foundation.BorderStroke(1.dp, onboardingBorder), + ) { + Row( + modifier = Modifier.padding(14.dp), + horizontalArrangement = Arrangement.spacedBy(14.dp), + verticalAlignment = Alignment.CenterVertically, + ) { + Box( + modifier = + Modifier + .size(42.dp) + .background(accentColor.copy(alpha = 0.1f), RoundedCornerShape(11.dp)), + contentAlignment = Alignment.Center, + ) { + Icon( + imageVector = icon, + contentDescription = null, + tint = accentColor, + modifier = Modifier.size(22.dp), + ) + } + Column(verticalArrangement = Arrangement.spacedBy(2.dp)) { + Text(title, style = onboardingHeadlineStyle, color = onboardingText) + Text(subtitle, style = onboardingCalloutStyle, color = onboardingTextSecondary) + } + } } } @@ -1594,6 +1790,10 @@ private fun isPermissionGranted(context: Context, permission: String): Boolean { return ContextCompat.checkSelfPermission(context, permission) == PackageManager.PERMISSION_GRANTED } +private fun qrScannerErrorMessage(): String { + return "Google Code Scanner could not start. Update Google Play services or use the setup code manually." 
+} + private fun isNotificationListenerEnabled(context: Context): Boolean { return DeviceNotificationListenerService.isAccessEnabled(context) } diff --git a/apps/android/app/src/main/java/ai/openclaw/app/ui/PostOnboardingTabs.kt b/apps/android/app/src/main/java/ai/openclaw/app/ui/PostOnboardingTabs.kt index 0642f9b3a7e..c3a14fe5a54 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/ui/PostOnboardingTabs.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/ui/PostOnboardingTabs.kt @@ -134,43 +134,14 @@ fun PostOnboardingTabs(viewModel: MainViewModel, modifier: Modifier = Modifier) @Composable private fun ScreenTabScreen(viewModel: MainViewModel) { val isConnected by viewModel.isConnected.collectAsState() - val isNodeConnected by viewModel.isNodeConnected.collectAsState() - val canvasUrl by viewModel.canvasCurrentUrl.collectAsState() - val canvasA2uiHydrated by viewModel.canvasA2uiHydrated.collectAsState() - val canvasRehydratePending by viewModel.canvasRehydratePending.collectAsState() - val canvasRehydrateErrorText by viewModel.canvasRehydrateErrorText.collectAsState() - val isA2uiUrl = canvasUrl?.contains("/__openclaw__/a2ui/") == true - val showRestoreCta = isConnected && isNodeConnected && (canvasUrl.isNullOrBlank() || (isA2uiUrl && !canvasA2uiHydrated)) - val restoreCtaText = - when { - canvasRehydratePending -> "Restore requested. Waiting for agent…" - !canvasRehydrateErrorText.isNullOrBlank() -> canvasRehydrateErrorText!! - else -> "Canvas reset. Tap to restore dashboard." 
+ LaunchedEffect(isConnected) { + if (isConnected) { + viewModel.refreshHomeCanvasOverviewIfConnected() } + } Box(modifier = Modifier.fillMaxSize()) { CanvasScreen(viewModel = viewModel, modifier = Modifier.fillMaxSize()) - - if (showRestoreCta) { - Surface( - onClick = { - if (canvasRehydratePending) return@Surface - viewModel.requestCanvasRehydrate(source = "screen_tab_cta") - }, - modifier = Modifier.align(Alignment.TopCenter).padding(horizontal = 16.dp, vertical = 16.dp), - shape = RoundedCornerShape(12.dp), - color = mobileSurface.copy(alpha = 0.9f), - border = BorderStroke(1.dp, mobileBorder), - shadowElevation = 4.dp, - ) { - Text( - text = restoreCtaText, - modifier = Modifier.padding(horizontal = 12.dp, vertical = 10.dp), - style = mobileCallout.copy(fontWeight = FontWeight.Medium), - color = mobileText, - ) - } - } } } diff --git a/apps/android/app/src/main/java/ai/openclaw/app/ui/SettingsSheet.kt b/apps/android/app/src/main/java/ai/openclaw/app/ui/SettingsSheet.kt index a3f7868fa90..c7cdf8289ff 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/ui/SettingsSheet.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/ui/SettingsSheet.kt @@ -345,179 +345,90 @@ fun SettingsSheet(viewModel: MainViewModel) { contentPadding = PaddingValues(horizontal = 20.dp, vertical = 16.dp), verticalArrangement = Arrangement.spacedBy(8.dp), ) { - item { - Column(verticalArrangement = Arrangement.spacedBy(6.dp)) { - Text( - "SETTINGS", - style = mobileCaption1.copy(fontWeight = FontWeight.Bold, letterSpacing = 1.sp), - color = mobileAccent, - ) - Text("Device Configuration", style = mobileTitle2, color = mobileText) - Text( - "Manage capabilities, permissions, and diagnostics.", - style = mobileCallout, - color = mobileTextSecondary, - ) - } - } - item { HorizontalDivider(color = mobileBorder) } - - // Order parity: Node → Voice → Camera → Messaging → Location → Screen. 
+ // ── Node ── item { Text( - "NODE", - style = mobileCaption1.copy(fontWeight = FontWeight.Bold, letterSpacing = 1.sp), - color = mobileAccent, - ) - } - item { - OutlinedTextField( - value = displayName, - onValueChange = viewModel::setDisplayName, - label = { Text("Name", style = mobileCaption1, color = mobileTextSecondary) }, - modifier = Modifier.fillMaxWidth(), - textStyle = mobileBody.copy(color = mobileText), - colors = settingsTextFieldColors(), - ) - } - item { Text("Instance ID: $instanceId", style = mobileCallout.copy(fontFamily = FontFamily.Monospace), color = mobileTextSecondary) } - item { Text("Device: $deviceModel", style = mobileCallout, color = mobileTextSecondary) } - item { Text("Version: $appVersion", style = mobileCallout, color = mobileTextSecondary) } - - item { HorizontalDivider(color = mobileBorder) } - - // Voice - item { - Text( - "VOICE", + "DEVICE", style = mobileCaption1.copy(fontWeight = FontWeight.Bold, letterSpacing = 1.sp), color = mobileAccent, ) } item { - ListItem( - modifier = Modifier.settingsRowModifier(), - colors = listItemColors, - headlineContent = { Text("Microphone permission", style = mobileHeadline) }, - supportingContent = { + Column(modifier = Modifier.settingsRowModifier()) { + OutlinedTextField( + value = displayName, + onValueChange = viewModel::setDisplayName, + label = { Text("Name", style = mobileCaption1, color = mobileTextSecondary) }, + modifier = Modifier.fillMaxWidth().padding(horizontal = 14.dp, vertical = 10.dp), + textStyle = mobileBody.copy(color = mobileText), + colors = settingsTextFieldColors(), + ) + HorizontalDivider(color = mobileBorder) + Column( + modifier = Modifier.padding(horizontal = 14.dp, vertical = 10.dp), + verticalArrangement = Arrangement.spacedBy(2.dp), + ) { + Text("$deviceModel · $appVersion", style = mobileCallout, color = mobileTextSecondary) Text( - if (micPermissionGranted) { - "Granted. Use the Voice tab mic button to capture transcript while the app is open." 
- } else { - "Required for foreground Voice tab transcription." - }, - style = mobileCallout, + instanceId.take(8) + "…", + style = mobileCaption1.copy(fontFamily = FontFamily.Monospace), + color = mobileTextTertiary, ) - }, - trailingContent = { - Button( - onClick = { - if (micPermissionGranted) { - openAppSettings(context) - } else { - audioPermissionLauncher.launch(Manifest.permission.RECORD_AUDIO) - } - }, - colors = settingsPrimaryButtonColors(), - shape = RoundedCornerShape(14.dp), - ) { - Text( - if (micPermissionGranted) "Manage" else "Grant", - style = mobileCallout.copy(fontWeight = FontWeight.Bold), - ) - } - }, - ) - } - item { - Text( - "Voice wake and talk modes were removed. Voice now uses one mic on/off flow in the Voice tab while the app is open.", - style = mobileCallout, - color = mobileTextSecondary, - ) - } - - item { HorizontalDivider(color = mobileBorder) } - - // Camera - item { - Text( - "CAMERA", - style = mobileCaption1.copy(fontWeight = FontWeight.Bold, letterSpacing = 1.sp), - color = mobileAccent, - ) - } - item { - ListItem( - modifier = Modifier.settingsRowModifier(), - colors = listItemColors, - headlineContent = { Text("Allow Camera", style = mobileHeadline) }, - supportingContent = { Text("Allows the gateway to request photos or short video clips (foreground only).", style = mobileCallout) }, - trailingContent = { Switch(checked = cameraEnabled, onCheckedChange = ::setCameraEnabledChecked) }, - ) - } - item { - Text( - "Tip: grant Microphone permission for video clips with audio.", - style = mobileCallout, - color = mobileTextSecondary, - ) - } - - item { HorizontalDivider(color = mobileBorder) } - - // Messaging - item { - Text( - "MESSAGING", - style = mobileCaption1.copy(fontWeight = FontWeight.Bold, letterSpacing = 1.sp), - color = mobileAccent, - ) - } - item { - val buttonLabel = - when { - !smsPermissionAvailable -> "Unavailable" - smsPermissionGranted -> "Manage" - else -> "Grant" + } } - ListItem( - modifier = 
Modifier.settingsRowModifier(), - colors = listItemColors, - headlineContent = { Text("SMS Permission", style = mobileHeadline) }, - supportingContent = { - Text( - if (smsPermissionAvailable) { - "Allow the gateway to send SMS from this device." - } else { - "SMS requires a device with telephony hardware." + } + + // ── Media ── + item { + Text( + "MEDIA", + style = mobileCaption1.copy(fontWeight = FontWeight.Bold, letterSpacing = 1.sp), + color = mobileAccent, + ) + } + item { + Column(modifier = Modifier.settingsRowModifier()) { + ListItem( + modifier = Modifier.fillMaxWidth(), + colors = listItemColors, + headlineContent = { Text("Microphone", style = mobileHeadline) }, + supportingContent = { + Text( + if (micPermissionGranted) "Granted" else "Required for voice transcription.", + style = mobileCallout, + ) }, - style = mobileCallout, - ) - }, - trailingContent = { - Button( - onClick = { - if (!smsPermissionAvailable) return@Button - if (smsPermissionGranted) { - openAppSettings(context) - } else { - smsPermissionLauncher.launch(Manifest.permission.SEND_SMS) + trailingContent = { + Button( + onClick = { + if (micPermissionGranted) { + openAppSettings(context) + } else { + audioPermissionLauncher.launch(Manifest.permission.RECORD_AUDIO) + } + }, + colors = settingsPrimaryButtonColors(), + shape = RoundedCornerShape(14.dp), + ) { + Text( + if (micPermissionGranted) "Manage" else "Grant", + style = mobileCallout.copy(fontWeight = FontWeight.Bold), + ) } }, - enabled = smsPermissionAvailable, - colors = settingsPrimaryButtonColors(), - shape = RoundedCornerShape(14.dp), - ) { - Text(buttonLabel, style = mobileCallout.copy(fontWeight = FontWeight.Bold)) - } - }, - ) - } + ) + HorizontalDivider(color = mobileBorder) + ListItem( + modifier = Modifier.fillMaxWidth(), + colors = listItemColors, + headlineContent = { Text("Camera", style = mobileHeadline) }, + supportingContent = { Text("Photos and video clips (foreground only).", style = mobileCallout) }, + 
trailingContent = { Switch(checked = cameraEnabled, onCheckedChange = ::setCameraEnabledChecked) }, + ) + } + } - item { HorizontalDivider(color = mobileBorder) } - - // Notifications + // ── Notifications & Messaging ── item { Text( "NOTIFICATIONS", @@ -526,67 +437,87 @@ fun SettingsSheet(viewModel: MainViewModel) { ) } item { - val buttonLabel = - if (notificationsPermissionGranted) { - "Manage" - } else { - "Grant" - } - ListItem( - modifier = Modifier.settingsRowModifier(), - colors = listItemColors, - headlineContent = { Text("System Notifications", style = mobileHeadline) }, - supportingContent = { - Text( - "Required for `system.notify` and Android foreground service alerts.", - style = mobileCallout, - ) - }, - trailingContent = { - Button( - onClick = { - if (notificationsPermissionGranted || Build.VERSION.SDK_INT < 33) { - openAppSettings(context) - } else { - notificationsPermissionLauncher.launch(Manifest.permission.POST_NOTIFICATIONS) + Column(modifier = Modifier.settingsRowModifier()) { + ListItem( + modifier = Modifier.fillMaxWidth(), + colors = listItemColors, + headlineContent = { Text("System Notifications", style = mobileHeadline) }, + supportingContent = { + Text("Alerts and foreground service.", style = mobileCallout) + }, + trailingContent = { + Button( + onClick = { + if (notificationsPermissionGranted || Build.VERSION.SDK_INT < 33) { + openAppSettings(context) + } else { + notificationsPermissionLauncher.launch(Manifest.permission.POST_NOTIFICATIONS) + } + }, + colors = settingsPrimaryButtonColors(), + shape = RoundedCornerShape(14.dp), + ) { + Text( + if (notificationsPermissionGranted) "Manage" else "Grant", + style = mobileCallout.copy(fontWeight = FontWeight.Bold), + ) + } + }, + ) + HorizontalDivider(color = mobileBorder) + ListItem( + modifier = Modifier.fillMaxWidth(), + colors = listItemColors, + headlineContent = { Text("Notification Listener", style = mobileHeadline) }, + supportingContent = { + Text("Read and interact with 
notifications.", style = mobileCallout) + }, + trailingContent = { + Button( + onClick = { openNotificationListenerSettings(context) }, + colors = settingsPrimaryButtonColors(), + shape = RoundedCornerShape(14.dp), + ) { + Text( + if (notificationListenerEnabled) "Manage" else "Enable", + style = mobileCallout.copy(fontWeight = FontWeight.Bold), + ) + } + }, + ) + if (smsPermissionAvailable) { + HorizontalDivider(color = mobileBorder) + ListItem( + modifier = Modifier.fillMaxWidth(), + colors = listItemColors, + headlineContent = { Text("SMS", style = mobileHeadline) }, + supportingContent = { + Text("Send SMS from this device.", style = mobileCallout) + }, + trailingContent = { + Button( + onClick = { + if (smsPermissionGranted) { + openAppSettings(context) + } else { + smsPermissionLauncher.launch(Manifest.permission.SEND_SMS) + } + }, + colors = settingsPrimaryButtonColors(), + shape = RoundedCornerShape(14.dp), + ) { + Text( + if (smsPermissionGranted) "Manage" else "Grant", + style = mobileCallout.copy(fontWeight = FontWeight.Bold), + ) } }, - colors = settingsPrimaryButtonColors(), - shape = RoundedCornerShape(14.dp), - ) { - Text(buttonLabel, style = mobileCallout.copy(fontWeight = FontWeight.Bold)) - } - }, - ) - } - item { - ListItem( - modifier = Modifier.settingsRowModifier(), - colors = listItemColors, - headlineContent = { Text("Notification Listener Access", style = mobileHeadline) }, - supportingContent = { - Text( - "Required for `notifications.list` and `notifications.actions`.", - style = mobileCallout, ) - }, - trailingContent = { - Button( - onClick = { openNotificationListenerSettings(context) }, - colors = settingsPrimaryButtonColors(), - shape = RoundedCornerShape(14.dp), - ) { - Text( - if (notificationListenerEnabled) "Manage" else "Enable", - style = mobileCallout.copy(fontWeight = FontWeight.Bold), - ) - } - }, - ) + } + } } - item { HorizontalDivider(color = mobileBorder) } - // Data access + // ── Data Access ── item { Text( "DATA 
ACCESS", @@ -595,142 +526,115 @@ fun SettingsSheet(viewModel: MainViewModel) { ) } item { - ListItem( - modifier = Modifier.settingsRowModifier(), - colors = listItemColors, - headlineContent = { Text("Photos Permission", style = mobileHeadline) }, - supportingContent = { - Text( - "Required for `photos.latest`.", - style = mobileCallout, - ) - }, - trailingContent = { - Button( - onClick = { - if (photosPermissionGranted) { - openAppSettings(context) - } else { - photosPermissionLauncher.launch(photosPermission) + Column(modifier = Modifier.settingsRowModifier()) { + ListItem( + modifier = Modifier.fillMaxWidth(), + colors = listItemColors, + headlineContent = { Text("Photos", style = mobileHeadline) }, + supportingContent = { Text("Access recent photos.", style = mobileCallout) }, + trailingContent = { + Button( + onClick = { + if (photosPermissionGranted) { + openAppSettings(context) + } else { + photosPermissionLauncher.launch(photosPermission) + } + }, + colors = settingsPrimaryButtonColors(), + shape = RoundedCornerShape(14.dp), + ) { + Text( + if (photosPermissionGranted) "Manage" else "Grant", + style = mobileCallout.copy(fontWeight = FontWeight.Bold), + ) + } + }, + ) + HorizontalDivider(color = mobileBorder) + ListItem( + modifier = Modifier.fillMaxWidth(), + colors = listItemColors, + headlineContent = { Text("Contacts", style = mobileHeadline) }, + supportingContent = { Text("Search and add contacts.", style = mobileCallout) }, + trailingContent = { + Button( + onClick = { + if (contactsPermissionGranted) { + openAppSettings(context) + } else { + contactsPermissionLauncher.launch(arrayOf(Manifest.permission.READ_CONTACTS, Manifest.permission.WRITE_CONTACTS)) + } + }, + colors = settingsPrimaryButtonColors(), + shape = RoundedCornerShape(14.dp), + ) { + Text( + if (contactsPermissionGranted) "Manage" else "Grant", + style = mobileCallout.copy(fontWeight = FontWeight.Bold), + ) + } + }, + ) + HorizontalDivider(color = mobileBorder) + ListItem( + modifier 
= Modifier.fillMaxWidth(), + colors = listItemColors, + headlineContent = { Text("Calendar", style = mobileHeadline) }, + supportingContent = { Text("Read and create events.", style = mobileCallout) }, + trailingContent = { + Button( + onClick = { + if (calendarPermissionGranted) { + openAppSettings(context) + } else { + calendarPermissionLauncher.launch(arrayOf(Manifest.permission.READ_CALENDAR, Manifest.permission.WRITE_CALENDAR)) + } + }, + colors = settingsPrimaryButtonColors(), + shape = RoundedCornerShape(14.dp), + ) { + Text( + if (calendarPermissionGranted) "Manage" else "Grant", + style = mobileCallout.copy(fontWeight = FontWeight.Bold), + ) + } + }, + ) + if (motionAvailable) { + HorizontalDivider(color = mobileBorder) + ListItem( + modifier = Modifier.fillMaxWidth(), + colors = listItemColors, + headlineContent = { Text("Motion", style = mobileHeadline) }, + supportingContent = { Text("Track steps and activity.", style = mobileCallout) }, + trailingContent = { + val motionButtonLabel = + when { + !motionPermissionRequired -> "Manage" + motionPermissionGranted -> "Manage" + else -> "Grant" + } + Button( + onClick = { + if (!motionPermissionRequired || motionPermissionGranted) { + openAppSettings(context) + } else { + motionPermissionLauncher.launch(Manifest.permission.ACTIVITY_RECOGNITION) + } + }, + colors = settingsPrimaryButtonColors(), + shape = RoundedCornerShape(14.dp), + ) { + Text(motionButtonLabel, style = mobileCallout.copy(fontWeight = FontWeight.Bold)) } }, - colors = settingsPrimaryButtonColors(), - shape = RoundedCornerShape(14.dp), - ) { - Text( - if (photosPermissionGranted) "Manage" else "Grant", - style = mobileCallout.copy(fontWeight = FontWeight.Bold), - ) - } - }, - ) - } - item { - ListItem( - modifier = Modifier.settingsRowModifier(), - colors = listItemColors, - headlineContent = { Text("Contacts Permission", style = mobileHeadline) }, - supportingContent = { - Text( - "Required for `contacts.search` and `contacts.add`.", - style = 
mobileCallout, ) - }, - trailingContent = { - Button( - onClick = { - if (contactsPermissionGranted) { - openAppSettings(context) - } else { - contactsPermissionLauncher.launch(arrayOf(Manifest.permission.READ_CONTACTS, Manifest.permission.WRITE_CONTACTS)) - } - }, - colors = settingsPrimaryButtonColors(), - shape = RoundedCornerShape(14.dp), - ) { - Text( - if (contactsPermissionGranted) "Manage" else "Grant", - style = mobileCallout.copy(fontWeight = FontWeight.Bold), - ) - } - }, - ) - } - item { - ListItem( - modifier = Modifier.settingsRowModifier(), - colors = listItemColors, - headlineContent = { Text("Calendar Permission", style = mobileHeadline) }, - supportingContent = { - Text( - "Required for `calendar.events` and `calendar.add`.", - style = mobileCallout, - ) - }, - trailingContent = { - Button( - onClick = { - if (calendarPermissionGranted) { - openAppSettings(context) - } else { - calendarPermissionLauncher.launch(arrayOf(Manifest.permission.READ_CALENDAR, Manifest.permission.WRITE_CALENDAR)) - } - }, - colors = settingsPrimaryButtonColors(), - shape = RoundedCornerShape(14.dp), - ) { - Text( - if (calendarPermissionGranted) "Manage" else "Grant", - style = mobileCallout.copy(fontWeight = FontWeight.Bold), - ) - } - }, - ) - } - item { - val motionButtonLabel = - when { - !motionAvailable -> "Unavailable" - !motionPermissionRequired -> "Manage" - motionPermissionGranted -> "Manage" - else -> "Grant" } - ListItem( - modifier = Modifier.settingsRowModifier(), - colors = listItemColors, - headlineContent = { Text("Motion Permission", style = mobileHeadline) }, - supportingContent = { - Text( - if (!motionAvailable) { - "This device does not expose accelerometer or step-counter motion sensors." - } else { - "Required for `motion.activity` and `motion.pedometer`." 
- }, - style = mobileCallout, - ) - }, - trailingContent = { - Button( - onClick = { - if (!motionAvailable) return@Button - if (!motionPermissionRequired || motionPermissionGranted) { - openAppSettings(context) - } else { - motionPermissionLauncher.launch(Manifest.permission.ACTIVITY_RECOGNITION) - } - }, - enabled = motionAvailable, - colors = settingsPrimaryButtonColors(), - shape = RoundedCornerShape(14.dp), - ) { - Text(motionButtonLabel, style = mobileCallout.copy(fontWeight = FontWeight.Bold)) - } - }, - ) + } } - item { HorizontalDivider(color = mobileBorder) } - // Location + // ── Location ── item { Text( "LOCATION", @@ -739,7 +643,7 @@ fun SettingsSheet(viewModel: MainViewModel) { ) } item { - Column(modifier = Modifier.settingsRowModifier(), verticalArrangement = Arrangement.spacedBy(0.dp)) { + Column(modifier = Modifier.settingsRowModifier()) { ListItem( modifier = Modifier.fillMaxWidth(), colors = listItemColors, @@ -781,50 +685,39 @@ fun SettingsSheet(viewModel: MainViewModel) { ) } } - item { HorizontalDivider(color = mobileBorder) } - // Screen + // ── Preferences ── item { Text( - "SCREEN", + "PREFERENCES", style = mobileCaption1.copy(fontWeight = FontWeight.Bold, letterSpacing = 1.sp), color = mobileAccent, ) } - item { - ListItem( - modifier = Modifier.settingsRowModifier(), - colors = listItemColors, - headlineContent = { Text("Prevent Sleep", style = mobileHeadline) }, - supportingContent = { Text("Keeps the screen awake while OpenClaw is open.", style = mobileCallout) }, - trailingContent = { Switch(checked = preventSleep, onCheckedChange = viewModel::setPreventSleep) }, - ) - } - - item { HorizontalDivider(color = mobileBorder) } - - // Debug item { - Text( - "DEBUG", - style = mobileCaption1.copy(fontWeight = FontWeight.Bold, letterSpacing = 1.sp), - color = mobileAccent, - ) - } - item { - ListItem( - modifier = Modifier.settingsRowModifier(), - colors = listItemColors, - headlineContent = { Text("Debug Canvas Status", style = 
mobileHeadline) }, - supportingContent = { Text("Show status text in the canvas when debug is enabled.", style = mobileCallout) }, - trailingContent = { - Switch( - checked = canvasDebugStatusEnabled, - onCheckedChange = viewModel::setCanvasDebugStatusEnabled, + Column(modifier = Modifier.settingsRowModifier()) { + ListItem( + modifier = Modifier.fillMaxWidth(), + colors = listItemColors, + headlineContent = { Text("Prevent Sleep", style = mobileHeadline) }, + supportingContent = { Text("Keep screen awake while open.", style = mobileCallout) }, + trailingContent = { Switch(checked = preventSleep, onCheckedChange = viewModel::setPreventSleep) }, ) - }, - ) - } + HorizontalDivider(color = mobileBorder) + ListItem( + modifier = Modifier.fillMaxWidth(), + colors = listItemColors, + headlineContent = { Text("Debug Canvas", style = mobileHeadline) }, + supportingContent = { Text("Show status overlay on canvas.", style = mobileCallout) }, + trailingContent = { + Switch( + checked = canvasDebugStatusEnabled, + onCheckedChange = viewModel::setCanvasDebugStatusEnabled, + ) + }, + ) + } + } item { Spacer(modifier = Modifier.height(24.dp)) } } diff --git a/apps/android/app/src/main/java/ai/openclaw/app/ui/VoiceTabScreen.kt b/apps/android/app/src/main/java/ai/openclaw/app/ui/VoiceTabScreen.kt index be66f42bef3..f8e17a17c6b 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/ui/VoiceTabScreen.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/ui/VoiceTabScreen.kt @@ -17,10 +17,12 @@ import androidx.compose.foundation.layout.Box import androidx.compose.foundation.layout.Column import androidx.compose.foundation.layout.PaddingValues import androidx.compose.foundation.layout.Row +import androidx.compose.foundation.layout.Spacer import androidx.compose.foundation.layout.WindowInsets import androidx.compose.foundation.layout.WindowInsetsSides import androidx.compose.foundation.layout.fillMaxSize import androidx.compose.foundation.layout.fillMaxWidth +import 
androidx.compose.foundation.layout.height import androidx.compose.foundation.layout.imePadding import androidx.compose.foundation.layout.only import androidx.compose.foundation.layout.padding @@ -212,19 +214,26 @@ fun VoiceTabScreen(viewModel: MainViewModel) { verticalAlignment = Alignment.CenterVertically, ) { // Speaker toggle - IconButton( - onClick = { viewModel.setSpeakerEnabled(!speakerEnabled) }, - modifier = Modifier.size(48.dp), - colors = - IconButtonDefaults.iconButtonColors( - containerColor = if (speakerEnabled) mobileSurface else mobileDangerSoft, - ), - ) { - Icon( - imageVector = if (speakerEnabled) Icons.AutoMirrored.Filled.VolumeUp else Icons.AutoMirrored.Filled.VolumeOff, - contentDescription = if (speakerEnabled) "Mute speaker" else "Unmute speaker", - modifier = Modifier.size(22.dp), - tint = if (speakerEnabled) mobileTextSecondary else mobileDanger, + Column(horizontalAlignment = Alignment.CenterHorizontally, verticalArrangement = Arrangement.spacedBy(4.dp)) { + IconButton( + onClick = { viewModel.setSpeakerEnabled(!speakerEnabled) }, + modifier = Modifier.size(48.dp), + colors = + IconButtonDefaults.iconButtonColors( + containerColor = if (speakerEnabled) mobileSurface else mobileDangerSoft, + ), + ) { + Icon( + imageVector = if (speakerEnabled) Icons.AutoMirrored.Filled.VolumeUp else Icons.AutoMirrored.Filled.VolumeOff, + contentDescription = if (speakerEnabled) "Mute speaker" else "Unmute speaker", + modifier = Modifier.size(22.dp), + tint = if (speakerEnabled) mobileTextSecondary else mobileDanger, + ) + } + Text( + if (speakerEnabled) "Speaker" else "Muted", + style = mobileCaption2, + color = if (speakerEnabled) mobileTextTertiary else mobileDanger, ) } @@ -278,8 +287,12 @@ fun VoiceTabScreen(viewModel: MainViewModel) { } } - // Invisible spacer to balance the row (same size as speaker button) - Box(modifier = Modifier.size(48.dp)) + // Invisible spacer to balance the row (matches speaker column width) + Column(horizontalAlignment = 
Alignment.CenterHorizontally) { + Box(modifier = Modifier.size(48.dp)) + Spacer(modifier = Modifier.height(4.dp)) + Text("", style = mobileCaption2) + } } // Status + labels @@ -292,11 +305,24 @@ fun VoiceTabScreen(viewModel: MainViewModel) { micEnabled -> "Listening" else -> "Mic off" } - Text( - "$gatewayStatus · $stateText", - style = mobileCaption1, - color = mobileTextSecondary, - ) + val stateColor = + when { + micEnabled -> mobileSuccess + micIsSending -> mobileAccent + else -> mobileTextSecondary + } + Surface( + shape = RoundedCornerShape(999.dp), + color = if (micEnabled) mobileSuccessSoft else mobileSurface, + border = BorderStroke(1.dp, if (micEnabled) mobileSuccess.copy(alpha = 0.3f) else mobileBorder), + ) { + Text( + "$gatewayStatus · $stateText", + style = mobileCallout.copy(fontWeight = FontWeight.SemiBold), + color = stateColor, + modifier = Modifier.padding(horizontal = 14.dp, vertical = 6.dp), + ) + } if (!hasMicPermission) { val showRationale = diff --git a/apps/android/app/src/main/java/ai/openclaw/app/ui/chat/ChatComposer.kt b/apps/android/app/src/main/java/ai/openclaw/app/ui/chat/ChatComposer.kt index 9601febfa31..25fafe95073 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/ui/chat/ChatComposer.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/ui/chat/ChatComposer.kt @@ -26,7 +26,6 @@ import androidx.compose.material3.ButtonDefaults import androidx.compose.material3.CircularProgressIndicator import androidx.compose.material3.DropdownMenu import androidx.compose.material3.DropdownMenuItem -import androidx.compose.material3.HorizontalDivider import androidx.compose.material3.Icon import androidx.compose.material3.MaterialTheme import androidx.compose.material3.OutlinedTextField @@ -78,65 +77,15 @@ fun ChatComposer( val sendBusy = pendingRunCount > 0 Column(modifier = Modifier.fillMaxWidth(), verticalArrangement = Arrangement.spacedBy(8.dp)) { - Row( - modifier = Modifier.fillMaxWidth(), - verticalAlignment = 
Alignment.CenterVertically, - horizontalArrangement = Arrangement.spacedBy(8.dp), - ) { - Box(modifier = Modifier.weight(1f)) { - Surface( - onClick = { showThinkingMenu = true }, - shape = RoundedCornerShape(14.dp), - color = mobileAccentSoft, - border = BorderStroke(1.dp, mobileBorderStrong), - ) { - Row( - modifier = Modifier.fillMaxWidth().padding(horizontal = 12.dp, vertical = 8.dp), - verticalAlignment = Alignment.CenterVertically, - horizontalArrangement = Arrangement.SpaceBetween, - ) { - Text( - text = "Thinking: ${thinkingLabel(thinkingLevel)}", - style = mobileCaption1.copy(fontWeight = FontWeight.SemiBold), - color = mobileText, - ) - Icon(Icons.Default.ArrowDropDown, contentDescription = "Select thinking level", tint = mobileTextSecondary) - } - } - - DropdownMenu(expanded = showThinkingMenu, onDismissRequest = { showThinkingMenu = false }) { - ThinkingMenuItem("off", thinkingLevel, onSetThinkingLevel) { showThinkingMenu = false } - ThinkingMenuItem("low", thinkingLevel, onSetThinkingLevel) { showThinkingMenu = false } - ThinkingMenuItem("medium", thinkingLevel, onSetThinkingLevel) { showThinkingMenu = false } - ThinkingMenuItem("high", thinkingLevel, onSetThinkingLevel) { showThinkingMenu = false } - } - } - - SecondaryActionButton( - label = "Attach", - icon = Icons.Default.AttachFile, - enabled = true, - onClick = onPickImages, - ) - } - if (attachments.isNotEmpty()) { AttachmentsStrip(attachments = attachments, onRemoveAttachment = onRemoveAttachment) } - HorizontalDivider(color = mobileBorder) - - Text( - text = "MESSAGE", - style = mobileCaption1.copy(fontWeight = FontWeight.Bold, letterSpacing = 0.9.sp), - color = mobileTextSecondary, - ) - OutlinedTextField( value = input, onValueChange = { input = it }, - modifier = Modifier.fillMaxWidth().height(92.dp), - placeholder = { Text("Type a message", style = mobileBodyStyle(), color = mobileTextTertiary) }, + modifier = Modifier.fillMaxWidth(), + placeholder = { Text("Type a message…", style = 
mobileBodyStyle(), color = mobileTextTertiary) }, minLines = 2, maxLines = 5, textStyle = mobileBodyStyle().copy(color = mobileText), @@ -155,26 +104,62 @@ fun ChatComposer( Row( modifier = Modifier.fillMaxWidth(), verticalAlignment = Alignment.CenterVertically, - horizontalArrangement = Arrangement.spacedBy(10.dp), + horizontalArrangement = Arrangement.spacedBy(8.dp), ) { - Row(horizontalArrangement = Arrangement.spacedBy(8.dp)) { - SecondaryActionButton( - label = "Refresh", - icon = Icons.Default.Refresh, - enabled = true, - compact = true, - onClick = onRefresh, - ) + Box { + Surface( + onClick = { showThinkingMenu = true }, + shape = RoundedCornerShape(14.dp), + color = Color.White, + border = BorderStroke(1.dp, mobileBorderStrong), + ) { + Row( + modifier = Modifier.padding(horizontal = 10.dp, vertical = 10.dp), + verticalAlignment = Alignment.CenterVertically, + ) { + Text( + text = thinkingLabel(thinkingLevel), + style = mobileCaption1.copy(fontWeight = FontWeight.SemiBold), + color = mobileTextSecondary, + ) + Icon(Icons.Default.ArrowDropDown, contentDescription = "Select thinking level", modifier = Modifier.size(18.dp), tint = mobileTextTertiary) + } + } - SecondaryActionButton( - label = "Abort", - icon = Icons.Default.Stop, - enabled = pendingRunCount > 0, - compact = true, - onClick = onAbort, - ) + DropdownMenu(expanded = showThinkingMenu, onDismissRequest = { showThinkingMenu = false }) { + ThinkingMenuItem("off", thinkingLevel, onSetThinkingLevel) { showThinkingMenu = false } + ThinkingMenuItem("low", thinkingLevel, onSetThinkingLevel) { showThinkingMenu = false } + ThinkingMenuItem("medium", thinkingLevel, onSetThinkingLevel) { showThinkingMenu = false } + ThinkingMenuItem("high", thinkingLevel, onSetThinkingLevel) { showThinkingMenu = false } + } } + SecondaryActionButton( + label = "Attach", + icon = Icons.Default.AttachFile, + enabled = true, + compact = true, + onClick = onPickImages, + ) + + SecondaryActionButton( + label = "Refresh", + icon = 
Icons.Default.Refresh, + enabled = true, + compact = true, + onClick = onRefresh, + ) + + SecondaryActionButton( + label = "Abort", + icon = Icons.Default.Stop, + enabled = pendingRunCount > 0, + compact = true, + onClick = onAbort, + ) + + Spacer(modifier = Modifier.weight(1f)) + Button( onClick = { val text = input @@ -182,8 +167,9 @@ fun ChatComposer( onSend(text) }, enabled = canSend, - modifier = Modifier.weight(1f).height(48.dp), + modifier = Modifier.height(44.dp), shape = RoundedCornerShape(14.dp), + contentPadding = PaddingValues(horizontal = 20.dp), colors = ButtonDefaults.buttonColors( containerColor = mobileAccent, @@ -198,7 +184,7 @@ fun ChatComposer( } else { Icon(Icons.AutoMirrored.Filled.Send, contentDescription = null, modifier = Modifier.size(16.dp)) } - Spacer(modifier = Modifier.width(8.dp)) + Spacer(modifier = Modifier.width(6.dp)) Text( text = "Send", style = mobileHeadline.copy(fontWeight = FontWeight.Bold), diff --git a/apps/android/app/src/main/java/ai/openclaw/app/ui/chat/ChatMessageViews.kt b/apps/android/app/src/main/java/ai/openclaw/app/ui/chat/ChatMessageViews.kt index 9d08352a3f0..f61195f43fb 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/ui/chat/ChatMessageViews.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/ui/chat/ChatMessageViews.kt @@ -151,7 +151,7 @@ fun ChatPendingToolsBubble(toolCalls: List) { ChatBubbleContainer( style = bubbleStyle("assistant"), - roleLabel = "TOOLS", + roleLabel = "Tools", ) { Column(verticalArrangement = Arrangement.spacedBy(4.dp)) { Text("Running tools...", style = mobileCaption1.copy(fontWeight = FontWeight.SemiBold), color = mobileTextSecondary) @@ -188,7 +188,7 @@ fun ChatPendingToolsBubble(toolCalls: List) { fun ChatStreamingAssistantBubble(text: String) { ChatBubbleContainer( style = bubbleStyle("assistant").copy(borderColor = mobileAccent), - roleLabel = "ASSISTANT · LIVE", + roleLabel = "OpenClaw · Live", ) { ChatMarkdown(text = text, textColor = mobileText) } @@ -224,9 +224,9 
@@ private fun bubbleStyle(role: String): ChatBubbleStyle { private fun roleLabel(role: String): String { return when (role) { - "user" -> "USER" - "system" -> "SYSTEM" - else -> "ASSISTANT" + "user" -> "You" + "system" -> "System" + else -> "OpenClaw" } } diff --git a/apps/android/app/src/main/java/ai/openclaw/app/ui/chat/ChatSheetContent.kt b/apps/android/app/src/main/java/ai/openclaw/app/ui/chat/ChatSheetContent.kt index 2c09f4488b0..e20b57ac3f5 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/ui/chat/ChatSheetContent.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/ui/chat/ChatSheetContent.kt @@ -42,12 +42,8 @@ import ai.openclaw.app.ui.mobileCallout import ai.openclaw.app.ui.mobileCaption1 import ai.openclaw.app.ui.mobileCaption2 import ai.openclaw.app.ui.mobileDanger -import ai.openclaw.app.ui.mobileSuccess -import ai.openclaw.app.ui.mobileSuccessSoft import ai.openclaw.app.ui.mobileText import ai.openclaw.app.ui.mobileTextSecondary -import ai.openclaw.app.ui.mobileWarning -import ai.openclaw.app.ui.mobileWarningSoft import java.io.ByteArrayOutputStream import kotlinx.coroutines.Dispatchers import kotlinx.coroutines.launch @@ -106,7 +102,6 @@ fun ChatSheetContent(viewModel: MainViewModel) { sessionKey = sessionKey, sessions = sessions, mainSessionKey = mainSessionKey, - healthOk = healthOk, onSelectSession = { key -> viewModel.switchChatSession(key) }, ) @@ -160,77 +155,34 @@ private fun ChatThreadSelector( sessionKey: String, sessions: List, mainSessionKey: String, - healthOk: Boolean, onSelectSession: (String) -> Unit, ) { val sessionOptions = resolveSessionChoices(sessionKey, sessions, mainSessionKey = mainSessionKey) - val currentSessionLabel = - friendlySessionName(sessionOptions.firstOrNull { it.key == sessionKey }?.displayName ?: sessionKey) - Column(modifier = Modifier.fillMaxWidth(), verticalArrangement = Arrangement.spacedBy(8.dp)) { - Row( - modifier = Modifier.fillMaxWidth(), - horizontalArrangement = Arrangement.SpaceBetween, - 
verticalAlignment = androidx.compose.ui.Alignment.CenterVertically, - ) { - Text( - text = "SESSION", - style = mobileCaption1.copy(fontWeight = FontWeight.Bold, letterSpacing = 0.8.sp), - color = mobileTextSecondary, - ) - Row(horizontalArrangement = Arrangement.spacedBy(6.dp), verticalAlignment = androidx.compose.ui.Alignment.CenterVertically) { + Row( + modifier = Modifier.fillMaxWidth().horizontalScroll(rememberScrollState()), + horizontalArrangement = Arrangement.spacedBy(8.dp), + ) { + for (entry in sessionOptions) { + val active = entry.key == sessionKey + Surface( + onClick = { onSelectSession(entry.key) }, + shape = RoundedCornerShape(14.dp), + color = if (active) mobileAccent else Color.White, + border = BorderStroke(1.dp, if (active) Color(0xFF154CAD) else mobileBorderStrong), + tonalElevation = 0.dp, + shadowElevation = 0.dp, + ) { Text( - text = currentSessionLabel, - style = mobileCallout.copy(fontWeight = FontWeight.SemiBold), - color = mobileText, + text = friendlySessionName(entry.displayName ?: entry.key), + style = mobileCaption1.copy(fontWeight = if (active) FontWeight.Bold else FontWeight.SemiBold), + color = if (active) Color.White else mobileText, maxLines = 1, overflow = TextOverflow.Ellipsis, + modifier = Modifier.padding(horizontal = 12.dp, vertical = 8.dp), ) - ChatConnectionPill(healthOk = healthOk) } } - - Row( - modifier = Modifier.fillMaxWidth().horizontalScroll(rememberScrollState()), - horizontalArrangement = Arrangement.spacedBy(8.dp), - ) { - for (entry in sessionOptions) { - val active = entry.key == sessionKey - Surface( - onClick = { onSelectSession(entry.key) }, - shape = RoundedCornerShape(14.dp), - color = if (active) mobileAccent else Color.White, - border = BorderStroke(1.dp, if (active) Color(0xFF154CAD) else mobileBorderStrong), - tonalElevation = 0.dp, - shadowElevation = 0.dp, - ) { - Text( - text = friendlySessionName(entry.displayName ?: entry.key), - style = mobileCaption1.copy(fontWeight = if (active) 
FontWeight.Bold else FontWeight.SemiBold), - color = if (active) Color.White else mobileText, - maxLines = 1, - overflow = TextOverflow.Ellipsis, - modifier = Modifier.padding(horizontal = 12.dp, vertical = 8.dp), - ) - } - } - } - } -} - -@Composable -private fun ChatConnectionPill(healthOk: Boolean) { - Surface( - shape = RoundedCornerShape(999.dp), - color = if (healthOk) mobileSuccessSoft else mobileWarningSoft, - border = BorderStroke(1.dp, if (healthOk) mobileSuccess.copy(alpha = 0.35f) else mobileWarning.copy(alpha = 0.35f)), - ) { - Text( - text = if (healthOk) "Connected" else "Offline", - style = mobileCaption1.copy(fontWeight = FontWeight.SemiBold), - color = if (healthOk) mobileSuccess else mobileWarning, - modifier = Modifier.padding(horizontal = 8.dp, vertical = 3.dp), - ) } } diff --git a/apps/android/app/src/main/java/ai/openclaw/app/voice/TalkModeVoiceResolver.kt b/apps/android/app/src/main/java/ai/openclaw/app/voice/TalkModeVoiceResolver.kt index eff52017624..7ada19e166b 100644 --- a/apps/android/app/src/main/java/ai/openclaw/app/voice/TalkModeVoiceResolver.kt +++ b/apps/android/app/src/main/java/ai/openclaw/app/voice/TalkModeVoiceResolver.kt @@ -79,26 +79,30 @@ internal object TalkModeVoiceResolver { return withContext(Dispatchers.IO) { val url = URL("https://api.elevenlabs.io/v1/voices") val conn = url.openConnection() as HttpURLConnection - conn.requestMethod = "GET" - conn.connectTimeout = 15_000 - conn.readTimeout = 15_000 - conn.setRequestProperty("xi-api-key", apiKey) + try { + conn.requestMethod = "GET" + conn.connectTimeout = 15_000 + conn.readTimeout = 15_000 + conn.setRequestProperty("xi-api-key", apiKey) - val code = conn.responseCode - val stream = if (code >= 400) conn.errorStream else conn.inputStream - val data = stream.readBytes() - if (code >= 400) { - val message = data.toString(Charsets.UTF_8) - throw IllegalStateException("ElevenLabs voices failed: $code $message") - } + val code = conn.responseCode + val stream = if (code >= 
400) conn.errorStream else conn.inputStream + val data = stream?.use { it.readBytes() } ?: byteArrayOf() + if (code >= 400) { + val message = data.toString(Charsets.UTF_8) + throw IllegalStateException("ElevenLabs voices failed: $code $message") + } - val root = json.parseToJsonElement(data.toString(Charsets.UTF_8)).asObjectOrNull() - val voices = (root?.get("voices") as? JsonArray) ?: JsonArray(emptyList()) - voices.mapNotNull { entry -> - val obj = entry.asObjectOrNull() ?: return@mapNotNull null - val voiceId = obj["voice_id"].asStringOrNull() ?: return@mapNotNull null - val name = obj["name"].asStringOrNull() - ElevenLabsVoice(voiceId, name) + val root = json.parseToJsonElement(data.toString(Charsets.UTF_8)).asObjectOrNull() + val voices = (root?.get("voices") as? JsonArray) ?: JsonArray(emptyList()) + voices.mapNotNull { entry -> + val obj = entry.asObjectOrNull() ?: return@mapNotNull null + val voiceId = obj["voice_id"].asStringOrNull() ?: return@mapNotNull null + val name = obj["name"].asStringOrNull() + ElevenLabsVoice(voiceId, name) + } + } finally { + conn.disconnect() } } } diff --git a/apps/android/app/src/test/java/ai/openclaw/app/SecurePrefsTest.kt b/apps/android/app/src/test/java/ai/openclaw/app/SecurePrefsTest.kt index cd72bf75dff..1ef860e29b4 100644 --- a/apps/android/app/src/test/java/ai/openclaw/app/SecurePrefsTest.kt +++ b/apps/android/app/src/test/java/ai/openclaw/app/SecurePrefsTest.kt @@ -20,4 +20,19 @@ class SecurePrefsTest { assertEquals(LocationMode.WhileUsing, prefs.locationMode.value) assertEquals("whileUsing", plainPrefs.getString("location.enabledMode", null)) } + + @Test + fun saveGatewayBootstrapToken_persistsSeparatelyFromSharedToken() { + val context = RuntimeEnvironment.getApplication() + val securePrefs = context.getSharedPreferences("openclaw.node.secure.test", Context.MODE_PRIVATE) + securePrefs.edit().clear().commit() + val prefs = SecurePrefs(context, securePrefsOverride = securePrefs) + + 
prefs.setGatewayToken("shared-token") + prefs.setGatewayBootstrapToken("bootstrap-token") + + assertEquals("shared-token", prefs.loadGatewayToken()) + assertEquals("bootstrap-token", prefs.loadGatewayBootstrapToken()) + assertEquals("bootstrap-token", prefs.gatewayBootstrapToken.value) + } } diff --git a/apps/android/app/src/test/java/ai/openclaw/app/gateway/GatewaySessionInvokeTest.kt b/apps/android/app/src/test/java/ai/openclaw/app/gateway/GatewaySessionInvokeTest.kt index a3f301498c8..2cfa1be4866 100644 --- a/apps/android/app/src/test/java/ai/openclaw/app/gateway/GatewaySessionInvokeTest.kt +++ b/apps/android/app/src/test/java/ai/openclaw/app/gateway/GatewaySessionInvokeTest.kt @@ -27,6 +27,7 @@ import org.junit.runner.RunWith import org.robolectric.RobolectricTestRunner import org.robolectric.RuntimeEnvironment import org.robolectric.annotation.Config +import java.util.concurrent.atomic.AtomicInteger import java.util.concurrent.atomic.AtomicReference private const val TEST_TIMEOUT_MS = 8_000L @@ -41,11 +42,16 @@ private class InMemoryDeviceAuthStore : DeviceAuthTokenStore { override fun saveToken(deviceId: String, role: String, token: String) { tokens["${deviceId.trim()}|${role.trim()}"] = token.trim() } + + override fun clearToken(deviceId: String, role: String) { + tokens.remove("${deviceId.trim()}|${role.trim()}") + } } private data class NodeHarness( val session: GatewaySession, val sessionJob: Job, + val deviceAuthStore: InMemoryDeviceAuthStore, ) private data class InvokeScenarioResult( @@ -56,6 +62,157 @@ private data class InvokeScenarioResult( @RunWith(RobolectricTestRunner::class) @Config(sdk = [34]) class GatewaySessionInvokeTest { + @Test + fun connect_usesBootstrapTokenWhenSharedAndDeviceTokensAreAbsent() = runBlocking { + val json = testJson() + val connected = CompletableDeferred() + val connectAuth = CompletableDeferred() + val lastDisconnect = AtomicReference("") + val server = + startGatewayServer(json) { webSocket, id, method, frame -> + when 
(method) { + "connect" -> { + if (!connectAuth.isCompleted) { + connectAuth.complete(frame["params"]?.jsonObject?.get("auth")?.jsonObject) + } + webSocket.send(connectResponseFrame(id)) + webSocket.close(1000, "done") + } + } + } + + val harness = + createNodeHarness( + connected = connected, + lastDisconnect = lastDisconnect, + ) { GatewaySession.InvokeResult.ok("""{"handled":true}""") } + + try { + connectNodeSession( + session = harness.session, + port = server.port, + token = null, + bootstrapToken = "bootstrap-token", + ) + awaitConnectedOrThrow(connected, lastDisconnect, server) + + val auth = withTimeout(TEST_TIMEOUT_MS) { connectAuth.await() } + assertEquals("bootstrap-token", auth?.get("bootstrapToken")?.jsonPrimitive?.content) + assertNull(auth?.get("token")) + } finally { + shutdownHarness(harness, server) + } + } + + @Test + fun connect_prefersStoredDeviceTokenOverBootstrapToken() = runBlocking { + val json = testJson() + val connected = CompletableDeferred() + val connectAuth = CompletableDeferred() + val lastDisconnect = AtomicReference("") + val server = + startGatewayServer(json) { webSocket, id, method, frame -> + when (method) { + "connect" -> { + if (!connectAuth.isCompleted) { + connectAuth.complete(frame["params"]?.jsonObject?.get("auth")?.jsonObject) + } + webSocket.send(connectResponseFrame(id)) + webSocket.close(1000, "done") + } + } + } + + val harness = + createNodeHarness( + connected = connected, + lastDisconnect = lastDisconnect, + ) { GatewaySession.InvokeResult.ok("""{"handled":true}""") } + + try { + val deviceId = DeviceIdentityStore(RuntimeEnvironment.getApplication()).loadOrCreate().deviceId + harness.deviceAuthStore.saveToken(deviceId, "node", "device-token") + + connectNodeSession( + session = harness.session, + port = server.port, + token = null, + bootstrapToken = "bootstrap-token", + ) + awaitConnectedOrThrow(connected, lastDisconnect, server) + + val auth = withTimeout(TEST_TIMEOUT_MS) { connectAuth.await() } + 
assertEquals("device-token", auth?.get("token")?.jsonPrimitive?.content) + assertNull(auth?.get("bootstrapToken")) + } finally { + shutdownHarness(harness, server) + } + } + + @Test + fun connect_retriesWithStoredDeviceTokenAfterSharedTokenMismatch() = runBlocking { + val json = testJson() + val connected = CompletableDeferred() + val firstConnectAuth = CompletableDeferred() + val secondConnectAuth = CompletableDeferred() + val connectAttempts = AtomicInteger(0) + val lastDisconnect = AtomicReference("") + val server = + startGatewayServer(json) { webSocket, id, method, frame -> + when (method) { + "connect" -> { + val auth = frame["params"]?.jsonObject?.get("auth")?.jsonObject + when (connectAttempts.incrementAndGet()) { + 1 -> { + if (!firstConnectAuth.isCompleted) { + firstConnectAuth.complete(auth) + } + webSocket.send( + """{"type":"res","id":"$id","ok":false,"error":{"code":"INVALID_REQUEST","message":"unauthorized","details":{"code":"AUTH_TOKEN_MISMATCH","canRetryWithDeviceToken":true,"recommendedNextStep":"retry_with_device_token"}}}""", + ) + webSocket.close(1000, "retry") + } + else -> { + if (!secondConnectAuth.isCompleted) { + secondConnectAuth.complete(auth) + } + webSocket.send(connectResponseFrame(id)) + webSocket.close(1000, "done") + } + } + } + } + } + + val harness = + createNodeHarness( + connected = connected, + lastDisconnect = lastDisconnect, + ) { GatewaySession.InvokeResult.ok("""{"handled":true}""") } + + try { + val deviceId = DeviceIdentityStore(RuntimeEnvironment.getApplication()).loadOrCreate().deviceId + harness.deviceAuthStore.saveToken(deviceId, "node", "stored-device-token") + + connectNodeSession( + session = harness.session, + port = server.port, + token = "shared-auth-token", + bootstrapToken = null, + ) + awaitConnectedOrThrow(connected, lastDisconnect, server) + + val firstAuth = withTimeout(TEST_TIMEOUT_MS) { firstConnectAuth.await() } + val secondAuth = withTimeout(TEST_TIMEOUT_MS) { secondConnectAuth.await() } + 
assertEquals("shared-auth-token", firstAuth?.get("token")?.jsonPrimitive?.content) + assertNull(firstAuth?.get("deviceToken")) + assertEquals("shared-auth-token", secondAuth?.get("token")?.jsonPrimitive?.content) + assertEquals("stored-device-token", secondAuth?.get("deviceToken")?.jsonPrimitive?.content) + } finally { + shutdownHarness(harness, server) + } + } + @Test fun nodeInvokeRequest_roundTripsInvokeResult() = runBlocking { val handshakeOrigin = AtomicReference(null) @@ -182,11 +339,12 @@ class GatewaySessionInvokeTest { ): NodeHarness { val app = RuntimeEnvironment.getApplication() val sessionJob = SupervisorJob() + val deviceAuthStore = InMemoryDeviceAuthStore() val session = GatewaySession( scope = CoroutineScope(sessionJob + Dispatchers.Default), identityStore = DeviceIdentityStore(app), - deviceAuthStore = InMemoryDeviceAuthStore(), + deviceAuthStore = deviceAuthStore, onConnected = { _, _, _ -> if (!connected.isCompleted) connected.complete(Unit) }, @@ -197,10 +355,15 @@ class GatewaySessionInvokeTest { onInvoke = onInvoke, ) - return NodeHarness(session = session, sessionJob = sessionJob) + return NodeHarness(session = session, sessionJob = sessionJob, deviceAuthStore = deviceAuthStore) } - private suspend fun connectNodeSession(session: GatewaySession, port: Int) { + private suspend fun connectNodeSession( + session: GatewaySession, + port: Int, + token: String? = "test-token", + bootstrapToken: String? 
= null, + ) { session.connect( endpoint = GatewayEndpoint( @@ -210,7 +373,8 @@ class GatewaySessionInvokeTest { port = port, tlsEnabled = false, ), - token = "test-token", + token = token, + bootstrapToken = bootstrapToken, password = null, options = GatewayConnectOptions( diff --git a/apps/android/app/src/test/java/ai/openclaw/app/ui/GatewayConfigResolverTest.kt b/apps/android/app/src/test/java/ai/openclaw/app/ui/GatewayConfigResolverTest.kt index 72738843ff0..5c24631cf0b 100644 --- a/apps/android/app/src/test/java/ai/openclaw/app/ui/GatewayConfigResolverTest.kt +++ b/apps/android/app/src/test/java/ai/openclaw/app/ui/GatewayConfigResolverTest.kt @@ -8,7 +8,8 @@ import org.junit.Test class GatewayConfigResolverTest { @Test fun resolveScannedSetupCodeAcceptsRawSetupCode() { - val setupCode = encodeSetupCode("""{"url":"wss://gateway.example:18789","token":"token-1"}""") + val setupCode = + encodeSetupCode("""{"url":"wss://gateway.example:18789","bootstrapToken":"bootstrap-1"}""") val resolved = resolveScannedSetupCode(setupCode) @@ -17,7 +18,8 @@ class GatewayConfigResolverTest { @Test fun resolveScannedSetupCodeAcceptsQrJsonPayload() { - val setupCode = encodeSetupCode("""{"url":"wss://gateway.example:18789","password":"pw-1"}""") + val setupCode = + encodeSetupCode("""{"url":"wss://gateway.example:18789","bootstrapToken":"bootstrap-1"}""") val qrJson = """ { @@ -53,6 +55,67 @@ class GatewayConfigResolverTest { assertNull(resolved) } + @Test + fun decodeGatewaySetupCodeParsesBootstrapToken() { + val setupCode = + encodeSetupCode("""{"url":"wss://gateway.example:18789","bootstrapToken":"bootstrap-1"}""") + + val decoded = decodeGatewaySetupCode(setupCode) + + assertEquals("wss://gateway.example:18789", decoded?.url) + assertEquals("bootstrap-1", decoded?.bootstrapToken) + assertNull(decoded?.token) + assertNull(decoded?.password) + } + + @Test + fun resolveGatewayConnectConfigPrefersBootstrapTokenFromSetupCode() { + val setupCode = + 
encodeSetupCode("""{"url":"wss://gateway.example:18789","bootstrapToken":"bootstrap-1"}""") + + val resolved = + resolveGatewayConnectConfig( + useSetupCode = true, + setupCode = setupCode, + manualHost = "", + manualPort = "", + manualTls = true, + fallbackToken = "shared-token", + fallbackPassword = "shared-password", + ) + + assertEquals("gateway.example", resolved?.host) + assertEquals(18789, resolved?.port) + assertEquals(true, resolved?.tls) + assertEquals("bootstrap-1", resolved?.bootstrapToken) + assertNull(resolved?.token?.takeIf { it.isNotEmpty() }) + assertNull(resolved?.password?.takeIf { it.isNotEmpty() }) + } + + @Test + fun resolveGatewayConnectConfigDefaultsPortlessWssSetupCodeTo443() { + val setupCode = + encodeSetupCode("""{"url":"wss://gateway.example","bootstrapToken":"bootstrap-1"}""") + + val resolved = + resolveGatewayConnectConfig( + useSetupCode = true, + setupCode = setupCode, + manualHost = "", + manualPort = "", + manualTls = true, + fallbackToken = "shared-token", + fallbackPassword = "shared-password", + ) + + assertEquals("gateway.example", resolved?.host) + assertEquals(443, resolved?.port) + assertEquals(true, resolved?.tls) + assertEquals("bootstrap-1", resolved?.bootstrapToken) + assertNull(resolved?.token?.takeIf { it.isNotEmpty() }) + assertNull(resolved?.password?.takeIf { it.isNotEmpty() }) + } + private fun encodeSetupCode(payloadJson: String): String { return Base64.getUrlEncoder().withoutPadding().encodeToString(payloadJson.toByteArray(Charsets.UTF_8)) } diff --git a/apps/android/scripts/build-release-aab.ts b/apps/android/scripts/build-release-aab.ts new file mode 100644 index 00000000000..30e4bb0390b --- /dev/null +++ b/apps/android/scripts/build-release-aab.ts @@ -0,0 +1,125 @@ +#!/usr/bin/env bun + +import { $ } from "bun"; +import { dirname, join } from "node:path"; +import { fileURLToPath } from "node:url"; + +const scriptDir = dirname(fileURLToPath(import.meta.url)); +const androidDir = join(scriptDir, ".."); +const 
buildGradlePath = join(androidDir, "app", "build.gradle.kts"); +const bundlePath = join(androidDir, "app", "build", "outputs", "bundle", "release", "app-release.aab"); + +type VersionState = { + versionName: string; + versionCode: number; +}; + +type ParsedVersionMatches = { + versionNameMatch: RegExpMatchArray; + versionCodeMatch: RegExpMatchArray; +}; + +function formatVersionName(date: Date): string { + const year = date.getFullYear(); + const month = date.getMonth() + 1; + const day = date.getDate(); + return `${year}.${month}.${day}`; +} + +function formatVersionCodePrefix(date: Date): string { + const year = date.getFullYear().toString(); + const month = (date.getMonth() + 1).toString().padStart(2, "0"); + const day = date.getDate().toString().padStart(2, "0"); + return `${year}${month}${day}`; +} + +function parseVersionMatches(buildGradleText: string): ParsedVersionMatches { + const versionCodeMatch = buildGradleText.match(/versionCode = (\d+)/); + const versionNameMatch = buildGradleText.match(/versionName = "([^"]+)"/); + if (!versionCodeMatch || !versionNameMatch) { + throw new Error(`Couldn't parse versionName/versionCode from ${buildGradlePath}`); + } + return { versionCodeMatch, versionNameMatch }; +} + +function resolveNextVersionCode(currentVersionCode: number, todayPrefix: string): number { + const currentRaw = currentVersionCode.toString(); + let nextSuffix = 0; + + if (currentRaw.startsWith(todayPrefix)) { + const suffixRaw = currentRaw.slice(todayPrefix.length); + nextSuffix = (suffixRaw ? 
Number.parseInt(suffixRaw, 10) : 0) + 1; + } + + if (!Number.isInteger(nextSuffix) || nextSuffix < 0 || nextSuffix > 99) { + throw new Error( + `Can't auto-bump Android versionCode for ${todayPrefix}: next suffix ${nextSuffix} is invalid`, + ); + } + + return Number.parseInt(`${todayPrefix}${nextSuffix.toString().padStart(2, "0")}`, 10); +} + +function resolveNextVersion(buildGradleText: string, date: Date): VersionState { + const { versionCodeMatch } = parseVersionMatches(buildGradleText); + const currentVersionCode = Number.parseInt(versionCodeMatch[1] ?? "", 10); + if (!Number.isInteger(currentVersionCode)) { + throw new Error(`Invalid Android versionCode in ${buildGradlePath}`); + } + + const versionName = formatVersionName(date); + const versionCode = resolveNextVersionCode(currentVersionCode, formatVersionCodePrefix(date)); + return { versionName, versionCode }; +} + +function updateBuildGradleVersions(buildGradleText: string, nextVersion: VersionState): string { + return buildGradleText + .replace(/versionCode = \d+/, `versionCode = ${nextVersion.versionCode}`) + .replace(/versionName = "[^"]+"/, `versionName = "${nextVersion.versionName}"`); +} + +async function sha256Hex(path: string): Promise { + const buffer = await Bun.file(path).arrayBuffer(); + const digest = await crypto.subtle.digest("SHA-256", buffer); + return Array.from(new Uint8Array(digest), (byte) => byte.toString(16).padStart(2, "0")).join(""); +} + +async function verifyBundleSignature(path: string): Promise { + await $`jarsigner -verify ${path}`.quiet(); +} + +async function main() { + const buildGradleFile = Bun.file(buildGradlePath); + const originalText = await buildGradleFile.text(); + const nextVersion = resolveNextVersion(originalText, new Date()); + const updatedText = updateBuildGradleVersions(originalText, nextVersion); + + if (updatedText === originalText) { + throw new Error("Android version bump produced no change"); + } + + console.log(`Android versionName -> 
${nextVersion.versionName}`); + console.log(`Android versionCode -> ${nextVersion.versionCode}`); + + await Bun.write(buildGradlePath, updatedText); + + try { + await $`./gradlew :app:bundleRelease`.cwd(androidDir); + } catch (error) { + await Bun.write(buildGradlePath, originalText); + throw error; + } + + const bundleFile = Bun.file(bundlePath); + if (!(await bundleFile.exists())) { + throw new Error(`Signed bundle missing at ${bundlePath}`); + } + + await verifyBundleSignature(bundlePath); + const hash = await sha256Hex(bundlePath); + + console.log(`Signed AAB: ${bundlePath}`); + console.log(`SHA-256: ${hash}`); +} + +await main(); diff --git a/apps/ios/ActivityWidget/Info.plist b/apps/ios/ActivityWidget/Info.plist index 4c2d89e1566..4c965121bf9 100644 --- a/apps/ios/ActivityWidget/Info.plist +++ b/apps/ios/ActivityWidget/Info.plist @@ -17,9 +17,9 @@ CFBundlePackageType XPC! CFBundleShortVersionString - 2026.3.9 + $(OPENCLAW_MARKETING_VERSION) CFBundleVersion - 20260308 + $(OPENCLAW_BUILD_VERSION) NSExtension NSExtensionPointIdentifier diff --git a/apps/ios/ActivityWidget/OpenClawLiveActivity.swift b/apps/ios/ActivityWidget/OpenClawLiveActivity.swift index 836803f403f..497fbd45a08 100644 --- a/apps/ios/ActivityWidget/OpenClawLiveActivity.swift +++ b/apps/ios/ActivityWidget/OpenClawLiveActivity.swift @@ -47,6 +47,7 @@ struct OpenClawLiveActivity: Widget { Spacer() trailingView(state: context.state) } + .padding(.horizontal, 12) .padding(.vertical, 4) } diff --git a/apps/ios/Config/Signing.xcconfig b/apps/ios/Config/Signing.xcconfig index 1285d2a38a4..4fef287a09d 100644 --- a/apps/ios/Config/Signing.xcconfig +++ b/apps/ios/Config/Signing.xcconfig @@ -1,10 +1,12 @@ // Shared iOS signing defaults for local development + CI. 
+#include "Version.xcconfig" + OPENCLAW_IOS_DEFAULT_TEAM = Y5PE65HELJ OPENCLAW_IOS_SELECTED_TEAM = $(OPENCLAW_IOS_DEFAULT_TEAM) -OPENCLAW_APP_BUNDLE_ID = ai.openclaw.ios -OPENCLAW_WATCH_APP_BUNDLE_ID = ai.openclaw.ios.watchkitapp -OPENCLAW_WATCH_EXTENSION_BUNDLE_ID = ai.openclaw.ios.watchkitapp.extension -OPENCLAW_ACTIVITY_WIDGET_BUNDLE_ID = ai.openclaw.ios.activitywidget +OPENCLAW_APP_BUNDLE_ID = ai.openclaw.client +OPENCLAW_WATCH_APP_BUNDLE_ID = ai.openclaw.client.watchkitapp +OPENCLAW_WATCH_EXTENSION_BUNDLE_ID = ai.openclaw.client.watchkitapp.extension +OPENCLAW_ACTIVITY_WIDGET_BUNDLE_ID = ai.openclaw.client.activitywidget // Local contributors can override this by running scripts/ios-configure-signing.sh. // Keep include after defaults: xcconfig is evaluated top-to-bottom. diff --git a/apps/ios/Config/Version.xcconfig b/apps/ios/Config/Version.xcconfig new file mode 100644 index 00000000000..4297bc8ff57 --- /dev/null +++ b/apps/ios/Config/Version.xcconfig @@ -0,0 +1,8 @@ +// Shared iOS version defaults. +// Generated overrides live in build/Version.xcconfig (git-ignored). + +OPENCLAW_GATEWAY_VERSION = 2026.3.14 +OPENCLAW_MARKETING_VERSION = 2026.3.14 +OPENCLAW_BUILD_VERSION = 202603140 + +#include? "../build/Version.xcconfig" diff --git a/apps/ios/README.md b/apps/ios/README.md index c7c501fcbff..8e591839bd0 100644 --- a/apps/ios/README.md +++ b/apps/ios/README.md @@ -1,15 +1,12 @@ # OpenClaw iOS (Super Alpha) -NO TEST FLIGHT AVAILABLE AT THIS POINT - This iPhone app is super-alpha and internal-use only. It connects to an OpenClaw Gateway as a `role: node`. ## Distribution Status -NO TEST FLIGHT AVAILABLE AT THIS POINT - -- Current distribution: local/manual deploy from source via Xcode. -- App Store flow is not part of the current internal development path. +- Public distribution: not available. +- Internal beta distribution: local archive + TestFlight upload via Fastlane. +- Local/manual deploy from source via Xcode remains the default development path. 
## Super-Alpha Disclaimer @@ -50,14 +47,93 @@ Shortcut command (same flow + open project): pnpm ios:open ``` +## Local Beta Release Flow + +Prereqs: + +- Xcode 16+ +- `pnpm` +- `xcodegen` +- `fastlane` +- Apple account signed into Xcode for automatic signing/provisioning +- App Store Connect API key set up in Keychain via `scripts/ios-asc-keychain-setup.sh` when auto-resolving a beta build number or uploading to TestFlight + +Release behavior: + +- Local development keeps using unique per-developer bundle IDs from `scripts/ios-configure-signing.sh`. +- Beta release uses canonical `ai.openclaw.client*` bundle IDs through a temporary generated xcconfig in `apps/ios/build/BetaRelease.xcconfig`. +- Beta release also switches the app to `OpenClawPushTransport=relay`, `OpenClawPushDistribution=official`, and `OpenClawPushAPNsEnvironment=production`. +- The beta flow does not modify `apps/ios/.local-signing.xcconfig` or `apps/ios/LocalSigning.xcconfig`. +- Root `package.json.version` is the only version source for iOS. +- A root version like `2026.3.13-beta.1` becomes: + - `CFBundleShortVersionString = 2026.3.13` + - `CFBundleVersion = next TestFlight build number for 2026.3.13` + +Required env for beta builds: + +- `OPENCLAW_PUSH_RELAY_BASE_URL=https://relay.example.com` + This must be a plain `https://host[:port][/path]` base URL without whitespace, query params, fragments, or xcconfig metacharacters. + +Archive without upload: + +```bash +pnpm ios:beta:archive +``` + +Archive and upload to TestFlight: + +```bash +pnpm ios:beta +``` + +If you need to force a specific build number: + +```bash +pnpm ios:beta -- --build-number 7 +``` + ## APNs Expectations For Local/Manual Builds - The app calls `registerForRemoteNotifications()` at launch. - `apps/ios/Sources/OpenClaw.entitlements` sets `aps-environment` to `development`. - APNs token registration to gateway happens only after gateway connection (`push.apns.register`). 
+- Local/manual builds default to `OpenClawPushTransport=direct` and `OpenClawPushDistribution=local`. - Your selected team/profile must support Push Notifications for the app bundle ID you are signing. - If push capability or provisioning is wrong, APNs registration fails at runtime (check Xcode logs for `APNs registration failed`). -- Debug builds register as APNs sandbox; Release builds use production. +- Debug builds default to `OpenClawPushAPNsEnvironment=sandbox`; Release builds default to `production`. + +## APNs Expectations For Official Builds + +- Official/TestFlight builds register with the external push relay before they publish `push.apns.register` to the gateway. +- The gateway registration for relay mode contains an opaque relay handle, a registration-scoped send grant, relay origin metadata, and installation metadata instead of the raw APNs token. +- The relay registration is bound to the gateway identity fetched from `gateway.identity.get`, so another gateway cannot reuse that stored registration. +- The app persists the relay handle metadata locally so reconnects can republish the gateway registration without re-registering on every connect. +- If the relay base URL changes in a later build, the app refreshes the relay registration instead of reusing the old relay origin. +- Relay mode requires a reachable relay base URL and uses App Attest plus the app receipt during registration. +- Gateway-side relay sending is configured through `gateway.push.apns.relay.baseUrl` in `openclaw.json`. `OPENCLAW_APNS_RELAY_BASE_URL` remains a temporary env override only. + +## Official Build Relay Trust Model + +- `iOS -> gateway` + - The app must pair with the gateway and establish both node and operator sessions. + - The operator session is used to fetch `gateway.identity.get`. +- `iOS -> relay` + - The app registers with the relay over HTTPS using App Attest plus the app receipt. 
+ - The relay requires the official production/TestFlight distribution path, which is why local + Xcode/dev installs cannot use the hosted relay. +- `gateway delegation` + - The app includes the gateway identity in relay registration. + - The relay returns a relay handle and registration-scoped send grant delegated to that gateway. +- `gateway -> relay` + - The gateway signs relay send requests with its own device identity. + - The relay verifies both the delegated send grant and the gateway signature before it sends to + APNs. +- `relay -> APNs` + - Production APNs credentials and raw official-build APNs tokens stay in the relay deployment, + not on the gateway. + +This exists to keep the hosted relay limited to genuine OpenClaw official builds and to ensure a +gateway can only send pushes for iOS devices that paired with that gateway. ## What Works Now (Concrete) diff --git a/apps/ios/ShareExtension/Info.plist b/apps/ios/ShareExtension/Info.plist index 90a7e09e0fc..9469daa08a8 100644 --- a/apps/ios/ShareExtension/Info.plist +++ b/apps/ios/ShareExtension/Info.plist @@ -17,9 +17,9 @@ CFBundlePackageType XPC! 
CFBundleShortVersionString - 2026.3.9 + $(OPENCLAW_MARKETING_VERSION) CFBundleVersion - 20260308 + $(OPENCLAW_BUILD_VERSION) NSExtension NSExtensionAttributes diff --git a/apps/ios/ShareExtension/ShareViewController.swift b/apps/ios/ShareExtension/ShareViewController.swift index 1181641e330..00f1b06f9dc 100644 --- a/apps/ios/ShareExtension/ShareViewController.swift +++ b/apps/ios/ShareExtension/ShareViewController.swift @@ -189,6 +189,7 @@ final class ShareViewController: UIViewController { try await gateway.connect( url: url, token: config.token, + bootstrapToken: nil, password: config.password, connectOptions: makeOptions("openclaw-ios"), sessionBox: nil, @@ -208,6 +209,7 @@ final class ShareViewController: UIViewController { try await gateway.connect( url: url, token: config.token, + bootstrapToken: nil, password: config.password, connectOptions: makeOptions("moltbot-ios"), sessionBox: nil, diff --git a/apps/ios/Signing.xcconfig b/apps/ios/Signing.xcconfig index 5966d6e2c2f..d6acc35dee8 100644 --- a/apps/ios/Signing.xcconfig +++ b/apps/ios/Signing.xcconfig @@ -2,6 +2,8 @@ // Auto-selected local team overrides live in .local-signing.xcconfig (git-ignored). // Manual local overrides can go in LocalSigning.xcconfig (git-ignored). +#include "Config/Version.xcconfig" + OPENCLAW_CODE_SIGN_STYLE = Manual OPENCLAW_DEVELOPMENT_TEAM = Y5PE65HELJ diff --git a/apps/ios/Sources/Chat/IOSGatewayChatTransport.swift b/apps/ios/Sources/Chat/IOSGatewayChatTransport.swift index 67f01138803..297811d3ee7 100644 --- a/apps/ios/Sources/Chat/IOSGatewayChatTransport.swift +++ b/apps/ios/Sources/Chat/IOSGatewayChatTransport.swift @@ -39,6 +39,13 @@ struct IOSGatewayChatTransport: OpenClawChatTransport, Sendable { // (chat.subscribe is a node event, not an operator RPC method.) 
} + func resetSession(sessionKey: String) async throws { + struct Params: Codable { var key: String } + let data = try JSONEncoder().encode(Params(key: sessionKey)) + let json = String(data: data, encoding: .utf8) + _ = try await self.gateway.request(method: "sessions.reset", paramsJSON: json, timeoutSeconds: 10) + } + func requestHistory(sessionKey: String) async throws -> OpenClawChatHistoryPayload { struct Params: Codable { var sessionKey: String } let data = try JSONEncoder().encode(Params(sessionKey: sessionKey)) diff --git a/apps/ios/Sources/Gateway/GatewayConnectConfig.swift b/apps/ios/Sources/Gateway/GatewayConnectConfig.swift index 7f4e93380b0..0abea0e312c 100644 --- a/apps/ios/Sources/Gateway/GatewayConnectConfig.swift +++ b/apps/ios/Sources/Gateway/GatewayConnectConfig.swift @@ -14,6 +14,7 @@ struct GatewayConnectConfig: Sendable { let stableID: String let tls: GatewayTLSParams? let token: String? + let bootstrapToken: String? let password: String? let nodeOptions: GatewayConnectOptions diff --git a/apps/ios/Sources/Gateway/GatewayConnectionController.swift b/apps/ios/Sources/Gateway/GatewayConnectionController.swift index 259768a4df1..dc94f3d0797 100644 --- a/apps/ios/Sources/Gateway/GatewayConnectionController.swift +++ b/apps/ios/Sources/Gateway/GatewayConnectionController.swift @@ -101,6 +101,7 @@ final class GatewayConnectionController { return "Missing instanceId (node.instanceId). Try restarting the app." } let token = GatewaySettingsStore.loadGatewayToken(instanceId: instanceId) + let bootstrapToken = GatewaySettingsStore.loadGatewayBootstrapToken(instanceId: instanceId) let password = GatewaySettingsStore.loadGatewayPassword(instanceId: instanceId) // Resolve the service endpoint (SRV/A/AAAA). TXT is unauthenticated; do not route via TXT. 
@@ -151,6 +152,7 @@ final class GatewayConnectionController { gatewayStableID: stableID, tls: tlsParams, token: token, + bootstrapToken: bootstrapToken, password: password) return nil } @@ -163,6 +165,7 @@ final class GatewayConnectionController { let instanceId = UserDefaults.standard.string(forKey: "node.instanceId")? .trimmingCharacters(in: .whitespacesAndNewlines) ?? "" let token = GatewaySettingsStore.loadGatewayToken(instanceId: instanceId) + let bootstrapToken = GatewaySettingsStore.loadGatewayBootstrapToken(instanceId: instanceId) let password = GatewaySettingsStore.loadGatewayPassword(instanceId: instanceId) let resolvedUseTLS = self.resolveManualUseTLS(host: host, useTLS: useTLS) guard let resolvedPort = self.resolveManualPort(host: host, port: port, useTLS: resolvedUseTLS) @@ -203,6 +206,7 @@ final class GatewayConnectionController { gatewayStableID: stableID, tls: tlsParams, token: token, + bootstrapToken: bootstrapToken, password: password) } @@ -229,6 +233,7 @@ final class GatewayConnectionController { stableID: cfg.stableID, tls: cfg.tls, token: cfg.token, + bootstrapToken: cfg.bootstrapToken, password: cfg.password, nodeOptions: self.makeConnectOptions(stableID: cfg.stableID)) appModel.applyGatewayConnectConfig(refreshedConfig) @@ -261,6 +266,7 @@ final class GatewayConnectionController { let instanceId = UserDefaults.standard.string(forKey: "node.instanceId")? .trimmingCharacters(in: .whitespacesAndNewlines) ?? 
"" let token = GatewaySettingsStore.loadGatewayToken(instanceId: instanceId) + let bootstrapToken = GatewaySettingsStore.loadGatewayBootstrapToken(instanceId: instanceId) let password = GatewaySettingsStore.loadGatewayPassword(instanceId: instanceId) let tlsParams = GatewayTLSParams( required: true, @@ -274,6 +280,7 @@ final class GatewayConnectionController { gatewayStableID: pending.stableID, tls: tlsParams, token: token, + bootstrapToken: bootstrapToken, password: password) } @@ -319,6 +326,7 @@ final class GatewayConnectionController { guard !instanceId.isEmpty else { return } let token = GatewaySettingsStore.loadGatewayToken(instanceId: instanceId) + let bootstrapToken = GatewaySettingsStore.loadGatewayBootstrapToken(instanceId: instanceId) let password = GatewaySettingsStore.loadGatewayPassword(instanceId: instanceId) if manualEnabled { @@ -353,6 +361,7 @@ final class GatewayConnectionController { gatewayStableID: stableID, tls: tlsParams, token: token, + bootstrapToken: bootstrapToken, password: password) return } @@ -379,6 +388,7 @@ final class GatewayConnectionController { gatewayStableID: stableID, tls: tlsParams, token: token, + bootstrapToken: bootstrapToken, password: password) return } @@ -448,6 +458,7 @@ final class GatewayConnectionController { gatewayStableID: String, tls: GatewayTLSParams?, token: String?, + bootstrapToken: String?, password: String?) 
{ guard let appModel else { return } @@ -463,6 +474,7 @@ final class GatewayConnectionController { stableID: gatewayStableID, tls: tls, token: token, + bootstrapToken: bootstrapToken, password: password, nodeOptions: connectOptions) appModel.applyGatewayConnectConfig(cfg) diff --git a/apps/ios/Sources/Gateway/GatewaySettingsStore.swift b/apps/ios/Sources/Gateway/GatewaySettingsStore.swift index 37c039d69d1..92dc71259e5 100644 --- a/apps/ios/Sources/Gateway/GatewaySettingsStore.swift +++ b/apps/ios/Sources/Gateway/GatewaySettingsStore.swift @@ -104,6 +104,21 @@ enum GatewaySettingsStore { account: self.gatewayTokenAccount(instanceId: instanceId)) } + static func loadGatewayBootstrapToken(instanceId: String) -> String? { + let account = self.gatewayBootstrapTokenAccount(instanceId: instanceId) + let token = KeychainStore.loadString(service: self.gatewayService, account: account)? + .trimmingCharacters(in: .whitespacesAndNewlines) + if token?.isEmpty == false { return token } + return nil + } + + static func saveGatewayBootstrapToken(_ token: String, instanceId: String) { + _ = KeychainStore.saveString( + token, + service: self.gatewayService, + account: self.gatewayBootstrapTokenAccount(instanceId: instanceId)) + } + static func loadGatewayPassword(instanceId: String) -> String? 
{ KeychainStore.loadString( service: self.gatewayService, @@ -278,6 +293,9 @@ enum GatewaySettingsStore { _ = KeychainStore.delete( service: self.gatewayService, account: self.gatewayTokenAccount(instanceId: trimmed)) + _ = KeychainStore.delete( + service: self.gatewayService, + account: self.gatewayBootstrapTokenAccount(instanceId: trimmed)) _ = KeychainStore.delete( service: self.gatewayService, account: self.gatewayPasswordAccount(instanceId: trimmed)) @@ -331,6 +349,10 @@ enum GatewaySettingsStore { "gateway-token.\(instanceId)" } + private static func gatewayBootstrapTokenAccount(instanceId: String) -> String { + "gateway-bootstrap-token.\(instanceId)" + } + private static func gatewayPasswordAccount(instanceId: String) -> String { "gateway-password.\(instanceId)" } diff --git a/apps/ios/Sources/Gateway/GatewaySetupCode.swift b/apps/ios/Sources/Gateway/GatewaySetupCode.swift index 8ccbab42da7..d52ca023563 100644 --- a/apps/ios/Sources/Gateway/GatewaySetupCode.swift +++ b/apps/ios/Sources/Gateway/GatewaySetupCode.swift @@ -5,6 +5,7 @@ struct GatewaySetupPayload: Codable { var host: String? var port: Int? var tls: Bool? + var bootstrapToken: String? var token: String? var password: String? } @@ -39,4 +40,3 @@ enum GatewaySetupCode { return String(data: data, encoding: .utf8) } } - diff --git a/apps/ios/Sources/HomeToolbar.swift b/apps/ios/Sources/HomeToolbar.swift new file mode 100644 index 00000000000..924d95d7919 --- /dev/null +++ b/apps/ios/Sources/HomeToolbar.swift @@ -0,0 +1,223 @@ +import SwiftUI + +struct HomeToolbar: View { + var gateway: StatusPill.GatewayState + var voiceWakeEnabled: Bool + var activity: StatusPill.Activity? 
+ var brighten: Bool + var talkButtonEnabled: Bool + var talkActive: Bool + var talkTint: Color + var onStatusTap: () -> Void + var onChatTap: () -> Void + var onTalkTap: () -> Void + var onSettingsTap: () -> Void + + @Environment(\.colorSchemeContrast) private var contrast + + var body: some View { + VStack(spacing: 0) { + Rectangle() + .fill(.white.opacity(self.contrast == .increased ? 0.46 : (self.brighten ? 0.18 : 0.12))) + .frame(height: self.contrast == .increased ? 1.0 : 0.6) + .allowsHitTesting(false) + + HStack(spacing: 12) { + HomeToolbarStatusButton( + gateway: self.gateway, + voiceWakeEnabled: self.voiceWakeEnabled, + activity: self.activity, + brighten: self.brighten, + onTap: self.onStatusTap) + + Spacer(minLength: 0) + + HStack(spacing: 8) { + HomeToolbarActionButton( + systemImage: "text.bubble.fill", + accessibilityLabel: "Chat", + brighten: self.brighten, + action: self.onChatTap) + + if self.talkButtonEnabled { + HomeToolbarActionButton( + systemImage: self.talkActive ? "waveform.circle.fill" : "waveform.circle", + accessibilityLabel: self.talkActive ? "Talk Mode On" : "Talk Mode Off", + brighten: self.brighten, + tint: self.talkTint, + isActive: self.talkActive, + action: self.onTalkTap) + } + + HomeToolbarActionButton( + systemImage: "gearshape.fill", + accessibilityLabel: "Settings", + brighten: self.brighten, + action: self.onSettingsTap) + } + } + .padding(.horizontal, 12) + .padding(.top, 10) + .padding(.bottom, 8) + } + .frame(maxWidth: .infinity) + .background(.ultraThinMaterial) + .overlay(alignment: .top) { + LinearGradient( + colors: [ + .white.opacity(self.brighten ? 
0.10 : 0.06), + .clear, + ], + startPoint: .top, + endPoint: .bottom) + .allowsHitTesting(false) + } + } +} + +private struct HomeToolbarStatusButton: View { + @Environment(\.scenePhase) private var scenePhase + @Environment(\.accessibilityReduceMotion) private var reduceMotion + @Environment(\.colorSchemeContrast) private var contrast + + var gateway: StatusPill.GatewayState + var voiceWakeEnabled: Bool + var activity: StatusPill.Activity? + var brighten: Bool + var onTap: () -> Void + + @State private var pulse: Bool = false + + var body: some View { + Button(action: self.onTap) { + HStack(spacing: 8) { + HStack(spacing: 6) { + Circle() + .fill(self.gateway.color) + .frame(width: 8, height: 8) + .scaleEffect( + self.gateway == .connecting && !self.reduceMotion + ? (self.pulse ? 1.15 : 0.85) + : 1.0 + ) + .opacity(self.gateway == .connecting && !self.reduceMotion ? (self.pulse ? 1.0 : 0.6) : 1.0) + + Text(self.gateway.title) + .font(.footnote.weight(.semibold)) + .foregroundStyle(.primary) + .lineLimit(1) + } + + if let activity { + Image(systemName: activity.systemImage) + .font(.footnote.weight(.semibold)) + .foregroundStyle(activity.tint ?? .primary) + .transition(.opacity.combined(with: .move(edge: .top))) + } else { + Image(systemName: self.voiceWakeEnabled ? "mic.fill" : "mic.slash") + .font(.footnote.weight(.semibold)) + .foregroundStyle(self.voiceWakeEnabled ? .primary : .secondary) + .transition(.opacity.combined(with: .move(edge: .top))) + } + } + .padding(.horizontal, 12) + .padding(.vertical, 8) + .background { + RoundedRectangle(cornerRadius: 14, style: .continuous) + .fill(Color.black.opacity(self.brighten ? 0.12 : 0.18)) + .overlay { + RoundedRectangle(cornerRadius: 14, style: .continuous) + .strokeBorder( + .white.opacity(self.contrast == .increased ? 0.46 : (self.brighten ? 0.22 : 0.16)), + lineWidth: self.contrast == .increased ? 
1.0 : 0.6) + } + } + } + .buttonStyle(.plain) + .accessibilityLabel("Connection Status") + .accessibilityValue(self.accessibilityValue) + .accessibilityHint(self.gateway == .connected ? "Double tap for gateway actions" : "Double tap to open settings") + .onAppear { self.updatePulse(for: self.gateway, scenePhase: self.scenePhase, reduceMotion: self.reduceMotion) } + .onDisappear { self.pulse = false } + .onChange(of: self.gateway) { _, newValue in + self.updatePulse(for: newValue, scenePhase: self.scenePhase, reduceMotion: self.reduceMotion) + } + .onChange(of: self.scenePhase) { _, newValue in + self.updatePulse(for: self.gateway, scenePhase: newValue, reduceMotion: self.reduceMotion) + } + .onChange(of: self.reduceMotion) { _, newValue in + self.updatePulse(for: self.gateway, scenePhase: self.scenePhase, reduceMotion: newValue) + } + .animation(.easeInOut(duration: 0.18), value: self.activity?.title) + } + + private var accessibilityValue: String { + if let activity { + return "\(self.gateway.title), \(activity.title)" + } + return "\(self.gateway.title), Voice Wake \(self.voiceWakeEnabled ? "enabled" : "disabled")" + } + + private func updatePulse(for gateway: StatusPill.GatewayState, scenePhase: ScenePhase, reduceMotion: Bool) { + guard gateway == .connecting, scenePhase == .active, !reduceMotion else { + withAnimation(reduceMotion ? .none : .easeOut(duration: 0.2)) { self.pulse = false } + return + } + + guard !self.pulse else { return } + withAnimation(.easeInOut(duration: 0.9).repeatForever(autoreverses: true)) { + self.pulse = true + } + } +} + +private struct HomeToolbarActionButton: View { + @Environment(\.colorSchemeContrast) private var contrast + + let systemImage: String + let accessibilityLabel: String + let brighten: Bool + var tint: Color? 
+ var isActive: Bool = false + let action: () -> Void + + var body: some View { + Button(action: self.action) { + Image(systemName: self.systemImage) + .font(.system(size: 16, weight: .semibold)) + .foregroundStyle(self.isActive ? (self.tint ?? .primary) : .primary) + .frame(width: 40, height: 40) + .background { + RoundedRectangle(cornerRadius: 12, style: .continuous) + .fill(Color.black.opacity(self.brighten ? 0.12 : 0.18)) + .overlay { + if let tint { + RoundedRectangle(cornerRadius: 12, style: .continuous) + .fill( + LinearGradient( + colors: [ + tint.opacity(self.isActive ? 0.22 : 0.14), + tint.opacity(self.isActive ? 0.08 : 0.04), + .clear, + ], + startPoint: .topLeading, + endPoint: .bottomTrailing)) + .blendMode(.overlay) + } + } + .overlay { + RoundedRectangle(cornerRadius: 12, style: .continuous) + .strokeBorder( + (self.tint ?? .white).opacity( + self.isActive + ? 0.34 + : (self.contrast == .increased ? 0.4 : (self.brighten ? 0.22 : 0.16)) + ), + lineWidth: self.contrast == .increased ? 1.0 : (self.isActive ? 0.8 : 0.6)) + } + } + } + .buttonStyle(.plain) + .accessibilityLabel(self.accessibilityLabel) + } +} diff --git a/apps/ios/Sources/Info.plist b/apps/ios/Sources/Info.plist index 2f1f03d24a1..5908021fad3 100644 --- a/apps/ios/Sources/Info.plist +++ b/apps/ios/Sources/Info.plist @@ -23,7 +23,7 @@ CFBundlePackageType APPL CFBundleShortVersionString - 2026.3.9 + $(OPENCLAW_MARKETING_VERSION) CFBundleURLTypes @@ -36,7 +36,7 @@ CFBundleVersion - 20260308 + $(OPENCLAW_BUILD_VERSION) ITSAppUsesNonExemptEncryption NSAppTransportSecurity @@ -66,6 +66,14 @@ OpenClaw uses on-device speech recognition for voice wake. 
NSSupportsLiveActivities + OpenClawPushAPNsEnvironment + $(OPENCLAW_PUSH_APNS_ENVIRONMENT) + OpenClawPushDistribution + $(OPENCLAW_PUSH_DISTRIBUTION) + OpenClawPushRelayBaseURL + $(OPENCLAW_PUSH_RELAY_BASE_URL) + OpenClawPushTransport + $(OPENCLAW_PUSH_TRANSPORT) UIApplicationSceneManifest UIApplicationSupportsMultipleScenes diff --git a/apps/ios/Sources/Model/NodeAppModel+Canvas.swift b/apps/ios/Sources/Model/NodeAppModel+Canvas.swift index 73e13fa0992..028983d1a5b 100644 --- a/apps/ios/Sources/Model/NodeAppModel+Canvas.swift +++ b/apps/ios/Sources/Model/NodeAppModel+Canvas.swift @@ -34,18 +34,11 @@ extension NodeAppModel { } func showA2UIOnConnectIfNeeded() async { - let current = self.screen.urlString.trimmingCharacters(in: .whitespacesAndNewlines) - if current.isEmpty || current == self.lastAutoA2uiURL { - if let canvasUrl = await self.resolveCanvasHostURLWithCapabilityRefresh(), - let url = URL(string: canvasUrl), - await Self.probeTCP(url: url, timeoutSeconds: 2.5) - { - self.screen.navigate(to: canvasUrl) - self.lastAutoA2uiURL = canvasUrl - } else { - self.lastAutoA2uiURL = nil - self.screen.showDefaultCanvas() - } + await MainActor.run { + // Keep the bundled home canvas as the default connected view. + // Agents can still explicitly present a remote or local canvas later. + self.lastAutoA2uiURL = nil + self.screen.showDefaultCanvas() } } diff --git a/apps/ios/Sources/Model/NodeAppModel.swift b/apps/ios/Sources/Model/NodeAppModel.swift index e5a8c216161..4c0ab81f1a1 100644 --- a/apps/ios/Sources/Model/NodeAppModel.swift +++ b/apps/ios/Sources/Model/NodeAppModel.swift @@ -12,6 +12,12 @@ import UserNotifications private struct NotificationCallError: Error, Sendable { let message: String } + +private struct GatewayRelayIdentityResponse: Decodable { + let deviceId: String + let publicKey: String +} + // Ensures notification requests return promptly even if the system prompt blocks. 
private final class NotificationInvokeLatch: @unchecked Sendable { private let lock = NSLock() @@ -88,6 +94,7 @@ final class NodeAppModel { var selectedAgentId: String? var gatewayDefaultAgentId: String? var gatewayAgents: [AgentSummary] = [] + var homeCanvasRevision: Int = 0 var lastShareEventText: String = "No share events yet." var openChatRequestID: Int = 0 private(set) var pendingAgentDeepLinkPrompt: AgentDeepLinkPrompt? @@ -139,6 +146,7 @@ final class NodeAppModel { private var shareDeliveryTo: String? private var apnsDeviceTokenHex: String? private var apnsLastRegisteredTokenHex: String? + @ObservationIgnored private let pushRegistrationManager = PushRegistrationManager() var gatewaySession: GatewayNodeSession { self.nodeGateway } var operatorSession: GatewayNodeSession { self.operatorGateway } private(set) var activeGatewayConnectConfig: GatewayConnectConfig? @@ -362,7 +370,14 @@ final class NodeAppModel { await MainActor.run { self.operatorConnected = false self.gatewayConnected = false + // Foreground recovery must actively restart the saved gateway config. + // Disconnecting stale sockets alone can leave us idle if the old + // reconnect tasks were suppressed or otherwise got stuck in background. + self.gatewayStatusText = "Reconnecting…" self.talkMode.updateGatewayConnected(false) + if let cfg = self.activeGatewayConnectConfig { + self.applyGatewayConnectConfig(cfg) + } } } } @@ -520,13 +535,6 @@ final class NodeAppModel { private static let apnsDeviceTokenUserDefaultsKey = "push.apns.deviceTokenHex" private static let deepLinkKeyUserDefaultsKey = "deeplink.agent.key" private static let canvasUnattendedDeepLinkKey: String = NodeAppModel.generateDeepLinkKey() - private static var apnsEnvironment: String { -#if DEBUG - "sandbox" -#else - "production" -#endif - } private func refreshBrandingFromGateway() async { do { @@ -541,6 +549,7 @@ final class NodeAppModel { self.seamColorHex = raw.isEmpty ? 
nil : raw self.mainSessionBaseKey = mainKey self.talkMode.updateMainSessionKey(self.mainSessionKey) + self.homeCanvasRevision &+= 1 } } catch { if let gatewayError = error as? GatewayResponseError { @@ -567,12 +576,19 @@ final class NodeAppModel { self.selectedAgentId = nil } self.talkMode.updateMainSessionKey(self.mainSessionKey) + self.homeCanvasRevision &+= 1 } } catch { // Best-effort only. } } + func refreshGatewayOverviewIfConnected() async { + guard await self.isOperatorConnected() else { return } + await self.refreshBrandingFromGateway() + await self.refreshAgentsFromGateway() + } + func setSelectedAgentId(_ agentId: String?) { let trimmed = (agentId ?? "").trimmingCharacters(in: .whitespacesAndNewlines) let stableID = (self.connectedGatewayID ?? "").trimmingCharacters(in: .whitespacesAndNewlines) @@ -583,6 +599,7 @@ final class NodeAppModel { GatewaySettingsStore.saveGatewaySelectedAgentId(stableID: stableID, agentId: self.selectedAgentId) } self.talkMode.updateMainSessionKey(self.mainSessionKey) + self.homeCanvasRevision &+= 1 if let relay = ShareGatewayRelaySettings.loadConfig() { ShareGatewayRelaySettings.saveConfig( ShareGatewayRelayConfig( @@ -1172,7 +1189,15 @@ final class NodeAppModel { _ = try await notificationCenter.requestAuthorization(options: [.alert, .sound, .badge]) } - return await self.notificationAuthorizationStatus() + let updatedStatus = await self.notificationAuthorizationStatus() + if Self.isNotificationAuthorizationAllowed(updatedStatus) { + // Refresh APNs registration immediately after the first permission grant so the + // gateway can receive a push registration without requiring an app relaunch. 
+ await MainActor.run { + UIApplication.shared.registerForRemoteNotifications() + } + } + return updatedStatus } private func notificationAuthorizationStatus() async -> NotificationAuthorizationStatus { @@ -1187,6 +1212,17 @@ final class NodeAppModel { } } + private static func isNotificationAuthorizationAllowed( + _ status: NotificationAuthorizationStatus + ) -> Bool { + switch status { + case .authorized, .provisional, .ephemeral: + true + case .denied, .notDetermined: + false + } + } + private func runNotificationCall( timeoutSeconds: Double, operation: @escaping @Sendable () async throws -> T @@ -1622,11 +1658,9 @@ extension NodeAppModel { } var chatSessionKey: String { - let base = "ios" - let agentId = (self.selectedAgentId ?? "").trimmingCharacters(in: .whitespacesAndNewlines) - let defaultId = (self.gatewayDefaultAgentId ?? "").trimmingCharacters(in: .whitespacesAndNewlines) - if agentId.isEmpty || (!defaultId.isEmpty && agentId == defaultId) { return base } - return SessionKey.makeAgentSessionKey(agentId: agentId, baseKey: base) + // Keep chat aligned with the gateway's resolved main session key. + // A hardcoded "ios" base creates synthetic placeholder sessions in the chat UI. 
+ self.mainSessionKey } var activeAgentName: String { @@ -1646,6 +1680,7 @@ extension NodeAppModel { gatewayStableID: String, tls: GatewayTLSParams?, token: String?, + bootstrapToken: String?, password: String?, connectOptions: GatewayConnectOptions) { @@ -1658,6 +1693,7 @@ extension NodeAppModel { stableID: stableID, tls: tls, token: token, + bootstrapToken: bootstrapToken, password: password, nodeOptions: connectOptions) self.prepareForGatewayConnect(url: url, stableID: effectiveStableID) @@ -1665,6 +1701,7 @@ extension NodeAppModel { url: url, stableID: effectiveStableID, token: token, + bootstrapToken: bootstrapToken, password: password, nodeOptions: connectOptions, sessionBox: sessionBox) @@ -1672,6 +1709,7 @@ extension NodeAppModel { url: url, stableID: effectiveStableID, token: token, + bootstrapToken: bootstrapToken, password: password, nodeOptions: connectOptions, sessionBox: sessionBox) @@ -1687,6 +1725,7 @@ extension NodeAppModel { gatewayStableID: cfg.stableID, tls: cfg.tls, token: cfg.token, + bootstrapToken: cfg.bootstrapToken, password: cfg.password, connectOptions: cfg.nodeOptions) } @@ -1742,6 +1781,7 @@ private extension NodeAppModel { self.gatewayDefaultAgentId = nil self.gatewayAgents = [] self.selectedAgentId = GatewaySettingsStore.loadGatewaySelectedAgentId(stableID: stableID) + self.homeCanvasRevision &+= 1 self.apnsLastRegisteredTokenHex = nil } @@ -1766,6 +1806,7 @@ private extension NodeAppModel { url: URL, stableID: String, token: String?, + bootstrapToken: String?, password: String?, nodeOptions: GatewayConnectOptions, sessionBox: WebSocketSessionBox?) 
@@ -1803,6 +1844,7 @@ private extension NodeAppModel { try await self.operatorGateway.connect( url: url, token: token, + bootstrapToken: bootstrapToken, password: password, connectOptions: operatorOptions, sessionBox: sessionBox, @@ -1818,6 +1860,7 @@ private extension NodeAppModel { await self.refreshBrandingFromGateway() await self.refreshAgentsFromGateway() await self.refreshShareRouteFromGateway() + await self.registerAPNsTokenIfNeeded() await self.startVoiceWakeSync() await MainActor.run { LiveActivityManager.shared.handleReconnect() } await MainActor.run { self.startGatewayHealthMonitor() } @@ -1860,6 +1903,7 @@ private extension NodeAppModel { url: URL, stableID: String, token: String?, + bootstrapToken: String?, password: String?, nodeOptions: GatewayConnectOptions, sessionBox: WebSocketSessionBox?) @@ -1908,6 +1952,7 @@ private extension NodeAppModel { try await self.nodeGateway.connect( url: url, token: token, + bootstrapToken: bootstrapToken, password: password, connectOptions: currentOptions, sessionBox: sessionBox, @@ -2239,8 +2284,7 @@ extension NodeAppModel { from: payload) guard !decoded.actions.isEmpty else { return } self.pendingActionLogger.info( - "Pending actions pulled trigger=\(trigger, privacy: .public) " - + "count=\(decoded.actions.count, privacy: .public)") + "Pending actions pulled trigger=\(trigger, privacy: .public) count=\(decoded.actions.count, privacy: .public)") await self.applyPendingForegroundNodeActions(decoded.actions, trigger: trigger) } catch { // Best-effort only. 
@@ -2263,9 +2307,7 @@ extension NodeAppModel { paramsJSON: action.paramsJSON) let result = await self.handleInvoke(req) self.pendingActionLogger.info( - "Pending action replay trigger=\(trigger, privacy: .public) " - + "id=\(action.id, privacy: .public) command=\(action.command, privacy: .public) " - + "ok=\(result.ok, privacy: .public)") + "Pending action replay trigger=\(trigger, privacy: .public) id=\(action.id, privacy: .public) command=\(action.command, privacy: .public) ok=\(result.ok, privacy: .public)") guard result.ok else { return } let acked = await self.ackPendingForegroundNodeAction( id: action.id, @@ -2290,9 +2332,7 @@ extension NodeAppModel { return true } catch { self.pendingActionLogger.error( - "Pending action ack failed trigger=\(trigger, privacy: .public) " - + "id=\(id, privacy: .public) command=\(command, privacy: .public) " - + "error=\(String(describing: error), privacy: .public)") + "Pending action ack failed trigger=\(trigger, privacy: .public) id=\(id, privacy: .public) command=\(command, privacy: .public) error=\(String(describing: error), privacy: .public)") return false } } @@ -2468,7 +2508,8 @@ extension NodeAppModel { else { return } - if token == self.apnsLastRegisteredTokenHex { + let usesRelayTransport = await self.pushRegistrationManager.usesRelayTransport + if !usesRelayTransport && token == self.apnsLastRegisteredTokenHex { return } guard let topic = Bundle.main.bundleIdentifier?.trimmingCharacters(in: .whitespacesAndNewlines), @@ -2477,25 +2518,40 @@ extension NodeAppModel { return } - struct PushRegistrationPayload: Codable { - var token: String - var topic: String - var environment: String - } - - let payload = PushRegistrationPayload( - token: token, - topic: topic, - environment: Self.apnsEnvironment) do { - let json = try Self.encodePayload(payload) - await self.nodeGateway.sendEvent(event: "push.apns.register", payloadJSON: json) + let gatewayIdentity: PushRelayGatewayIdentity? 
+ if usesRelayTransport { + guard self.operatorConnected else { return } + gatewayIdentity = try await self.fetchPushRelayGatewayIdentity() + } else { + gatewayIdentity = nil + } + let payloadJSON = try await self.pushRegistrationManager.makeGatewayRegistrationPayload( + apnsTokenHex: token, + topic: topic, + gatewayIdentity: gatewayIdentity) + await self.nodeGateway.sendEvent(event: "push.apns.register", payloadJSON: payloadJSON) self.apnsLastRegisteredTokenHex = token } catch { - // Best-effort only. + self.pushWakeLogger.error( + "APNs registration publish failed: \(error.localizedDescription, privacy: .public)") } } + private func fetchPushRelayGatewayIdentity() async throws -> PushRelayGatewayIdentity { + let response = try await self.operatorGateway.request( + method: "gateway.identity.get", + paramsJSON: "{}", + timeoutSeconds: 8) + let decoded = try JSONDecoder().decode(GatewayRelayIdentityResponse.self, from: response) + let deviceId = decoded.deviceId.trimmingCharacters(in: .whitespacesAndNewlines) + let publicKey = decoded.publicKey.trimmingCharacters(in: .whitespacesAndNewlines) + guard !deviceId.isEmpty, !publicKey.isEmpty else { + throw PushRelayError.relayMisconfigured("Gateway identity response missing required fields") + } + return PushRelayGatewayIdentity(deviceId: deviceId, publicKey: publicKey) + } + private static func isSilentPushPayload(_ userInfo: [AnyHashable: Any]) -> Bool { guard let apsAny = userInfo["aps"] else { return false } if let aps = apsAny as? 
[AnyHashable: Any] { diff --git a/apps/ios/Sources/Onboarding/GatewayOnboardingView.swift b/apps/ios/Sources/Onboarding/GatewayOnboardingView.swift index b8b6e267755..f160b37d798 100644 --- a/apps/ios/Sources/Onboarding/GatewayOnboardingView.swift +++ b/apps/ios/Sources/Onboarding/GatewayOnboardingView.swift @@ -275,9 +275,21 @@ private struct ManualEntryStep: View { if let token = payload.token, !token.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty { self.manualToken = token.trimmingCharacters(in: .whitespacesAndNewlines) + } else if payload.bootstrapToken?.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty == false { + self.manualToken = "" } if let password = payload.password, !password.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty { self.manualPassword = password.trimmingCharacters(in: .whitespacesAndNewlines) + } else if payload.bootstrapToken?.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty == false { + self.manualPassword = "" + } + + let trimmedInstanceId = UserDefaults.standard.string(forKey: "node.instanceId")? + .trimmingCharacters(in: .whitespacesAndNewlines) ?? "" + if !trimmedInstanceId.isEmpty { + let trimmedBootstrapToken = + payload.bootstrapToken?.trimmingCharacters(in: .whitespacesAndNewlines) ?? "" + GatewaySettingsStore.saveGatewayBootstrapToken(trimmedBootstrapToken, instanceId: trimmedInstanceId) } self.setupStatusText = "Setup code applied." 
diff --git a/apps/ios/Sources/Onboarding/OnboardingStateStore.swift b/apps/ios/Sources/Onboarding/OnboardingStateStore.swift index 9822ac1706f..dc2859d86d9 100644 --- a/apps/ios/Sources/Onboarding/OnboardingStateStore.swift +++ b/apps/ios/Sources/Onboarding/OnboardingStateStore.swift @@ -19,6 +19,7 @@ enum OnboardingConnectionMode: String, CaseIterable { enum OnboardingStateStore { private static let completedDefaultsKey = "onboarding.completed" + private static let firstRunIntroSeenDefaultsKey = "onboarding.first_run_intro_seen" private static let lastModeDefaultsKey = "onboarding.last_mode" private static let lastSuccessTimeDefaultsKey = "onboarding.last_success_time" @@ -39,10 +40,23 @@ enum OnboardingStateStore { defaults.set(Int(Date().timeIntervalSince1970), forKey: Self.lastSuccessTimeDefaultsKey) } + static func shouldPresentFirstRunIntro(defaults: UserDefaults = .standard) -> Bool { + !defaults.bool(forKey: Self.firstRunIntroSeenDefaultsKey) + } + + static func markFirstRunIntroSeen(defaults: UserDefaults = .standard) { + defaults.set(true, forKey: Self.firstRunIntroSeenDefaultsKey) + } + static func markIncomplete(defaults: UserDefaults = .standard) { defaults.set(false, forKey: Self.completedDefaultsKey) } + static func reset(defaults: UserDefaults = .standard) { + defaults.set(false, forKey: Self.completedDefaultsKey) + defaults.set(false, forKey: Self.firstRunIntroSeenDefaultsKey) + } + static func lastMode(defaults: UserDefaults = .standard) -> OnboardingConnectionMode? { let raw = defaults.string(forKey: Self.lastModeDefaultsKey)? .trimmingCharacters(in: .whitespacesAndNewlines) ?? 
"" diff --git a/apps/ios/Sources/Onboarding/OnboardingWizardView.swift b/apps/ios/Sources/Onboarding/OnboardingWizardView.swift index 8a97b20e0c7..516e7b373eb 100644 --- a/apps/ios/Sources/Onboarding/OnboardingWizardView.swift +++ b/apps/ios/Sources/Onboarding/OnboardingWizardView.swift @@ -6,6 +6,7 @@ import SwiftUI import UIKit private enum OnboardingStep: Int, CaseIterable { + case intro case welcome case mode case connect @@ -29,7 +30,8 @@ private enum OnboardingStep: Int, CaseIterable { var title: String { switch self { - case .welcome: "Welcome" + case .intro: "Welcome" + case .welcome: "Connect Gateway" case .mode: "Connection Mode" case .connect: "Connect" case .auth: "Authentication" @@ -38,7 +40,7 @@ private enum OnboardingStep: Int, CaseIterable { } var canGoBack: Bool { - self != .welcome && self != .success + self != .intro && self != .welcome && self != .success } } @@ -49,7 +51,7 @@ struct OnboardingWizardView: View { @AppStorage("node.instanceId") private var instanceId: String = UUID().uuidString @AppStorage("gateway.discovery.domain") private var discoveryDomain: String = "" @AppStorage("onboarding.developerMode") private var developerModeEnabled: Bool = false - @State private var step: OnboardingStep = .welcome + @State private var step: OnboardingStep @State private var selectedMode: OnboardingConnectionMode? @State private var manualHost: String = "" @State private var manualPort: Int = 18789 @@ -58,11 +60,10 @@ struct OnboardingWizardView: View { @State private var gatewayToken: String = "" @State private var gatewayPassword: String = "" @State private var connectMessage: String? - @State private var statusLine: String = "Scan the QR code from your gateway to connect." + @State private var statusLine: String = "In your OpenClaw chat, run /pair qr, then scan the code here." @State private var connectingGatewayID: String? 
@State private var issue: GatewayConnectionIssue = .none @State private var didMarkCompleted = false - @State private var didAutoPresentQR = false @State private var pairingRequestId: String? @State private var discoveryRestartTask: Task? @State private var showQRScanner: Bool = false @@ -74,14 +75,23 @@ struct OnboardingWizardView: View { let allowSkip: Bool let onClose: () -> Void + init(allowSkip: Bool, onClose: @escaping () -> Void) { + self.allowSkip = allowSkip + self.onClose = onClose + _step = State( + initialValue: OnboardingStateStore.shouldPresentFirstRunIntro() ? .intro : .welcome) + } + private var isFullScreenStep: Bool { - self.step == .welcome || self.step == .success + self.step == .intro || self.step == .welcome || self.step == .success } var body: some View { NavigationStack { Group { switch self.step { + case .intro: + self.introStep case .welcome: self.welcomeStep case .success: @@ -293,6 +303,83 @@ struct OnboardingWizardView: View { } } + @ViewBuilder + private var introStep: some View { + VStack(spacing: 0) { + Spacer() + + Image(systemName: "iphone.gen3") + .font(.system(size: 60, weight: .semibold)) + .foregroundStyle(.tint) + .padding(.bottom, 18) + + Text("Welcome to OpenClaw") + .font(.largeTitle.weight(.bold)) + .multilineTextAlignment(.center) + .padding(.bottom, 10) + + Text("Turn this iPhone into a secure OpenClaw node for chat, voice, camera, and device tools.") + .font(.subheadline) + .foregroundStyle(.secondary) + .multilineTextAlignment(.center) + .padding(.horizontal, 32) + .padding(.bottom, 24) + + VStack(alignment: .leading, spacing: 14) { + Label("Connect to your gateway", systemImage: "link") + Label("Choose device permissions", systemImage: "hand.raised") + Label("Use OpenClaw from your phone", systemImage: "message.fill") + } + .font(.subheadline.weight(.semibold)) + .frame(maxWidth: .infinity, alignment: .leading) + .padding(18) + .background { + RoundedRectangle(cornerRadius: 20, style: .continuous) + 
.fill(Color(uiColor: .secondarySystemBackground)) + } + .padding(.horizontal, 24) + .padding(.bottom, 16) + + HStack(alignment: .top, spacing: 12) { + Image(systemName: "exclamationmark.triangle.fill") + .font(.title3.weight(.semibold)) + .foregroundStyle(.orange) + .frame(width: 24) + .padding(.top, 2) + + VStack(alignment: .leading, spacing: 6) { + Text("Security notice") + .font(.headline) + Text( + "The connected OpenClaw agent can use device capabilities you enable, such as camera, microphone, photos, contacts, calendar, and location. Continue only if you trust the gateway and agent you connect to.") + .font(.footnote) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + } + .frame(maxWidth: .infinity, alignment: .leading) + .padding(18) + .background { + RoundedRectangle(cornerRadius: 20, style: .continuous) + .fill(Color(uiColor: .secondarySystemBackground)) + } + .padding(.horizontal, 24) + + Spacer() + + Button { + self.advanceFromIntro() + } label: { + Text("Continue") + .frame(maxWidth: .infinity) + } + .buttonStyle(.borderedProminent) + .controlSize(.large) + .padding(.horizontal, 24) + .padding(.bottom, 48) + } + } + @ViewBuilder private var welcomeStep: some View { VStack(spacing: 0) { @@ -303,16 +390,37 @@ struct OnboardingWizardView: View { .foregroundStyle(.tint) .padding(.bottom, 20) - Text("Welcome") + Text("Connect Gateway") .font(.largeTitle.weight(.bold)) .padding(.bottom, 8) - Text("Connect to your OpenClaw gateway") + Text("Scan a QR code from your OpenClaw gateway or continue with manual setup.") .font(.subheadline) .foregroundStyle(.secondary) .multilineTextAlignment(.center) .padding(.horizontal, 32) + VStack(alignment: .leading, spacing: 8) { + Text("How to pair") + .font(.headline) + Text("In your OpenClaw chat, run") + .font(.footnote) + .foregroundStyle(.secondary) + Text("/pair qr") + .font(.system(.footnote, design: .monospaced).weight(.semibold)) + Text("Then scan the QR code here to connect this 
iPhone.") + .font(.footnote) + .foregroundStyle(.secondary) + } + .frame(maxWidth: .infinity, alignment: .leading) + .padding(16) + .background { + RoundedRectangle(cornerRadius: 18, style: .continuous) + .fill(Color(uiColor: .secondarySystemBackground)) + } + .padding(.horizontal, 24) + .padding(.top, 20) + Spacer() VStack(spacing: 12) { @@ -342,8 +450,7 @@ struct OnboardingWizardView: View { .foregroundStyle(.secondary) .multilineTextAlignment(.center) .padding(.horizontal, 24) - .padding(.horizontal, 24) - .padding(.bottom, 48) + .padding(.bottom, 48) } } @@ -536,7 +643,7 @@ struct OnboardingWizardView: View { Text( "Approve this device on the gateway.\n" + "1) `openclaw devices approve` (or `openclaw devices approve `)\n" - + "2) `/pair approve` in Telegram\n" + + "2) `/pair approve` in your OpenClaw chat\n" + "\(requestLine)\n" + "OpenClaw will also retry automatically when you return to this app.") } @@ -642,11 +749,17 @@ struct OnboardingWizardView: View { self.manualHost = link.host self.manualPort = link.port self.manualTLS = link.tls - if let token = link.token { + let trimmedBootstrapToken = link.bootstrapToken?.trimmingCharacters(in: .whitespacesAndNewlines) + self.saveGatewayBootstrapToken(trimmedBootstrapToken) + if let token = link.token?.trimmingCharacters(in: .whitespacesAndNewlines), !token.isEmpty { self.gatewayToken = token + } else if trimmedBootstrapToken?.isEmpty == false { + self.gatewayToken = "" } - if let password = link.password { + if let password = link.password?.trimmingCharacters(in: .whitespacesAndNewlines), !password.isEmpty { self.gatewayPassword = password + } else if trimmedBootstrapToken?.isEmpty == false { + self.gatewayPassword = "" } self.saveGatewayCredentials(token: self.gatewayToken, password: self.gatewayPassword) self.showQRScanner = false @@ -721,6 +834,12 @@ struct OnboardingWizardView: View { return nil } + private func advanceFromIntro() { + OnboardingStateStore.markFirstRunIntroSeen() + self.statusLine = "In your 
OpenClaw chat, run /pair qr, then scan the code here." + self.step = .welcome + } + private func navigateBack() { guard let target = self.step.previous else { return } self.connectingGatewayID = nil @@ -769,10 +888,8 @@ struct OnboardingWizardView: View { let hasSavedGateway = GatewaySettingsStore.loadLastGatewayConnection() != nil let hasToken = !self.gatewayToken.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty let hasPassword = !self.gatewayPassword.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty - if !self.didAutoPresentQR, !hasSavedGateway, !hasToken, !hasPassword { - self.didAutoPresentQR = true - self.statusLine = "No saved pairing found. Scan QR code to connect." - self.showQRScanner = true + if !hasSavedGateway, !hasToken, !hasPassword { + self.statusLine = "No saved pairing found. In your OpenClaw chat, run /pair qr, then scan the code here." } } @@ -794,6 +911,13 @@ struct OnboardingWizardView: View { GatewaySettingsStore.saveGatewayPassword(trimmedPassword, instanceId: trimmedInstanceId) } + private func saveGatewayBootstrapToken(_ token: String?) { + let trimmedInstanceId = self.instanceId.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmedInstanceId.isEmpty else { return } + let trimmedToken = token?.trimmingCharacters(in: .whitespacesAndNewlines) ?? "" + GatewaySettingsStore.saveGatewayBootstrapToken(trimmedToken, instanceId: trimmedInstanceId) + } + private func connectDiscoveredGateway(_ gateway: GatewayDiscoveryModel.DiscoveredGateway) async { self.connectingGatewayID = gateway.id self.issue = .none diff --git a/apps/ios/Sources/OpenClawApp.swift b/apps/ios/Sources/OpenClawApp.swift index c94b1209f8d..ae980b0216a 100644 --- a/apps/ios/Sources/OpenClawApp.swift +++ b/apps/ios/Sources/OpenClawApp.swift @@ -407,6 +407,13 @@ enum WatchPromptNotificationBridge { let granted = (try? await center.requestAuthorization(options: [.alert, .sound, .badge])) ?? 
false if !granted { return false } let updatedStatus = await self.notificationAuthorizationStatus(center: center) + if self.isAuthorizationStatusAllowed(updatedStatus) { + // Refresh APNs registration immediately after the first permission grant so the + // gateway can receive a push registration without requiring an app relaunch. + await MainActor.run { + UIApplication.shared.registerForRemoteNotifications() + } + } return self.isAuthorizationStatusAllowed(updatedStatus) case .denied: return false diff --git a/apps/ios/Sources/Push/PushBuildConfig.swift b/apps/ios/Sources/Push/PushBuildConfig.swift new file mode 100644 index 00000000000..d1665921552 --- /dev/null +++ b/apps/ios/Sources/Push/PushBuildConfig.swift @@ -0,0 +1,75 @@ +import Foundation + +enum PushTransportMode: String { + case direct + case relay +} + +enum PushDistributionMode: String { + case local + case official +} + +enum PushAPNsEnvironment: String { + case sandbox + case production +} + +struct PushBuildConfig { + let transport: PushTransportMode + let distribution: PushDistributionMode + let relayBaseURL: URL? + let apnsEnvironment: PushAPNsEnvironment + + static let current = PushBuildConfig() + + init(bundle: Bundle = .main) { + self.transport = Self.readEnum( + bundle: bundle, + key: "OpenClawPushTransport", + fallback: .direct) + self.distribution = Self.readEnum( + bundle: bundle, + key: "OpenClawPushDistribution", + fallback: .local) + self.apnsEnvironment = Self.readEnum( + bundle: bundle, + key: "OpenClawPushAPNsEnvironment", + fallback: Self.defaultAPNsEnvironment) + self.relayBaseURL = Self.readURL(bundle: bundle, key: "OpenClawPushRelayBaseURL") + } + + var usesRelay: Bool { + self.transport == .relay + } + + private static func readURL(bundle: Bundle, key: String) -> URL? { + guard let raw = bundle.object(forInfoDictionaryKey: key) as? 
String else { return nil } + let trimmed = raw.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmed.isEmpty else { return nil } + guard let components = URLComponents(string: trimmed), + components.scheme?.lowercased() == "https", + let host = components.host, + !host.isEmpty, + components.user == nil, + components.password == nil, + components.query == nil, + components.fragment == nil + else { + return nil + } + return components.url + } + + private static func readEnum( + bundle: Bundle, + key: String, + fallback: T) + -> T where T.RawValue == String { + guard let raw = bundle.object(forInfoDictionaryKey: key) as? String else { return fallback } + let trimmed = raw.trimmingCharacters(in: .whitespacesAndNewlines).lowercased() + return T(rawValue: trimmed) ?? fallback + } + + private static let defaultAPNsEnvironment: PushAPNsEnvironment = .sandbox +} diff --git a/apps/ios/Sources/Push/PushRegistrationManager.swift b/apps/ios/Sources/Push/PushRegistrationManager.swift new file mode 100644 index 00000000000..77f54f8d108 --- /dev/null +++ b/apps/ios/Sources/Push/PushRegistrationManager.swift @@ -0,0 +1,169 @@ +import CryptoKit +import Foundation + +private struct DirectGatewayPushRegistrationPayload: Encodable { + var transport: String = PushTransportMode.direct.rawValue + var token: String + var topic: String + var environment: String +} + +private struct RelayGatewayPushRegistrationPayload: Encodable { + var transport: String = PushTransportMode.relay.rawValue + var relayHandle: String + var sendGrant: String + var gatewayDeviceId: String + var installationId: String + var topic: String + var environment: String + var distribution: String + var tokenDebugSuffix: String? +} + +struct PushRelayGatewayIdentity: Codable { + var deviceId: String + var publicKey: String +} + +actor PushRegistrationManager { + private let buildConfig: PushBuildConfig + private let relayClient: PushRelayClient? 
+ + var usesRelayTransport: Bool { + self.buildConfig.transport == .relay + } + + init(buildConfig: PushBuildConfig = .current) { + self.buildConfig = buildConfig + self.relayClient = buildConfig.relayBaseURL.map { PushRelayClient(baseURL: $0) } + } + + func makeGatewayRegistrationPayload( + apnsTokenHex: String, + topic: String, + gatewayIdentity: PushRelayGatewayIdentity?) + async throws -> String { + switch self.buildConfig.transport { + case .direct: + return try Self.encodePayload( + DirectGatewayPushRegistrationPayload( + token: apnsTokenHex, + topic: topic, + environment: self.buildConfig.apnsEnvironment.rawValue)) + case .relay: + guard let gatewayIdentity else { + throw PushRelayError.relayMisconfigured("Missing gateway identity for relay registration") + } + return try await self.makeRelayPayload( + apnsTokenHex: apnsTokenHex, + topic: topic, + gatewayIdentity: gatewayIdentity) + } + } + + private func makeRelayPayload( + apnsTokenHex: String, + topic: String, + gatewayIdentity: PushRelayGatewayIdentity) + async throws -> String { + guard self.buildConfig.distribution == .official else { + throw PushRelayError.relayMisconfigured( + "Relay transport requires OpenClawPushDistribution=official") + } + guard self.buildConfig.apnsEnvironment == .production else { + throw PushRelayError.relayMisconfigured( + "Relay transport requires OpenClawPushAPNsEnvironment=production") + } + guard let relayClient = self.relayClient else { + throw PushRelayError.relayBaseURLMissing + } + guard let bundleId = Bundle.main.bundleIdentifier?.trimmingCharacters(in: .whitespacesAndNewlines), + !bundleId.isEmpty + else { + throw PushRelayError.relayMisconfigured("Missing bundle identifier for relay registration") + } + guard let installationId = GatewaySettingsStore.loadStableInstanceID()? 
+ .trimmingCharacters(in: .whitespacesAndNewlines), + !installationId.isEmpty + else { + throw PushRelayError.relayMisconfigured("Missing stable installation ID for relay registration") + } + + let tokenHashHex = Self.sha256Hex(apnsTokenHex) + let relayOrigin = relayClient.normalizedBaseURLString + if let stored = PushRelayRegistrationStore.loadRegistrationState(), + stored.installationId == installationId, + stored.gatewayDeviceId == gatewayIdentity.deviceId, + stored.relayOrigin == relayOrigin, + stored.lastAPNsTokenHashHex == tokenHashHex, + !Self.isExpired(stored.relayHandleExpiresAtMs) + { + return try Self.encodePayload( + RelayGatewayPushRegistrationPayload( + relayHandle: stored.relayHandle, + sendGrant: stored.sendGrant, + gatewayDeviceId: gatewayIdentity.deviceId, + installationId: installationId, + topic: topic, + environment: self.buildConfig.apnsEnvironment.rawValue, + distribution: self.buildConfig.distribution.rawValue, + tokenDebugSuffix: stored.tokenDebugSuffix)) + } + + let response = try await relayClient.register( + installationId: installationId, + bundleId: bundleId, + appVersion: DeviceInfoHelper.appVersion(), + environment: self.buildConfig.apnsEnvironment, + distribution: self.buildConfig.distribution, + apnsTokenHex: apnsTokenHex, + gatewayIdentity: gatewayIdentity) + let registrationState = PushRelayRegistrationStore.RegistrationState( + relayHandle: response.relayHandle, + sendGrant: response.sendGrant, + relayOrigin: relayOrigin, + gatewayDeviceId: gatewayIdentity.deviceId, + relayHandleExpiresAtMs: response.expiresAtMs, + tokenDebugSuffix: Self.normalizeTokenSuffix(response.tokenSuffix), + lastAPNsTokenHashHex: tokenHashHex, + installationId: installationId, + lastTransport: self.buildConfig.transport.rawValue) + _ = PushRelayRegistrationStore.saveRegistrationState(registrationState) + return try Self.encodePayload( + RelayGatewayPushRegistrationPayload( + relayHandle: response.relayHandle, + sendGrant: response.sendGrant, + 
gatewayDeviceId: gatewayIdentity.deviceId, + installationId: installationId, + topic: topic, + environment: self.buildConfig.apnsEnvironment.rawValue, + distribution: self.buildConfig.distribution.rawValue, + tokenDebugSuffix: registrationState.tokenDebugSuffix)) + } + + private static func isExpired(_ expiresAtMs: Int64?) -> Bool { + guard let expiresAtMs else { return true } + let nowMs = Int64(Date().timeIntervalSince1970 * 1000) + // Refresh shortly before expiry so reconnect-path republishes a live handle. + return expiresAtMs <= nowMs + 60_000 + } + + private static func sha256Hex(_ value: String) -> String { + let digest = SHA256.hash(data: Data(value.utf8)) + return digest.map { String(format: "%02x", $0) }.joined() + } + + private static func normalizeTokenSuffix(_ value: String?) -> String? { + guard let value else { return nil } + let trimmed = value.trimmingCharacters(in: .whitespacesAndNewlines).lowercased() + return trimmed.isEmpty ? nil : trimmed + } + + private static func encodePayload(_ payload: some Encodable) throws -> String { + let data = try JSONEncoder().encode(payload) + guard let json = String(data: data, encoding: .utf8) else { + throw PushRelayError.relayMisconfigured("Failed to encode push registration payload as UTF-8") + } + return json + } +} diff --git a/apps/ios/Sources/Push/PushRelayClient.swift b/apps/ios/Sources/Push/PushRelayClient.swift new file mode 100644 index 00000000000..07bb5caa3b7 --- /dev/null +++ b/apps/ios/Sources/Push/PushRelayClient.swift @@ -0,0 +1,349 @@ +import CryptoKit +import DeviceCheck +import Foundation +import StoreKit + +enum PushRelayError: LocalizedError { + case relayBaseURLMissing + case relayMisconfigured(String) + case invalidResponse(String) + case requestFailed(status: Int, message: String) + case unsupportedAppAttest + case missingReceipt + + var errorDescription: String? 
{ + switch self { + case .relayBaseURLMissing: + "Push relay base URL missing" + case let .relayMisconfigured(message): + message + case let .invalidResponse(message): + message + case let .requestFailed(status, message): + "Push relay request failed (\(status)): \(message)" + case .unsupportedAppAttest: + "App Attest unavailable on this device" + case .missingReceipt: + "App Store receipt missing after refresh" + } + } +} + +private struct PushRelayChallengeResponse: Decodable { + var challengeId: String + var challenge: String + var expiresAtMs: Int64 +} + +private struct PushRelayRegisterSignedPayload: Encodable { + var challengeId: String + var installationId: String + var bundleId: String + var environment: String + var distribution: String + var gateway: PushRelayGatewayIdentity + var appVersion: String + var apnsToken: String +} + +private struct PushRelayAppAttestPayload: Encodable { + var keyId: String + var attestationObject: String? + var assertion: String + var clientDataHash: String + var signedPayloadBase64: String +} + +private struct PushRelayReceiptPayload: Encodable { + var base64: String +} + +private struct PushRelayRegisterRequest: Encodable { + var challengeId: String + var installationId: String + var bundleId: String + var environment: String + var distribution: String + var gateway: PushRelayGatewayIdentity + var appVersion: String + var apnsToken: String + var appAttest: PushRelayAppAttestPayload + var receipt: PushRelayReceiptPayload +} + +struct PushRelayRegisterResponse: Decodable { + var relayHandle: String + var sendGrant: String + var expiresAtMs: Int64? + var tokenSuffix: String? + var status: String +} + +private struct RelayErrorResponse: Decodable { + var error: String? + var message: String? + var reason: String? +} + +private final class PushRelayReceiptRefreshCoordinator: NSObject, SKRequestDelegate { + private var continuation: CheckedContinuation? + private var activeRequest: SKReceiptRefreshRequest? 
+ + func refresh() async throws { + try await withCheckedThrowingContinuation { continuation in + self.continuation = continuation + let request = SKReceiptRefreshRequest() + self.activeRequest = request + request.delegate = self + request.start() + } + } + + func requestDidFinish(_ request: SKRequest) { + self.continuation?.resume(returning: ()) + self.continuation = nil + self.activeRequest = nil + } + + func request(_ request: SKRequest, didFailWithError error: Error) { + self.continuation?.resume(throwing: error) + self.continuation = nil + self.activeRequest = nil + } +} + +private struct PushRelayAppAttestProof { + var keyId: String + var attestationObject: String? + var assertion: String + var clientDataHash: String + var signedPayloadBase64: String +} + +private final class PushRelayAppAttestService { + func createProof(challenge: String, signedPayload: Data) async throws -> PushRelayAppAttestProof { + let service = DCAppAttestService.shared + guard service.isSupported else { + throw PushRelayError.unsupportedAppAttest + } + + let keyID = try await self.loadOrCreateKeyID(using: service) + let attestationObject = try await self.attestKeyIfNeeded( + service: service, + keyID: keyID, + challenge: challenge) + let signedPayloadHash = Data(SHA256.hash(data: signedPayload)) + let assertion = try await self.generateAssertion( + service: service, + keyID: keyID, + signedPayloadHash: signedPayloadHash) + + return PushRelayAppAttestProof( + keyId: keyID, + attestationObject: attestationObject, + assertion: assertion.base64EncodedString(), + clientDataHash: Self.base64URL(signedPayloadHash), + signedPayloadBase64: signedPayload.base64EncodedString()) + } + + private func loadOrCreateKeyID(using service: DCAppAttestService) async throws -> String { + if let existing = PushRelayRegistrationStore.loadAppAttestKeyID(), !existing.isEmpty { + return existing + } + let keyID = try await service.generateKey() + _ = PushRelayRegistrationStore.saveAppAttestKeyID(keyID) + return 
keyID + } + + private func attestKeyIfNeeded( + service: DCAppAttestService, + keyID: String, + challenge: String) + async throws -> String? { + if PushRelayRegistrationStore.loadAttestedKeyID() == keyID { + return nil + } + let challengeData = Data(challenge.utf8) + let clientDataHash = Data(SHA256.hash(data: challengeData)) + let attestation = try await service.attestKey(keyID, clientDataHash: clientDataHash) + // Apple treats App Attest key attestation as a one-time operation. Save the + // attested marker immediately so later receipt/network failures do not cause a + // permanently broken re-attestation loop on the same key. + _ = PushRelayRegistrationStore.saveAttestedKeyID(keyID) + return attestation.base64EncodedString() + } + + private func generateAssertion( + service: DCAppAttestService, + keyID: String, + signedPayloadHash: Data) + async throws -> Data { + do { + return try await service.generateAssertion(keyID, clientDataHash: signedPayloadHash) + } catch { + _ = PushRelayRegistrationStore.clearAppAttestKeyID() + _ = PushRelayRegistrationStore.clearAttestedKeyID() + throw error + } + } + + private static func base64URL(_ data: Data) -> String { + data.base64EncodedString() + .replacingOccurrences(of: "+", with: "-") + .replacingOccurrences(of: "/", with: "_") + .replacingOccurrences(of: "=", with: "") + } +} + +private final class PushRelayReceiptProvider { + func loadReceiptBase64() async throws -> String { + if let receipt = self.readReceiptData() { + return receipt.base64EncodedString() + } + let refreshCoordinator = PushRelayReceiptRefreshCoordinator() + try await refreshCoordinator.refresh() + if let refreshed = self.readReceiptData() { + return refreshed.base64EncodedString() + } + throw PushRelayError.missingReceipt + } + + private func readReceiptData() -> Data? { + guard let url = Bundle.main.appStoreReceiptURL else { return nil } + guard let data = try? 
Data(contentsOf: url), !data.isEmpty else { return nil } + return data + } +} + +// The client is constructed once and used behind PushRegistrationManager actor isolation. +final class PushRelayClient: @unchecked Sendable { + private let baseURL: URL + private let session: URLSession + private let jsonDecoder = JSONDecoder() + private let jsonEncoder = JSONEncoder() + private let appAttest = PushRelayAppAttestService() + private let receiptProvider = PushRelayReceiptProvider() + + init(baseURL: URL, session: URLSession = .shared) { + self.baseURL = baseURL + self.session = session + } + + var normalizedBaseURLString: String { + Self.normalizeBaseURLString(self.baseURL) + } + + func register( + installationId: String, + bundleId: String, + appVersion: String, + environment: PushAPNsEnvironment, + distribution: PushDistributionMode, + apnsTokenHex: String, + gatewayIdentity: PushRelayGatewayIdentity) + async throws -> PushRelayRegisterResponse { + let challenge = try await self.fetchChallenge() + let signedPayload = PushRelayRegisterSignedPayload( + challengeId: challenge.challengeId, + installationId: installationId, + bundleId: bundleId, + environment: environment.rawValue, + distribution: distribution.rawValue, + gateway: gatewayIdentity, + appVersion: appVersion, + apnsToken: apnsTokenHex) + let signedPayloadData = try self.jsonEncoder.encode(signedPayload) + let appAttest = try await self.appAttest.createProof( + challenge: challenge.challenge, + signedPayload: signedPayloadData) + let receiptBase64 = try await self.receiptProvider.loadReceiptBase64() + let requestBody = PushRelayRegisterRequest( + challengeId: signedPayload.challengeId, + installationId: signedPayload.installationId, + bundleId: signedPayload.bundleId, + environment: signedPayload.environment, + distribution: signedPayload.distribution, + gateway: signedPayload.gateway, + appVersion: signedPayload.appVersion, + apnsToken: signedPayload.apnsToken, + appAttest: PushRelayAppAttestPayload( + keyId: 
appAttest.keyId, + attestationObject: appAttest.attestationObject, + assertion: appAttest.assertion, + clientDataHash: appAttest.clientDataHash, + signedPayloadBase64: appAttest.signedPayloadBase64), + receipt: PushRelayReceiptPayload(base64: receiptBase64)) + + let endpoint = self.baseURL.appending(path: "v1/push/register") + var request = URLRequest(url: endpoint) + request.httpMethod = "POST" + request.timeoutInterval = 20 + request.setValue("application/json", forHTTPHeaderField: "Content-Type") + request.httpBody = try self.jsonEncoder.encode(requestBody) + + let (data, response) = try await self.session.data(for: request) + let status = Self.statusCode(from: response) + guard (200..<300).contains(status) else { + if status == 401 { + // If the relay rejects registration, drop local App Attest state so the next + // attempt re-attests instead of getting stuck without an attestation object. + _ = PushRelayRegistrationStore.clearAppAttestKeyID() + _ = PushRelayRegistrationStore.clearAttestedKeyID() + } + throw PushRelayError.requestFailed( + status: status, + message: Self.decodeErrorMessage(data: data)) + } + let decoded = try self.decode(PushRelayRegisterResponse.self, from: data) + return decoded + } + + private func fetchChallenge() async throws -> PushRelayChallengeResponse { + let endpoint = self.baseURL.appending(path: "v1/push/challenge") + var request = URLRequest(url: endpoint) + request.httpMethod = "POST" + request.timeoutInterval = 10 + request.setValue("application/json", forHTTPHeaderField: "Content-Type") + request.httpBody = Data("{}".utf8) + + let (data, response) = try await self.session.data(for: request) + let status = Self.statusCode(from: response) + guard (200..<300).contains(status) else { + throw PushRelayError.requestFailed( + status: status, + message: Self.decodeErrorMessage(data: data)) + } + return try self.decode(PushRelayChallengeResponse.self, from: data) + } + + private func decode(_ type: T.Type, from data: Data) throws -> T { 
+ do { + return try self.jsonDecoder.decode(type, from: data) + } catch { + throw PushRelayError.invalidResponse(error.localizedDescription) + } + } + + private static func statusCode(from response: URLResponse) -> Int { + (response as? HTTPURLResponse)?.statusCode ?? 0 + } + + private static func normalizeBaseURLString(_ url: URL) -> String { + var absolute = url.absoluteString + while absolute.hasSuffix("/") { + absolute.removeLast() + } + return absolute + } + + private static func decodeErrorMessage(data: Data) -> String { + if let decoded = try? JSONDecoder().decode(RelayErrorResponse.self, from: data) { + let message = decoded.message ?? decoded.reason ?? decoded.error ?? "" + if !message.isEmpty { + return message + } + } + let raw = String(data: data, encoding: .utf8)?.trimmingCharacters(in: .whitespacesAndNewlines) ?? "" + return raw.isEmpty ? "unknown relay error" : raw + } +} diff --git a/apps/ios/Sources/Push/PushRelayKeychainStore.swift b/apps/ios/Sources/Push/PushRelayKeychainStore.swift new file mode 100644 index 00000000000..4d7df09cd14 --- /dev/null +++ b/apps/ios/Sources/Push/PushRelayKeychainStore.swift @@ -0,0 +1,112 @@ +import Foundation + +private struct StoredPushRelayRegistrationState: Codable { + var relayHandle: String + var sendGrant: String + var relayOrigin: String? + var gatewayDeviceId: String + var relayHandleExpiresAtMs: Int64? + var tokenDebugSuffix: String? + var lastAPNsTokenHashHex: String + var installationId: String + var lastTransport: String +} + +enum PushRelayRegistrationStore { + private static let service = "ai.openclaw.pushrelay" + private static let registrationStateAccount = "registration-state" + private static let appAttestKeyIDAccount = "app-attest-key-id" + private static let appAttestedKeyIDAccount = "app-attested-key-id" + + struct RegistrationState: Codable { + var relayHandle: String + var sendGrant: String + var relayOrigin: String? + var gatewayDeviceId: String + var relayHandleExpiresAtMs: Int64? 
+ var tokenDebugSuffix: String? + var lastAPNsTokenHashHex: String + var installationId: String + var lastTransport: String + } + + static func loadRegistrationState() -> RegistrationState? { + guard let raw = KeychainStore.loadString( + service: self.service, + account: self.registrationStateAccount), + let data = raw.data(using: .utf8), + let decoded = try? JSONDecoder().decode(StoredPushRelayRegistrationState.self, from: data) + else { + return nil + } + return RegistrationState( + relayHandle: decoded.relayHandle, + sendGrant: decoded.sendGrant, + relayOrigin: decoded.relayOrigin, + gatewayDeviceId: decoded.gatewayDeviceId, + relayHandleExpiresAtMs: decoded.relayHandleExpiresAtMs, + tokenDebugSuffix: decoded.tokenDebugSuffix, + lastAPNsTokenHashHex: decoded.lastAPNsTokenHashHex, + installationId: decoded.installationId, + lastTransport: decoded.lastTransport) + } + + @discardableResult + static func saveRegistrationState(_ state: RegistrationState) -> Bool { + let stored = StoredPushRelayRegistrationState( + relayHandle: state.relayHandle, + sendGrant: state.sendGrant, + relayOrigin: state.relayOrigin, + gatewayDeviceId: state.gatewayDeviceId, + relayHandleExpiresAtMs: state.relayHandleExpiresAtMs, + tokenDebugSuffix: state.tokenDebugSuffix, + lastAPNsTokenHashHex: state.lastAPNsTokenHashHex, + installationId: state.installationId, + lastTransport: state.lastTransport) + guard let data = try? JSONEncoder().encode(stored), + let raw = String(data: data, encoding: .utf8) + else { + return false + } + return KeychainStore.saveString(raw, service: self.service, account: self.registrationStateAccount) + } + + @discardableResult + static func clearRegistrationState() -> Bool { + KeychainStore.delete(service: self.service, account: self.registrationStateAccount) + } + + static func loadAppAttestKeyID() -> String? { + let value = KeychainStore.loadString(service: self.service, account: self.appAttestKeyIDAccount)? 
+ .trimmingCharacters(in: .whitespacesAndNewlines) + if value?.isEmpty == false { return value } + return nil + } + + @discardableResult + static func saveAppAttestKeyID(_ keyID: String) -> Bool { + KeychainStore.saveString(keyID, service: self.service, account: self.appAttestKeyIDAccount) + } + + @discardableResult + static func clearAppAttestKeyID() -> Bool { + KeychainStore.delete(service: self.service, account: self.appAttestKeyIDAccount) + } + + static func loadAttestedKeyID() -> String? { + let value = KeychainStore.loadString(service: self.service, account: self.appAttestedKeyIDAccount)? + .trimmingCharacters(in: .whitespacesAndNewlines) + if value?.isEmpty == false { return value } + return nil + } + + @discardableResult + static func saveAttestedKeyID(_ keyID: String) -> Bool { + KeychainStore.saveString(keyID, service: self.service, account: self.appAttestedKeyIDAccount) + } + + @discardableResult + static func clearAttestedKeyID() -> Bool { + KeychainStore.delete(service: self.service, account: self.appAttestedKeyIDAccount) + } +} diff --git a/apps/ios/Sources/RootCanvas.swift b/apps/ios/Sources/RootCanvas.swift index 1eb8459a642..3a078f271c4 100644 --- a/apps/ios/Sources/RootCanvas.swift +++ b/apps/ios/Sources/RootCanvas.swift @@ -1,5 +1,6 @@ import SwiftUI import UIKit +import OpenClawProtocol struct RootCanvas: View { @Environment(NodeAppModel.self) private var appModel @@ -137,16 +138,33 @@ struct RootCanvas: View { .environment(self.gatewayController) } .onAppear { self.updateIdleTimer() } + .onAppear { self.updateHomeCanvasState() } .onAppear { self.evaluateOnboardingPresentation(force: false) } .onAppear { self.maybeAutoOpenSettings() } .onChange(of: self.preventSleep) { _, _ in self.updateIdleTimer() } - .onChange(of: self.scenePhase) { _, _ in self.updateIdleTimer() } + .onChange(of: self.scenePhase) { _, newValue in + self.updateIdleTimer() + self.updateHomeCanvasState() + guard newValue == .active else { return } + Task { + await 
self.appModel.refreshGatewayOverviewIfConnected() + await MainActor.run { + self.updateHomeCanvasState() + } + } + } .onAppear { self.maybeShowQuickSetup() } .onChange(of: self.gatewayController.gateways.count) { _, _ in self.maybeShowQuickSetup() } .onAppear { self.updateCanvasDebugStatus() } .onChange(of: self.canvasDebugStatusEnabled) { _, _ in self.updateCanvasDebugStatus() } - .onChange(of: self.appModel.gatewayStatusText) { _, _ in self.updateCanvasDebugStatus() } - .onChange(of: self.appModel.gatewayServerName) { _, _ in self.updateCanvasDebugStatus() } + .onChange(of: self.appModel.gatewayStatusText) { _, _ in + self.updateCanvasDebugStatus() + self.updateHomeCanvasState() + } + .onChange(of: self.appModel.gatewayServerName) { _, _ in + self.updateCanvasDebugStatus() + self.updateHomeCanvasState() + } .onChange(of: self.appModel.gatewayServerName) { _, newValue in if newValue != nil { self.showOnboarding = false @@ -155,7 +173,13 @@ struct RootCanvas: View { .onChange(of: self.onboardingRequestID) { _, _ in self.evaluateOnboardingPresentation(force: true) } - .onChange(of: self.appModel.gatewayRemoteAddress) { _, _ in self.updateCanvasDebugStatus() } + .onChange(of: self.appModel.gatewayRemoteAddress) { _, _ in + self.updateCanvasDebugStatus() + self.updateHomeCanvasState() + } + .onChange(of: self.appModel.homeCanvasRevision) { _, _ in + self.updateHomeCanvasState() + } .onChange(of: self.appModel.gatewayServerName) { _, newValue in if newValue != nil { self.onboardingComplete = true @@ -209,6 +233,134 @@ struct RootCanvas: View { self.appModel.screen.updateDebugStatus(title: title, subtitle: subtitle) } + private func updateHomeCanvasState() { + let payload = self.makeHomeCanvasPayload() + guard let data = try? 
JSONEncoder().encode(payload), + let json = String(data: data, encoding: .utf8) + else { + self.appModel.screen.updateHomeCanvasState(json: nil) + return + } + self.appModel.screen.updateHomeCanvasState(json: json) + } + + private func makeHomeCanvasPayload() -> HomeCanvasPayload { + let gatewayName = self.normalized(self.appModel.gatewayServerName) + let gatewayAddress = self.normalized(self.appModel.gatewayRemoteAddress) + let gatewayLabel = gatewayName ?? gatewayAddress ?? "Gateway" + let activeAgentID = self.resolveActiveAgentID() + let agents = self.homeCanvasAgents(activeAgentID: activeAgentID) + + switch self.gatewayStatus { + case .connected: + return HomeCanvasPayload( + gatewayState: "connected", + eyebrow: "Connected to \(gatewayLabel)", + title: "Your agents are ready", + subtitle: + "This phone stays dormant until the gateway needs it, then wakes, syncs, and goes back to sleep.", + gatewayLabel: gatewayLabel, + activeAgentName: self.appModel.activeAgentName, + activeAgentBadge: agents.first(where: { $0.isActive })?.badge ?? "OC", + activeAgentCaption: "Selected on this phone", + agentCount: agents.count, + agents: Array(agents.prefix(6)), + footer: "The overview refreshes on reconnect and when the app returns to foreground.") + case .connecting: + return HomeCanvasPayload( + gatewayState: "connecting", + eyebrow: "Reconnecting", + title: "OpenClaw is syncing back up", + subtitle: + "The gateway session is coming back online. " + + "Agent shortcuts should settle automatically in a moment.", + gatewayLabel: gatewayLabel, + activeAgentName: self.appModel.activeAgentName, + activeAgentBadge: "OC", + activeAgentCaption: "Gateway session in progress", + agentCount: agents.count, + agents: Array(agents.prefix(4)), + footer: "If the gateway is reachable, reconnect should complete without intervention.") + case .error, .disconnected: + return HomeCanvasPayload( + gatewayState: self.gatewayStatus == .error ? 
"error" : "offline", + eyebrow: "Welcome to OpenClaw", + title: "Your phone stays quiet until it is needed", + subtitle: + "Pair this device to your gateway to wake it only for real work, " + + "keep a live agent overview handy, and avoid battery-draining background loops.", + gatewayLabel: gatewayLabel, + activeAgentName: "Main", + activeAgentBadge: "OC", + activeAgentCaption: "Connect to load your agents", + agentCount: agents.count, + agents: Array(agents.prefix(4)), + footer: + "When connected, the gateway can wake the phone with a silent push " + + "instead of holding an always-on session.") + } + } + + private func resolveActiveAgentID() -> String { + let selected = self.normalized(self.appModel.selectedAgentId) ?? "" + if !selected.isEmpty { + return selected + } + return self.resolveDefaultAgentID() + } + + private func resolveDefaultAgentID() -> String { + self.normalized(self.appModel.gatewayDefaultAgentId) ?? "" + } + + private func homeCanvasAgents(activeAgentID: String) -> [HomeCanvasAgentCard] { + let defaultAgentID = self.resolveDefaultAgentID() + let cards = self.appModel.gatewayAgents.map { agent -> HomeCanvasAgentCard in + let isActive = !activeAgentID.isEmpty && agent.id == activeAgentID + let isDefault = !defaultAgentID.isEmpty && agent.id == defaultAgentID + return HomeCanvasAgentCard( + id: agent.id, + name: self.homeCanvasName(for: agent), + badge: self.homeCanvasBadge(for: agent), + caption: isActive ? "Active on this phone" : (isDefault ? "Default agent" : "Ready"), + isActive: isActive) + } + + return cards.sorted { lhs, rhs in + if lhs.isActive != rhs.isActive { + return lhs.isActive + } + return lhs.name.localizedCaseInsensitiveCompare(rhs.name) == .orderedAscending + } + } + + private func homeCanvasName(for agent: AgentSummary) -> String { + self.normalized(agent.name) ?? agent.id + } + + private func homeCanvasBadge(for agent: AgentSummary) -> String { + if let identity = agent.identity, + let emoji = identity["emoji"]?.value as? 
String, + let normalizedEmoji = self.normalized(emoji) + { + return normalizedEmoji + } + let words = self.homeCanvasName(for: agent) + .split(whereSeparator: { $0.isWhitespace || $0 == "-" || $0 == "_" }) + .prefix(2) + let initials = words.compactMap { $0.first }.map(String.init).joined() + if !initials.isEmpty { + return initials.uppercased() + } + return "OC" + } + + private func normalized(_ value: String?) -> String? { + guard let value else { return nil } + let trimmed = value.trimmingCharacters(in: .whitespacesAndNewlines) + return trimmed.isEmpty ? nil : trimmed + } + private func evaluateOnboardingPresentation(force: Bool) { if force { self.onboardingAllowSkip = true @@ -274,6 +426,28 @@ struct RootCanvas: View { } } +private struct HomeCanvasPayload: Codable { + var gatewayState: String + var eyebrow: String + var title: String + var subtitle: String + var gatewayLabel: String + var activeAgentName: String + var activeAgentBadge: String + var activeAgentCaption: String + var agentCount: Int + var agents: [HomeCanvasAgentCard] + var footer: String +} + +private struct HomeCanvasAgentCard: Codable { + var id: String + var name: String + var badge: String + var caption: String + var isActive: Bool +} + private struct CanvasContent: View { @Environment(NodeAppModel.self) private var appModel @AppStorage("talk.enabled") private var talkEnabled: Bool = false @@ -301,53 +475,33 @@ private struct CanvasContent: View { .transition(.opacity) } } - .overlay(alignment: .topLeading) { - HStack(alignment: .top, spacing: 8) { - StatusPill( - gateway: self.gatewayStatus, - voiceWakeEnabled: self.voiceWakeEnabled, - activity: self.statusActivity, - brighten: self.brightenButtons, - onTap: { - if self.gatewayStatus == .connected { - self.showGatewayActions = true - } else { - self.openSettings() - } - }) - .layoutPriority(1) - - Spacer(minLength: 8) - - HStack(spacing: 8) { - OverlayButton(systemImage: "text.bubble.fill", brighten: self.brightenButtons) { - 
self.openChat() - } - .accessibilityLabel("Chat") - - if self.talkButtonEnabled { - // Keep Talk mode near status controls while freeing right-side screen real estate. - OverlayButton( - systemImage: self.talkActive ? "waveform.circle.fill" : "waveform.circle", - brighten: self.brightenButtons, - tint: self.appModel.seamColor, - isActive: self.talkActive) - { - let next = !self.talkActive - self.talkEnabled = next - self.appModel.setTalkEnabled(next) - } - .accessibilityLabel("Talk Mode") - } - - OverlayButton(systemImage: "gearshape.fill", brighten: self.brightenButtons) { + .safeAreaInset(edge: .bottom, spacing: 0) { + HomeToolbar( + gateway: self.gatewayStatus, + voiceWakeEnabled: self.voiceWakeEnabled, + activity: self.statusActivity, + brighten: self.brightenButtons, + talkButtonEnabled: self.talkButtonEnabled, + talkActive: self.talkActive, + talkTint: self.appModel.seamColor, + onStatusTap: { + if self.gatewayStatus == .connected { + self.showGatewayActions = true + } else { self.openSettings() } - .accessibilityLabel("Settings") - } - } - .padding(.horizontal, 10) - .safeAreaPadding(.top, 10) + }, + onChatTap: { + self.openChat() + }, + onTalkTap: { + let next = !self.talkActive + self.talkEnabled = next + self.appModel.setTalkEnabled(next) + }, + onSettingsTap: { + self.openSettings() + }) } .overlay(alignment: .topLeading) { if let voiceWakeToastText, !voiceWakeToastText.isEmpty { @@ -380,63 +534,6 @@ private struct CanvasContent: View { } } -private struct OverlayButton: View { - let systemImage: String - let brighten: Bool - var tint: Color? - var isActive: Bool = false - let action: () -> Void - - var body: some View { - Button(action: self.action) { - Image(systemName: self.systemImage) - .font(.system(size: 16, weight: .semibold)) - .foregroundStyle(self.isActive ? (self.tint ?? 
.primary) : .primary) - .padding(10) - .background { - RoundedRectangle(cornerRadius: 12, style: .continuous) - .fill(.ultraThinMaterial) - .overlay { - RoundedRectangle(cornerRadius: 12, style: .continuous) - .fill( - LinearGradient( - colors: [ - .white.opacity(self.brighten ? 0.26 : 0.18), - .white.opacity(self.brighten ? 0.08 : 0.04), - .clear, - ], - startPoint: .topLeading, - endPoint: .bottomTrailing)) - .blendMode(.overlay) - } - .overlay { - if let tint { - RoundedRectangle(cornerRadius: 12, style: .continuous) - .fill( - LinearGradient( - colors: [ - tint.opacity(self.isActive ? 0.22 : 0.14), - tint.opacity(self.isActive ? 0.10 : 0.06), - .clear, - ], - startPoint: .topLeading, - endPoint: .bottomTrailing)) - .blendMode(.overlay) - } - } - .overlay { - RoundedRectangle(cornerRadius: 12, style: .continuous) - .strokeBorder( - (self.tint ?? .white).opacity(self.isActive ? 0.34 : (self.brighten ? 0.24 : 0.18)), - lineWidth: self.isActive ? 0.7 : 0.5) - } - .shadow(color: .black.opacity(0.35), radius: 12, y: 6) - } - } - .buttonStyle(.plain) - } -} - private struct CameraFlashOverlay: View { var nonce: Int diff --git a/apps/ios/Sources/Screen/ScreenController.swift b/apps/ios/Sources/Screen/ScreenController.swift index 5c945033551..4c9f3ff5085 100644 --- a/apps/ios/Sources/Screen/ScreenController.swift +++ b/apps/ios/Sources/Screen/ScreenController.swift @@ -20,6 +20,7 @@ final class ScreenController { private var debugStatusEnabled: Bool = false private var debugStatusTitle: String? private var debugStatusSubtitle: String? + private var homeCanvasStateJSON: String? init() { self.reload() @@ -94,6 +95,26 @@ final class ScreenController { subtitle: self.debugStatusSubtitle) } + func updateHomeCanvasState(json: String?) { + self.homeCanvasStateJSON = json + self.applyHomeCanvasStateIfNeeded() + } + + func applyHomeCanvasStateIfNeeded() { + guard let webView = self.activeWebView else { return } + let payload = self.homeCanvasStateJSON ?? 
"null" + let js = """ + (() => { + try { + const api = globalThis.__openclaw; + if (!api || typeof api.renderHome !== 'function') return; + api.renderHome(\(payload)); + } catch (_) {} + })() + """ + webView.evaluateJavaScript(js) { _, _ in } + } + func waitForA2UIReady(timeoutMs: Int) async -> Bool { let clock = ContinuousClock() let deadline = clock.now.advanced(by: .milliseconds(timeoutMs)) @@ -191,6 +212,7 @@ final class ScreenController { self.activeWebView = webView self.reload() self.applyDebugStatusIfNeeded() + self.applyHomeCanvasStateIfNeeded() } func detachWebView(_ webView: WKWebView) { diff --git a/apps/ios/Sources/Screen/ScreenTab.swift b/apps/ios/Sources/Screen/ScreenTab.swift index 16b5f857496..deabd38331d 100644 --- a/apps/ios/Sources/Screen/ScreenTab.swift +++ b/apps/ios/Sources/Screen/ScreenTab.swift @@ -7,7 +7,7 @@ struct ScreenTab: View { var body: some View { ZStack(alignment: .top) { ScreenWebView(controller: self.appModel.screen) - .ignoresSafeArea() + .ignoresSafeArea(.container, edges: [.top, .leading, .trailing]) .overlay(alignment: .top) { if let errorText = self.appModel.screen.errorText, self.appModel.gatewayServerName == nil diff --git a/apps/ios/Sources/Screen/ScreenWebView.swift b/apps/ios/Sources/Screen/ScreenWebView.swift index a30d78cbd00..61f9af6515c 100644 --- a/apps/ios/Sources/Screen/ScreenWebView.swift +++ b/apps/ios/Sources/Screen/ScreenWebView.swift @@ -161,6 +161,7 @@ private final class ScreenNavigationDelegate: NSObject, WKNavigationDelegate { func webView(_: WKWebView, didFinish _: WKNavigation?) 
{ self.controller?.errorText = nil self.controller?.applyDebugStatusIfNeeded() + self.controller?.applyHomeCanvasStateIfNeeded() } func webView(_: WKWebView, didFail _: WKNavigation?, withError error: any Error) { diff --git a/apps/ios/Sources/Settings/SettingsTab.swift b/apps/ios/Sources/Settings/SettingsTab.swift index 7186c7205b5..6df8c1ec510 100644 --- a/apps/ios/Sources/Settings/SettingsTab.swift +++ b/apps/ios/Sources/Settings/SettingsTab.swift @@ -65,10 +65,10 @@ struct SettingsTab: View { DisclosureGroup(isExpanded: self.$gatewayExpanded) { if !self.isGatewayConnected { Text( - "1. Open Telegram and message your bot: /pair\n" + "1. Open a chat with your OpenClaw agent and send /pair\n" + "2. Copy the setup code it returns\n" + "3. Paste here and tap Connect\n" - + "4. Back in Telegram, run /pair approve") + + "4. Back in that chat, run /pair approve") .font(.footnote) .foregroundStyle(.secondary) @@ -340,9 +340,9 @@ struct SettingsTab: View { .foregroundStyle(.secondary) } self.featureToggle( - "Show Talk Button", + "Show Talk Control", isOn: self.$talkButtonEnabled, - help: "Shows the floating Talk button in the main interface.") + help: "Shows the Talk control in the main toolbar.") TextField("Default Share Instruction", text: self.$defaultShareInstruction, axis: .vertical) .lineLimit(2 ... 6) .textInputAutocapitalization(.sentences) @@ -767,12 +767,22 @@ struct SettingsTab: View { } let trimmedInstanceId = self.instanceId.trimmingCharacters(in: .whitespacesAndNewlines) + let trimmedBootstrapToken = + payload.bootstrapToken?.trimmingCharacters(in: .whitespacesAndNewlines) ?? 
"" + if !trimmedInstanceId.isEmpty { + GatewaySettingsStore.saveGatewayBootstrapToken(trimmedBootstrapToken, instanceId: trimmedInstanceId) + } if let token = payload.token, !token.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty { let trimmedToken = token.trimmingCharacters(in: .whitespacesAndNewlines) self.gatewayToken = trimmedToken if !trimmedInstanceId.isEmpty { GatewaySettingsStore.saveGatewayToken(trimmedToken, instanceId: trimmedInstanceId) } + } else if !trimmedBootstrapToken.isEmpty { + self.gatewayToken = "" + if !trimmedInstanceId.isEmpty { + GatewaySettingsStore.saveGatewayToken("", instanceId: trimmedInstanceId) + } } if let password = payload.password, !password.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty { let trimmedPassword = password.trimmingCharacters(in: .whitespacesAndNewlines) @@ -780,6 +790,11 @@ struct SettingsTab: View { if !trimmedInstanceId.isEmpty { GatewaySettingsStore.saveGatewayPassword(trimmedPassword, instanceId: trimmedInstanceId) } + } else if !trimmedBootstrapToken.isEmpty { + self.gatewayPassword = "" + if !trimmedInstanceId.isEmpty { + GatewaySettingsStore.saveGatewayPassword("", instanceId: trimmedInstanceId) + } } return true @@ -896,7 +911,7 @@ struct SettingsTab: View { guard !trimmed.isEmpty else { return nil } let lower = trimmed.lowercased() if lower.contains("pairing required") { - return "Pairing required. Go back to Telegram and run /pair approve, then tap Connect again." + return "Pairing required. Go back to your OpenClaw chat and run /pair approve, then tap Connect again." } if lower.contains("device nonce required") || lower.contains("device nonce mismatch") { return "Secure handshake failed. Make sure Tailscale is connected, then tap Connect again." @@ -993,6 +1008,7 @@ struct SettingsTab: View { // Reset onboarding state + clear saved gateway connection (the two things RootCanvas checks). 
GatewaySettingsStore.clearLastGatewayConnection() + OnboardingStateStore.reset() // RootCanvas also short-circuits onboarding when these are true. self.onboardingComplete = false diff --git a/apps/ios/Sources/Status/StatusPill.swift b/apps/ios/Sources/Status/StatusPill.swift index a723ce5eb39..d6f94185b40 100644 --- a/apps/ios/Sources/Status/StatusPill.swift +++ b/apps/ios/Sources/Status/StatusPill.swift @@ -38,6 +38,7 @@ struct StatusPill: View { var gateway: GatewayState var voiceWakeEnabled: Bool var activity: Activity? + var compact: Bool = false var brighten: Bool = false var onTap: () -> Void @@ -45,11 +46,11 @@ struct StatusPill: View { var body: some View { Button(action: self.onTap) { - HStack(spacing: 10) { - HStack(spacing: 8) { + HStack(spacing: self.compact ? 8 : 10) { + HStack(spacing: self.compact ? 6 : 8) { Circle() .fill(self.gateway.color) - .frame(width: 9, height: 9) + .frame(width: self.compact ? 8 : 9, height: self.compact ? 8 : 9) .scaleEffect( self.gateway == .connecting && !self.reduceMotion ? (self.pulse ? 1.15 : 0.85) @@ -58,34 +59,38 @@ struct StatusPill: View { .opacity(self.gateway == .connecting && !self.reduceMotion ? (self.pulse ? 1.0 : 0.6) : 1.0) Text(self.gateway.title) - .font(.subheadline.weight(.semibold)) + .font((self.compact ? Font.footnote : Font.subheadline).weight(.semibold)) .foregroundStyle(.primary) } - Divider() - .frame(height: 14) - .opacity(0.35) - if let activity { - HStack(spacing: 6) { + if !self.compact { + Divider() + .frame(height: 14) + .opacity(0.35) + } + + HStack(spacing: self.compact ? 4 : 6) { Image(systemName: activity.systemImage) - .font(.subheadline.weight(.semibold)) + .font((self.compact ? Font.footnote : Font.subheadline).weight(.semibold)) .foregroundStyle(activity.tint ?? 
.primary) - Text(activity.title) - .font(.subheadline.weight(.semibold)) - .foregroundStyle(.primary) - .lineLimit(1) + if !self.compact { + Text(activity.title) + .font(.subheadline.weight(.semibold)) + .foregroundStyle(.primary) + .lineLimit(1) + } } .transition(.opacity.combined(with: .move(edge: .top))) } else { Image(systemName: self.voiceWakeEnabled ? "mic.fill" : "mic.slash") - .font(.subheadline.weight(.semibold)) + .font((self.compact ? Font.footnote : Font.subheadline).weight(.semibold)) .foregroundStyle(self.voiceWakeEnabled ? .primary : .secondary) .accessibilityLabel(self.voiceWakeEnabled ? "Voice Wake enabled" : "Voice Wake disabled") .transition(.opacity.combined(with: .move(edge: .top))) } } - .statusGlassCard(brighten: self.brighten, verticalPadding: 8) + .statusGlassCard(brighten: self.brighten, verticalPadding: self.compact ? 6 : 8) } .buttonStyle(.plain) .accessibilityLabel("Connection Status") diff --git a/apps/ios/Tests/DeepLinkParserTests.swift b/apps/ios/Tests/DeepLinkParserTests.swift index 7f24aa3e34e..bac3288add1 100644 --- a/apps/ios/Tests/DeepLinkParserTests.swift +++ b/apps/ios/Tests/DeepLinkParserTests.swift @@ -86,7 +86,13 @@ private func agentAction( string: "openclaw://gateway?host=openclaw.local&port=18789&tls=1&token=abc&password=def")! 
#expect( DeepLinkParser.parse(url) == .gateway( - .init(host: "openclaw.local", port: 18789, tls: true, token: "abc", password: "def"))) + .init( + host: "openclaw.local", + port: 18789, + tls: true, + bootstrapToken: nil, + token: "abc", + password: "def"))) } @Test func parseGatewayLinkRejectsInsecureNonLoopbackWs() { @@ -102,14 +108,15 @@ private func agentAction( } @Test func parseGatewaySetupCodeParsesBase64UrlPayload() { - let payload = #"{"url":"wss://gateway.example.com:443","token":"tok","password":"pw"}"# + let payload = #"{"url":"wss://gateway.example.com:443","bootstrapToken":"tok","password":"pw"}"# let link = GatewayConnectDeepLink.fromSetupCode(setupCode(from: payload)) #expect(link == .init( host: "gateway.example.com", port: 443, tls: true, - token: "tok", + bootstrapToken: "tok", + token: nil, password: "pw")) } @@ -118,38 +125,40 @@ private func agentAction( } @Test func parseGatewaySetupCodeDefaultsTo443ForWssWithoutPort() { - let payload = #"{"url":"wss://gateway.example.com","token":"tok"}"# + let payload = #"{"url":"wss://gateway.example.com","bootstrapToken":"tok"}"# let link = GatewayConnectDeepLink.fromSetupCode(setupCode(from: payload)) #expect(link == .init( host: "gateway.example.com", port: 443, tls: true, - token: "tok", + bootstrapToken: "tok", + token: nil, password: nil)) } @Test func parseGatewaySetupCodeRejectsInsecureNonLoopbackWs() { - let payload = #"{"url":"ws://attacker.example:18789","token":"tok"}"# + let payload = #"{"url":"ws://attacker.example:18789","bootstrapToken":"tok"}"# let link = GatewayConnectDeepLink.fromSetupCode(setupCode(from: payload)) #expect(link == nil) } @Test func parseGatewaySetupCodeRejectsInsecurePrefixBypassHost() { - let payload = #"{"url":"ws://127.attacker.example:18789","token":"tok"}"# + let payload = #"{"url":"ws://127.attacker.example:18789","bootstrapToken":"tok"}"# let link = GatewayConnectDeepLink.fromSetupCode(setupCode(from: payload)) #expect(link == nil) } @Test func 
parseGatewaySetupCodeAllowsLoopbackWs() { - let payload = #"{"url":"ws://127.0.0.1:18789","token":"tok"}"# + let payload = #"{"url":"ws://127.0.0.1:18789","bootstrapToken":"tok"}"# let link = GatewayConnectDeepLink.fromSetupCode(setupCode(from: payload)) #expect(link == .init( host: "127.0.0.1", port: 18789, tls: false, - token: "tok", + bootstrapToken: "tok", + token: nil, password: nil)) } } diff --git a/apps/ios/Tests/IOSGatewayChatTransportTests.swift b/apps/ios/Tests/IOSGatewayChatTransportTests.swift index f49f242ff24..42526dd21c4 100644 --- a/apps/ios/Tests/IOSGatewayChatTransportTests.swift +++ b/apps/ios/Tests/IOSGatewayChatTransportTests.swift @@ -26,5 +26,10 @@ import Testing _ = try await transport.requestHealth(timeoutMs: 250) Issue.record("Expected requestHealth to throw when gateway not connected") } catch {} + + do { + try await transport.resetSession(sessionKey: "node-test") + Issue.record("Expected resetSession to throw when gateway not connected") + } catch {} } } diff --git a/apps/ios/Tests/Info.plist b/apps/ios/Tests/Info.plist index 46e3fb97eb1..5bcf88ff5ad 100644 --- a/apps/ios/Tests/Info.plist +++ b/apps/ios/Tests/Info.plist @@ -17,8 +17,8 @@ CFBundlePackageType BNDL CFBundleShortVersionString - 2026.3.9 + $(OPENCLAW_MARKETING_VERSION) CFBundleVersion - 20260308 + $(OPENCLAW_BUILD_VERSION) diff --git a/apps/ios/Tests/NodeAppModelInvokeTests.swift b/apps/ios/Tests/NodeAppModelInvokeTests.swift index 7413b0295f9..d2ec7039ad7 100644 --- a/apps/ios/Tests/NodeAppModelInvokeTests.swift +++ b/apps/ios/Tests/NodeAppModelInvokeTests.swift @@ -83,16 +83,16 @@ private final class MockWatchMessagingService: @preconcurrency WatchMessagingSer #expect(json.contains("\"value\"")) } - @Test @MainActor func chatSessionKeyDefaultsToIOSBase() { + @Test @MainActor func chatSessionKeyDefaultsToMainBase() { let appModel = NodeAppModel() - #expect(appModel.chatSessionKey == "ios") + #expect(appModel.chatSessionKey == "main") } @Test @MainActor func 
chatSessionKeyUsesAgentScopedKeyForNonDefaultAgent() { let appModel = NodeAppModel() appModel.gatewayDefaultAgentId = "main" appModel.setSelectedAgentId("agent-123") - #expect(appModel.chatSessionKey == SessionKey.makeAgentSessionKey(agentId: "agent-123", baseKey: "ios")) + #expect(appModel.chatSessionKey == SessionKey.makeAgentSessionKey(agentId: "agent-123", baseKey: "main")) #expect(appModel.mainSessionKey == "agent:agent-123:main") } diff --git a/apps/ios/Tests/OnboardingStateStoreTests.swift b/apps/ios/Tests/OnboardingStateStoreTests.swift index 30c014647b6..06a6a0f3ec2 100644 --- a/apps/ios/Tests/OnboardingStateStoreTests.swift +++ b/apps/ios/Tests/OnboardingStateStoreTests.swift @@ -39,6 +39,35 @@ import Testing #expect(OnboardingStateStore.shouldPresentOnLaunch(appModel: appModel, defaults: defaults)) } + @Test func firstRunIntroDefaultsToVisibleThenPersists() { + let testDefaults = self.makeDefaults() + let defaults = testDefaults.defaults + defer { self.reset(testDefaults) } + + #expect(OnboardingStateStore.shouldPresentFirstRunIntro(defaults: defaults)) + + OnboardingStateStore.markFirstRunIntroSeen(defaults: defaults) + #expect(!OnboardingStateStore.shouldPresentFirstRunIntro(defaults: defaults)) + } + + @Test @MainActor func resetClearsCompletionAndIntroSeen() { + let testDefaults = self.makeDefaults() + let defaults = testDefaults.defaults + defer { self.reset(testDefaults) } + + OnboardingStateStore.markCompleted(mode: .homeNetwork, defaults: defaults) + OnboardingStateStore.markFirstRunIntroSeen(defaults: defaults) + + OnboardingStateStore.reset(defaults: defaults) + + let appModel = NodeAppModel() + appModel.gatewayServerName = nil + + #expect(OnboardingStateStore.shouldPresentOnLaunch(appModel: appModel, defaults: defaults)) + #expect(OnboardingStateStore.shouldPresentFirstRunIntro(defaults: defaults)) + #expect(OnboardingStateStore.lastMode(defaults: defaults) == .homeNetwork) + } + private struct TestDefaults { var suiteName: String var 
defaults: UserDefaults diff --git a/apps/ios/WatchApp/Info.plist b/apps/ios/WatchApp/Info.plist index fa45d719b9c..3eea1e6ff09 100644 --- a/apps/ios/WatchApp/Info.plist +++ b/apps/ios/WatchApp/Info.plist @@ -17,9 +17,9 @@ CFBundlePackageType APPL CFBundleShortVersionString - 2026.3.9 + $(OPENCLAW_MARKETING_VERSION) CFBundleVersion - 20260308 + $(OPENCLAW_BUILD_VERSION) WKCompanionAppBundleIdentifier $(OPENCLAW_APP_BUNDLE_ID) WKWatchKitApp diff --git a/apps/ios/WatchExtension/Info.plist b/apps/ios/WatchExtension/Info.plist index 1d898d43757..87313064945 100644 --- a/apps/ios/WatchExtension/Info.plist +++ b/apps/ios/WatchExtension/Info.plist @@ -15,9 +15,9 @@ CFBundleName $(PRODUCT_NAME) CFBundleShortVersionString - 2026.3.9 + $(OPENCLAW_MARKETING_VERSION) CFBundleVersion - 20260308 + $(OPENCLAW_BUILD_VERSION) NSExtension NSExtensionAttributes diff --git a/apps/ios/fastlane/Fastfile b/apps/ios/fastlane/Fastfile index 33e6bfa8adb..74cbcec4b68 100644 --- a/apps/ios/fastlane/Fastfile +++ b/apps/ios/fastlane/Fastfile @@ -1,8 +1,11 @@ require "shellwords" require "open3" +require "json" default_platform(:ios) +BETA_APP_IDENTIFIER = "ai.openclaw.client" + def load_env_file(path) return unless File.exist?(path) @@ -84,6 +87,111 @@ def read_asc_key_content_from_keychain end end +def repo_root + File.expand_path("../../..", __dir__) +end + +def ios_root + File.expand_path("..", __dir__) +end + +def normalize_release_version(raw_value) + version = raw_value.to_s.strip.sub(/\Av/, "") + UI.user_error!("Missing root package.json version.") unless env_present?(version) + unless version.match?(/\A\d+\.\d+\.\d+(?:[.-]?beta[.-]\d+)?\z/i) + UI.user_error!("Invalid package.json version '#{raw_value}'. 
Expected 2026.3.13 or 2026.3.13-beta.1.") + end + + version +end + +def read_root_package_version + package_json_path = File.join(repo_root, "package.json") + UI.user_error!("Missing package.json at #{package_json_path}.") unless File.exist?(package_json_path) + + parsed = JSON.parse(File.read(package_json_path)) + normalize_release_version(parsed["version"]) +rescue JSON::ParserError => e + UI.user_error!("Invalid package.json at #{package_json_path}: #{e.message}") +end + +def short_release_version(version) + normalize_release_version(version).sub(/([.-]?beta[.-]\d+)\z/i, "") +end + +def shell_join(parts) + Shellwords.join(parts.compact) +end + +def resolve_beta_build_number(api_key:, version:) + explicit = ENV["IOS_BETA_BUILD_NUMBER"] + if env_present?(explicit) + UI.user_error!("Invalid IOS_BETA_BUILD_NUMBER '#{explicit}'. Expected digits only.") unless explicit.match?(/\A\d+\z/) + UI.message("Using explicit iOS beta build number #{explicit}.") + return explicit + end + + short_version = short_release_version(version) + latest_build = latest_testflight_build_number( + api_key: api_key, + app_identifier: BETA_APP_IDENTIFIER, + version: short_version, + initial_build_number: 0 + ) + next_build = latest_build.to_i + 1 + UI.message("Resolved iOS beta build number #{next_build} for #{short_version} (latest TestFlight build: #{latest_build}).") + next_build.to_s +end + +def beta_build_number_needs_asc_auth? 
+ explicit = ENV["IOS_BETA_BUILD_NUMBER"] + !env_present?(explicit) +end + +def prepare_beta_release!(version:, build_number:) + script_path = File.join(repo_root, "scripts", "ios-beta-prepare.sh") + UI.message("Preparing iOS beta release #{version} (build #{build_number}).") + sh(shell_join(["bash", script_path, "--build-number", build_number])) + + beta_xcconfig = File.join(ios_root, "build", "BetaRelease.xcconfig") + UI.user_error!("Missing beta xcconfig at #{beta_xcconfig}.") unless File.exist?(beta_xcconfig) + + ENV["XCODE_XCCONFIG_FILE"] = beta_xcconfig + beta_xcconfig +end + +def build_beta_release(context) + version = context[:version] + output_directory = File.join("build", "beta") + archive_path = File.join(output_directory, "OpenClaw-#{version}.xcarchive") + + build_app( + project: "OpenClaw.xcodeproj", + scheme: "OpenClaw", + configuration: "Release", + export_method: "app-store", + clean: true, + skip_profile_detection: true, + build_path: "build", + archive_path: archive_path, + output_directory: output_directory, + output_name: "OpenClaw-#{version}.ipa", + xcargs: "-allowProvisioningUpdates", + export_xcargs: "-allowProvisioningUpdates", + export_options: { + signingStyle: "automatic" + } + ) + + { + archive_path: archive_path, + build_number: context[:build_number], + ipa_path: lane_context[SharedValues::IPA_OUTPUT_PATH], + short_version: context[:short_version], + version: version + } +end + platform :ios do private_lane :asc_api_key do load_env_file(File.join(__dir__, ".env")) @@ -132,38 +240,48 @@ platform :ios do api_key end - desc "Build + upload to TestFlight" + private_lane :prepare_beta_context do |options| + require_api_key = options[:require_api_key] == true + needs_api_key = require_api_key || beta_build_number_needs_asc_auth? + api_key = needs_api_key ? 
asc_api_key : nil + version = read_root_package_version + build_number = resolve_beta_build_number(api_key: api_key, version: version) + beta_xcconfig = prepare_beta_release!(version: version, build_number: build_number) + + { + api_key: api_key, + beta_xcconfig: beta_xcconfig, + build_number: build_number, + short_version: short_release_version(version), + version: version + } + end + + desc "Build a beta archive locally without uploading" + lane :beta_archive do + context = prepare_beta_context(require_api_key: false) + build = build_beta_release(context) + UI.success("Built iOS beta archive: version=#{build[:version]} short=#{build[:short_version]} build=#{build[:build_number]}") + build + ensure + ENV.delete("XCODE_XCCONFIG_FILE") + end + + desc "Build + upload a beta to TestFlight" lane :beta do - api_key = asc_api_key - - team_id = ENV["IOS_DEVELOPMENT_TEAM"] - if team_id.nil? || team_id.strip.empty? - helper_path = File.expand_path("../../../scripts/ios-team-id.sh", __dir__) - if File.exist?(helper_path) - # Keep CI/local compatibility where teams are present in keychain but not Xcode account metadata. - team_id = sh("IOS_ALLOW_KEYCHAIN_TEAM_FALLBACK=1 bash #{helper_path.shellescape}").strip - end - end - UI.user_error!("Missing IOS_DEVELOPMENT_TEAM (Apple Team ID). Add it to fastlane/.env or export it in your shell.") if team_id.nil? || team_id.strip.empty? 
- - build_app( - project: "OpenClaw.xcodeproj", - scheme: "OpenClaw", - export_method: "app-store", - clean: true, - skip_profile_detection: true, - xcargs: "DEVELOPMENT_TEAM=#{team_id} -allowProvisioningUpdates", - export_xcargs: "-allowProvisioningUpdates", - export_options: { - signingStyle: "automatic" - } - ) + context = prepare_beta_context(require_api_key: true) + build = build_beta_release(context) upload_to_testflight( - api_key: api_key, + api_key: context[:api_key], + ipa: build[:ipa_path], skip_waiting_for_build_processing: true, uses_non_exempt_encryption: false ) + + UI.success("Uploaded iOS beta: version=#{build[:version]} short=#{build[:short_version]} build=#{build[:build_number]}") + ensure + ENV.delete("XCODE_XCCONFIG_FILE") end desc "Upload App Store metadata (and optionally screenshots)" diff --git a/apps/ios/fastlane/SETUP.md b/apps/ios/fastlane/SETUP.md index 8dccf264b41..67d4fcc843a 100644 --- a/apps/ios/fastlane/SETUP.md +++ b/apps/ios/fastlane/SETUP.md @@ -32,9 +32,9 @@ ASC_KEYCHAIN_ACCOUNT=YOUR_MAC_USERNAME Optional app targeting variables (helpful if Fastlane cannot auto-resolve app by bundle): ```bash -ASC_APP_IDENTIFIER=ai.openclaw.ios +ASC_APP_IDENTIFIER=ai.openclaw.client # or -ASC_APP_ID=6760218713 +ASC_APP_ID=YOUR_APP_STORE_CONNECT_APP_ID ``` File-based fallback (CI/non-macOS): @@ -60,9 +60,37 @@ cd apps/ios fastlane ios auth_check ``` -Run: +ASC auth is only required when: + +- uploading to TestFlight +- auto-resolving the next build number from App Store Connect + +If you pass `--build-number` to `pnpm ios:beta:archive`, the local archive path does not need ASC auth. 
- The `version` field in the root `package.json` is the single source of truth for iOS versioning
"2026.3.9" - CFBundleVersion: "20260308" + CFBundleShortVersionString: "$(OPENCLAW_MARKETING_VERSION)" + CFBundleVersion: "$(OPENCLAW_BUILD_VERSION)" UILaunchScreen: {} UIApplicationSceneManifest: UIApplicationSupportsMultipleScenes: false @@ -131,6 +142,10 @@ targets: NSSpeechRecognitionUsageDescription: OpenClaw uses on-device speech recognition for voice wake. NSSupportsLiveActivities: true ITSAppUsesNonExemptEncryption: false + OpenClawPushTransport: "$(OPENCLAW_PUSH_TRANSPORT)" + OpenClawPushDistribution: "$(OPENCLAW_PUSH_DISTRIBUTION)" + OpenClawPushRelayBaseURL: "$(OPENCLAW_PUSH_RELAY_BASE_URL)" + OpenClawPushAPNsEnvironment: "$(OPENCLAW_PUSH_APNS_ENVIRONMENT)" UISupportedInterfaceOrientations: - UIInterfaceOrientationPortrait - UIInterfaceOrientationPortraitUpsideDown @@ -168,8 +183,8 @@ targets: path: ShareExtension/Info.plist properties: CFBundleDisplayName: OpenClaw Share - CFBundleShortVersionString: "2026.3.9" - CFBundleVersion: "20260308" + CFBundleShortVersionString: "$(OPENCLAW_MARKETING_VERSION)" + CFBundleVersion: "$(OPENCLAW_BUILD_VERSION)" NSExtension: NSExtensionPointIdentifier: com.apple.share-services NSExtensionPrincipalClass: "$(PRODUCT_MODULE_NAME).ShareViewController" @@ -205,8 +220,8 @@ targets: path: ActivityWidget/Info.plist properties: CFBundleDisplayName: OpenClaw Activity - CFBundleShortVersionString: "2026.3.9" - CFBundleVersion: "20260308" + CFBundleShortVersionString: "$(OPENCLAW_MARKETING_VERSION)" + CFBundleVersion: "$(OPENCLAW_BUILD_VERSION)" NSSupportsLiveActivities: true NSExtension: NSExtensionPointIdentifier: com.apple.widgetkit-extension @@ -224,6 +239,7 @@ targets: Release: Config/Signing.xcconfig settings: base: + ASSETCATALOG_COMPILER_APPICON_NAME: AppIcon ENABLE_APPINTENTS_METADATA: NO ENABLE_APP_INTENTS_METADATA_GENERATION: NO PRODUCT_BUNDLE_IDENTIFIER: "$(OPENCLAW_WATCH_APP_BUNDLE_ID)" @@ -231,8 +247,8 @@ targets: path: WatchApp/Info.plist properties: CFBundleDisplayName: OpenClaw - CFBundleShortVersionString: 
"2026.3.9" - CFBundleVersion: "20260308" + CFBundleShortVersionString: "$(OPENCLAW_MARKETING_VERSION)" + CFBundleVersion: "$(OPENCLAW_BUILD_VERSION)" WKCompanionAppBundleIdentifier: "$(OPENCLAW_APP_BUNDLE_ID)" WKWatchKitApp: true @@ -256,8 +272,8 @@ targets: path: WatchExtension/Info.plist properties: CFBundleDisplayName: OpenClaw - CFBundleShortVersionString: "2026.3.9" - CFBundleVersion: "20260308" + CFBundleShortVersionString: "$(OPENCLAW_MARKETING_VERSION)" + CFBundleVersion: "$(OPENCLAW_BUILD_VERSION)" NSExtension: NSExtensionAttributes: WKAppBundleIdentifier: "$(OPENCLAW_WATCH_APP_BUNDLE_ID)" @@ -293,8 +309,8 @@ targets: path: Tests/Info.plist properties: CFBundleDisplayName: OpenClawTests - CFBundleShortVersionString: "2026.3.9" - CFBundleVersion: "20260308" + CFBundleShortVersionString: "$(OPENCLAW_MARKETING_VERSION)" + CFBundleVersion: "$(OPENCLAW_BUILD_VERSION)" OpenClawLogicTests: type: bundle.unit-test @@ -319,5 +335,5 @@ targets: path: Tests/Info.plist properties: CFBundleDisplayName: OpenClawLogicTests - CFBundleShortVersionString: "2026.3.9" - CFBundleVersion: "20260308" + CFBundleShortVersionString: "$(OPENCLAW_MARKETING_VERSION)" + CFBundleVersion: "$(OPENCLAW_BUILD_VERSION)" diff --git a/apps/macos/Sources/OpenClaw/AppState.swift b/apps/macos/Sources/OpenClaw/AppState.swift index 5e8238ebe92..d503686ba57 100644 --- a/apps/macos/Sources/OpenClaw/AppState.swift +++ b/apps/macos/Sources/OpenClaw/AppState.swift @@ -600,30 +600,29 @@ final class AppState { private func syncGatewayConfigIfNeeded() { guard !self.isPreview, !self.isInitializing else { return } - let connectionMode = self.connectionMode - let remoteTarget = self.remoteTarget - let remoteIdentity = self.remoteIdentity - let remoteTransport = self.remoteTransport - let remoteUrl = self.remoteUrl - let remoteToken = self.remoteToken - let remoteTokenDirty = self.remoteTokenDirty - Task { @MainActor in - // Keep app-only connection settings local to avoid overwriting remote gateway config. 
- let synced = Self.syncedGatewayRoot( - currentRoot: OpenClawConfigFile.loadDict(), - connectionMode: connectionMode, - remoteTransport: remoteTransport, - remoteTarget: remoteTarget, - remoteIdentity: remoteIdentity, - remoteUrl: remoteUrl, - remoteToken: remoteToken, - remoteTokenDirty: remoteTokenDirty) - guard synced.changed else { return } - OpenClawConfigFile.saveDict(synced.root) + self.syncGatewayConfigNow() } } + @MainActor + func syncGatewayConfigNow() { + guard !self.isPreview, !self.isInitializing else { return } + + // Keep app-only connection settings local to avoid overwriting remote gateway config. + let synced = Self.syncedGatewayRoot( + currentRoot: OpenClawConfigFile.loadDict(), + connectionMode: self.connectionMode, + remoteTransport: self.remoteTransport, + remoteTarget: self.remoteTarget, + remoteIdentity: self.remoteIdentity, + remoteUrl: self.remoteUrl, + remoteToken: self.remoteToken, + remoteTokenDirty: self.remoteTokenDirty) + guard synced.changed else { return } + OpenClawConfigFile.saveDict(synced.root) + } + func triggerVoiceEars(ttl: TimeInterval? = 5) { self.earBoostTask?.cancel() self.earBoostActive = true diff --git a/apps/macos/Sources/OpenClaw/ControlChannel.swift b/apps/macos/Sources/OpenClaw/ControlChannel.swift index aecf9539ef5..607aab47940 100644 --- a/apps/macos/Sources/OpenClaw/ControlChannel.swift +++ b/apps/macos/Sources/OpenClaw/ControlChannel.swift @@ -188,6 +188,10 @@ final class ControlChannel { return desc } + if let authIssue = RemoteGatewayAuthIssue(error: error) { + return authIssue.statusMessage + } + // If the gateway explicitly rejects the hello (e.g., auth/token mismatch), surface it. if let urlErr = error as? 
URLError, urlErr.code == .dataNotAllowed // used for WS close 1008 auth failures @@ -320,6 +324,8 @@ final class ControlChannel { switch source { case .deviceToken: return "Auth: device token (paired device)" + case .bootstrapToken: + return "Auth: bootstrap token (setup code)" case .sharedToken: return "Auth: shared token (\(isRemote ? "gateway.remote.token" : "gateway.auth.token"))" case .password: diff --git a/apps/macos/Sources/OpenClaw/CronJobEditor+Helpers.swift b/apps/macos/Sources/OpenClaw/CronJobEditor+Helpers.swift index 26b64ea7c65..41b98111b4e 100644 --- a/apps/macos/Sources/OpenClaw/CronJobEditor+Helpers.swift +++ b/apps/macos/Sources/OpenClaw/CronJobEditor+Helpers.swift @@ -16,7 +16,14 @@ extension CronJobEditor { self.agentId = job.agentId ?? "" self.enabled = job.enabled self.deleteAfterRun = job.deleteAfterRun ?? false - self.sessionTarget = job.sessionTarget + switch job.parsedSessionTarget { + case .predefined(let target): + self.sessionTarget = target + self.preservedSessionTargetRaw = nil + case .session(let id): + self.sessionTarget = .isolated + self.preservedSessionTargetRaw = "session:\(id)" + } self.wakeMode = job.wakeMode switch job.schedule { @@ -51,7 +58,7 @@ extension CronJobEditor { self.channel = trimmed.isEmpty ? "last" : trimmed self.to = delivery.to ?? "" self.bestEffortDeliver = delivery.bestEffort ?? 
false - } else if self.sessionTarget == .isolated { + } else if self.isIsolatedLikeSessionTarget { self.deliveryMode = .announce } } @@ -80,7 +87,7 @@ extension CronJobEditor { "name": name, "enabled": self.enabled, "schedule": schedule, - "sessionTarget": self.sessionTarget.rawValue, + "sessionTarget": self.effectiveSessionTargetRaw, "wakeMode": self.wakeMode.rawValue, "payload": payload, ] @@ -92,7 +99,7 @@ extension CronJobEditor { root["agentId"] = NSNull() } - if self.sessionTarget == .isolated { + if self.isIsolatedLikeSessionTarget { root["delivery"] = self.buildDelivery() } @@ -160,7 +167,7 @@ extension CronJobEditor { } func buildSelectedPayload() throws -> [String: Any] { - if self.sessionTarget == .isolated { return self.buildAgentTurnPayload() } + if self.isIsolatedLikeSessionTarget { return self.buildAgentTurnPayload() } switch self.payloadKind { case .systemEvent: let text = self.trimmed(self.systemEventText) @@ -171,7 +178,7 @@ extension CronJobEditor { } func validateSessionTarget(_ payload: [String: Any]) throws { - if self.sessionTarget == .main, payload["kind"] as? String == "agentTurn" { + if self.effectiveSessionTargetRaw == "main", payload["kind"] as? String == "agentTurn" { throw NSError( domain: "Cron", code: 0, @@ -181,7 +188,7 @@ extension CronJobEditor { ]) } - if self.sessionTarget == .isolated, payload["kind"] as? String == "systemEvent" { + if self.effectiveSessionTargetRaw != "main", payload["kind"] as? 
String == "systemEvent" { throw NSError( domain: "Cron", code: 0, @@ -257,6 +264,17 @@ extension CronJobEditor { return Int(floor(n * factor)) } + var effectiveSessionTargetRaw: String { + if self.sessionTarget == .isolated, let preserved = self.preservedSessionTargetRaw?.trimmingCharacters(in: .whitespacesAndNewlines), !preserved.isEmpty { + return preserved + } + return self.sessionTarget.rawValue + } + + var isIsolatedLikeSessionTarget: Bool { + self.effectiveSessionTargetRaw != "main" + } + func formatDuration(ms: Int) -> String { DurationFormattingSupport.conciseDuration(ms: ms) } diff --git a/apps/macos/Sources/OpenClaw/CronJobEditor.swift b/apps/macos/Sources/OpenClaw/CronJobEditor.swift index a7d88a4f2fb..292f3a63284 100644 --- a/apps/macos/Sources/OpenClaw/CronJobEditor.swift +++ b/apps/macos/Sources/OpenClaw/CronJobEditor.swift @@ -16,7 +16,7 @@ struct CronJobEditor: View { + "Use an isolated session for agent turns so your main chat stays clean." static let sessionTargetNote = "Main jobs post a system event into the current main session. " - + "Isolated jobs run OpenClaw in a dedicated session and can announce results to a channel." + + "Current and isolated-style jobs run agent turns and can announce results to a channel." static let scheduleKindNote = "“At” runs once, “Every” repeats with a duration, “Cron” uses a 5-field Unix expression." static let isolatedPayloadNote = @@ -29,6 +29,7 @@ struct CronJobEditor: View { @State var agentId: String = "" @State var enabled: Bool = true @State var sessionTarget: CronSessionTarget = .main + @State var preservedSessionTargetRaw: String? 
@State var wakeMode: CronWakeMode = .now @State var deleteAfterRun: Bool = false @@ -117,6 +118,7 @@ struct CronJobEditor: View { Picker("", selection: self.$sessionTarget) { Text("main").tag(CronSessionTarget.main) Text("isolated").tag(CronSessionTarget.isolated) + Text("current").tag(CronSessionTarget.current) } .labelsHidden() .pickerStyle(.segmented) @@ -209,7 +211,7 @@ struct CronJobEditor: View { GroupBox("Payload") { VStack(alignment: .leading, spacing: 10) { - if self.sessionTarget == .isolated { + if self.isIsolatedLikeSessionTarget { Text(Self.isolatedPayloadNote) .font(.footnote) .foregroundStyle(.secondary) @@ -289,8 +291,11 @@ struct CronJobEditor: View { self.sessionTarget = .isolated } } - .onChange(of: self.sessionTarget) { _, newValue in - if newValue == .isolated { + .onChange(of: self.sessionTarget) { oldValue, newValue in + if oldValue != newValue { + self.preservedSessionTargetRaw = nil + } + if newValue != .main { self.payloadKind = .agentTurn } else if newValue == .main, self.payloadKind == .agentTurn { self.payloadKind = .systemEvent diff --git a/apps/macos/Sources/OpenClaw/CronModels.swift b/apps/macos/Sources/OpenClaw/CronModels.swift index e0ce46c13da..40079453974 100644 --- a/apps/macos/Sources/OpenClaw/CronModels.swift +++ b/apps/macos/Sources/OpenClaw/CronModels.swift @@ -3,12 +3,39 @@ import Foundation enum CronSessionTarget: String, CaseIterable, Identifiable, Codable { case main case isolated + case current var id: String { self.rawValue } } +enum CronCustomSessionTarget: Codable, Equatable { + case predefined(CronSessionTarget) + case session(id: String) + + var rawValue: String { + switch self { + case .predefined(let target): + return target.rawValue + case .session(let id): + return "session:\(id)" + } + } + + static func from(_ value: String) -> CronCustomSessionTarget { + if let predefined = CronSessionTarget(rawValue: value) { + return .predefined(predefined) + } + if value.hasPrefix("session:") { + let sessionId = 
String(value.dropFirst(8)) + return .session(id: sessionId) + } + // Fallback to isolated for unknown values + return .predefined(.isolated) + } +} + enum CronWakeMode: String, CaseIterable, Identifiable, Codable { case now case nextHeartbeat = "next-heartbeat" @@ -204,12 +231,69 @@ struct CronJob: Identifiable, Codable, Equatable { let createdAtMs: Int let updatedAtMs: Int let schedule: CronSchedule - let sessionTarget: CronSessionTarget + private let sessionTargetRaw: String let wakeMode: CronWakeMode let payload: CronPayload let delivery: CronDelivery? let state: CronJobState + enum CodingKeys: String, CodingKey { + case id + case agentId + case name + case description + case enabled + case deleteAfterRun + case createdAtMs + case updatedAtMs + case schedule + case sessionTargetRaw = "sessionTarget" + case wakeMode + case payload + case delivery + case state + } + + /// Parsed session target (predefined or custom session ID) + var parsedSessionTarget: CronCustomSessionTarget { + CronCustomSessionTarget.from(self.sessionTargetRaw) + } + + /// Compatibility shim for existing editor/UI code paths that still use the + /// predefined enum. + var sessionTarget: CronSessionTarget { + switch self.parsedSessionTarget { + case .predefined(let target): + return target + case .session: + return .isolated + } + } + + var sessionTargetDisplayValue: String { + self.parsedSessionTarget.rawValue + } + + var transcriptSessionKey: String? { + switch self.parsedSessionTarget { + case .predefined(.main): + return nil + case .predefined(.isolated), .predefined(.current): + return "cron:\(self.id)" + case .session(let id): + return id + } + } + + var supportsAnnounceDelivery: Bool { + switch self.parsedSessionTarget { + case .predefined(.main): + return false + case .predefined(.isolated), .predefined(.current), .session: + return true + } + } + var displayName: String { let trimmed = self.name.trimmingCharacters(in: .whitespacesAndNewlines) return trimmed.isEmpty ? 
"Untitled job" : trimmed diff --git a/apps/macos/Sources/OpenClaw/CronSettings+Rows.swift b/apps/macos/Sources/OpenClaw/CronSettings+Rows.swift index 69655bdc302..85e45928853 100644 --- a/apps/macos/Sources/OpenClaw/CronSettings+Rows.swift +++ b/apps/macos/Sources/OpenClaw/CronSettings+Rows.swift @@ -18,7 +18,7 @@ extension CronSettings { } } HStack(spacing: 6) { - StatusPill(text: job.sessionTarget.rawValue, tint: .secondary) + StatusPill(text: job.sessionTargetDisplayValue, tint: .secondary) StatusPill(text: job.wakeMode.rawValue, tint: .secondary) if let agentId = job.agentId, !agentId.isEmpty { StatusPill(text: "agent \(agentId)", tint: .secondary) @@ -34,9 +34,9 @@ extension CronSettings { @ViewBuilder func jobContextMenu(_ job: CronJob) -> some View { Button("Run now") { Task { await self.store.runJob(id: job.id, force: true) } } - if job.sessionTarget == .isolated { + if let transcriptSessionKey = job.transcriptSessionKey { Button("Open transcript") { - WebChatManager.shared.show(sessionKey: "cron:\(job.id)") + WebChatManager.shared.show(sessionKey: transcriptSessionKey) } } Divider() @@ -75,9 +75,9 @@ extension CronSettings { .labelsHidden() Button("Run") { Task { await self.store.runJob(id: job.id, force: true) } } .buttonStyle(.borderedProminent) - if job.sessionTarget == .isolated { + if let transcriptSessionKey = job.transcriptSessionKey { Button("Transcript") { - WebChatManager.shared.show(sessionKey: "cron:\(job.id)") + WebChatManager.shared.show(sessionKey: transcriptSessionKey) } .buttonStyle(.bordered) } @@ -103,7 +103,7 @@ extension CronSettings { if let agentId = job.agentId, !agentId.isEmpty { LabeledContent("Agent") { Text(agentId) } } - LabeledContent("Session") { Text(job.sessionTarget.rawValue) } + LabeledContent("Session") { Text(job.sessionTargetDisplayValue) } LabeledContent("Wake") { Text(job.wakeMode.rawValue) } LabeledContent("Next run") { if let date = job.nextRunDate { @@ -224,7 +224,7 @@ extension CronSettings { HStack(spacing: 8) { 
if let thinking, !thinking.isEmpty { StatusPill(text: "think \(thinking)", tint: .secondary) } if let timeoutSeconds { StatusPill(text: "\(timeoutSeconds)s", tint: .secondary) } - if job.sessionTarget == .isolated { + if job.supportsAnnounceDelivery { let delivery = job.delivery if let delivery { if delivery.mode == .announce { diff --git a/apps/macos/Sources/OpenClaw/ExecApprovalEvaluation.swift b/apps/macos/Sources/OpenClaw/ExecApprovalEvaluation.swift index c7d9d0928e1..a36e58db1d8 100644 --- a/apps/macos/Sources/OpenClaw/ExecApprovalEvaluation.swift +++ b/apps/macos/Sources/OpenClaw/ExecApprovalEvaluation.swift @@ -45,8 +45,8 @@ enum ExecApprovalEvaluator { let skillAllow: Bool if approvals.agent.autoAllowSkills, !allowlistResolutions.isEmpty { - let bins = await SkillBinsCache.shared.currentBins() - skillAllow = allowlistResolutions.allSatisfy { bins.contains($0.executableName) } + let bins = await SkillBinsCache.shared.currentTrust() + skillAllow = self.isSkillAutoAllowed(allowlistResolutions, trustedBinsByName: bins) } else { skillAllow = false } @@ -65,4 +65,26 @@ enum ExecApprovalEvaluator { allowlistMatch: allowlistSatisfied ? 
allowlistMatches.first : nil, skillAllow: skillAllow) } + + static func isSkillAutoAllowed( + _ resolutions: [ExecCommandResolution], + trustedBinsByName: [String: Set]) -> Bool + { + guard !resolutions.isEmpty, !trustedBinsByName.isEmpty else { return false } + return resolutions.allSatisfy { resolution in + guard let executableName = SkillBinsCache.normalizeSkillBinName(resolution.executableName), + let resolvedPath = SkillBinsCache.normalizeResolvedPath(resolution.resolvedPath) + else { + return false + } + return trustedBinsByName[executableName]?.contains(resolvedPath) == true + } + } + + static func _testIsSkillAutoAllowed( + _ resolutions: [ExecCommandResolution], + trustedBinsByName: [String: Set]) -> Bool + { + self.isSkillAutoAllowed(resolutions, trustedBinsByName: trustedBinsByName) + } } diff --git a/apps/macos/Sources/OpenClaw/ExecApprovals.swift b/apps/macos/Sources/OpenClaw/ExecApprovals.swift index ba49b37cd9f..141da33ad48 100644 --- a/apps/macos/Sources/OpenClaw/ExecApprovals.swift +++ b/apps/macos/Sources/OpenClaw/ExecApprovals.swift @@ -370,6 +370,17 @@ enum ExecApprovalsStore { static func resolve(agentId: String?) -> ExecApprovalsResolved { let file = self.ensureFile() + return self.resolveFromFile(file, agentId: agentId) + } + + /// Read-only resolve: loads file without writing (no ensureFile side effects). + /// Safe to call from background threads / off MainActor. + static func resolveReadOnly(agentId: String?) -> ExecApprovalsResolved { + let file = self.loadFile() + return self.resolveFromFile(file, agentId: agentId) + } + + private static func resolveFromFile(_ file: ExecApprovalsFile, agentId: String?) -> ExecApprovalsResolved { let defaults = file.defaults ?? ExecApprovalsDefaults() let resolvedDefaults = ExecApprovalsResolvedDefaults( security: defaults.security ?? 
self.defaultSecurity, @@ -777,6 +788,7 @@ actor SkillBinsCache { static let shared = SkillBinsCache() private var bins: Set = [] + private var trustByName: [String: Set] = [:] private var lastRefresh: Date? private let refreshInterval: TimeInterval = 90 @@ -787,27 +799,90 @@ actor SkillBinsCache { return self.bins } + func currentTrust(force: Bool = false) async -> [String: Set] { + if force || self.isStale() { + await self.refresh() + } + return self.trustByName + } + func refresh() async { do { let report = try await GatewayConnection.shared.skillsStatus() - var next = Set() - for skill in report.skills { - for bin in skill.requirements.bins { - let trimmed = bin.trimmingCharacters(in: .whitespacesAndNewlines) - if !trimmed.isEmpty { next.insert(trimmed) } - } - } - self.bins = next + let trust = Self.buildTrustIndex(report: report, searchPaths: CommandResolver.preferredPaths()) + self.bins = trust.names + self.trustByName = trust.pathsByName self.lastRefresh = Date() } catch { if self.lastRefresh == nil { self.bins = [] + self.trustByName = [:] } } } + static func normalizeSkillBinName(_ value: String) -> String? { + let trimmed = value.trimmingCharacters(in: .whitespacesAndNewlines).lowercased() + return trimmed.isEmpty ? nil : trimmed + } + + static func normalizeResolvedPath(_ value: String?) -> String? { + let trimmed = value?.trimmingCharacters(in: .whitespacesAndNewlines) ?? 
"" + guard !trimmed.isEmpty else { return nil } + return URL(fileURLWithPath: trimmed).standardizedFileURL.path + } + + static func buildTrustIndex( + report: SkillsStatusReport, + searchPaths: [String]) -> SkillBinTrustIndex + { + var names = Set() + var pathsByName: [String: Set] = [:] + + for skill in report.skills { + for bin in skill.requirements.bins { + let trimmed = bin.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmed.isEmpty else { continue } + names.insert(trimmed) + + guard let name = self.normalizeSkillBinName(trimmed), + let resolvedPath = self.resolveSkillBinPath(trimmed, searchPaths: searchPaths), + let normalizedPath = self.normalizeResolvedPath(resolvedPath) + else { + continue + } + + var paths = pathsByName[name] ?? Set() + paths.insert(normalizedPath) + pathsByName[name] = paths + } + } + + return SkillBinTrustIndex(names: names, pathsByName: pathsByName) + } + + private static func resolveSkillBinPath(_ bin: String, searchPaths: [String]) -> String? { + let expanded = bin.hasPrefix("~") ? (bin as NSString).expandingTildeInPath : bin + if expanded.contains("/") || expanded.contains("\\") { + return FileManager().isExecutableFile(atPath: expanded) ? 
expanded : nil + } + return CommandResolver.findExecutable(named: expanded, searchPaths: searchPaths) + } + private func isStale() -> Bool { guard let lastRefresh else { return true } return Date().timeIntervalSince(lastRefresh) > self.refreshInterval } + + static func _testBuildTrustIndex( + report: SkillsStatusReport, + searchPaths: [String]) -> SkillBinTrustIndex + { + self.buildTrustIndex(report: report, searchPaths: searchPaths) + } +} + +struct SkillBinTrustIndex { + let names: Set + let pathsByName: [String: Set] } diff --git a/apps/macos/Sources/OpenClaw/ExecApprovalsGatewayPrompter.swift b/apps/macos/Sources/OpenClaw/ExecApprovalsGatewayPrompter.swift index 379e8c0f559..08e60b84d2b 100644 --- a/apps/macos/Sources/OpenClaw/ExecApprovalsGatewayPrompter.swift +++ b/apps/macos/Sources/OpenClaw/ExecApprovalsGatewayPrompter.swift @@ -43,7 +43,33 @@ final class ExecApprovalsGatewayPrompter { do { let data = try JSONEncoder().encode(payload) let request = try JSONDecoder().decode(GatewayApprovalRequest.self, from: data) - guard self.shouldPresent(request: request) else { return } + let presentation = self.shouldPresent(request: request) + guard presentation.shouldAsk else { + // Ask policy says no prompt needed – resolve based on security policy + let decision: ExecApprovalDecision = presentation.security == .full ? 
.allowOnce : .deny + try await GatewayConnection.shared.requestVoid( + method: .execApprovalResolve, + params: [ + "id": AnyCodable(request.id), + "decision": AnyCodable(decision.rawValue), + ], + timeoutMs: 10000) + return + } + guard presentation.canPresent else { + let decision = Self.fallbackDecision( + request: request.request, + askFallback: presentation.askFallback, + allowlist: presentation.allowlist) + try await GatewayConnection.shared.requestVoid( + method: .execApprovalResolve, + params: [ + "id": AnyCodable(request.id), + "decision": AnyCodable(decision.rawValue), + ], + timeoutMs: 10000) + return + } let decision = ExecApprovalsPromptPresenter.prompt(request.request) try await GatewayConnection.shared.requestVoid( method: .execApprovalResolve, @@ -57,16 +83,89 @@ final class ExecApprovalsGatewayPrompter { } } - private func shouldPresent(request: GatewayApprovalRequest) -> Bool { + /// Whether the ask policy requires prompting the user. + /// Note: this only determines if a prompt is shown, not whether the action is allowed. + /// The security policy (full/deny/allowlist) decides the actual outcome. + private static func shouldAsk(security: ExecSecurity, ask: ExecAsk) -> Bool { + switch ask { + case .always: + return true + case .onMiss: + return security == .allowlist + case .off: + return false + } + } + + struct PresentationDecision { + /// Whether the ask policy requires prompting the user (not whether the action is allowed). + var shouldAsk: Bool + /// Whether the prompt can actually be shown (session match, recent activity, etc.). + var canPresent: Bool + /// The resolved security policy, used to determine allow/deny when no prompt is shown. + var security: ExecSecurity + /// Fallback security policy when a prompt is needed but can't be presented. 
+ var askFallback: ExecSecurity + var allowlist: [ExecAllowlistEntry] + } + + private func shouldPresent(request: GatewayApprovalRequest) -> PresentationDecision { let mode = AppStateStore.shared.connectionMode let activeSession = WebChatManager.shared.activeSessionKey?.trimmingCharacters(in: .whitespacesAndNewlines) let requestSession = request.request.sessionKey?.trimmingCharacters(in: .whitespacesAndNewlines) - return Self.shouldPresent( + + // Read-only resolve to avoid disk writes on the MainActor + let approvals = ExecApprovalsStore.resolveReadOnly(agentId: request.request.agentId) + let security = approvals.agent.security + let ask = approvals.agent.ask + + let shouldAsk = Self.shouldAsk(security: security, ask: ask) + + let canPresent = shouldAsk && Self.shouldPresent( mode: mode, activeSession: activeSession, requestSession: requestSession, lastInputSeconds: Self.lastInputSeconds(), thresholdSeconds: 120) + + return PresentationDecision( + shouldAsk: shouldAsk, + canPresent: canPresent, + security: security, + askFallback: approvals.agent.askFallback, + allowlist: approvals.allowlist) + } + + private static func fallbackDecision( + request: ExecApprovalPromptRequest, + askFallback: ExecSecurity, + allowlist: [ExecAllowlistEntry]) -> ExecApprovalDecision + { + guard askFallback == .allowlist else { + return askFallback == .full ? .allowOnce : .deny + } + let resolution = self.fallbackResolution(for: request) + let match = ExecAllowlistMatcher.match(entries: allowlist, resolution: resolution) + return match == nil ? .deny : .allowOnce + } + + private static func fallbackResolution(for request: ExecApprovalPromptRequest) -> ExecCommandResolution? { + let resolvedPath = request.resolvedPath?.trimmingCharacters(in: .whitespacesAndNewlines) + let trimmedResolvedPath = (resolvedPath?.isEmpty == false) ? resolvedPath : nil + let rawExecutable = self.firstToken(from: request.command) ?? trimmedResolvedPath ?? 
"" + guard !rawExecutable.isEmpty || trimmedResolvedPath != nil else { return nil } + let executableName = trimmedResolvedPath.map { URL(fileURLWithPath: $0).lastPathComponent } ?? rawExecutable + return ExecCommandResolution( + rawExecutable: rawExecutable, + resolvedPath: trimmedResolvedPath, + executableName: executableName, + cwd: request.cwd) + } + + private static func firstToken(from command: String) -> String? { + let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmed.isEmpty else { return nil } + return trimmed.split(whereSeparator: { $0.isWhitespace }).first.map(String.init) } private static func shouldPresent( @@ -117,5 +216,29 @@ extension ExecApprovalsGatewayPrompter { lastInputSeconds: lastInputSeconds, thresholdSeconds: thresholdSeconds) } + + static func _testShouldAsk(security: ExecSecurity, ask: ExecAsk) -> Bool { + self.shouldAsk(security: security, ask: ask) + } + + static func _testFallbackDecision( + command: String, + resolvedPath: String?, + askFallback: ExecSecurity, + allowlistPatterns: [String]) -> ExecApprovalDecision + { + self.fallbackDecision( + request: ExecApprovalPromptRequest( + command: command, + cwd: nil, + host: nil, + security: nil, + ask: nil, + agentId: nil, + resolvedPath: resolvedPath, + sessionKey: nil), + askFallback: askFallback, + allowlist: allowlistPatterns.map { ExecAllowlistEntry(pattern: $0) }) + } } #endif diff --git a/apps/macos/Sources/OpenClaw/ExecCommandResolution.swift b/apps/macos/Sources/OpenClaw/ExecCommandResolution.swift index 91a22153f3c..f89293a81aa 100644 --- a/apps/macos/Sources/OpenClaw/ExecCommandResolution.swift +++ b/apps/macos/Sources/OpenClaw/ExecCommandResolution.swift @@ -37,8 +37,7 @@ struct ExecCommandResolution { var resolutions: [ExecCommandResolution] = [] resolutions.reserveCapacity(segments.count) for segment in segments { - guard let token = self.parseFirstToken(segment), - let resolution = self.resolveExecutable(rawExecutable: token, cwd: cwd, env: 
env) + guard let resolution = self.resolveShellSegmentExecutable(segment, cwd: cwd, env: env) else { return [] } @@ -88,6 +87,20 @@ struct ExecCommandResolution { cwd: cwd) } + private static func resolveShellSegmentExecutable( + _ segment: String, + cwd: String?, + env: [String: String]?) -> ExecCommandResolution? + { + let tokens = self.tokenizeShellWords(segment) + guard !tokens.isEmpty else { return nil } + let effective = ExecEnvInvocationUnwrapper.unwrapDispatchWrappersForResolution(tokens) + guard let raw = effective.first?.trimmingCharacters(in: .whitespacesAndNewlines), !raw.isEmpty else { + return nil + } + return self.resolveExecutable(rawExecutable: raw, cwd: cwd, env: env) + } + private static func parseFirstToken(_ command: String) -> String? { let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines) guard !trimmed.isEmpty else { return nil } @@ -102,6 +115,59 @@ struct ExecCommandResolution { return trimmed.split(whereSeparator: { $0.isWhitespace }).first.map(String.init) } + private static func tokenizeShellWords(_ command: String) -> [String] { + let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmed.isEmpty else { return [] } + + var tokens: [String] = [] + var current = "" + var inSingle = false + var inDouble = false + var escaped = false + + func appendCurrent() { + guard !current.isEmpty else { return } + tokens.append(current) + current.removeAll(keepingCapacity: true) + } + + for ch in trimmed { + if escaped { + current.append(ch) + escaped = false + continue + } + + if ch == "\\", !inSingle { + escaped = true + continue + } + + if ch == "'", !inDouble { + inSingle.toggle() + continue + } + + if ch == "\"", !inSingle { + inDouble.toggle() + continue + } + + if ch.isWhitespace, !inSingle, !inDouble { + appendCurrent() + continue + } + + current.append(ch) + } + + if escaped { + current.append("\\") + } + appendCurrent() + return tokens + } + private enum ShellTokenContext { case unquoted case 
doubleQuoted @@ -148,8 +214,14 @@ struct ExecCommandResolution { while idx < chars.count { let ch = chars[idx] let next: Character? = idx + 1 < chars.count ? chars[idx + 1] : nil + let lookahead = self.nextShellSignificantCharacter(chars: chars, after: idx, inSingle: inSingle) if escaped { + if ch == "\n" { + escaped = false + idx += 1 + continue + } current.append(ch) escaped = false idx += 1 @@ -157,6 +229,10 @@ struct ExecCommandResolution { } if ch == "\\", !inSingle { + if next == "\n" { + idx += 2 + continue + } current.append(ch) escaped = true idx += 1 @@ -177,7 +253,7 @@ struct ExecCommandResolution { continue } - if !inSingle, self.shouldFailClosedForShell(ch: ch, next: next, inDouble: inDouble) { + if !inSingle, self.shouldFailClosedForShell(ch: ch, next: lookahead, inDouble: inDouble) { // Fail closed on command/process substitution in allowlist mode, // including command substitution inside double-quoted shell strings. return nil @@ -201,6 +277,25 @@ struct ExecCommandResolution { return segments } + private static func nextShellSignificantCharacter( + chars: [Character], + after idx: Int, + inSingle: Bool) -> Character? + { + guard !inSingle else { + return idx + 1 < chars.count ? chars[idx + 1] : nil + } + var cursor = idx + 1 + while cursor < chars.count { + if chars[cursor] == "\\", cursor + 1 < chars.count, chars[cursor + 1] == "\n" { + cursor += 2 + continue + } + return chars[cursor] + } + return nil + } + private static func shouldFailClosedForShell(ch: Character, next: Character?, inDouble: Bool) -> Bool { let context: ShellTokenContext = inDouble ? 
.doubleQuoted : .unquoted guard let rules = self.shellFailClosedRules[context] else { diff --git a/apps/macos/Sources/OpenClaw/GeneralSettings.swift b/apps/macos/Sources/OpenClaw/GeneralSettings.swift index b55ed439489..633879367ea 100644 --- a/apps/macos/Sources/OpenClaw/GeneralSettings.swift +++ b/apps/macos/Sources/OpenClaw/GeneralSettings.swift @@ -348,10 +348,18 @@ struct GeneralSettings: View { Text("Testing…") .font(.caption) .foregroundStyle(.secondary) - case .ok: - Label("Ready", systemImage: "checkmark.circle.fill") - .font(.caption) - .foregroundStyle(.green) + case let .ok(success): + VStack(alignment: .leading, spacing: 2) { + Label(success.title, systemImage: "checkmark.circle.fill") + .font(.caption) + .foregroundStyle(.green) + if let detail = success.detail { + Text(detail) + .font(.caption) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + } case let .failed(message): Text(message) .font(.caption) @@ -518,7 +526,7 @@ struct GeneralSettings: View { private enum RemoteStatus: Equatable { case idle case checking - case ok + case ok(RemoteGatewayProbeSuccess) case failed(String) } @@ -558,114 +566,14 @@ extension GeneralSettings { @MainActor func testRemote() async { self.remoteStatus = .checking - let settings = CommandResolver.connectionSettings() - if self.state.remoteTransport == .direct { - let trimmedUrl = self.state.remoteUrl.trimmingCharacters(in: .whitespacesAndNewlines) - guard !trimmedUrl.isEmpty else { - self.remoteStatus = .failed("Set a gateway URL first") - return - } - guard Self.isValidWsUrl(trimmedUrl) else { - self.remoteStatus = .failed( - "Gateway URL must use wss:// for remote hosts (ws:// only for localhost)") - return - } - } else { - guard !settings.target.isEmpty else { - self.remoteStatus = .failed("Set an SSH target first") - return - } - - // Step 1: basic SSH reachability check - guard let sshCommand = Self.sshCheckCommand( - target: settings.target, - identity: settings.identity) - 
else { - self.remoteStatus = .failed("SSH target is invalid") - return - } - let sshResult = await ShellExecutor.run( - command: sshCommand, - cwd: nil, - env: nil, - timeout: 8) - - guard sshResult.ok else { - self.remoteStatus = .failed(self.formatSSHFailure(sshResult, target: settings.target)) - return - } + switch await RemoteGatewayProbe.run() { + case let .ready(success): + self.remoteStatus = .ok(success) + case let .authIssue(issue): + self.remoteStatus = .failed(issue.statusMessage) + case let .failed(message): + self.remoteStatus = .failed(message) } - - // Step 2: control channel health check - let originalMode = AppStateStore.shared.connectionMode - do { - try await ControlChannel.shared.configure(mode: .remote( - target: settings.target, - identity: settings.identity)) - let data = try await ControlChannel.shared.health(timeout: 10) - if decodeHealthSnapshot(from: data) != nil { - self.remoteStatus = .ok - } else { - self.remoteStatus = .failed("Control channel returned invalid health JSON") - } - } catch { - self.remoteStatus = .failed(error.localizedDescription) - } - - // Restore original mode if we temporarily switched - switch originalMode { - case .remote: - break - case .local: - try? await ControlChannel.shared.configure(mode: .local) - case .unconfigured: - await ControlChannel.shared.disconnect() - } - } - - private static func isValidWsUrl(_ raw: String) -> Bool { - GatewayRemoteConfig.normalizeGatewayUrl(raw) != nil - } - - private static func sshCheckCommand(target: String, identity: String) -> [String]? 
{ - guard let parsed = CommandResolver.parseSSHTarget(target) else { return nil } - let options = [ - "-o", "BatchMode=yes", - "-o", "ConnectTimeout=5", - "-o", "StrictHostKeyChecking=accept-new", - "-o", "UpdateHostKeys=yes", - ] - let args = CommandResolver.sshArguments( - target: parsed, - identity: identity, - options: options, - remoteCommand: ["echo", "ok"]) - return ["/usr/bin/ssh"] + args - } - - private func formatSSHFailure(_ response: Response, target: String) -> String { - let payload = response.payload.flatMap { String(data: $0, encoding: .utf8) } - let trimmed = payload? - .trimmingCharacters(in: .whitespacesAndNewlines) - .split(whereSeparator: \.isNewline) - .joined(separator: " ") - if let trimmed, - trimmed.localizedCaseInsensitiveContains("host key verification failed") - { - let host = CommandResolver.parseSSHTarget(target)?.host ?? target - return "SSH check failed: Host key verification failed. Remove the old key with " + - "`ssh-keygen -R \(host)` and try again." - } - if let trimmed, !trimmed.isEmpty { - if let message = response.message, message.hasPrefix("exit ") { - return "SSH check failed: \(trimmed) (\(message))" - } - return "SSH check failed: \(trimmed)" - } - if let message = response.message { - return "SSH check failed (\(message))" - } - return "SSH check failed" } private func revealLogs() { diff --git a/apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift b/apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift index 2981a60bbf7..932c9fc5e61 100644 --- a/apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift +++ b/apps/macos/Sources/OpenClaw/HostEnvSecurityPolicy.generated.swift @@ -17,6 +17,7 @@ enum HostEnvSecurityPolicy { "BASH_ENV", "ENV", "GIT_EXTERNAL_DIFF", + "GIT_EXEC_PATH", "SHELL", "SHELLOPTS", "PS4", diff --git a/apps/macos/Sources/OpenClaw/NodeMode/MacNodeBrowserProxy.swift b/apps/macos/Sources/OpenClaw/NodeMode/MacNodeBrowserProxy.swift index 0da6510f608..367907f9fb7 100644 --- 
a/apps/macos/Sources/OpenClaw/NodeMode/MacNodeBrowserProxy.swift +++ b/apps/macos/Sources/OpenClaw/NodeMode/MacNodeBrowserProxy.swift @@ -146,8 +146,8 @@ actor MacNodeBrowserProxy { request.setValue(password, forHTTPHeaderField: "x-openclaw-password") } - if method != "GET", let body = params.body?.value { - request.httpBody = try JSONSerialization.data(withJSONObject: body, options: [.fragmentsAllowed]) + if method != "GET", let body = params.body { + request.httpBody = try JSONSerialization.data(withJSONObject: body.foundationValue, options: [.fragmentsAllowed]) request.setValue("application/json", forHTTPHeaderField: "Content-Type") } diff --git a/apps/macos/Sources/OpenClaw/NodeMode/MacNodeModeCoordinator.swift b/apps/macos/Sources/OpenClaw/NodeMode/MacNodeModeCoordinator.swift index fa216d09c5f..5e093c49e24 100644 --- a/apps/macos/Sources/OpenClaw/NodeMode/MacNodeModeCoordinator.swift +++ b/apps/macos/Sources/OpenClaw/NodeMode/MacNodeModeCoordinator.swift @@ -77,6 +77,7 @@ final class MacNodeModeCoordinator { try await self.session.connect( url: config.url, token: config.token, + bootstrapToken: nil, password: config.password, connectOptions: connectOptions, sessionBox: sessionBox, diff --git a/apps/macos/Sources/OpenClaw/Onboarding.swift b/apps/macos/Sources/OpenClaw/Onboarding.swift index 4eae7e092b0..ca183d35311 100644 --- a/apps/macos/Sources/OpenClaw/Onboarding.swift +++ b/apps/macos/Sources/OpenClaw/Onboarding.swift @@ -9,6 +9,13 @@ enum UIStrings { static let welcomeTitle = "Welcome to OpenClaw" } +enum RemoteOnboardingProbeState: Equatable { + case idle + case checking + case ok(RemoteGatewayProbeSuccess) + case failed(String) +} + @MainActor final class OnboardingController { static let shared = OnboardingController() @@ -72,6 +79,9 @@ struct OnboardingView: View { @State var didAutoKickoff = false @State var showAdvancedConnection = false @State var preferredGatewayID: String? 
+ @State var remoteProbeState: RemoteOnboardingProbeState = .idle + @State var remoteAuthIssue: RemoteGatewayAuthIssue? + @State var suppressRemoteProbeReset = false @State var gatewayDiscovery: GatewayDiscoveryModel @State var onboardingChatModel: OpenClawChatViewModel @State var onboardingSkillsModel = SkillsSettingsModel() diff --git a/apps/macos/Sources/OpenClaw/OnboardingView+Pages.swift b/apps/macos/Sources/OpenClaw/OnboardingView+Pages.swift index 8f4d16420bc..f35e4e4c4ec 100644 --- a/apps/macos/Sources/OpenClaw/OnboardingView+Pages.swift +++ b/apps/macos/Sources/OpenClaw/OnboardingView+Pages.swift @@ -2,6 +2,7 @@ import AppKit import OpenClawChatUI import OpenClawDiscovery import OpenClawIPC +import OpenClawKit import SwiftUI extension OnboardingView { @@ -97,6 +98,11 @@ extension OnboardingView { self.gatewayDiscoverySection() + if self.shouldShowRemoteConnectionSection { + Divider().padding(.vertical, 4) + self.remoteConnectionSection() + } + self.connectionChoiceButton( title: "Configure later", subtitle: "Don’t start the Gateway yet.", @@ -109,6 +115,22 @@ extension OnboardingView { } } } + .onChange(of: self.state.connectionMode) { _, newValue in + guard Self.shouldResetRemoteProbeFeedback( + for: newValue, + suppressReset: self.suppressRemoteProbeReset) + else { return } + self.resetRemoteProbeFeedback() + } + .onChange(of: self.state.remoteTransport) { _, _ in + self.resetRemoteProbeFeedback() + } + .onChange(of: self.state.remoteTarget) { _, _ in + self.resetRemoteProbeFeedback() + } + .onChange(of: self.state.remoteUrl) { _, _ in + self.resetRemoteProbeFeedback() + } } private var localGatewaySubtitle: String { @@ -199,25 +221,6 @@ extension OnboardingView { .pickerStyle(.segmented) .frame(width: fieldWidth) } - GridRow { - Text("Gateway token") - .font(.callout.weight(.semibold)) - .frame(width: labelWidth, alignment: .leading) - SecureField("remote gateway auth token (gateway.remote.token)", text: self.$state.remoteToken) - 
.textFieldStyle(.roundedBorder) - .frame(width: fieldWidth) - } - if self.state.remoteTokenUnsupported { - GridRow { - Text("") - .frame(width: labelWidth, alignment: .leading) - Text( - "The current gateway.remote.token value is not plain text. OpenClaw for macOS cannot use it directly; enter a plaintext token here to replace it.") - .font(.caption) - .foregroundStyle(.orange) - .frame(width: fieldWidth, alignment: .leading) - } - } if self.state.remoteTransport == .direct { GridRow { Text("Gateway URL") @@ -289,6 +292,250 @@ extension OnboardingView { } } + private var shouldShowRemoteConnectionSection: Bool { + self.state.connectionMode == .remote || + self.showAdvancedConnection || + self.remoteProbeState != .idle || + self.remoteAuthIssue != nil || + Self.shouldShowRemoteTokenField( + showAdvancedConnection: self.showAdvancedConnection, + remoteToken: self.state.remoteToken, + remoteTokenUnsupported: self.state.remoteTokenUnsupported, + authIssue: self.remoteAuthIssue) + } + + private var shouldShowRemoteTokenField: Bool { + guard self.shouldShowRemoteConnectionSection else { return false } + return Self.shouldShowRemoteTokenField( + showAdvancedConnection: self.showAdvancedConnection, + remoteToken: self.state.remoteToken, + remoteTokenUnsupported: self.state.remoteTokenUnsupported, + authIssue: self.remoteAuthIssue) + } + + private var remoteProbePreflightMessage: String? { + switch self.state.remoteTransport { + case .direct: + let trimmedUrl = self.state.remoteUrl.trimmingCharacters(in: .whitespacesAndNewlines) + if trimmedUrl.isEmpty { + return "Select a nearby gateway or open Advanced to enter a gateway URL." + } + if GatewayRemoteConfig.normalizeGatewayUrl(trimmedUrl) == nil { + return "Gateway URL must use wss:// for remote hosts (ws:// only for localhost)." 
+ } + return nil + case .ssh: + let trimmedTarget = self.state.remoteTarget.trimmingCharacters(in: .whitespacesAndNewlines) + if trimmedTarget.isEmpty { + return "Select a nearby gateway or open Advanced to enter an SSH target." + } + return CommandResolver.sshTargetValidationMessage(trimmedTarget) + } + } + + private var canProbeRemoteConnection: Bool { + self.remoteProbePreflightMessage == nil && self.remoteProbeState != .checking + } + + @ViewBuilder + private func remoteConnectionSection() -> some View { + VStack(alignment: .leading, spacing: 10) { + HStack(alignment: .top, spacing: 12) { + VStack(alignment: .leading, spacing: 2) { + Text("Remote connection") + .font(.callout.weight(.semibold)) + Text("Checks the real remote websocket and auth handshake.") + .font(.caption) + .foregroundStyle(.secondary) + } + Spacer(minLength: 0) + Button { + Task { await self.probeRemoteConnection() } + } label: { + if self.remoteProbeState == .checking { + ProgressView() + .controlSize(.small) + .frame(minWidth: 120) + } else { + Text("Check connection") + .frame(minWidth: 120) + } + } + .buttonStyle(.borderedProminent) + .disabled(!self.canProbeRemoteConnection) + } + + if self.shouldShowRemoteTokenField { + self.remoteTokenField() + } + + if let message = self.remoteProbePreflightMessage, self.remoteProbeState != .checking { + Text(message) + .font(.caption) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + + self.remoteProbeStatusView() + + if let issue = self.remoteAuthIssue { + self.remoteAuthPromptView(issue: issue) + } + } + } + + private func remoteTokenField() -> some View { + VStack(alignment: .leading, spacing: 6) { + HStack(alignment: .center, spacing: 12) { + Text("Gateway token") + .font(.callout.weight(.semibold)) + .frame(width: 110, alignment: .leading) + SecureField("remote gateway auth token (gateway.remote.token)", text: self.$state.remoteToken) + .textFieldStyle(.roundedBorder) + .frame(maxWidth: 320) + } + Text("Used 
when the remote gateway requires token auth.") + .font(.caption) + .foregroundStyle(.secondary) + if self.state.remoteTokenUnsupported { + Text( + "The current gateway.remote.token value is not plain text. OpenClaw for macOS cannot use it directly; enter a plaintext token here to replace it.") + .font(.caption) + .foregroundStyle(.orange) + .fixedSize(horizontal: false, vertical: true) + } + } + } + + @ViewBuilder + private func remoteProbeStatusView() -> some View { + switch self.remoteProbeState { + case .idle: + EmptyView() + case .checking: + Text("Checking remote gateway…") + .font(.caption) + .foregroundStyle(.secondary) + case let .ok(success): + VStack(alignment: .leading, spacing: 2) { + Label(success.title, systemImage: "checkmark.circle.fill") + .font(.caption) + .foregroundStyle(.green) + if let detail = success.detail { + Text(detail) + .font(.caption) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + } + case let .failed(message): + if self.remoteAuthIssue == nil { + Text(message) + .font(.caption) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + } + } + + private func remoteAuthPromptView(issue: RemoteGatewayAuthIssue) -> some View { + let promptStyle = Self.remoteAuthPromptStyle(for: issue) + return HStack(alignment: .top, spacing: 10) { + Image(systemName: promptStyle.systemImage) + .font(.caption.weight(.semibold)) + .foregroundStyle(promptStyle.tint) + .frame(width: 16, alignment: .center) + .padding(.top, 1) + VStack(alignment: .leading, spacing: 4) { + Text(issue.title) + .font(.caption.weight(.semibold)) + Text(.init(issue.body)) + .font(.caption) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + if let footnote = issue.footnote { + Text(.init(footnote)) + .font(.caption) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + } + } + } + + @MainActor + private func probeRemoteConnection() async { + let 
originalMode = self.state.connectionMode + let shouldRestoreMode = originalMode != .remote + if shouldRestoreMode { + // Reuse the shared remote endpoint stack for probing without committing the user's mode choice. + self.state.connectionMode = .remote + } + self.remoteProbeState = .checking + self.remoteAuthIssue = nil + defer { + if shouldRestoreMode { + self.suppressRemoteProbeReset = true + self.state.connectionMode = originalMode + self.suppressRemoteProbeReset = false + } + } + + switch await RemoteGatewayProbe.run() { + case let .ready(success): + self.remoteProbeState = .ok(success) + case let .authIssue(issue): + self.remoteAuthIssue = issue + self.remoteProbeState = .failed(issue.statusMessage) + case let .failed(message): + self.remoteProbeState = .failed(message) + } + } + + private func resetRemoteProbeFeedback() { + self.remoteProbeState = .idle + self.remoteAuthIssue = nil + } + + static func remoteAuthPromptStyle( + for issue: RemoteGatewayAuthIssue) + -> (systemImage: String, tint: Color) + { + switch issue { + case .tokenRequired: + return ("key.fill", .orange) + case .tokenMismatch: + return ("exclamationmark.triangle.fill", .orange) + case .gatewayTokenNotConfigured: + return ("wrench.and.screwdriver.fill", .orange) + case .setupCodeExpired: + return ("qrcode.viewfinder", .orange) + case .passwordRequired: + return ("lock.slash.fill", .orange) + case .pairingRequired: + return ("link.badge.plus", .orange) + } + } + + static func shouldShowRemoteTokenField( + showAdvancedConnection: Bool, + remoteToken: String, + remoteTokenUnsupported: Bool, + authIssue: RemoteGatewayAuthIssue?) 
-> Bool + { + showAdvancedConnection || + remoteTokenUnsupported || + !remoteToken.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty || + authIssue?.showsTokenField == true + } + + static func shouldResetRemoteProbeFeedback( + for connectionMode: AppState.ConnectionMode, + suppressReset: Bool) -> Bool + { + !suppressReset && connectionMode != .remote + } + func gatewaySubtitle(for gateway: GatewayDiscoveryModel.DiscoveredGateway) -> String? { if self.state.remoteTransport == .direct { return GatewayDiscoveryHelpers.directUrl(for: gateway) ?? "Gateway pairing only" diff --git a/apps/macos/Sources/OpenClaw/PortGuardian.swift b/apps/macos/Sources/OpenClaw/PortGuardian.swift index dfae5c3bcaa..7d8837415ff 100644 --- a/apps/macos/Sources/OpenClaw/PortGuardian.swift +++ b/apps/macos/Sources/OpenClaw/PortGuardian.swift @@ -47,7 +47,7 @@ actor PortGuardian { let listeners = await self.listeners(on: port) guard !listeners.isEmpty else { continue } for listener in listeners { - if self.isExpected(listener, port: port, mode: mode) { + if Self.isExpected(listener, port: port, mode: mode) { let message = """ port \(port) already served by expected \(listener.command) (pid \(listener.pid)) — keeping @@ -55,6 +55,14 @@ actor PortGuardian { self.logger.info("\(message, privacy: .public)") continue } + if mode == .remote { + let message = """ + port \(port) held by \(listener.command) + (pid \(listener.pid)) in remote mode — not killing + """ + self.logger.warning(message) + continue + } let killed = await self.kill(listener.pid) if killed { let message = """ @@ -271,8 +279,8 @@ actor PortGuardian { switch mode { case .remote: - expectedDesc = "SSH tunnel to remote gateway" - okPredicate = { $0.command.lowercased().contains("ssh") } + expectedDesc = "Remote gateway (SSH tunnel, Docker, or direct)" + okPredicate = { _ in true } case .local: expectedDesc = "Gateway websocket (node/tsx)" okPredicate = { listener in @@ -352,13 +360,12 @@ actor PortGuardian { return sigkill.ok } - 
private func isExpected(_ listener: Listener, port: Int, mode: AppState.ConnectionMode) -> Bool { + private static func isExpected(_ listener: Listener, port: Int, mode: AppState.ConnectionMode) -> Bool { let cmd = listener.command.lowercased() let full = listener.fullCommand.lowercased() switch mode { case .remote: - // Remote mode expects an SSH tunnel for the gateway WebSocket port. - if port == GatewayEnvironment.gatewayPort() { return cmd.contains("ssh") } + if port == GatewayEnvironment.gatewayPort() { return true } return false case .local: // The gateway daemon may listen as `openclaw` or as its runtime (`node`, `bun`, etc). @@ -406,6 +413,16 @@ extension PortGuardian { self.parseListeners(from: text).map { ($0.pid, $0.command, $0.fullCommand, $0.user) } } + static func _testIsExpected( + command: String, + fullCommand: String, + port: Int, + mode: AppState.ConnectionMode) -> Bool + { + let listener = Listener(pid: 0, command: command, fullCommand: fullCommand, user: nil) + return Self.isExpected(listener, port: port, mode: mode) + } + static func _testBuildReport( port: Int, mode: AppState.ConnectionMode, diff --git a/apps/macos/Sources/OpenClaw/RemoteGatewayProbe.swift b/apps/macos/Sources/OpenClaw/RemoteGatewayProbe.swift new file mode 100644 index 00000000000..7073ad81de7 --- /dev/null +++ b/apps/macos/Sources/OpenClaw/RemoteGatewayProbe.swift @@ -0,0 +1,237 @@ +import Foundation +import OpenClawIPC +import OpenClawKit + +enum RemoteGatewayAuthIssue: Equatable { + case tokenRequired + case tokenMismatch + case gatewayTokenNotConfigured + case setupCodeExpired + case passwordRequired + case pairingRequired + + init?(error: Error) { + guard let authError = error as? 
GatewayConnectAuthError else { + return nil + } + switch authError.detail { + case .authTokenMissing: + self = .tokenRequired + case .authTokenMismatch: + self = .tokenMismatch + case .authTokenNotConfigured: + self = .gatewayTokenNotConfigured + case .authBootstrapTokenInvalid: + self = .setupCodeExpired + case .authPasswordMissing, .authPasswordMismatch, .authPasswordNotConfigured: + self = .passwordRequired + case .pairingRequired: + self = .pairingRequired + default: + return nil + } + } + + var showsTokenField: Bool { + switch self { + case .tokenRequired, .tokenMismatch: + true + case .gatewayTokenNotConfigured, .setupCodeExpired, .passwordRequired, .pairingRequired: + false + } + } + + var title: String { + switch self { + case .tokenRequired: + "This gateway requires an auth token" + case .tokenMismatch: + "That token did not match the gateway" + case .gatewayTokenNotConfigured: + "This gateway host needs token setup" + case .setupCodeExpired: + "This setup code is no longer valid" + case .passwordRequired: + "This gateway is using unsupported auth" + case .pairingRequired: + "This device needs pairing approval" + } + } + + var body: String { + switch self { + case .tokenRequired: + "Paste the token configured on the gateway host. On the gateway host, run `openclaw config get gateway.auth.token`. If the gateway uses an environment variable instead, use `OPENCLAW_GATEWAY_TOKEN`." + case .tokenMismatch: + "Check `gateway.auth.token` or `OPENCLAW_GATEWAY_TOKEN` on the gateway host and try again." + case .gatewayTokenNotConfigured: + "This gateway is set to token auth, but no `gateway.auth.token` is configured on the gateway host. If the gateway uses an environment variable instead, set `OPENCLAW_GATEWAY_TOKEN` before starting the gateway." + case .setupCodeExpired: + "Scan or paste a fresh setup code from an already-paired OpenClaw client, then try again." + case .passwordRequired: + "This onboarding flow does not support password auth yet. 
Reconfigure the gateway to use token auth, then retry." + case .pairingRequired: + "Approve this device from an already-paired OpenClaw client. In your OpenClaw chat, run `/pair approve`, then click **Check connection** again." + } + } + + var footnote: String? { + switch self { + case .tokenRequired, .gatewayTokenNotConfigured: + "No token yet? Generate one on the gateway host with `openclaw doctor --generate-gateway-token`, then set it as `gateway.auth.token`." + case .setupCodeExpired: + nil + case .pairingRequired: + "If you do not have another paired OpenClaw client yet, approve the pending request on the gateway host with `openclaw devices approve`." + case .tokenMismatch, .passwordRequired: + nil + } + } + + var statusMessage: String { + switch self { + case .tokenRequired: + "This gateway requires an auth token from the gateway host." + case .tokenMismatch: + "Gateway token mismatch. Check gateway.auth.token or OPENCLAW_GATEWAY_TOKEN on the gateway host." + case .gatewayTokenNotConfigured: + "This gateway has token auth enabled, but no gateway.auth.token is configured on the host." + case .setupCodeExpired: + "Setup code expired or already used. Scan a fresh setup code, then try again." + case .passwordRequired: + "This gateway uses password auth. Remote onboarding on macOS cannot collect gateway passwords yet." + case .pairingRequired: + "Pairing required. In an already-paired OpenClaw client, run /pair approve, then check the connection again." + } + } +} + +enum RemoteGatewayProbeResult: Equatable { + case ready(RemoteGatewayProbeSuccess) + case authIssue(RemoteGatewayAuthIssue) + case failed(String) +} + +struct RemoteGatewayProbeSuccess: Equatable { + let authSource: GatewayAuthSource? 
+ + var title: String { + switch self.authSource { + case .some(.deviceToken): + "Connected via paired device" + case .some(.bootstrapToken): + "Connected with setup code" + case .some(.sharedToken): + "Connected with gateway token" + case .some(.password): + "Connected with password" + case .some(GatewayAuthSource.none), nil: + "Remote gateway ready" + } + } + + var detail: String? { + switch self.authSource { + case .some(.deviceToken): + "This Mac used a stored device token. New or unpaired devices may still need the gateway token." + case .some(.bootstrapToken): + "This Mac is still using the temporary setup code. Approve pairing to finish provisioning device-scoped auth." + case .some(.sharedToken), .some(.password), .some(GatewayAuthSource.none), nil: + nil + } + } +} + +enum RemoteGatewayProbe { + @MainActor + static func run() async -> RemoteGatewayProbeResult { + AppStateStore.shared.syncGatewayConfigNow() + let settings = CommandResolver.connectionSettings() + let transport = AppStateStore.shared.remoteTransport + + if transport == .direct { + let trimmedUrl = AppStateStore.shared.remoteUrl.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmedUrl.isEmpty else { + return .failed("Set a gateway URL first") + } + guard self.isValidWsUrl(trimmedUrl) else { + return .failed("Gateway URL must use wss:// for remote hosts (ws:// only for localhost)") + } + } else { + let trimmedTarget = settings.target.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmedTarget.isEmpty else { + return .failed("Set an SSH target first") + } + if let validationMessage = CommandResolver.sshTargetValidationMessage(trimmedTarget) { + return .failed(validationMessage) + } + guard let sshCommand = self.sshCheckCommand(target: settings.target, identity: settings.identity) else { + return .failed("SSH target is invalid") + } + + let sshResult = await ShellExecutor.run( + command: sshCommand, + cwd: nil, + env: nil, + timeout: 8) + guard sshResult.ok else { + return 
.failed(self.formatSSHFailure(sshResult, target: settings.target)) + } + } + + do { + _ = try await GatewayConnection.shared.healthSnapshot(timeoutMs: 10_000) + let authSource = await GatewayConnection.shared.authSource() + return .ready(RemoteGatewayProbeSuccess(authSource: authSource)) + } catch { + if let authIssue = RemoteGatewayAuthIssue(error: error) { + return .authIssue(authIssue) + } + return .failed(error.localizedDescription) + } + } + + private static func isValidWsUrl(_ raw: String) -> Bool { + GatewayRemoteConfig.normalizeGatewayUrl(raw) != nil + } + + private static func sshCheckCommand(target: String, identity: String) -> [String]? { + guard let parsed = CommandResolver.parseSSHTarget(target) else { return nil } + let options = [ + "-o", "BatchMode=yes", + "-o", "ConnectTimeout=5", + "-o", "StrictHostKeyChecking=accept-new", + "-o", "UpdateHostKeys=yes", + ] + let args = CommandResolver.sshArguments( + target: parsed, + identity: identity, + options: options, + remoteCommand: ["echo", "ok"]) + return ["/usr/bin/ssh"] + args + } + + private static func formatSSHFailure(_ response: Response, target: String) -> String { + let payload = response.payload.flatMap { String(data: $0, encoding: .utf8) } + let trimmed = payload? + .trimmingCharacters(in: .whitespacesAndNewlines) + .split(whereSeparator: \.isNewline) + .joined(separator: " ") + if let trimmed, + trimmed.localizedCaseInsensitiveContains("host key verification failed") + { + let host = CommandResolver.parseSSHTarget(target)?.host ?? target + return "SSH check failed: Host key verification failed. Remove the old key with ssh-keygen -R \(host) and try again." 
+ } + if let trimmed, !trimmed.isEmpty { + if let message = response.message, message.hasPrefix("exit ") { + return "SSH check failed: \(trimmed) (\(message))" + } + return "SSH check failed: \(trimmed)" + } + if let message = response.message { + return "SSH check failed (\(message))" + } + return "SSH check failed" + } +} diff --git a/apps/macos/Sources/OpenClaw/Resources/Info.plist b/apps/macos/Sources/OpenClaw/Resources/Info.plist index 706fe7029c4..89ebf70beb4 100644 --- a/apps/macos/Sources/OpenClaw/Resources/Info.plist +++ b/apps/macos/Sources/OpenClaw/Resources/Info.plist @@ -15,9 +15,9 @@ CFBundlePackageType APPL CFBundleShortVersionString - 2026.3.9 + 2026.3.14 CFBundleVersion - 202603080 + 202603140 CFBundleIconFile OpenClaw CFBundleURLTypes @@ -59,6 +59,8 @@ OpenClaw uses speech recognition to detect your Voice Wake trigger phrase. NSAppleEventsUsageDescription OpenClaw needs Automation (AppleScript) permission to drive Terminal and other apps for agent actions. + NSRemindersUsageDescription + OpenClaw can access Reminders when requested by the agent for the apple-reminders skill. 
NSAppTransportSecurity diff --git a/apps/macos/Sources/OpenClaw/RuntimeLocator.swift b/apps/macos/Sources/OpenClaw/RuntimeLocator.swift index 3112f57879b..6f1ef2b723d 100644 --- a/apps/macos/Sources/OpenClaw/RuntimeLocator.swift +++ b/apps/macos/Sources/OpenClaw/RuntimeLocator.swift @@ -54,7 +54,7 @@ enum RuntimeResolutionError: Error { enum RuntimeLocator { private static let logger = Logger(subsystem: "ai.openclaw", category: "runtime") - private static let minNode = RuntimeVersion(major: 22, minor: 0, patch: 0) + private static let minNode = RuntimeVersion(major: 22, minor: 16, patch: 0) static func resolve( searchPaths: [String] = CommandResolver.preferredPaths()) -> Result @@ -91,7 +91,7 @@ enum RuntimeLocator { switch error { case let .notFound(searchPaths): [ - "openclaw needs Node >=22.0.0 but found no runtime.", + "openclaw needs Node >=22.16.0 but found no runtime.", "PATH searched: \(searchPaths.joined(separator: ":"))", "Install Node: https://nodejs.org/en/download", ].joined(separator: "\n") @@ -105,7 +105,7 @@ enum RuntimeLocator { [ "Could not parse \(kind.rawValue) version output \"\(raw)\" from \(path).", "PATH searched: \(searchPaths.joined(separator: ":"))", - "Try reinstalling or pinning a supported version (Node >=22.0.0).", + "Try reinstalling or pinning a supported version (Node >=22.16.0).", ].joined(separator: "\n") } } diff --git a/apps/macos/Sources/OpenClaw/WebChatSwiftUI.swift b/apps/macos/Sources/OpenClaw/WebChatSwiftUI.swift index cbec3e74e93..86c225f9ef0 100644 --- a/apps/macos/Sources/OpenClaw/WebChatSwiftUI.swift +++ b/apps/macos/Sources/OpenClaw/WebChatSwiftUI.swift @@ -8,6 +8,7 @@ import QuartzCore import SwiftUI private let webChatSwiftLogger = Logger(subsystem: "ai.openclaw", category: "WebChatSwiftUI") +private let webChatThinkingLevelDefaultsKey = "openclaw.webchat.thinkingLevel" private enum WebChatSwiftUILayout { static let windowSize = NSSize(width: 500, height: 840) @@ -21,6 +22,21 @@ struct MacGatewayChatTransport: 
OpenClawChatTransport { try await GatewayConnection.shared.chatHistory(sessionKey: sessionKey) } + func listModels() async throws -> [OpenClawChatModelChoice] { + do { + let data = try await GatewayConnection.shared.request( + method: "models.list", + params: [:], + timeoutMs: 15000) + let result = try JSONDecoder().decode(ModelsListResult.self, from: data) + return result.models.map(Self.mapModelChoice) + } catch { + webChatSwiftLogger.warning( + "models.list failed; hiding model picker: \(error.localizedDescription, privacy: .public)") + return [] + } + } + func abortRun(sessionKey: String, runId: String) async throws { _ = try await GatewayConnection.shared.request( method: "chat.abort", @@ -43,7 +59,45 @@ struct MacGatewayChatTransport: OpenClawChatTransport { method: "sessions.list", params: params, timeoutMs: 15000) - return try JSONDecoder().decode(OpenClawChatSessionsListResponse.self, from: data) + let decoded = try JSONDecoder().decode(OpenClawChatSessionsListResponse.self, from: data) + let mainSessionKey = await GatewayConnection.shared.cachedMainSessionKey() + let defaults = decoded.defaults.map { + OpenClawChatSessionsDefaults( + model: $0.model, + contextTokens: $0.contextTokens, + mainSessionKey: mainSessionKey) + } ?? OpenClawChatSessionsDefaults( + model: nil, + contextTokens: nil, + mainSessionKey: mainSessionKey) + return OpenClawChatSessionsListResponse( + ts: decoded.ts, + path: decoded.path, + count: decoded.count, + defaults: defaults, + sessions: decoded.sessions) + } + + func setSessionModel(sessionKey: String, model: String?) async throws { + var params: [String: AnyCodable] = [ + "key": AnyCodable(sessionKey), + ] + params["model"] = model.map(AnyCodable.init) ?? 
AnyCodable(NSNull()) + _ = try await GatewayConnection.shared.request( + method: "sessions.patch", + params: params, + timeoutMs: 15000) + } + + func setSessionThinking(sessionKey: String, thinkingLevel: String) async throws { + let params: [String: AnyCodable] = [ + "key": AnyCodable(sessionKey), + "thinkingLevel": AnyCodable(thinkingLevel), + ] + _ = try await GatewayConnection.shared.request( + method: "sessions.patch", + params: params, + timeoutMs: 15000) } func sendMessage( @@ -65,6 +119,13 @@ struct MacGatewayChatTransport: OpenClawChatTransport { try await GatewayConnection.shared.healthOK(timeoutMs: timeoutMs) } + func resetSession(sessionKey: String) async throws { + _ = try await GatewayConnection.shared.request( + method: "sessions.reset", + params: ["key": AnyCodable(sessionKey)], + timeoutMs: 10000) + } + func events() -> AsyncStream { AsyncStream { continuation in let task = Task { @@ -133,6 +194,14 @@ struct MacGatewayChatTransport: OpenClawChatTransport { return .seqGap } } + + private static func mapModelChoice(_ model: OpenClawProtocol.ModelChoice) -> OpenClawChatModelChoice { + OpenClawChatModelChoice( + modelID: model.id, + name: model.name, + provider: model.provider, + contextWindow: model.contextwindow) + } } // MARK: - Window controller @@ -155,7 +224,13 @@ final class WebChatSwiftUIWindowController { init(sessionKey: String, presentation: WebChatPresentation, transport: any OpenClawChatTransport) { self.sessionKey = sessionKey self.presentation = presentation - let vm = OpenClawChatViewModel(sessionKey: sessionKey, transport: transport) + let vm = OpenClawChatViewModel( + sessionKey: sessionKey, + transport: transport, + initialThinkingLevel: Self.persistedThinkingLevel(), + onThinkingLevelChanged: { level in + UserDefaults.standard.set(level, forKey: webChatThinkingLevelDefaultsKey) + }) let accent = Self.color(fromHex: AppStateStore.shared.seamColorHex) self.hosting = NSHostingController(rootView: OpenClawChatView( viewModel: vm, @@ 
-254,6 +329,16 @@ final class WebChatSwiftUIWindowController { OverlayPanelFactory.clearGlobalEventMonitor(&self.dismissMonitor) } + private static func persistedThinkingLevel() -> String? { + let stored = UserDefaults.standard.string(forKey: webChatThinkingLevelDefaultsKey)? + .trimmingCharacters(in: .whitespacesAndNewlines) + .lowercased() + guard let stored, ["off", "minimal", "low", "medium", "high", "xhigh", "adaptive"].contains(stored) else { + return nil + } + return stored + } + private static func makeWindow( for presentation: WebChatPresentation, contentViewController: NSViewController) -> NSWindow diff --git a/apps/macos/Sources/OpenClawProtocol/GatewayModels.swift b/apps/macos/Sources/OpenClawProtocol/GatewayModels.swift index a6223d95bee..3003ae79f7b 100644 --- a/apps/macos/Sources/OpenClawProtocol/GatewayModels.swift +++ b/apps/macos/Sources/OpenClawProtocol/GatewayModels.swift @@ -538,8 +538,6 @@ public struct AgentParams: Codable, Sendable { public let inputprovenance: [String: AnyCodable]? public let idempotencykey: String public let label: String? - public let spawnedby: String? - public let workspacedir: String? public init( message: String, @@ -566,9 +564,7 @@ public struct AgentParams: Codable, Sendable { internalevents: [[String: AnyCodable]]?, inputprovenance: [String: AnyCodable]?, idempotencykey: String, - label: String?, - spawnedby: String?, - workspacedir: String?) + label: String?) 
{ self.message = message self.agentid = agentid @@ -595,8 +591,6 @@ public struct AgentParams: Codable, Sendable { self.inputprovenance = inputprovenance self.idempotencykey = idempotencykey self.label = label - self.spawnedby = spawnedby - self.workspacedir = workspacedir } private enum CodingKeys: String, CodingKey { @@ -625,8 +619,6 @@ public struct AgentParams: Codable, Sendable { case inputprovenance = "inputProvenance" case idempotencykey = "idempotencyKey" case label - case spawnedby = "spawnedBy" - case workspacedir = "workspaceDir" } } @@ -950,6 +942,102 @@ public struct NodeEventParams: Codable, Sendable { } } +public struct NodePendingDrainParams: Codable, Sendable { + public let maxitems: Int? + + public init( + maxitems: Int?) + { + self.maxitems = maxitems + } + + private enum CodingKeys: String, CodingKey { + case maxitems = "maxItems" + } +} + +public struct NodePendingDrainResult: Codable, Sendable { + public let nodeid: String + public let revision: Int + public let items: [[String: AnyCodable]] + public let hasmore: Bool + + public init( + nodeid: String, + revision: Int, + items: [[String: AnyCodable]], + hasmore: Bool) + { + self.nodeid = nodeid + self.revision = revision + self.items = items + self.hasmore = hasmore + } + + private enum CodingKeys: String, CodingKey { + case nodeid = "nodeId" + case revision + case items + case hasmore = "hasMore" + } +} + +public struct NodePendingEnqueueParams: Codable, Sendable { + public let nodeid: String + public let type: String + public let priority: String? + public let expiresinms: Int? + public let wake: Bool? + + public init( + nodeid: String, + type: String, + priority: String?, + expiresinms: Int?, + wake: Bool?) 
+ { + self.nodeid = nodeid + self.type = type + self.priority = priority + self.expiresinms = expiresinms + self.wake = wake + } + + private enum CodingKeys: String, CodingKey { + case nodeid = "nodeId" + case type + case priority + case expiresinms = "expiresInMs" + case wake + } +} + +public struct NodePendingEnqueueResult: Codable, Sendable { + public let nodeid: String + public let revision: Int + public let queued: [String: AnyCodable] + public let waketriggered: Bool + + public init( + nodeid: String, + revision: Int, + queued: [String: AnyCodable], + waketriggered: Bool) + { + self.nodeid = nodeid + self.revision = revision + self.queued = queued + self.waketriggered = waketriggered + } + + private enum CodingKeys: String, CodingKey { + case nodeid = "nodeId" + case revision + case queued + case waketriggered = "wakeTriggered" + } +} + public struct NodeInvokeRequestEvent: Codable, Sendable { public let id: String public let nodeid: String @@ -1018,6 +1106,7 @@ public struct PushTestResult: Codable, Sendable { public let tokensuffix: String public let topic: String public let environment: String + public let transport: String public init( ok: Bool, @@ -1026,7 +1115,8 @@ public struct PushTestResult: Codable, Sendable { reason: String?, tokensuffix: String, topic: String, - environment: String) + environment: String, + transport: String) { self.ok = ok self.status = status @@ -1035,6 +1125,7 @@ public struct PushTestResult: Codable, Sendable { self.tokensuffix = tokensuffix self.topic = topic self.environment = environment + self.transport = transport } private enum CodingKeys: String, CodingKey { @@ -1045,6 +1136,7 @@ public struct PushTestResult: Codable, Sendable { case tokensuffix = "tokenSuffix" case topic case environment + case transport } } @@ -1230,6 +1322,7 @@ public struct SessionsPatchParams: Codable, Sendable { public let key: String public let label: AnyCodable? public let thinkinglevel: AnyCodable? + public let fastmode: AnyCodable? 
public let verboselevel: AnyCodable? public let reasoninglevel: AnyCodable? public let responseusage: AnyCodable? @@ -1240,7 +1333,10 @@ public struct SessionsPatchParams: Codable, Sendable { public let execnode: AnyCodable? public let model: AnyCodable? public let spawnedby: AnyCodable? + public let spawnedworkspacedir: AnyCodable? public let spawndepth: AnyCodable? + public let subagentrole: AnyCodable? + public let subagentcontrolscope: AnyCodable? public let sendpolicy: AnyCodable? public let groupactivation: AnyCodable? @@ -1248,6 +1344,7 @@ public struct SessionsPatchParams: Codable, Sendable { key: String, label: AnyCodable?, thinkinglevel: AnyCodable?, + fastmode: AnyCodable?, verboselevel: AnyCodable?, reasoninglevel: AnyCodable?, responseusage: AnyCodable?, @@ -1258,13 +1355,17 @@ public struct SessionsPatchParams: Codable, Sendable { execnode: AnyCodable?, model: AnyCodable?, spawnedby: AnyCodable?, + spawnedworkspacedir: AnyCodable?, spawndepth: AnyCodable?, + subagentrole: AnyCodable?, + subagentcontrolscope: AnyCodable?, sendpolicy: AnyCodable?, groupactivation: AnyCodable?) 
{ self.key = key self.label = label self.thinkinglevel = thinkinglevel + self.fastmode = fastmode self.verboselevel = verboselevel self.reasoninglevel = reasoninglevel self.responseusage = responseusage @@ -1275,7 +1376,10 @@ public struct SessionsPatchParams: Codable, Sendable { self.execnode = execnode self.model = model self.spawnedby = spawnedby + self.spawnedworkspacedir = spawnedworkspacedir self.spawndepth = spawndepth + self.subagentrole = subagentrole + self.subagentcontrolscope = subagentcontrolscope self.sendpolicy = sendpolicy self.groupactivation = groupactivation } @@ -1284,6 +1388,7 @@ public struct SessionsPatchParams: Codable, Sendable { case key case label case thinkinglevel = "thinkingLevel" + case fastmode = "fastMode" case verboselevel = "verboseLevel" case reasoninglevel = "reasoningLevel" case responseusage = "responseUsage" @@ -1294,7 +1399,10 @@ public struct SessionsPatchParams: Codable, Sendable { case execnode = "execNode" case model case spawnedby = "spawnedBy" + case spawnedworkspacedir = "spawnedWorkspaceDir" case spawndepth = "spawnDepth" + case subagentrole = "subagentRole" + case subagentcontrolscope = "subagentControlScope" case sendpolicy = "sendPolicy" case groupactivation = "groupActivation" } @@ -2950,7 +3058,7 @@ public struct ExecApprovalsSnapshot: Codable, Sendable { public struct ExecApprovalRequestParams: Codable, Sendable { public let id: String? - public let command: String + public let command: String? public let commandargv: [String]? public let systemrunplan: [String: AnyCodable]? public let env: [String: AnyCodable]? 
@@ -2971,7 +3079,7 @@ public struct ExecApprovalRequestParams: Codable, Sendable { public init( id: String?, - command: String, + command: String?, commandargv: [String]?, systemrunplan: [String: AnyCodable]?, env: [String: AnyCodable]?, diff --git a/apps/macos/Tests/OpenClawIPCTests/ExecAllowlistTests.swift b/apps/macos/Tests/OpenClawIPCTests/ExecAllowlistTests.swift index f12b8f717dc..fa92cc81ef5 100644 --- a/apps/macos/Tests/OpenClawIPCTests/ExecAllowlistTests.swift +++ b/apps/macos/Tests/OpenClawIPCTests/ExecAllowlistTests.swift @@ -141,6 +141,26 @@ struct ExecAllowlistTests { #expect(resolutions.isEmpty) } + @Test func `resolve for allowlist fails closed on line-continued command substitution`() { + let command = ["/bin/sh", "-lc", "echo $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-line-cont-subst)"] + let resolutions = ExecCommandResolution.resolveForAllowlist( + command: command, + rawCommand: "echo $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-line-cont-subst)", + cwd: nil, + env: ["PATH": "/usr/bin:/bin"]) + #expect(resolutions.isEmpty) + } + + @Test func `resolve for allowlist fails closed on chained line-continued command substitution`() { + let command = ["/bin/sh", "-lc", "echo ok && $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-chained-line-cont-subst)"] + let resolutions = ExecCommandResolution.resolveForAllowlist( + command: command, + rawCommand: "echo ok && $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-chained-line-cont-subst)", + cwd: nil, + env: ["PATH": "/usr/bin:/bin"]) + #expect(resolutions.isEmpty) + } + @Test func `resolve for allowlist fails closed on quoted backticks`() { let command = ["/bin/sh", "-lc", "echo \"ok `/usr/bin/id`\""] let resolutions = ExecCommandResolution.resolveForAllowlist( @@ -208,6 +228,30 @@ struct ExecAllowlistTests { #expect(resolutions[1].executableName == "touch") } + @Test func `resolve for allowlist unwraps env dispatch wrappers inside shell segments`() { + let command = ["/bin/sh", "-lc", "env 
/usr/bin/touch /tmp/openclaw-allowlist-test"] + let resolutions = ExecCommandResolution.resolveForAllowlist( + command: command, + rawCommand: "env /usr/bin/touch /tmp/openclaw-allowlist-test", + cwd: nil, + env: ["PATH": "/usr/bin:/bin"]) + #expect(resolutions.count == 1) + #expect(resolutions[0].resolvedPath == "/usr/bin/touch") + #expect(resolutions[0].executableName == "touch") + } + + @Test func `resolve for allowlist unwraps env assignments inside shell segments`() { + let command = ["/bin/sh", "-lc", "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test"] + let resolutions = ExecCommandResolution.resolveForAllowlist( + command: command, + rawCommand: "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test", + cwd: nil, + env: ["PATH": "/usr/bin:/bin"]) + #expect(resolutions.count == 1) + #expect(resolutions[0].resolvedPath == "/usr/bin/touch") + #expect(resolutions[0].executableName == "touch") + } + @Test func `resolve for allowlist unwraps env to effective direct executable`() { let command = ["/usr/bin/env", "FOO=bar", "/usr/bin/printf", "ok"] let resolutions = ExecCommandResolution.resolveForAllowlist( diff --git a/apps/macos/Tests/OpenClawIPCTests/ExecApprovalsGatewayPrompterTests.swift b/apps/macos/Tests/OpenClawIPCTests/ExecApprovalsGatewayPrompterTests.swift index cd4e234ed66..03b17b42ab2 100644 --- a/apps/macos/Tests/OpenClawIPCTests/ExecApprovalsGatewayPrompterTests.swift +++ b/apps/macos/Tests/OpenClawIPCTests/ExecApprovalsGatewayPrompterTests.swift @@ -52,4 +52,51 @@ struct ExecApprovalsGatewayPrompterTests { lastInputSeconds: 400) #expect(!remote) } + + // MARK: - shouldAsk + + @Test func askAlwaysPromptsRegardlessOfSecurity() { + #expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .deny, ask: .always)) + #expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .allowlist, ask: .always)) + #expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .full, ask: .always)) + } + + @Test func askOnMissPromptsOnlyForAllowlist() { 
+ #expect(ExecApprovalsGatewayPrompter._testShouldAsk(security: .allowlist, ask: .onMiss)) + #expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .deny, ask: .onMiss)) + #expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .full, ask: .onMiss)) + } + + @Test func askOffNeverPrompts() { + #expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .deny, ask: .off)) + #expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .allowlist, ask: .off)) + #expect(!ExecApprovalsGatewayPrompter._testShouldAsk(security: .full, ask: .off)) + } + + @Test func fallbackAllowlistAllowsMatchingResolvedPath() { + let decision = ExecApprovalsGatewayPrompter._testFallbackDecision( + command: "git status", + resolvedPath: "/usr/bin/git", + askFallback: .allowlist, + allowlistPatterns: ["/usr/bin/git"]) + #expect(decision == .allowOnce) + } + + @Test func fallbackAllowlistDeniesAllowlistMiss() { + let decision = ExecApprovalsGatewayPrompter._testFallbackDecision( + command: "git status", + resolvedPath: "/usr/bin/git", + askFallback: .allowlist, + allowlistPatterns: ["/usr/bin/rg"]) + #expect(decision == .deny) + } + + @Test func fallbackFullAllowsWhenPromptCannotBeShown() { + let decision = ExecApprovalsGatewayPrompter._testFallbackDecision( + command: "git status", + resolvedPath: "/usr/bin/git", + askFallback: .full, + allowlistPatterns: []) + #expect(decision == .allowOnce) + } } diff --git a/apps/macos/Tests/OpenClawIPCTests/ExecSkillBinTrustTests.swift b/apps/macos/Tests/OpenClawIPCTests/ExecSkillBinTrustTests.swift new file mode 100644 index 00000000000..779b59a3499 --- /dev/null +++ b/apps/macos/Tests/OpenClawIPCTests/ExecSkillBinTrustTests.swift @@ -0,0 +1,90 @@ +import Foundation +import Testing +@testable import OpenClaw + +struct ExecSkillBinTrustTests { + @Test func `build trust index resolves skill bin paths`() throws { + let fixture = try Self.makeExecutable(named: "jq") + defer { try? 
FileManager.default.removeItem(at: fixture.root) } + + let trust = SkillBinsCache._testBuildTrustIndex( + report: Self.makeReport(bins: ["jq"]), + searchPaths: [fixture.root.path]) + + #expect(trust.names == ["jq"]) + #expect(trust.pathsByName["jq"] == [fixture.path]) + } + + @Test func `skill auto allow accepts trusted resolved skill bin path`() throws { + let fixture = try Self.makeExecutable(named: "jq") + defer { try? FileManager.default.removeItem(at: fixture.root) } + + let trust = SkillBinsCache._testBuildTrustIndex( + report: Self.makeReport(bins: ["jq"]), + searchPaths: [fixture.root.path]) + let resolution = ExecCommandResolution( + rawExecutable: "jq", + resolvedPath: fixture.path, + executableName: "jq", + cwd: nil) + + #expect(ExecApprovalEvaluator._testIsSkillAutoAllowed([resolution], trustedBinsByName: trust.pathsByName)) + } + + @Test func `skill auto allow rejects same basename at different path`() throws { + let trusted = try Self.makeExecutable(named: "jq") + let untrusted = try Self.makeExecutable(named: "jq") + defer { + try? FileManager.default.removeItem(at: trusted.root) + try? 
FileManager.default.removeItem(at: untrusted.root) + } + + let trust = SkillBinsCache._testBuildTrustIndex( + report: Self.makeReport(bins: ["jq"]), + searchPaths: [trusted.root.path]) + let resolution = ExecCommandResolution( + rawExecutable: "jq", + resolvedPath: untrusted.path, + executableName: "jq", + cwd: nil) + + #expect(!ExecApprovalEvaluator._testIsSkillAutoAllowed([resolution], trustedBinsByName: trust.pathsByName)) + } + + private static func makeExecutable(named name: String) throws -> (root: URL, path: String) { + let root = FileManager.default.temporaryDirectory + .appendingPathComponent("openclaw-skill-bin-\(UUID().uuidString)", isDirectory: true) + try FileManager.default.createDirectory(at: root, withIntermediateDirectories: true) + let file = root.appendingPathComponent(name) + try "#!/bin/sh\nexit 0\n".write(to: file, atomically: true, encoding: .utf8) + try FileManager.default.setAttributes( + [.posixPermissions: NSNumber(value: Int16(0o755))], + ofItemAtPath: file.path) + return (root, file.path) + } + + private static func makeReport(bins: [String]) -> SkillsStatusReport { + SkillsStatusReport( + workspaceDir: "/tmp/workspace", + managedSkillsDir: "/tmp/skills", + skills: [ + SkillStatus( + name: "test-skill", + description: "test", + source: "local", + filePath: "/tmp/skills/test-skill/SKILL.md", + baseDir: "/tmp/skills/test-skill", + skillKey: "test-skill", + primaryEnv: nil, + emoji: nil, + homepage: nil, + always: false, + disabled: false, + eligible: true, + requirements: SkillRequirements(bins: bins, env: [], config: []), + missing: SkillMissing(bins: [], env: [], config: []), + configChecks: [], + install: []) + ]) + } +} diff --git a/apps/macos/Tests/OpenClawIPCTests/GatewayChannelConnectTests.swift b/apps/macos/Tests/OpenClawIPCTests/GatewayChannelConnectTests.swift index 8d37faa511e..9942f6e84ce 100644 --- a/apps/macos/Tests/OpenClawIPCTests/GatewayChannelConnectTests.swift +++ 
b/apps/macos/Tests/OpenClawIPCTests/GatewayChannelConnectTests.swift @@ -7,6 +7,11 @@ struct GatewayChannelConnectTests { private enum FakeResponse { case helloOk(delayMs: Int) case invalid(delayMs: Int) + case authFailed( + delayMs: Int, + detailCode: String, + canRetryWithDeviceToken: Bool, + recommendedNextStep: String?) } private func makeSession(response: FakeResponse) -> GatewayTestWebSocketSession { @@ -27,6 +32,14 @@ struct GatewayChannelConnectTests { case let .invalid(ms): delayMs = ms message = .string("not json") + case let .authFailed(ms, detailCode, canRetryWithDeviceToken, recommendedNextStep): + delayMs = ms + let id = task.snapshotConnectRequestID() ?? "connect" + message = .data(GatewayWebSocketTestSupport.connectAuthFailureData( + id: id, + detailCode: detailCode, + canRetryWithDeviceToken: canRetryWithDeviceToken, + recommendedNextStep: recommendedNextStep)) } try await Task.sleep(nanoseconds: UInt64(delayMs) * 1_000_000) return message @@ -71,4 +84,29 @@ struct GatewayChannelConnectTests { }()) #expect(session.snapshotMakeCount() == 1) } + + @Test func `connect surfaces structured auth failure`() async throws { + let session = self.makeSession(response: .authFailed( + delayMs: 0, + detailCode: GatewayConnectAuthDetailCode.authTokenMissing.rawValue, + canRetryWithDeviceToken: true, + recommendedNextStep: GatewayConnectRecoveryNextStep.updateAuthConfiguration.rawValue)) + let channel = try GatewayChannelActor( + url: #require(URL(string: "ws://example.invalid")), + token: nil, + session: WebSocketSessionBox(session: session)) + + do { + try await channel.connect() + Issue.record("expected GatewayConnectAuthError") + } catch let error as GatewayConnectAuthError { + #expect(error.detail == .authTokenMissing) + #expect(error.detailCode == GatewayConnectAuthDetailCode.authTokenMissing.rawValue) + #expect(error.canRetryWithDeviceToken) + #expect(error.recommendedNextStep == .updateAuthConfiguration) + #expect(error.recommendedNextStepCode == 
GatewayConnectRecoveryNextStep.updateAuthConfiguration.rawValue) + } catch { + Issue.record("unexpected error: \(error)") + } + } } diff --git a/apps/macos/Tests/OpenClawIPCTests/GatewayWebSocketTestSupport.swift b/apps/macos/Tests/OpenClawIPCTests/GatewayWebSocketTestSupport.swift index 8af4ccf6905..cf2b13de5ea 100644 --- a/apps/macos/Tests/OpenClawIPCTests/GatewayWebSocketTestSupport.swift +++ b/apps/macos/Tests/OpenClawIPCTests/GatewayWebSocketTestSupport.swift @@ -52,6 +52,40 @@ enum GatewayWebSocketTestSupport { return Data(json.utf8) } + static func connectAuthFailureData( + id: String, + detailCode: String, + message: String = "gateway auth rejected", + canRetryWithDeviceToken: Bool = false, + recommendedNextStep: String? = nil) -> Data + { + let recommendedNextStepJson: String + if let recommendedNextStep { + recommendedNextStepJson = """ + , + "recommendedNextStep": "\(recommendedNextStep)" + """ + } else { + recommendedNextStepJson = "" + } + let json = """ + { + "type": "res", + "id": "\(id)", + "ok": false, + "error": { + "message": "\(message)", + "details": { + "code": "\(detailCode)", + "canRetryWithDeviceToken": \(canRetryWithDeviceToken ? "true" : "false") + \(recommendedNextStepJson) + } + } + } + """ + return Data(json.utf8) + } + static func requestID(from message: URLSessionWebSocketTask.Message) -> String? { guard let obj = self.requestFrameObject(from: message) else { return nil } guard (obj["type"] as? 
String) == "req" else { diff --git a/apps/macos/Tests/OpenClawIPCTests/LowCoverageHelperTests.swift b/apps/macos/Tests/OpenClawIPCTests/LowCoverageHelperTests.swift index c8928978f74..a37135ff490 100644 --- a/apps/macos/Tests/OpenClawIPCTests/LowCoverageHelperTests.swift +++ b/apps/macos/Tests/OpenClawIPCTests/LowCoverageHelperTests.swift @@ -139,6 +139,54 @@ struct LowCoverageHelperTests { #expect(emptyReport.summary.contains("Nothing is listening")) } + @Test func `port guardian remote mode does not kill docker`() { + #expect(PortGuardian._testIsExpected( + command: "com.docker.backend", + fullCommand: "com.docker.backend", + port: 18789, mode: .remote) == true) + + #expect(PortGuardian._testIsExpected( + command: "ssh", + fullCommand: "ssh -L 18789:localhost:18789 user@host", + port: 18789, mode: .remote) == true) + + #expect(PortGuardian._testIsExpected( + command: "podman", + fullCommand: "podman", + port: 18789, mode: .remote) == true) + } + + @Test func `port guardian local mode still rejects unexpected`() { + #expect(PortGuardian._testIsExpected( + command: "com.docker.backend", + fullCommand: "com.docker.backend", + port: 18789, mode: .local) == false) + + #expect(PortGuardian._testIsExpected( + command: "python", + fullCommand: "python server.py", + port: 18789, mode: .local) == false) + + #expect(PortGuardian._testIsExpected( + command: "node", + fullCommand: "node /path/to/gateway-daemon", + port: 18789, mode: .local) == true) + } + + @Test func `port guardian remote mode report accepts any listener`() { + let dockerReport = PortGuardian._testBuildReport( + port: 18789, mode: .remote, + listeners: [(pid: 99, command: "com.docker.backend", + fullCommand: "com.docker.backend", user: "me")]) + #expect(dockerReport.offenders.isEmpty) + + let localDockerReport = PortGuardian._testBuildReport( + port: 18789, mode: .local, + listeners: [(pid: 99, command: "com.docker.backend", + fullCommand: "com.docker.backend", user: "me")]) + 
#expect(!localDockerReport.offenders.isEmpty) + } + @Test @MainActor func `canvas scheme handler resolves files and errors`() throws { let root = FileManager().temporaryDirectory .appendingPathComponent("canvas-\(UUID().uuidString)", isDirectory: true) diff --git a/apps/macos/Tests/OpenClawIPCTests/MacNodeBrowserProxyTests.swift b/apps/macos/Tests/OpenClawIPCTests/MacNodeBrowserProxyTests.swift index c000f6d4241..b341263b21f 100644 --- a/apps/macos/Tests/OpenClawIPCTests/MacNodeBrowserProxyTests.swift +++ b/apps/macos/Tests/OpenClawIPCTests/MacNodeBrowserProxyTests.swift @@ -38,4 +38,49 @@ struct MacNodeBrowserProxyTests { #expect(tabs.count == 1) #expect(tabs[0]["id"] as? String == "tab-1") } + + // Regression test: nested POST bodies must serialize without __SwiftValue crashes. + @Test func postRequestSerializesNestedBodyWithoutCrash() async throws { + actor BodyCapture { + private var body: Data? + + func set(_ body: Data?) { + self.body = body + } + + func get() -> Data? { + self.body + } + } + + let capturedBody = BodyCapture() + let proxy = MacNodeBrowserProxy( + endpointProvider: { + MacNodeBrowserProxy.Endpoint( + baseURL: URL(string: "http://127.0.0.1:18791")!, + token: nil, + password: nil) + }, + performRequest: { request in + await capturedBody.set(request.httpBody) + let url = try #require(request.url) + let response = try #require( + HTTPURLResponse( + url: url, + statusCode: 200, + httpVersion: nil, + headerFields: nil)) + return (Data(#"{"ok":true}"#.utf8), response) + }) + + _ = try await proxy.request( + paramsJSON: #"{"method":"POST","path":"/action","body":{"nested":{"key":"val"},"arr":[1,2]}}"#) + + let bodyData = try #require(await capturedBody.get()) + let parsed = try #require(JSONSerialization.jsonObject(with: bodyData) as? [String: Any]) + let nested = try #require(parsed["nested"] as? [String: Any]) + #expect(nested["key"] as? String == "val") + let arr = try #require(parsed["arr"] as? 
[Any]) + #expect(arr.count == 2) + } } diff --git a/apps/macos/Tests/OpenClawIPCTests/OnboardingRemoteAuthPromptTests.swift b/apps/macos/Tests/OpenClawIPCTests/OnboardingRemoteAuthPromptTests.swift new file mode 100644 index 00000000000..00f3e704708 --- /dev/null +++ b/apps/macos/Tests/OpenClawIPCTests/OnboardingRemoteAuthPromptTests.swift @@ -0,0 +1,139 @@ +import OpenClawKit +import Testing +@testable import OpenClaw + +@MainActor +struct OnboardingRemoteAuthPromptTests { + @Test func `auth detail codes map to remote auth issues`() { + let tokenMissing = GatewayConnectAuthError( + message: "token missing", + detailCode: GatewayConnectAuthDetailCode.authTokenMissing.rawValue, + canRetryWithDeviceToken: false) + let tokenMismatch = GatewayConnectAuthError( + message: "token mismatch", + detailCode: GatewayConnectAuthDetailCode.authTokenMismatch.rawValue, + canRetryWithDeviceToken: false) + let tokenNotConfigured = GatewayConnectAuthError( + message: "token not configured", + detailCode: GatewayConnectAuthDetailCode.authTokenNotConfigured.rawValue, + canRetryWithDeviceToken: false) + let bootstrapInvalid = GatewayConnectAuthError( + message: "setup code expired", + detailCode: GatewayConnectAuthDetailCode.authBootstrapTokenInvalid.rawValue, + canRetryWithDeviceToken: false) + let passwordMissing = GatewayConnectAuthError( + message: "password missing", + detailCode: GatewayConnectAuthDetailCode.authPasswordMissing.rawValue, + canRetryWithDeviceToken: false) + let pairingRequired = GatewayConnectAuthError( + message: "pairing required", + detailCode: GatewayConnectAuthDetailCode.pairingRequired.rawValue, + canRetryWithDeviceToken: false) + let unknown = GatewayConnectAuthError( + message: "other", + detailCode: "SOMETHING_ELSE", + canRetryWithDeviceToken: false) + + #expect(RemoteGatewayAuthIssue(error: tokenMissing) == .tokenRequired) + #expect(RemoteGatewayAuthIssue(error: tokenMismatch) == .tokenMismatch) + #expect(RemoteGatewayAuthIssue(error: tokenNotConfigured) 
== .gatewayTokenNotConfigured) + #expect(RemoteGatewayAuthIssue(error: bootstrapInvalid) == .setupCodeExpired) + #expect(RemoteGatewayAuthIssue(error: passwordMissing) == .passwordRequired) + #expect(RemoteGatewayAuthIssue(error: pairingRequired) == .pairingRequired) + #expect(RemoteGatewayAuthIssue(error: unknown) == nil) + } + + @Test func `password detail family maps to password required issue`() { + let mismatch = GatewayConnectAuthError( + message: "password mismatch", + detailCode: GatewayConnectAuthDetailCode.authPasswordMismatch.rawValue, + canRetryWithDeviceToken: false) + let notConfigured = GatewayConnectAuthError( + message: "password not configured", + detailCode: GatewayConnectAuthDetailCode.authPasswordNotConfigured.rawValue, + canRetryWithDeviceToken: false) + + #expect(RemoteGatewayAuthIssue(error: mismatch) == .passwordRequired) + #expect(RemoteGatewayAuthIssue(error: notConfigured) == .passwordRequired) + } + + @Test func `token field visibility follows onboarding rules`() { + #expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: false, + remoteToken: "", + remoteTokenUnsupported: false, + authIssue: nil) == false) + #expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: true, + remoteToken: "", + remoteTokenUnsupported: false, + authIssue: nil)) + #expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: false, + remoteToken: "secret", + remoteTokenUnsupported: false, + authIssue: nil)) + #expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: false, + remoteToken: "", + remoteTokenUnsupported: true, + authIssue: nil)) + #expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: false, + remoteToken: "", + remoteTokenUnsupported: false, + authIssue: .tokenRequired)) + #expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: false, + remoteToken: "", + remoteTokenUnsupported: false, + authIssue: .tokenMismatch)) + 
#expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: false, + remoteToken: "", + remoteTokenUnsupported: false, + authIssue: .gatewayTokenNotConfigured) == false) + #expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: false, + remoteToken: "", + remoteTokenUnsupported: false, + authIssue: .setupCodeExpired) == false) + #expect(OnboardingView.shouldShowRemoteTokenField( + showAdvancedConnection: false, + remoteToken: "", + remoteTokenUnsupported: false, + authIssue: .pairingRequired) == false) + } + + @Test func `pairing required copy points users to pair approve`() { + let issue = RemoteGatewayAuthIssue.pairingRequired + + #expect(issue.title == "This device needs pairing approval") + #expect(issue.body.contains("`/pair approve`")) + #expect(issue.statusMessage.contains("/pair approve")) + #expect(issue.footnote?.contains("`openclaw devices approve`") == true) + } + + @Test func `paired device success copy explains auth source`() { + let pairedDevice = RemoteGatewayProbeSuccess(authSource: .deviceToken) + let bootstrap = RemoteGatewayProbeSuccess(authSource: .bootstrapToken) + let sharedToken = RemoteGatewayProbeSuccess(authSource: .sharedToken) + let noAuth = RemoteGatewayProbeSuccess(authSource: GatewayAuthSource.none) + + #expect(pairedDevice.title == "Connected via paired device") + #expect(pairedDevice.detail == "This Mac used a stored device token. New or unpaired devices may still need the gateway token.") + #expect(bootstrap.title == "Connected with setup code") + #expect(bootstrap.detail == "This Mac is still using the temporary setup code. 
Approve pairing to finish provisioning device-scoped auth.") + #expect(sharedToken.title == "Connected with gateway token") + #expect(sharedToken.detail == nil) + #expect(noAuth.title == "Remote gateway ready") + #expect(noAuth.detail == nil) + } + + @Test func `transient probe mode restore does not clear probe feedback`() { + #expect(OnboardingView.shouldResetRemoteProbeFeedback(for: .local, suppressReset: false)) + #expect(OnboardingView.shouldResetRemoteProbeFeedback(for: .unconfigured, suppressReset: false)) + #expect(OnboardingView.shouldResetRemoteProbeFeedback(for: .remote, suppressReset: false) == false) + #expect(OnboardingView.shouldResetRemoteProbeFeedback(for: .local, suppressReset: true) == false) + } +} diff --git a/apps/macos/Tests/OpenClawIPCTests/RuntimeLocatorTests.swift b/apps/macos/Tests/OpenClawIPCTests/RuntimeLocatorTests.swift index 990c033445f..782dbd77212 100644 --- a/apps/macos/Tests/OpenClawIPCTests/RuntimeLocatorTests.swift +++ b/apps/macos/Tests/OpenClawIPCTests/RuntimeLocatorTests.swift @@ -16,7 +16,7 @@ struct RuntimeLocatorTests { @Test func `resolve succeeds with valid node`() throws { let script = """ #!/bin/sh - echo v22.5.0 + echo v22.16.0 """ let node = try self.makeTempExecutable(contents: script) let result = RuntimeLocator.resolve(searchPaths: [node.deletingLastPathComponent().path]) @@ -25,7 +25,23 @@ struct RuntimeLocatorTests { return } #expect(res.path == node.path) - #expect(res.version == RuntimeVersion(major: 22, minor: 5, patch: 0)) + #expect(res.version == RuntimeVersion(major: 22, minor: 16, patch: 0)) + } + + @Test func `resolve fails on boundary below minimum`() throws { + let script = """ + #!/bin/sh + echo v22.15.9 + """ + let node = try self.makeTempExecutable(contents: script) + let result = RuntimeLocator.resolve(searchPaths: [node.deletingLastPathComponent().path]) + guard case let .failure(.unsupported(_, found, required, path, _)) = result else { + Issue.record("Expected unsupported error, got \(result)") 
+ return + } + #expect(found == RuntimeVersion(major: 22, minor: 15, patch: 9)) + #expect(required == RuntimeVersion(major: 22, minor: 16, patch: 0)) + #expect(path == node.path) } @Test func `resolve fails when too old`() throws { @@ -60,7 +76,17 @@ struct RuntimeLocatorTests { @Test func `describe failure includes paths`() { let msg = RuntimeLocator.describeFailure(.notFound(searchPaths: ["/tmp/a", "/tmp/b"])) + #expect(msg.contains("Node >=22.16.0")) #expect(msg.contains("PATH searched: /tmp/a:/tmp/b")) + + let parseMsg = RuntimeLocator.describeFailure( + .versionParse( + kind: .node, + raw: "garbage", + path: "/usr/local/bin/node", + searchPaths: ["/usr/local/bin"], + )) + #expect(parseMsg.contains("Node >=22.16.0")) } @Test func `runtime version parses with leading V and metadata`() { diff --git a/apps/macos/Tests/OpenClawIPCTests/VoiceWakeRuntimeTests.swift b/apps/macos/Tests/OpenClawIPCTests/VoiceWakeRuntimeTests.swift index eac7ceea37d..fcf3f3b1158 100644 --- a/apps/macos/Tests/OpenClawIPCTests/VoiceWakeRuntimeTests.swift +++ b/apps/macos/Tests/OpenClawIPCTests/VoiceWakeRuntimeTests.swift @@ -74,4 +74,22 @@ struct VoiceWakeRuntimeTests { let config = WakeWordGateConfig(triggers: ["openclaw"], minPostTriggerGap: 0.3) #expect(WakeWordGate.match(transcript: transcript, segments: segments, config: config)?.command == "do thing") } + + @Test func `gate command text handles foreign string ranges`() { + let transcript = "hey openclaw do thing" + let other = "do thing" + let foreignRange = other.range(of: "do") + let segments = [ + WakeWordSegment(text: "hey", start: 0.0, duration: 0.1, range: transcript.range(of: "hey")), + WakeWordSegment(text: "openclaw", start: 0.2, duration: 0.1, range: transcript.range(of: "openclaw")), + WakeWordSegment(text: "do", start: 0.9, duration: 0.1, range: foreignRange), + WakeWordSegment(text: "thing", start: 1.1, duration: 0.1, range: nil), + ] + + #expect( + WakeWordGate.commandText( + transcript: transcript, + segments: 
segments, + triggerEndTime: 0.3) == "do thing") + } } diff --git a/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatComposer.swift b/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatComposer.swift index 14bd67ed445..3cd290389fe 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatComposer.swift +++ b/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatComposer.swift @@ -9,6 +9,8 @@ import UniformTypeIdentifiers @MainActor struct OpenClawChatComposer: View { + private static let menuThinkingLevels = ["off", "low", "medium", "high"] + @Bindable var viewModel: OpenClawChatViewModel let style: OpenClawChatView.Style let showsSessionSwitcher: Bool @@ -27,11 +29,15 @@ struct OpenClawChatComposer: View { if self.showsSessionSwitcher { self.sessionPicker } + if self.viewModel.showsModelPicker { + self.modelPicker + } self.thinkingPicker Spacer() self.refreshButton self.attachmentPicker } + .padding(.horizontal, 10) } if self.showsAttachments, !self.viewModel.attachments.isEmpty { @@ -83,11 +89,19 @@ struct OpenClawChatComposer: View { } private var thinkingPicker: some View { - Picker("Thinking", selection: self.$viewModel.thinkingLevel) { + Picker( + "Thinking", + selection: Binding( + get: { self.viewModel.thinkingLevel }, + set: { next in self.viewModel.selectThinkingLevel(next) })) + { Text("Off").tag("off") Text("Low").tag("low") Text("Medium").tag("medium") Text("High").tag("high") + if !Self.menuThinkingLevels.contains(self.viewModel.thinkingLevel) { + Text(self.viewModel.thinkingLevel.capitalized).tag(self.viewModel.thinkingLevel) + } } .labelsHidden() .pickerStyle(.menu) @@ -95,6 +109,25 @@ struct OpenClawChatComposer: View { .frame(maxWidth: 140, alignment: .leading) } + private var modelPicker: some View { + Picker( + "Model", + selection: Binding( + get: { self.viewModel.modelSelectionID }, + set: { next in self.viewModel.selectModel(next) })) + { + Text(self.viewModel.defaultModelLabel).tag(OpenClawChatViewModel.defaultModelSelectionID) + 
ForEach(self.viewModel.modelChoices) { model in + Text(model.displayLabel).tag(model.selectionID) + } + } + .labelsHidden() + .pickerStyle(.menu) + .controlSize(.small) + .frame(maxWidth: 240, alignment: .leading) + .help("Model") + } + private var sessionPicker: some View { Picker( "Session", diff --git a/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatSessions.swift b/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatSessions.swift index febe69a3cbe..c5a74c9a9aa 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatSessions.swift +++ b/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatSessions.swift @@ -1,8 +1,46 @@ import Foundation +public struct OpenClawChatModelChoice: Identifiable, Codable, Sendable, Hashable { + public var id: String { self.selectionID } + + public let modelID: String + public let name: String + public let provider: String + public let contextWindow: Int? + + public init(modelID: String, name: String, provider: String, contextWindow: Int?) { + self.modelID = modelID + self.name = name + self.provider = provider + self.contextWindow = contextWindow + } + + /// Provider-qualified model ref used for picker identity and selection tags. + public var selectionID: String { + let trimmedProvider = self.provider.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmedProvider.isEmpty else { return self.modelID } + let providerPrefix = "\(trimmedProvider)/" + if self.modelID.hasPrefix(providerPrefix) { + return self.modelID + } + return "\(trimmedProvider)/\(self.modelID)" + } + + public var displayLabel: String { + self.selectionID + } +} + public struct OpenClawChatSessionsDefaults: Codable, Sendable { public let model: String? public let contextTokens: Int? + public let mainSessionKey: String? + + public init(model: String?, contextTokens: Int?, mainSessionKey: String? 
= nil) { + self.model = model + self.contextTokens = contextTokens + self.mainSessionKey = mainSessionKey + } } public struct OpenClawChatSessionEntry: Codable, Identifiable, Sendable, Hashable { @@ -27,6 +65,7 @@ public struct OpenClawChatSessionEntry: Codable, Identifiable, Sendable, Hashabl public let outputTokens: Int? public let totalTokens: Int? + public let modelProvider: String? public let model: String? public let contextTokens: Int? } @@ -37,4 +76,18 @@ public struct OpenClawChatSessionsListResponse: Codable, Sendable { public let count: Int? public let defaults: OpenClawChatSessionsDefaults? public let sessions: [OpenClawChatSessionEntry] + + public init( + ts: Double?, + path: String?, + count: Int?, + defaults: OpenClawChatSessionsDefaults?, + sessions: [OpenClawChatSessionEntry]) + { + self.ts = ts + self.path = path + self.count = count + self.defaults = defaults + self.sessions = sessions + } } diff --git a/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatTransport.swift b/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatTransport.swift index 037c1352205..49bd91db372 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatTransport.swift +++ b/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatTransport.swift @@ -10,6 +10,7 @@ public enum OpenClawChatTransportEvent: Sendable { public protocol OpenClawChatTransport: Sendable { func requestHistory(sessionKey: String) async throws -> OpenClawChatHistoryPayload + func listModels() async throws -> [OpenClawChatModelChoice] func sendMessage( sessionKey: String, message: String, @@ -19,16 +20,26 @@ public protocol OpenClawChatTransport: Sendable { func abortRun(sessionKey: String, runId: String) async throws func listSessions(limit: Int?) async throws -> OpenClawChatSessionsListResponse + func setSessionModel(sessionKey: String, model: String?) 
async throws + func setSessionThinking(sessionKey: String, thinkingLevel: String) async throws func requestHealth(timeoutMs: Int) async throws -> Bool func events() -> AsyncStream func setActiveSessionKey(_ sessionKey: String) async throws + func resetSession(sessionKey: String) async throws } extension OpenClawChatTransport { public func setActiveSessionKey(_: String) async throws {} + public func resetSession(sessionKey _: String) async throws { + throw NSError( + domain: "OpenClawChatTransport", + code: 0, + userInfo: [NSLocalizedDescriptionKey: "sessions.reset not supported by this transport"]) + } + public func abortRun(sessionKey _: String, runId _: String) async throws { throw NSError( domain: "OpenClawChatTransport", @@ -42,4 +53,25 @@ extension OpenClawChatTransport { code: 0, userInfo: [NSLocalizedDescriptionKey: "sessions.list not supported by this transport"]) } + + public func listModels() async throws -> [OpenClawChatModelChoice] { + throw NSError( + domain: "OpenClawChatTransport", + code: 0, + userInfo: [NSLocalizedDescriptionKey: "models.list not supported by this transport"]) + } + + public func setSessionModel(sessionKey _: String, model _: String?) 
async throws { + throw NSError( + domain: "OpenClawChatTransport", + code: 0, + userInfo: [NSLocalizedDescriptionKey: "sessions.patch(model) not supported by this transport"]) + } + + public func setSessionThinking(sessionKey _: String, thinkingLevel _: String) async throws { + throw NSError( + domain: "OpenClawChatTransport", + code: 0, + userInfo: [NSLocalizedDescriptionKey: "sessions.patch(thinkingLevel) not supported by this transport"]) + } } diff --git a/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatViewModel.swift b/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatViewModel.swift index 62cb97a0e2f..92413aefe64 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatViewModel.swift +++ b/apps/shared/OpenClawKit/Sources/OpenClawChatUI/ChatViewModel.swift @@ -15,9 +15,13 @@ private let chatUILogger = Logger(subsystem: "ai.openclaw", category: "OpenClawC @MainActor @Observable public final class OpenClawChatViewModel { + public static let defaultModelSelectionID = "__default__" + public private(set) var messages: [OpenClawChatMessage] = [] public var input: String = "" - public var thinkingLevel: String = "off" + public private(set) var thinkingLevel: String + public private(set) var modelSelectionID: String = "__default__" + public private(set) var modelChoices: [OpenClawChatModelChoice] = [] public private(set) var isLoading = false public private(set) var isSending = false public private(set) var isAborting = false @@ -32,6 +36,9 @@ public final class OpenClawChatViewModel { public private(set) var pendingToolCalls: [OpenClawChatPendingToolCall] = [] public private(set) var sessions: [OpenClawChatSessionEntry] = [] private let transport: any OpenClawChatTransport + private var sessionDefaults: OpenClawChatSessionsDefaults? + private let prefersExplicitThinkingLevel: Bool + private let onThinkingLevelChanged: (@MainActor @Sendable (String) -> Void)? @ObservationIgnored private nonisolated(unsafe) var eventTask: Task? 
@@ -42,6 +49,17 @@ public final class OpenClawChatViewModel { @ObservationIgnored private nonisolated(unsafe) var pendingRunTimeoutTasks: [String: Task] = [:] private let pendingRunTimeoutMs: UInt64 = 120_000 + // Session switches can overlap in-flight picker patches, so stale completions + // must compare against the latest request and latest desired value for that session. + private var nextModelSelectionRequestID: UInt64 = 0 + private var latestModelSelectionRequestIDsBySession: [String: UInt64] = [:] + private var latestModelSelectionIDsBySession: [String: String] = [:] + private var lastSuccessfulModelSelectionIDsBySession: [String: String] = [:] + private var inFlightModelPatchCountsBySession: [String: Int] = [:] + private var modelPatchWaitersBySession: [String: [CheckedContinuation]] = [:] + private var nextThinkingSelectionRequestID: UInt64 = 0 + private var latestThinkingSelectionRequestIDsBySession: [String: UInt64] = [:] + private var latestThinkingLevelsBySession: [String: String] = [:] private var pendingToolCallsById: [String: OpenClawChatPendingToolCall] = [:] { didSet { @@ -52,9 +70,18 @@ public final class OpenClawChatViewModel { private var lastHealthPollAt: Date? - public init(sessionKey: String, transport: any OpenClawChatTransport) { + public init( + sessionKey: String, + transport: any OpenClawChatTransport, + initialThinkingLevel: String? = nil, + onThinkingLevelChanged: (@MainActor @Sendable (String) -> Void)? = nil) + { self.sessionKey = sessionKey self.transport = transport + let normalizedThinkingLevel = Self.normalizedThinkingLevel(initialThinkingLevel) + self.thinkingLevel = normalizedThinkingLevel ?? 
"off" + self.prefersExplicitThinkingLevel = normalizedThinkingLevel != nil + self.onThinkingLevelChanged = onThinkingLevelChanged self.eventTask = Task { [weak self] in guard let self else { return } @@ -99,25 +126,35 @@ public final class OpenClawChatViewModel { Task { await self.performSwitchSession(to: sessionKey) } } + public func selectThinkingLevel(_ level: String) { + Task { await self.performSelectThinkingLevel(level) } + } + + public func selectModel(_ selectionID: String) { + Task { await self.performSelectModel(selectionID) } + } + public var sessionChoices: [OpenClawChatSessionEntry] { let now = Date().timeIntervalSince1970 * 1000 let cutoff = now - (24 * 60 * 60 * 1000) let sorted = self.sessions.sorted { ($0.updatedAt ?? 0) > ($1.updatedAt ?? 0) } + let mainSessionKey = self.resolvedMainSessionKey var result: [OpenClawChatSessionEntry] = [] var included = Set() - // Always show the main session first, even if it hasn't been updated recently. - if let main = sorted.first(where: { $0.key == "main" }) { + // Always show the resolved main session first, even if it hasn't been updated recently. + if let main = sorted.first(where: { $0.key == mainSessionKey }) { result.append(main) included.insert(main.key) } else { - result.append(self.placeholderSession(key: "main")) - included.insert("main") + result.append(self.placeholderSession(key: mainSessionKey)) + included.insert(mainSessionKey) } for entry in sorted { guard !included.contains(entry.key) else { continue } + guard entry.key == self.sessionKey || !Self.isHiddenInternalSession(entry.key) else { continue } guard (entry.updatedAt ?? 0) >= cutoff else { continue } result.append(entry) included.insert(entry.key) @@ -134,6 +171,29 @@ public final class OpenClawChatViewModel { return result } + private var resolvedMainSessionKey: String { + let trimmed = self.sessionDefaults?.mainSessionKey? + .trimmingCharacters(in: .whitespacesAndNewlines) + return (trimmed?.isEmpty == false ? trimmed : nil) ?? 
"main" + } + + private static func isHiddenInternalSession(_ key: String) -> Bool { + let trimmed = key.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmed.isEmpty else { return false } + return trimmed == "onboarding" || trimmed.hasSuffix(":onboarding") + } + + public var showsModelPicker: Bool { + !self.modelChoices.isEmpty + } + + public var defaultModelLabel: String { + guard let defaultModelID = self.normalizedModelSelectionID(self.sessionDefaults?.model) else { + return "Default" + } + return "Default: \(self.modelLabel(for: defaultModelID))" + } + public func addAttachments(urls: [URL]) { Task { await self.loadAttachments(urls: urls) } } @@ -174,11 +234,14 @@ public final class OpenClawChatViewModel { previous: self.messages, incoming: Self.decodeMessages(payload.messages ?? [])) self.sessionId = payload.sessionId - if let level = payload.thinkingLevel, !level.isEmpty { + if !self.prefersExplicitThinkingLevel, + let level = Self.normalizedThinkingLevel(payload.thinkingLevel) + { self.thinkingLevel = level } await self.pollHealthIfNeeded(force: true) await self.fetchSessions(limit: 50) + await self.fetchModels() self.errorText = nil } catch { self.errorText = error.localizedDescription @@ -316,11 +379,21 @@ public final class OpenClawChatViewModel { return "\(message.role)|\(timestamp)|\(text)" } + private static let resetTriggers: Set = ["/new", "/reset", "/clear"] + private func performSend() async { guard !self.isSending else { return } let trimmed = self.input.trimmingCharacters(in: .whitespacesAndNewlines) guard !trimmed.isEmpty || !self.attachments.isEmpty else { return } + if Self.resetTriggers.contains(trimmed.lowercased()) { + self.input = "" + await self.performReset() + return + } + + let sessionKey = self.sessionKey + guard self.healthOK else { self.errorText = "Gateway health not OK; cannot send" return @@ -330,6 +403,7 @@ public final class OpenClawChatViewModel { self.errorText = nil let runId = UUID().uuidString let messageText = 
trimmed.isEmpty && !self.attachments.isEmpty ? "See attached." : trimmed + let thinkingLevel = self.thinkingLevel self.pendingRuns.insert(runId) self.armPendingRunTimeout(runId: runId) self.pendingToolCallsById = [:] @@ -382,10 +456,11 @@ public final class OpenClawChatViewModel { self.attachments = [] do { + await self.waitForPendingModelPatches(in: sessionKey) let response = try await self.transport.sendMessage( - sessionKey: self.sessionKey, + sessionKey: sessionKey, message: messageText, - thinking: self.thinkingLevel, + thinking: thinkingLevel, idempotencyKey: runId, attachments: encodedAttachments) if response.runId != runId { @@ -422,6 +497,17 @@ public final class OpenClawChatViewModel { do { let res = try await self.transport.listSessions(limit: limit) self.sessions = res.sessions + self.sessionDefaults = res.defaults + self.syncSelectedModel() + } catch { + // Best-effort. + } + } + + private func fetchModels() async { + do { + self.modelChoices = try await self.transport.listModels() + self.syncSelectedModel() } catch { // Best-effort. } @@ -432,9 +518,124 @@ public final class OpenClawChatViewModel { guard !next.isEmpty else { return } guard next != self.sessionKey else { return } self.sessionKey = next + self.modelSelectionID = Self.defaultModelSelectionID await self.bootstrap() } + private func performReset() async { + self.isLoading = true + self.errorText = nil + defer { self.isLoading = false } + + do { + try await self.transport.resetSession(sessionKey: self.sessionKey) + } catch { + self.errorText = error.localizedDescription + chatUILogger.error("session reset failed \(error.localizedDescription, privacy: .public)") + return + } + + await self.bootstrap() + } + + private func performSelectThinkingLevel(_ level: String) async { + let next = Self.normalizedThinkingLevel(level) ?? 
"off" + guard next != self.thinkingLevel else { return } + + let sessionKey = self.sessionKey + self.thinkingLevel = next + self.onThinkingLevelChanged?(next) + self.nextThinkingSelectionRequestID &+= 1 + let requestID = self.nextThinkingSelectionRequestID + self.latestThinkingSelectionRequestIDsBySession[sessionKey] = requestID + self.latestThinkingLevelsBySession[sessionKey] = next + + do { + try await self.transport.setSessionThinking(sessionKey: sessionKey, thinkingLevel: next) + guard requestID == self.latestThinkingSelectionRequestIDsBySession[sessionKey] else { + let latest = self.latestThinkingLevelsBySession[sessionKey] ?? next + guard latest != next else { return } + try? await self.transport.setSessionThinking(sessionKey: sessionKey, thinkingLevel: latest) + return + } + } catch { + guard sessionKey == self.sessionKey, + requestID == self.latestThinkingSelectionRequestIDsBySession[sessionKey] + else { return } + // Best-effort. Persisting the user's local preference matters more than a patch error here. 
+ } + } + + private func performSelectModel(_ selectionID: String) async { + let next = self.normalizedSelectionID(selectionID) + guard next != self.modelSelectionID else { return } + + let sessionKey = self.sessionKey + let previous = self.modelSelectionID + let previousRequestID = self.latestModelSelectionRequestIDsBySession[sessionKey] + self.nextModelSelectionRequestID &+= 1 + let requestID = self.nextModelSelectionRequestID + let nextModelRef = self.modelRef(forSelectionID: next) + self.latestModelSelectionRequestIDsBySession[sessionKey] = requestID + self.latestModelSelectionIDsBySession[sessionKey] = next + self.beginModelPatch(for: sessionKey) + self.modelSelectionID = next + self.errorText = nil + defer { self.endModelPatch(for: sessionKey) } + + do { + try await self.transport.setSessionModel( + sessionKey: sessionKey, + model: nextModelRef) + guard requestID == self.latestModelSelectionRequestIDsBySession[sessionKey] else { + // Keep older successful patches as rollback state, but do not replay + // stale UI/session state over a newer in-flight or completed selection. 
+ self.lastSuccessfulModelSelectionIDsBySession[sessionKey] = next + return + } + self.applySuccessfulModelSelection(next, sessionKey: sessionKey, syncSelection: true) + } catch { + guard requestID == self.latestModelSelectionRequestIDsBySession[sessionKey] else { return } + self.latestModelSelectionIDsBySession[sessionKey] = previous + if let previousRequestID { + self.latestModelSelectionRequestIDsBySession[sessionKey] = previousRequestID + } else { + self.latestModelSelectionRequestIDsBySession.removeValue(forKey: sessionKey) + } + if self.lastSuccessfulModelSelectionIDsBySession[sessionKey] == previous { + self.applySuccessfulModelSelection(previous, sessionKey: sessionKey, syncSelection: sessionKey == self.sessionKey) + } + guard sessionKey == self.sessionKey else { return } + self.modelSelectionID = previous + self.errorText = error.localizedDescription + chatUILogger.error("sessions.patch(model) failed \(error.localizedDescription, privacy: .public)") + } + } + + private func beginModelPatch(for sessionKey: String) { + self.inFlightModelPatchCountsBySession[sessionKey, default: 0] += 1 + } + + private func endModelPatch(for sessionKey: String) { + let remaining = max(0, (self.inFlightModelPatchCountsBySession[sessionKey] ?? 0) - 1) + if remaining == 0 { + self.inFlightModelPatchCountsBySession.removeValue(forKey: sessionKey) + let waiters = self.modelPatchWaitersBySession.removeValue(forKey: sessionKey) ?? [] + for waiter in waiters { + waiter.resume() + } + return + } + self.inFlightModelPatchCountsBySession[sessionKey] = remaining + } + + private func waitForPendingModelPatches(in sessionKey: String) async { + guard (self.inFlightModelPatchCountsBySession[sessionKey] ?? 
0) > 0 else { return } + await withCheckedContinuation { continuation in + self.modelPatchWaitersBySession[sessionKey, default: []].append(continuation) + } + } + private func placeholderSession(key: String) -> OpenClawChatSessionEntry { OpenClawChatSessionEntry( key: key, @@ -453,10 +654,159 @@ public final class OpenClawChatViewModel { inputTokens: nil, outputTokens: nil, totalTokens: nil, + modelProvider: nil, model: nil, contextTokens: nil) } + private func syncSelectedModel() { + let currentSession = self.sessions.first(where: { $0.key == self.sessionKey }) + let explicitModelID = self.normalizedModelSelectionID( + currentSession?.model, + provider: currentSession?.modelProvider) + if let explicitModelID { + self.lastSuccessfulModelSelectionIDsBySession[self.sessionKey] = explicitModelID + self.modelSelectionID = explicitModelID + return + } + self.lastSuccessfulModelSelectionIDsBySession[self.sessionKey] = Self.defaultModelSelectionID + self.modelSelectionID = Self.defaultModelSelectionID + } + + private func normalizedSelectionID(_ selectionID: String) -> String { + let trimmed = selectionID.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmed.isEmpty else { return Self.defaultModelSelectionID } + return trimmed + } + + private func normalizedModelSelectionID(_ modelID: String?, provider: String? = nil) -> String? 
{ + guard let modelID else { return nil } + let trimmed = modelID.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmed.isEmpty else { return nil } + if let provider = Self.normalizedProvider(provider) { + let providerQualified = Self.providerQualifiedModelSelectionID(modelID: trimmed, provider: provider) + if let match = self.modelChoices.first(where: { + $0.selectionID == providerQualified || + ($0.modelID == trimmed && Self.normalizedProvider($0.provider) == provider) + }) { + return match.selectionID + } + return providerQualified + } + if self.modelChoices.contains(where: { $0.selectionID == trimmed }) { + return trimmed + } + let matches = self.modelChoices.filter { $0.modelID == trimmed || $0.selectionID == trimmed } + if matches.count == 1 { + return matches[0].selectionID + } + return trimmed + } + + private func modelRef(forSelectionID selectionID: String) -> String? { + let normalized = self.normalizedSelectionID(selectionID) + if normalized == Self.defaultModelSelectionID { + return nil + } + return normalized + } + + private func modelLabel(for modelID: String) -> String { + self.modelChoices.first(where: { $0.selectionID == modelID || $0.modelID == modelID })?.displayLabel ?? + modelID + } + + private func applySuccessfulModelSelection(_ selectionID: String, sessionKey: String, syncSelection: Bool) { + self.lastSuccessfulModelSelectionIDsBySession[sessionKey] = selectionID + let resolved = self.resolvedSessionModelIdentity(forSelectionID: selectionID) + self.updateCurrentSessionModel( + modelID: resolved.modelID, + modelProvider: resolved.modelProvider, + sessionKey: sessionKey, + syncSelection: syncSelection) + } + + private func resolvedSessionModelIdentity(forSelectionID selectionID: String) -> (modelID: String?, modelProvider: String?) 
{ + guard let modelRef = self.modelRef(forSelectionID: selectionID) else { + return (nil, nil) + } + if let choice = self.modelChoices.first(where: { $0.selectionID == modelRef }) { + return (choice.modelID, Self.normalizedProvider(choice.provider)) + } + return (modelRef, nil) + } + + private static func normalizedProvider(_ provider: String?) -> String? { + let trimmed = provider?.trimmingCharacters(in: .whitespacesAndNewlines) + guard let trimmed, !trimmed.isEmpty else { return nil } + return trimmed + } + + private static func providerQualifiedModelSelectionID(modelID: String, provider: String) -> String { + let providerPrefix = "\(provider)/" + if modelID.hasPrefix(providerPrefix) { + return modelID + } + return "\(provider)/\(modelID)" + } + + private func updateCurrentSessionModel( + modelID: String?, + modelProvider: String?, + sessionKey: String, + syncSelection: Bool) + { + if let index = self.sessions.firstIndex(where: { $0.key == sessionKey }) { + let current = self.sessions[index] + self.sessions[index] = OpenClawChatSessionEntry( + key: current.key, + kind: current.kind, + displayName: current.displayName, + surface: current.surface, + subject: current.subject, + room: current.room, + space: current.space, + updatedAt: current.updatedAt, + sessionId: current.sessionId, + systemSent: current.systemSent, + abortedLastRun: current.abortedLastRun, + thinkingLevel: current.thinkingLevel, + verboseLevel: current.verboseLevel, + inputTokens: current.inputTokens, + outputTokens: current.outputTokens, + totalTokens: current.totalTokens, + modelProvider: modelProvider, + model: modelID, + contextTokens: current.contextTokens) + } else { + let placeholder = self.placeholderSession(key: sessionKey) + self.sessions.append( + OpenClawChatSessionEntry( + key: placeholder.key, + kind: placeholder.kind, + displayName: placeholder.displayName, + surface: placeholder.surface, + subject: placeholder.subject, + room: placeholder.room, + space: placeholder.space, + 
updatedAt: placeholder.updatedAt, + sessionId: placeholder.sessionId, + systemSent: placeholder.systemSent, + abortedLastRun: placeholder.abortedLastRun, + thinkingLevel: placeholder.thinkingLevel, + verboseLevel: placeholder.verboseLevel, + inputTokens: placeholder.inputTokens, + outputTokens: placeholder.outputTokens, + totalTokens: placeholder.totalTokens, + modelProvider: modelProvider, + model: modelID, + contextTokens: placeholder.contextTokens)) + } + if syncSelection { + self.syncSelectedModel() + } + } + private func handleTransportEvent(_ evt: OpenClawChatTransportEvent) { switch evt { case let .health(ok): @@ -573,7 +923,9 @@ public final class OpenClawChatViewModel { previous: self.messages, incoming: Self.decodeMessages(payload.messages ?? [])) self.sessionId = payload.sessionId - if let level = payload.thinkingLevel, !level.isEmpty { + if !self.prefersExplicitThinkingLevel, + let level = Self.normalizedThinkingLevel(payload.thinkingLevel) + { self.thinkingLevel = level } } catch { @@ -682,4 +1034,13 @@ public final class OpenClawChatViewModel { nil #endif } + + private static func normalizedThinkingLevel(_ level: String?) -> String? { + guard let level else { return nil } + let trimmed = level.trimmingCharacters(in: .whitespacesAndNewlines).lowercased() + guard ["off", "minimal", "low", "medium", "high", "xhigh", "adaptive"].contains(trimmed) else { + return nil + } + return trimmed + } } diff --git a/apps/shared/OpenClawKit/Sources/OpenClawKit/DeepLinks.swift b/apps/shared/OpenClawKit/Sources/OpenClawKit/DeepLinks.swift index 20b3761668b..5f1440ccb1a 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawKit/DeepLinks.swift +++ b/apps/shared/OpenClawKit/Sources/OpenClawKit/DeepLinks.swift @@ -9,13 +9,15 @@ public struct GatewayConnectDeepLink: Codable, Sendable, Equatable { public let host: String public let port: Int public let tls: Bool + public let bootstrapToken: String? public let token: String? public let password: String? 
- public init(host: String, port: Int, tls: Bool, token: String?, password: String?) { + public init(host: String, port: Int, tls: Bool, bootstrapToken: String?, token: String?, password: String?) { self.host = host self.port = port self.tls = tls + self.bootstrapToken = bootstrapToken self.token = token self.password = password } @@ -25,7 +27,7 @@ public struct GatewayConnectDeepLink: Codable, Sendable, Equatable { return URL(string: "\(scheme)://\(self.host):\(self.port)") } - /// Parse a device-pair setup code (base64url-encoded JSON: `{url, token?, password?}`). + /// Parse a device-pair setup code (base64url-encoded JSON: `{url, bootstrapToken?, token?, password?}`). public static func fromSetupCode(_ code: String) -> GatewayConnectDeepLink? { guard let data = Self.decodeBase64Url(code) else { return nil } guard let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any] else { return nil } @@ -41,9 +43,16 @@ public struct GatewayConnectDeepLink: Codable, Sendable, Equatable { return nil } let port = parsed.port ?? (tls ? 443 : 18789) + let bootstrapToken = json["bootstrapToken"] as? String let token = json["token"] as? String let password = json["password"] as? String - return GatewayConnectDeepLink(host: hostname, port: port, tls: tls, token: token, password: password) + return GatewayConnectDeepLink( + host: hostname, + port: port, + tls: tls, + bootstrapToken: bootstrapToken, + token: token, + password: password) } private static func decodeBase64Url(_ input: String) -> Data? 
{ @@ -140,6 +149,7 @@ public enum DeepLinkParser { host: hostParam, port: port, tls: tls, + bootstrapToken: nil, token: query["token"], password: query["password"])) diff --git a/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayChannel.swift b/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayChannel.swift index 3dc5eacee6e..2c3da84af68 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayChannel.swift +++ b/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayChannel.swift @@ -112,6 +112,7 @@ public struct GatewayConnectOptions: Sendable { public enum GatewayAuthSource: String, Sendable { case deviceToken = "device-token" case sharedToken = "shared-token" + case bootstrapToken = "bootstrap-token" case password = "password" case none = "none" } @@ -131,6 +132,36 @@ private let defaultOperatorConnectScopes: [String] = [ "operator.pairing", ] +private extension String { + var nilIfEmpty: String? { + self.isEmpty ? nil : self + } +} + +private struct SelectedConnectAuth: Sendable { + let authToken: String? + let authBootstrapToken: String? + let authDeviceToken: String? + let authPassword: String? + let signatureToken: String? + let storedToken: String? 
+ let authSource: GatewayAuthSource +} + +private enum GatewayConnectErrorCodes { + static let authTokenMismatch = GatewayConnectAuthDetailCode.authTokenMismatch.rawValue + static let authDeviceTokenMismatch = GatewayConnectAuthDetailCode.authDeviceTokenMismatch.rawValue + static let authTokenMissing = GatewayConnectAuthDetailCode.authTokenMissing.rawValue + static let authTokenNotConfigured = GatewayConnectAuthDetailCode.authTokenNotConfigured.rawValue + static let authPasswordMissing = GatewayConnectAuthDetailCode.authPasswordMissing.rawValue + static let authPasswordMismatch = GatewayConnectAuthDetailCode.authPasswordMismatch.rawValue + static let authPasswordNotConfigured = GatewayConnectAuthDetailCode.authPasswordNotConfigured.rawValue + static let authRateLimited = GatewayConnectAuthDetailCode.authRateLimited.rawValue + static let pairingRequired = GatewayConnectAuthDetailCode.pairingRequired.rawValue + static let controlUiDeviceIdentityRequired = GatewayConnectAuthDetailCode.controlUiDeviceIdentityRequired.rawValue + static let deviceIdentityRequired = GatewayConnectAuthDetailCode.deviceIdentityRequired.rawValue +} + public actor GatewayChannelActor { private let logger = Logger(subsystem: "ai.openclaw", category: "gateway") private var task: WebSocketTaskBox? @@ -140,6 +171,7 @@ public actor GatewayChannelActor { private var connectWaiters: [CheckedContinuation] = [] private var url: URL private var token: String? + private var bootstrapToken: String? private var password: String? private let session: WebSocketSessioning private var backoffMs: Double = 500 @@ -160,6 +192,9 @@ public actor GatewayChannelActor { private var watchdogTask: Task? private var tickTask: Task? private var keepaliveTask: Task? 
+ private var pendingDeviceTokenRetry = false + private var deviceTokenRetryBudgetUsed = false + private var reconnectPausedForAuthFailure = false private let defaultRequestTimeoutMs: Double = 15000 private let pushHandler: (@Sendable (GatewayPush) async -> Void)? private let connectOptions: GatewayConnectOptions? @@ -168,6 +203,7 @@ public actor GatewayChannelActor { public init( url: URL, token: String?, + bootstrapToken: String? = nil, password: String? = nil, session: WebSocketSessionBox? = nil, pushHandler: (@Sendable (GatewayPush) async -> Void)? = nil, @@ -176,6 +212,7 @@ public actor GatewayChannelActor { { self.url = url self.token = token + self.bootstrapToken = bootstrapToken self.password = password self.session = session?.session ?? URLSession(configuration: .default) self.pushHandler = pushHandler @@ -232,10 +269,18 @@ public actor GatewayChannelActor { while self.shouldReconnect { guard await self.sleepUnlessCancelled(nanoseconds: 30 * 1_000_000_000) else { return } // 30s cadence guard self.shouldReconnect else { return } + if self.reconnectPausedForAuthFailure { continue } if self.connected { continue } do { try await self.connect() } catch { + if self.shouldPauseReconnectAfterAuthFailure(error) { + self.reconnectPausedForAuthFailure = true + self.logger.error( + "gateway watchdog reconnect paused for non-recoverable auth failure \(error.localizedDescription, privacy: .public)" + ) + continue + } let wrapped = self.wrap(error, context: "gateway watchdog reconnect") self.logger.error("gateway watchdog reconnect failed \(wrapped.localizedDescription, privacy: .public)") } @@ -267,7 +312,12 @@ public actor GatewayChannelActor { }, operation: { try await self.sendConnect() }) } catch { - let wrapped = self.wrap(error, context: "connect to gateway @ \(self.url.absoluteString)") + let wrapped: Error + if let authError = error as? 
GatewayConnectAuthError { + wrapped = authError + } else { + wrapped = self.wrap(error, context: "connect to gateway @ \(self.url.absoluteString)") + } self.connected = false self.task?.cancel(with: .goingAway, reason: nil) await self.disconnectHandler?("connect failed: \(wrapped.localizedDescription)") @@ -281,6 +331,7 @@ public actor GatewayChannelActor { } self.listen() self.connected = true + self.reconnectPausedForAuthFailure = false self.backoffMs = 500 self.lastSeq = nil self.startKeepalive() @@ -367,29 +418,24 @@ public actor GatewayChannelActor { } let includeDeviceIdentity = options.includeDeviceIdentity let identity = includeDeviceIdentity ? DeviceIdentityStore.loadOrCreate() : nil - let storedToken = - (includeDeviceIdentity && identity != nil) - ? DeviceAuthStore.loadToken(deviceId: identity!.deviceId, role: role)?.token - : nil - // If we're not sending a device identity, a device token can't be validated server-side. - // In that mode we always use the shared gateway token/password. - let authToken = includeDeviceIdentity ? (storedToken ?? 
self.token) : self.token - let authSource: GatewayAuthSource - if storedToken != nil { - authSource = .deviceToken - } else if authToken != nil { - authSource = .sharedToken - } else if self.password != nil { - authSource = .password - } else { - authSource = .none + let selectedAuth = self.selectConnectAuth( + role: role, + includeDeviceIdentity: includeDeviceIdentity, + deviceId: identity?.deviceId) + if selectedAuth.authDeviceToken != nil && self.pendingDeviceTokenRetry { + self.pendingDeviceTokenRetry = false } - self.lastAuthSource = authSource - self.logger.info("gateway connect auth=\(authSource.rawValue, privacy: .public)") - let canFallbackToShared = includeDeviceIdentity && storedToken != nil && self.token != nil - if let authToken { - params["auth"] = ProtoAnyCodable(["token": ProtoAnyCodable(authToken)]) - } else if let password = self.password { + self.lastAuthSource = selectedAuth.authSource + self.logger.info("gateway connect auth=\(selectedAuth.authSource.rawValue, privacy: .public)") + if let authToken = selectedAuth.authToken { + var auth: [String: ProtoAnyCodable] = ["token": ProtoAnyCodable(authToken)] + if let authDeviceToken = selectedAuth.authDeviceToken { + auth["deviceToken"] = ProtoAnyCodable(authDeviceToken) + } + params["auth"] = ProtoAnyCodable(auth) + } else if let authBootstrapToken = selectedAuth.authBootstrapToken { + params["auth"] = ProtoAnyCodable(["bootstrapToken": ProtoAnyCodable(authBootstrapToken)]) + } else if let password = selectedAuth.authPassword { params["auth"] = ProtoAnyCodable(["password": ProtoAnyCodable(password)]) } let signedAtMs = Int(Date().timeIntervalSince1970 * 1000) @@ -402,7 +448,7 @@ public actor GatewayChannelActor { role: role, scopes: scopes, signedAtMs: signedAtMs, - token: authToken, + token: selectedAuth.signatureToken, nonce: connectNonce, platform: platform, deviceFamily: InstanceIdentity.deviceFamily) @@ -426,16 +472,73 @@ public actor GatewayChannelActor { do { let response = try await 
self.waitForConnectResponse(reqId: reqId) try await self.handleConnectResponse(response, identity: identity, role: role) + self.pendingDeviceTokenRetry = false + self.deviceTokenRetryBudgetUsed = false } catch { - if canFallbackToShared { - if let identity { - DeviceAuthStore.clearToken(deviceId: identity.deviceId, role: role) - } + let shouldRetryWithDeviceToken = self.shouldRetryWithStoredDeviceToken( + error: error, + explicitGatewayToken: self.token?.trimmingCharacters(in: .whitespacesAndNewlines).nilIfEmpty, + storedToken: selectedAuth.storedToken, + attemptedDeviceTokenRetry: selectedAuth.authDeviceToken != nil) + if shouldRetryWithDeviceToken { + self.pendingDeviceTokenRetry = true + self.deviceTokenRetryBudgetUsed = true + self.backoffMs = min(self.backoffMs, 250) + } else if selectedAuth.authDeviceToken != nil, + let identity, + self.shouldClearStoredDeviceTokenAfterRetry(error) + { + // Retry failed with an explicit device-token mismatch; clear stale local token. + DeviceAuthStore.clearToken(deviceId: identity.deviceId, role: role) } throw error } } + private func selectConnectAuth( + role: String, + includeDeviceIdentity: Bool, + deviceId: String? + ) -> SelectedConnectAuth { + let explicitToken = self.token?.trimmingCharacters(in: .whitespacesAndNewlines).nilIfEmpty + let explicitBootstrapToken = + self.bootstrapToken?.trimmingCharacters(in: .whitespacesAndNewlines).nilIfEmpty + let explicitPassword = self.password?.trimmingCharacters(in: .whitespacesAndNewlines).nilIfEmpty + let storedToken = + (includeDeviceIdentity && deviceId != nil) + ? DeviceAuthStore.loadToken(deviceId: deviceId!, role: role)?.token + : nil + let shouldUseDeviceRetryToken = + includeDeviceIdentity && self.pendingDeviceTokenRetry && + storedToken != nil && explicitToken != nil && self.isTrustedDeviceRetryEndpoint() + let authToken = + explicitToken ?? + (includeDeviceIdentity && explicitPassword == nil && + (explicitBootstrapToken == nil || storedToken != nil) ? 
storedToken : nil) + let authBootstrapToken = authToken == nil ? explicitBootstrapToken : nil + let authDeviceToken = shouldUseDeviceRetryToken ? storedToken : nil + let authSource: GatewayAuthSource + if authDeviceToken != nil || (explicitToken == nil && authToken != nil) { + authSource = .deviceToken + } else if authToken != nil { + authSource = .sharedToken + } else if authBootstrapToken != nil { + authSource = .bootstrapToken + } else if explicitPassword != nil { + authSource = .password + } else { + authSource = .none + } + return SelectedConnectAuth( + authToken: authToken, + authBootstrapToken: authBootstrapToken, + authDeviceToken: authDeviceToken, + authPassword: explicitPassword, + signatureToken: authToken ?? authBootstrapToken, + storedToken: storedToken, + authSource: authSource) + } + private func handleConnectResponse( _ res: ResponseFrame, identity: DeviceIdentity?, @@ -443,7 +546,15 @@ public actor GatewayChannelActor { ) async throws { if res.ok == false { let msg = (res.error?["message"]?.value as? String) ?? "gateway connect failed" - throw NSError(domain: "Gateway", code: 1008, userInfo: [NSLocalizedDescriptionKey: msg]) + let details = res.error?["details"]?.value as? [String: ProtoAnyCodable] + let detailCode = details?["code"]?.value as? String + let canRetryWithDeviceToken = details?["canRetryWithDeviceToken"]?.value as? Bool ?? false + let recommendedNextStep = details?["recommendedNextStep"]?.value as? 
String + throw GatewayConnectAuthError( + message: msg, + detailCodeRaw: detailCode, + canRetryWithDeviceToken: canRetryWithDeviceToken, + recommendedNextStepRaw: recommendedNextStep) } guard let payload = res.payload else { throw NSError( @@ -616,19 +727,90 @@ public actor GatewayChannelActor { private func scheduleReconnect() async { guard self.shouldReconnect else { return } + guard !self.reconnectPausedForAuthFailure else { return } let delay = self.backoffMs / 1000 self.backoffMs = min(self.backoffMs * 2, 30000) guard await self.sleepUnlessCancelled(nanoseconds: UInt64(delay * 1_000_000_000)) else { return } guard self.shouldReconnect else { return } + guard !self.reconnectPausedForAuthFailure else { return } do { try await self.connect() } catch { + if self.shouldPauseReconnectAfterAuthFailure(error) { + self.reconnectPausedForAuthFailure = true + self.logger.error( + "gateway reconnect paused for non-recoverable auth failure \(error.localizedDescription, privacy: .public)" + ) + return + } let wrapped = self.wrap(error, context: "gateway reconnect") self.logger.error("gateway reconnect failed \(wrapped.localizedDescription, privacy: .public)") await self.scheduleReconnect() } } + private func shouldRetryWithStoredDeviceToken( + error: Error, + explicitGatewayToken: String?, + storedToken: String?, + attemptedDeviceTokenRetry: Bool + ) -> Bool { + if self.deviceTokenRetryBudgetUsed { + return false + } + if attemptedDeviceTokenRetry { + return false + } + guard explicitGatewayToken != nil, storedToken != nil else { + return false + } + guard self.isTrustedDeviceRetryEndpoint() else { + return false + } + guard let authError = error as? GatewayConnectAuthError else { + return false + } + return authError.canRetryWithDeviceToken || + authError.detail == .authTokenMismatch + } + + private func shouldPauseReconnectAfterAuthFailure(_ error: Error) -> Bool { + guard let authError = error as? 
GatewayConnectAuthError else { + return false + } + if authError.isNonRecoverable { + return true + } + if authError.detail == .authTokenMismatch && + self.deviceTokenRetryBudgetUsed && !self.pendingDeviceTokenRetry + { + return true + } + return false + } + + private func shouldClearStoredDeviceTokenAfterRetry(_ error: Error) -> Bool { + guard let authError = error as? GatewayConnectAuthError else { + return false + } + return authError.detail == .authDeviceTokenMismatch + } + + private func isTrustedDeviceRetryEndpoint() -> Bool { + // This client currently treats loopback as the only trusted retry target. + // Unlike the Node gateway client, it does not yet expose a pinned TLS-fingerprint + // trust path for remote retry, so remote fallback remains disabled by default. + guard let host = self.url.host?.trimmingCharacters(in: .whitespacesAndNewlines).lowercased(), + !host.isEmpty + else { + return false + } + if host == "localhost" || host == "::1" || host == "127.0.0.1" || host.hasPrefix("127.") { + return true + } + return false + } + private nonisolated func sleepUnlessCancelled(nanoseconds: UInt64) async -> Bool { do { try await Task.sleep(nanoseconds: nanoseconds) @@ -713,6 +895,9 @@ public actor GatewayChannelActor { // Wrap low-level URLSession/WebSocket errors with context so UI can surface them. private func wrap(_ error: Error, context: String) -> Error { + if error is GatewayConnectAuthError || error is GatewayResponseError || error is GatewayDecodingError { + return error + } if let urlError = error as? URLError { let desc = urlError.localizedDescription.isEmpty ? 
"cancelled" : urlError.localizedDescription return NSError( @@ -756,7 +941,8 @@ public actor GatewayChannelActor { return (id: id, data: data) } catch { self.logger.error( - "gateway \(kind) encode failed \(method, privacy: .public) error=\(error.localizedDescription, privacy: .public)") + "gateway \(kind) encode failed \(method, privacy: .public) error=\(error.localizedDescription, privacy: .public)" + ) throw error } } diff --git a/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayErrors.swift b/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayErrors.swift index 6ca81dec445..7ef7f466476 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayErrors.swift +++ b/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayErrors.swift @@ -1,6 +1,114 @@ import OpenClawProtocol import Foundation +public enum GatewayConnectAuthDetailCode: String, Sendable { + case authRequired = "AUTH_REQUIRED" + case authUnauthorized = "AUTH_UNAUTHORIZED" + case authTokenMismatch = "AUTH_TOKEN_MISMATCH" + case authBootstrapTokenInvalid = "AUTH_BOOTSTRAP_TOKEN_INVALID" + case authDeviceTokenMismatch = "AUTH_DEVICE_TOKEN_MISMATCH" + case authTokenMissing = "AUTH_TOKEN_MISSING" + case authTokenNotConfigured = "AUTH_TOKEN_NOT_CONFIGURED" + case authPasswordMissing = "AUTH_PASSWORD_MISSING" + case authPasswordMismatch = "AUTH_PASSWORD_MISMATCH" + case authPasswordNotConfigured = "AUTH_PASSWORD_NOT_CONFIGURED" + case authRateLimited = "AUTH_RATE_LIMITED" + case authTailscaleIdentityMissing = "AUTH_TAILSCALE_IDENTITY_MISSING" + case authTailscaleProxyMissing = "AUTH_TAILSCALE_PROXY_MISSING" + case authTailscaleWhoisFailed = "AUTH_TAILSCALE_WHOIS_FAILED" + case authTailscaleIdentityMismatch = "AUTH_TAILSCALE_IDENTITY_MISMATCH" + case pairingRequired = "PAIRING_REQUIRED" + case controlUiDeviceIdentityRequired = "CONTROL_UI_DEVICE_IDENTITY_REQUIRED" + case deviceIdentityRequired = "DEVICE_IDENTITY_REQUIRED" + case deviceAuthInvalid = "DEVICE_AUTH_INVALID" + case deviceAuthDeviceIdMismatch = 
"DEVICE_AUTH_DEVICE_ID_MISMATCH" + case deviceAuthSignatureExpired = "DEVICE_AUTH_SIGNATURE_EXPIRED" + case deviceAuthNonceRequired = "DEVICE_AUTH_NONCE_REQUIRED" + case deviceAuthNonceMismatch = "DEVICE_AUTH_NONCE_MISMATCH" + case deviceAuthSignatureInvalid = "DEVICE_AUTH_SIGNATURE_INVALID" + case deviceAuthPublicKeyInvalid = "DEVICE_AUTH_PUBLIC_KEY_INVALID" +} + +public enum GatewayConnectRecoveryNextStep: String, Sendable { + case retryWithDeviceToken = "retry_with_device_token" + case updateAuthConfiguration = "update_auth_configuration" + case updateAuthCredentials = "update_auth_credentials" + case waitThenRetry = "wait_then_retry" + case reviewAuthConfiguration = "review_auth_configuration" +} + +/// Structured websocket connect-auth rejection surfaced before the channel is usable. +public struct GatewayConnectAuthError: LocalizedError, Sendable { + public let message: String + public let detailCodeRaw: String? + public let recommendedNextStepRaw: String? + public let canRetryWithDeviceToken: Bool + + public init( + message: String, + detailCodeRaw: String?, + canRetryWithDeviceToken: Bool, + recommendedNextStepRaw: String? = nil) + { + let trimmedMessage = message.trimmingCharacters(in: .whitespacesAndNewlines) + let trimmedDetailCode = detailCodeRaw?.trimmingCharacters(in: .whitespacesAndNewlines) + let trimmedRecommendedNextStep = + recommendedNextStepRaw?.trimmingCharacters(in: .whitespacesAndNewlines) + self.message = trimmedMessage.isEmpty ? "gateway connect failed" : trimmedMessage + self.detailCodeRaw = trimmedDetailCode?.isEmpty == false ? trimmedDetailCode : nil + self.canRetryWithDeviceToken = canRetryWithDeviceToken + self.recommendedNextStepRaw = + trimmedRecommendedNextStep?.isEmpty == false ? trimmedRecommendedNextStep : nil + } + + public init( + message: String, + detailCode: String?, + canRetryWithDeviceToken: Bool, + recommendedNextStep: String? 
= nil) + { + self.init( + message: message, + detailCodeRaw: detailCode, + canRetryWithDeviceToken: canRetryWithDeviceToken, + recommendedNextStepRaw: recommendedNextStep) + } + + public var detailCode: String? { self.detailCodeRaw } + + public var recommendedNextStepCode: String? { self.recommendedNextStepRaw } + + public var detail: GatewayConnectAuthDetailCode? { + guard let detailCodeRaw else { return nil } + return GatewayConnectAuthDetailCode(rawValue: detailCodeRaw) + } + + public var recommendedNextStep: GatewayConnectRecoveryNextStep? { + guard let recommendedNextStepRaw else { return nil } + return GatewayConnectRecoveryNextStep(rawValue: recommendedNextStepRaw) + } + + public var errorDescription: String? { self.message } + + public var isNonRecoverable: Bool { + switch self.detail { + case .authTokenMissing, + .authBootstrapTokenInvalid, + .authTokenNotConfigured, + .authPasswordMissing, + .authPasswordMismatch, + .authPasswordNotConfigured, + .authRateLimited, + .pairingRequired, + .controlUiDeviceIdentityRequired, + .deviceIdentityRequired: + return true + default: + return false + } + } +} + /// Structured error surfaced when the gateway responds with `{ ok: false }`. public struct GatewayResponseError: LocalizedError, @unchecked Sendable { public let method: String diff --git a/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayNodeSession.swift b/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayNodeSession.swift index 378ad10e365..945e482bbbf 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayNodeSession.swift +++ b/apps/shared/OpenClawKit/Sources/OpenClawKit/GatewayNodeSession.swift @@ -64,6 +64,7 @@ public actor GatewayNodeSession { private var channel: GatewayChannelActor? private var activeURL: URL? private var activeToken: String? + private var activeBootstrapToken: String? private var activePassword: String? private var activeConnectOptionsKey: String? private var connectOptions: GatewayConnectOptions? 
@@ -194,6 +195,7 @@ public actor GatewayNodeSession { public func connect( url: URL, token: String?, + bootstrapToken: String?, password: String?, connectOptions: GatewayConnectOptions, sessionBox: WebSocketSessionBox?, @@ -204,6 +206,7 @@ public actor GatewayNodeSession { let nextOptionsKey = self.connectOptionsKey(connectOptions) let shouldReconnect = self.activeURL != url || self.activeToken != token || + self.activeBootstrapToken != bootstrapToken || self.activePassword != password || self.activeConnectOptionsKey != nextOptionsKey || self.channel == nil @@ -221,6 +224,7 @@ public actor GatewayNodeSession { let channel = GatewayChannelActor( url: url, token: token, + bootstrapToken: bootstrapToken, password: password, session: sessionBox, pushHandler: { [weak self] push in @@ -233,6 +237,7 @@ public actor GatewayNodeSession { self.channel = channel self.activeURL = url self.activeToken = token + self.activeBootstrapToken = bootstrapToken self.activePassword = password self.activeConnectOptionsKey = nextOptionsKey } @@ -257,6 +262,7 @@ public actor GatewayNodeSession { self.channel = nil self.activeURL = nil self.activeToken = nil + self.activeBootstrapToken = nil self.activePassword = nil self.activeConnectOptionsKey = nil self.hasEverConnected = false diff --git a/apps/shared/OpenClawKit/Sources/OpenClawKit/Resources/CanvasScaffold/scaffold.html b/apps/shared/OpenClawKit/Sources/OpenClawKit/Resources/CanvasScaffold/scaffold.html index ceb7a975da4..684d5a9f148 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawKit/Resources/CanvasScaffold/scaffold.html +++ b/apps/shared/OpenClawKit/Sources/OpenClawKit/Resources/CanvasScaffold/scaffold.html @@ -3,7 +3,7 @@ - Canvas + OpenClaw - + +
+
+
+
+ + Welcome to OpenClaw +
+

Your phone stays quiet until it is needed

+

+ Pair this device to your gateway to wake it only for real work, keep a live agent overview handy, and avoid battery-draining background loops. +

+ +
+
+
Gateway
+
Gateway
+
Connect to load your agents
+
+ +
+
Active Agent
+
+
OC
+
+
Main
+
Connect to load your agents
+
+
+
+
+
+ +
+
+
Live agents
+
0 agents
+
+
+ +
+
+
+
Ready
Waiting for agent
+ diff --git a/apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift b/apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift index a6223d95bee..3003ae79f7b 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift +++ b/apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift @@ -538,8 +538,6 @@ public struct AgentParams: Codable, Sendable { public let inputprovenance: [String: AnyCodable]? public let idempotencykey: String public let label: String? - public let spawnedby: String? - public let workspacedir: String? public init( message: String, @@ -566,9 +564,7 @@ public struct AgentParams: Codable, Sendable { internalevents: [[String: AnyCodable]]?, inputprovenance: [String: AnyCodable]?, idempotencykey: String, - label: String?, - spawnedby: String?, - workspacedir: String?) + label: String?) { self.message = message self.agentid = agentid @@ -595,8 +591,6 @@ public struct AgentParams: Codable, Sendable { self.inputprovenance = inputprovenance self.idempotencykey = idempotencykey self.label = label - self.spawnedby = spawnedby - self.workspacedir = workspacedir } private enum CodingKeys: String, CodingKey { @@ -625,8 +619,6 @@ public struct AgentParams: Codable, Sendable { case inputprovenance = "inputProvenance" case idempotencykey = "idempotencyKey" case label - case spawnedby = "spawnedBy" - case workspacedir = "workspaceDir" } } @@ -950,6 +942,102 @@ public struct NodeEventParams: Codable, Sendable { } } +public struct NodePendingDrainParams: Codable, Sendable { + public let maxitems: Int? + + public init( + maxitems: Int?) 
+ { + self.maxitems = maxitems + } + + private enum CodingKeys: String, CodingKey { + case maxitems = "maxItems" + } +} + +public struct NodePendingDrainResult: Codable, Sendable { + public let nodeid: String + public let revision: Int + public let items: [[String: AnyCodable]] + public let hasmore: Bool + + public init( + nodeid: String, + revision: Int, + items: [[String: AnyCodable]], + hasmore: Bool) + { + self.nodeid = nodeid + self.revision = revision + self.items = items + self.hasmore = hasmore + } + + private enum CodingKeys: String, CodingKey { + case nodeid = "nodeId" + case revision + case items + case hasmore = "hasMore" + } +} + +public struct NodePendingEnqueueParams: Codable, Sendable { + public let nodeid: String + public let type: String + public let priority: String? + public let expiresinms: Int? + public let wake: Bool? + + public init( + nodeid: String, + type: String, + priority: String?, + expiresinms: Int?, + wake: Bool?) + { + self.nodeid = nodeid + self.type = type + self.priority = priority + self.expiresinms = expiresinms + self.wake = wake + } + + private enum CodingKeys: String, CodingKey { + case nodeid = "nodeId" + case type + case priority + case expiresinms = "expiresInMs" + case wake + } +} + +public struct NodePendingEnqueueResult: Codable, Sendable { + public let nodeid: String + public let revision: Int + public let queued: [String: AnyCodable] + public let waketriggered: Bool + + public init( + nodeid: String, + revision: Int, + queued: [String: AnyCodable], + waketriggered: Bool) + { + self.nodeid = nodeid + self.revision = revision + self.queued = queued + self.waketriggered = waketriggered + } + + private enum CodingKeys: String, CodingKey { + case nodeid = "nodeId" + case revision + case queued + case waketriggered = "wakeTriggered" + } +} + public struct NodeInvokeRequestEvent: Codable, Sendable { public let id: String public let nodeid: String @@ -1018,6 +1106,7 @@ public struct PushTestResult: Codable, Sendable { 
public let tokensuffix: String public let topic: String public let environment: String + public let transport: String public init( ok: Bool, @@ -1026,7 +1115,8 @@ public struct PushTestResult: Codable, Sendable { reason: String?, tokensuffix: String, topic: String, - environment: String) + environment: String, + transport: String) { self.ok = ok self.status = status @@ -1035,6 +1125,7 @@ public struct PushTestResult: Codable, Sendable { self.tokensuffix = tokensuffix self.topic = topic self.environment = environment + self.transport = transport } private enum CodingKeys: String, CodingKey { @@ -1045,6 +1136,7 @@ public struct PushTestResult: Codable, Sendable { case tokensuffix = "tokenSuffix" case topic case environment + case transport } } @@ -1230,6 +1322,7 @@ public struct SessionsPatchParams: Codable, Sendable { public let key: String public let label: AnyCodable? public let thinkinglevel: AnyCodable? + public let fastmode: AnyCodable? public let verboselevel: AnyCodable? public let reasoninglevel: AnyCodable? public let responseusage: AnyCodable? @@ -1240,7 +1333,10 @@ public struct SessionsPatchParams: Codable, Sendable { public let execnode: AnyCodable? public let model: AnyCodable? public let spawnedby: AnyCodable? + public let spawnedworkspacedir: AnyCodable? public let spawndepth: AnyCodable? + public let subagentrole: AnyCodable? + public let subagentcontrolscope: AnyCodable? public let sendpolicy: AnyCodable? public let groupactivation: AnyCodable? 
@@ -1248,6 +1344,7 @@ public struct SessionsPatchParams: Codable, Sendable { key: String, label: AnyCodable?, thinkinglevel: AnyCodable?, + fastmode: AnyCodable?, verboselevel: AnyCodable?, reasoninglevel: AnyCodable?, responseusage: AnyCodable?, @@ -1258,13 +1355,17 @@ public struct SessionsPatchParams: Codable, Sendable { execnode: AnyCodable?, model: AnyCodable?, spawnedby: AnyCodable?, + spawnedworkspacedir: AnyCodable?, spawndepth: AnyCodable?, + subagentrole: AnyCodable?, + subagentcontrolscope: AnyCodable?, sendpolicy: AnyCodable?, groupactivation: AnyCodable?) { self.key = key self.label = label self.thinkinglevel = thinkinglevel + self.fastmode = fastmode self.verboselevel = verboselevel self.reasoninglevel = reasoninglevel self.responseusage = responseusage @@ -1275,7 +1376,10 @@ public struct SessionsPatchParams: Codable, Sendable { self.execnode = execnode self.model = model self.spawnedby = spawnedby + self.spawnedworkspacedir = spawnedworkspacedir self.spawndepth = spawndepth + self.subagentrole = subagentrole + self.subagentcontrolscope = subagentcontrolscope self.sendpolicy = sendpolicy self.groupactivation = groupactivation } @@ -1284,6 +1388,7 @@ public struct SessionsPatchParams: Codable, Sendable { case key case label case thinkinglevel = "thinkingLevel" + case fastmode = "fastMode" case verboselevel = "verboseLevel" case reasoninglevel = "reasoningLevel" case responseusage = "responseUsage" @@ -1294,7 +1399,10 @@ public struct SessionsPatchParams: Codable, Sendable { case execnode = "execNode" case model case spawnedby = "spawnedBy" + case spawnedworkspacedir = "spawnedWorkspaceDir" case spawndepth = "spawnDepth" + case subagentrole = "subagentRole" + case subagentcontrolscope = "subagentControlScope" case sendpolicy = "sendPolicy" case groupactivation = "groupActivation" } @@ -2950,7 +3058,7 @@ public struct ExecApprovalsSnapshot: Codable, Sendable { public struct ExecApprovalRequestParams: Codable, Sendable { public let id: String? 
- public let command: String + public let command: String? public let commandargv: [String]? public let systemrunplan: [String: AnyCodable]? public let env: [String: AnyCodable]? @@ -2971,7 +3079,7 @@ public struct ExecApprovalRequestParams: Codable, Sendable { public init( id: String?, - command: String, + command: String?, commandargv: [String]?, systemrunplan: [String: AnyCodable]?, env: [String: AnyCodable]?, diff --git a/apps/shared/OpenClawKit/Tests/OpenClawKitTests/ChatViewModelTests.swift b/apps/shared/OpenClawKit/Tests/OpenClawKitTests/ChatViewModelTests.swift index e7ba4523e68..6d1fa88e569 100644 --- a/apps/shared/OpenClawKit/Tests/OpenClawKitTests/ChatViewModelTests.swift +++ b/apps/shared/OpenClawKit/Tests/OpenClawKitTests/ChatViewModelTests.swift @@ -41,17 +41,69 @@ private func sessionEntry(key: String, updatedAt: Double) -> OpenClawChatSession inputTokens: nil, outputTokens: nil, totalTokens: nil, + modelProvider: nil, model: nil, contextTokens: nil) } +private func sessionEntry( + key: String, + updatedAt: Double, + model: String?, + modelProvider: String? 
= nil) -> OpenClawChatSessionEntry +{ + OpenClawChatSessionEntry( + key: key, + kind: nil, + displayName: nil, + surface: nil, + subject: nil, + room: nil, + space: nil, + updatedAt: updatedAt, + sessionId: nil, + systemSent: nil, + abortedLastRun: nil, + thinkingLevel: nil, + verboseLevel: nil, + inputTokens: nil, + outputTokens: nil, + totalTokens: nil, + modelProvider: modelProvider, + model: model, + contextTokens: nil) +} + +private func modelChoice(id: String, name: String, provider: String = "anthropic") -> OpenClawChatModelChoice { + OpenClawChatModelChoice(modelID: id, name: name, provider: provider, contextWindow: nil) +} + private func makeViewModel( sessionKey: String = "main", historyResponses: [OpenClawChatHistoryPayload], - sessionsResponses: [OpenClawChatSessionsListResponse] = []) async -> (TestChatTransport, OpenClawChatViewModel) + sessionsResponses: [OpenClawChatSessionsListResponse] = [], + modelResponses: [[OpenClawChatModelChoice]] = [], + resetSessionHook: (@Sendable (String) async throws -> Void)? = nil, + setSessionModelHook: (@Sendable (String?) async throws -> Void)? = nil, + setSessionThinkingHook: (@Sendable (String) async throws -> Void)? = nil, + initialThinkingLevel: String? = nil, + onThinkingLevelChanged: (@MainActor @Sendable (String) -> Void)? 
= nil) async + -> (TestChatTransport, OpenClawChatViewModel) { - let transport = TestChatTransport(historyResponses: historyResponses, sessionsResponses: sessionsResponses) - let vm = await MainActor.run { OpenClawChatViewModel(sessionKey: sessionKey, transport: transport) } + let transport = TestChatTransport( + historyResponses: historyResponses, + sessionsResponses: sessionsResponses, + modelResponses: modelResponses, + resetSessionHook: resetSessionHook, + setSessionModelHook: setSessionModelHook, + setSessionThinkingHook: setSessionThinkingHook) + let vm = await MainActor.run { + OpenClawChatViewModel( + sessionKey: sessionKey, + transport: transport, + initialThinkingLevel: initialThinkingLevel, + onThinkingLevelChanged: onThinkingLevelChanged) + } return (transport, vm) } @@ -125,27 +177,64 @@ private func emitExternalFinal( errorMessage: nil))) } +@MainActor +private final class CallbackBox { + var values: [String] = [] +} + +private actor AsyncGate { + private var continuation: CheckedContinuation? + + func wait() async { + await withCheckedContinuation { continuation in + self.continuation = continuation + } + } + + func open() { + self.continuation?.resume() + self.continuation = nil + } +} + private actor TestChatTransportState { var historyCallCount: Int = 0 var sessionsCallCount: Int = 0 + var modelsCallCount: Int = 0 + var resetSessionKeys: [String] = [] var sentRunIds: [String] = [] + var sentThinkingLevels: [String] = [] var abortedRunIds: [String] = [] + var patchedModels: [String?] = [] + var patchedThinkingLevels: [String] = [] } private final class TestChatTransport: @unchecked Sendable, OpenClawChatTransport { private let state = TestChatTransportState() private let historyResponses: [OpenClawChatHistoryPayload] private let sessionsResponses: [OpenClawChatSessionsListResponse] + private let modelResponses: [[OpenClawChatModelChoice]] + private let resetSessionHook: (@Sendable (String) async throws -> Void)? 
+ private let setSessionModelHook: (@Sendable (String?) async throws -> Void)? + private let setSessionThinkingHook: (@Sendable (String) async throws -> Void)? private let stream: AsyncStream private let continuation: AsyncStream.Continuation init( historyResponses: [OpenClawChatHistoryPayload], - sessionsResponses: [OpenClawChatSessionsListResponse] = []) + sessionsResponses: [OpenClawChatSessionsListResponse] = [], + modelResponses: [[OpenClawChatModelChoice]] = [], + resetSessionHook: (@Sendable (String) async throws -> Void)? = nil, + setSessionModelHook: (@Sendable (String?) async throws -> Void)? = nil, + setSessionThinkingHook: (@Sendable (String) async throws -> Void)? = nil) { self.historyResponses = historyResponses self.sessionsResponses = sessionsResponses + self.modelResponses = modelResponses + self.resetSessionHook = resetSessionHook + self.setSessionModelHook = setSessionModelHook + self.setSessionThinkingHook = setSessionThinkingHook var cont: AsyncStream.Continuation! self.stream = AsyncStream { c in cont = c @@ -175,11 +264,12 @@ private final class TestChatTransport: @unchecked Sendable, OpenClawChatTranspor func sendMessage( sessionKey _: String, message _: String, - thinking _: String, + thinking: String, idempotencyKey: String, attachments _: [OpenClawChatAttachmentPayload]) async throws -> OpenClawChatSendResponse { await self.state.sentRunIdsAppend(idempotencyKey) + await self.state.sentThinkingLevelsAppend(thinking) return OpenClawChatSendResponse(runId: idempotencyKey, status: "ok") } @@ -201,6 +291,36 @@ private final class TestChatTransport: @unchecked Sendable, OpenClawChatTranspor sessions: []) } + func listModels() async throws -> [OpenClawChatModelChoice] { + let idx = await self.state.modelsCallCount + await self.state.setModelsCallCount(idx + 1) + if idx < self.modelResponses.count { + return self.modelResponses[idx] + } + return self.modelResponses.last ?? [] + } + + func setSessionModel(sessionKey _: String, model: String?) 
async throws { + await self.state.patchedModelsAppend(model) + if let setSessionModelHook = self.setSessionModelHook { + try await setSessionModelHook(model) + } + } + + func resetSession(sessionKey: String) async throws { + await self.state.resetSessionKeysAppend(sessionKey) + if let resetSessionHook = self.resetSessionHook { + try await resetSessionHook(sessionKey) + } + } + + func setSessionThinking(sessionKey _: String, thinkingLevel: String) async throws { + await self.state.patchedThinkingLevelsAppend(thinkingLevel) + if let setSessionThinkingHook = self.setSessionThinkingHook { + try await setSessionThinkingHook(thinkingLevel) + } + } + func requestHealth(timeoutMs _: Int) async throws -> Bool { true } @@ -217,6 +337,22 @@ private final class TestChatTransport: @unchecked Sendable, OpenClawChatTranspor func abortedRunIds() async -> [String] { await self.state.abortedRunIds } + + func sentThinkingLevels() async -> [String] { + await self.state.sentThinkingLevels + } + + func patchedModels() async -> [String?] { + await self.state.patchedModels + } + + func patchedThinkingLevels() async -> [String] { + await self.state.patchedThinkingLevels + } + + func resetSessionKeys() async -> [String] { + await self.state.resetSessionKeys + } } extension TestChatTransportState { @@ -228,6 +364,10 @@ extension TestChatTransportState { self.sessionsCallCount = v } + fileprivate func setModelsCallCount(_ v: Int) { + self.modelsCallCount = v + } + fileprivate func sentRunIdsAppend(_ v: String) { self.sentRunIds.append(v) } @@ -235,6 +375,22 @@ extension TestChatTransportState { fileprivate func abortedRunIdsAppend(_ v: String) { self.abortedRunIds.append(v) } + + fileprivate func sentThinkingLevelsAppend(_ v: String) { + self.sentThinkingLevels.append(v) + } + + fileprivate func patchedModelsAppend(_ v: String?) 
{ + self.patchedModels.append(v) + } + + fileprivate func patchedThinkingLevelsAppend(_ v: String) { + self.patchedThinkingLevels.append(v) + } + + fileprivate func resetSessionKeysAppend(_ v: String) { + self.resetSessionKeys.append(v) + } } @Suite struct ChatViewModelTests { @@ -457,6 +613,667 @@ extension TestChatTransportState { #expect(keys == ["main", "custom"]) } + @Test func sessionChoicesUseResolvedMainSessionKeyInsteadOfLiteralMain() async throws { + let now = Date().timeIntervalSince1970 * 1000 + let recent = now - (30 * 60 * 1000) + let recentOlder = now - (90 * 60 * 1000) + let history = historyPayload(sessionKey: "Luke’s MacBook Pro", sessionId: "sess-main") + let sessions = OpenClawChatSessionsListResponse( + ts: now, + path: nil, + count: 2, + defaults: OpenClawChatSessionsDefaults( + model: nil, + contextTokens: nil, + mainSessionKey: "Luke’s MacBook Pro"), + sessions: [ + OpenClawChatSessionEntry( + key: "Luke’s MacBook Pro", + kind: nil, + displayName: "Luke’s MacBook Pro", + surface: nil, + subject: nil, + room: nil, + space: nil, + updatedAt: recent, + sessionId: nil, + systemSent: nil, + abortedLastRun: nil, + thinkingLevel: nil, + verboseLevel: nil, + inputTokens: nil, + outputTokens: nil, + totalTokens: nil, + modelProvider: nil, + model: nil, + contextTokens: nil), + sessionEntry(key: "recent-1", updatedAt: recentOlder), + ]) + + let (_, vm) = await makeViewModel( + sessionKey: "Luke’s MacBook Pro", + historyResponses: [history], + sessionsResponses: [sessions]) + await MainActor.run { vm.load() } + try await waitUntil("sessions loaded") { await MainActor.run { !vm.sessions.isEmpty } } + + let keys = await MainActor.run { vm.sessionChoices.map(\.key) } + #expect(keys == ["Luke’s MacBook Pro", "recent-1"]) + } + + @Test func sessionChoicesHideInternalOnboardingSession() async throws { + let now = Date().timeIntervalSince1970 * 1000 + let recent = now - (2 * 60 * 1000) + let recentOlder = now - (5 * 60 * 1000) + let history = 
historyPayload(sessionKey: "agent:main:main", sessionId: "sess-main") + let sessions = OpenClawChatSessionsListResponse( + ts: now, + path: nil, + count: 2, + defaults: OpenClawChatSessionsDefaults( + model: nil, + contextTokens: nil, + mainSessionKey: "agent:main:main"), + sessions: [ + OpenClawChatSessionEntry( + key: "agent:main:onboarding", + kind: nil, + displayName: "Luke’s MacBook Pro", + surface: nil, + subject: nil, + room: nil, + space: nil, + updatedAt: recent, + sessionId: nil, + systemSent: nil, + abortedLastRun: nil, + thinkingLevel: nil, + verboseLevel: nil, + inputTokens: nil, + outputTokens: nil, + totalTokens: nil, + modelProvider: nil, + model: nil, + contextTokens: nil), + OpenClawChatSessionEntry( + key: "agent:main:main", + kind: nil, + displayName: "Luke’s MacBook Pro", + surface: nil, + subject: nil, + room: nil, + space: nil, + updatedAt: recentOlder, + sessionId: nil, + systemSent: nil, + abortedLastRun: nil, + thinkingLevel: nil, + verboseLevel: nil, + inputTokens: nil, + outputTokens: nil, + totalTokens: nil, + modelProvider: nil, + model: nil, + contextTokens: nil), + ]) + + let (_, vm) = await makeViewModel( + sessionKey: "agent:main:main", + historyResponses: [history], + sessionsResponses: [sessions]) + await MainActor.run { vm.load() } + try await waitUntil("sessions loaded") { await MainActor.run { !vm.sessions.isEmpty } } + + let keys = await MainActor.run { vm.sessionChoices.map(\.key) } + #expect(keys == ["agent:main:main"]) + } + + @Test func resetTriggerResetsSessionAndReloadsHistory() async throws { + let before = historyPayload( + messages: [ + chatTextMessage(role: "assistant", text: "before reset", timestamp: 1), + ]) + let after = historyPayload( + messages: [ + chatTextMessage(role: "assistant", text: "after reset", timestamp: 2), + ]) + + let (transport, vm) = await makeViewModel(historyResponses: [before, after]) + try await loadAndWaitBootstrap(vm: vm) + try await waitUntil("initial history loaded") { + await 
MainActor.run { vm.messages.first?.content.first?.text == "before reset" } + } + + await MainActor.run { + vm.input = "/new" + vm.send() + } + + try await waitUntil("reset called") { + await transport.resetSessionKeys() == ["main"] + } + try await waitUntil("history reloaded") { + await MainActor.run { vm.messages.first?.content.first?.text == "after reset" } + } + #expect(await transport.lastSentRunId() == nil) + } + + @Test func bootstrapsModelSelectionFromSessionAndDefaults() async throws { + let now = Date().timeIntervalSince1970 * 1000 + let history = historyPayload() + let sessions = OpenClawChatSessionsListResponse( + ts: now, + path: nil, + count: 1, + defaults: OpenClawChatSessionsDefaults(model: "openai/gpt-4.1-mini", contextTokens: nil), + sessions: [ + sessionEntry(key: "main", updatedAt: now, model: "anthropic/claude-opus-4-6"), + ]) + let models = [ + modelChoice(id: "anthropic/claude-opus-4-6", name: "Claude Opus 4.6"), + modelChoice(id: "openai/gpt-4.1-mini", name: "GPT-4.1 mini", provider: "openai"), + ] + + let (_, vm) = await makeViewModel( + historyResponses: [history], + sessionsResponses: [sessions], + modelResponses: [models]) + + try await loadAndWaitBootstrap(vm: vm) + + #expect(await MainActor.run { vm.showsModelPicker }) + #expect(await MainActor.run { vm.modelSelectionID } == "anthropic/claude-opus-4-6") + #expect(await MainActor.run { vm.defaultModelLabel } == "Default: openai/gpt-4.1-mini") + } + + @Test func selectingDefaultModelPatchesNilAndUpdatesSelection() async throws { + let now = Date().timeIntervalSince1970 * 1000 + let history = historyPayload() + let sessions = OpenClawChatSessionsListResponse( + ts: now, + path: nil, + count: 1, + defaults: OpenClawChatSessionsDefaults(model: "openai/gpt-4.1-mini", contextTokens: nil), + sessions: [ + sessionEntry(key: "main", updatedAt: now, model: "anthropic/claude-opus-4-6"), + ]) + let models = [ + modelChoice(id: "anthropic/claude-opus-4-6", name: "Claude Opus 4.6"), + modelChoice(id: 
"openai/gpt-4.1-mini", name: "GPT-4.1 mini", provider: "openai"), + ] + + let (transport, vm) = await makeViewModel( + historyResponses: [history], + sessionsResponses: [sessions], + modelResponses: [models]) + + try await loadAndWaitBootstrap(vm: vm) + + await MainActor.run { vm.selectModel(OpenClawChatViewModel.defaultModelSelectionID) } + + try await waitUntil("session model patched") { + let patched = await transport.patchedModels() + return patched == [nil] + } + + #expect(await MainActor.run { vm.modelSelectionID } == OpenClawChatViewModel.defaultModelSelectionID) + } + + @Test func selectingProviderQualifiedModelDisambiguatesDuplicateModelIDs() async throws { + let now = Date().timeIntervalSince1970 * 1000 + let history = historyPayload() + let sessions = OpenClawChatSessionsListResponse( + ts: now, + path: nil, + count: 1, + defaults: OpenClawChatSessionsDefaults(model: "openrouter/gpt-4.1-mini", contextTokens: nil), + sessions: [ + sessionEntry(key: "main", updatedAt: now, model: "gpt-4.1-mini", modelProvider: "openrouter"), + ]) + let models = [ + modelChoice(id: "gpt-4.1-mini", name: "GPT-4.1 mini", provider: "openai"), + modelChoice(id: "gpt-4.1-mini", name: "GPT-4.1 mini", provider: "openrouter"), + ] + + let (transport, vm) = await makeViewModel( + historyResponses: [history], + sessionsResponses: [sessions], + modelResponses: [models]) + + try await loadAndWaitBootstrap(vm: vm) + + #expect(await MainActor.run { vm.modelSelectionID } == "openrouter/gpt-4.1-mini") + + await MainActor.run { vm.selectModel("openai/gpt-4.1-mini") } + + try await waitUntil("provider-qualified model patched") { + let patched = await transport.patchedModels() + return patched == ["openai/gpt-4.1-mini"] + } + } + + @Test func slashModelIDsStayProviderQualifiedInSelectionAndPatch() async throws { + let now = Date().timeIntervalSince1970 * 1000 + let history = historyPayload() + let sessions = OpenClawChatSessionsListResponse( + ts: now, + path: nil, + count: 1, + defaults: 
nil, + sessions: [ + sessionEntry(key: "main", updatedAt: now, model: nil), + ]) + let models = [ + modelChoice( + id: "openai/gpt-5.4", + name: "GPT-5.4 via Vercel AI Gateway", + provider: "vercel-ai-gateway"), + ] + + let (transport, vm) = await makeViewModel( + historyResponses: [history], + sessionsResponses: [sessions], + modelResponses: [models]) + + try await loadAndWaitBootstrap(vm: vm) + + await MainActor.run { vm.selectModel("vercel-ai-gateway/openai/gpt-5.4") } + + try await waitUntil("slash model patched with provider-qualified ref") { + let patched = await transport.patchedModels() + return patched == ["vercel-ai-gateway/openai/gpt-5.4"] + } + } + + @Test func staleModelPatchCompletionsDoNotOverwriteNewerSelection() async throws { + let now = Date().timeIntervalSince1970 * 1000 + let history = historyPayload() + let sessions = OpenClawChatSessionsListResponse( + ts: now, + path: nil, + count: 1, + defaults: nil, + sessions: [ + sessionEntry(key: "main", updatedAt: now, model: nil), + ]) + let models = [ + modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"), + modelChoice(id: "gpt-5.4-pro", name: "GPT-5.4 Pro", provider: "openai"), + ] + + let (transport, vm) = await makeViewModel( + historyResponses: [history], + sessionsResponses: [sessions], + modelResponses: [models], + setSessionModelHook: { model in + if model == "openai/gpt-5.4" { + try await Task.sleep(for: .milliseconds(200)) + } + }) + + try await loadAndWaitBootstrap(vm: vm) + + await MainActor.run { + vm.selectModel("openai/gpt-5.4") + vm.selectModel("openai/gpt-5.4-pro") + } + + try await waitUntil("two model patches complete") { + let patched = await transport.patchedModels() + return patched == ["openai/gpt-5.4", "openai/gpt-5.4-pro"] + } + + #expect(await MainActor.run { vm.modelSelectionID } == "openai/gpt-5.4-pro") + #expect(await MainActor.run { vm.sessions.first(where: { $0.key == "main" })?.model } == "gpt-5.4-pro") + #expect(await MainActor.run { 
vm.sessions.first(where: { $0.key == "main" })?.modelProvider } == "openai") + } + + @Test func sendWaitsForInFlightModelPatchToFinish() async throws { + let now = Date().timeIntervalSince1970 * 1000 + let history = historyPayload() + let sessions = OpenClawChatSessionsListResponse( + ts: now, + path: nil, + count: 1, + defaults: nil, + sessions: [ + sessionEntry(key: "main", updatedAt: now, model: nil), + ]) + let models = [ + modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"), + ] + let gate = AsyncGate() + + let (transport, vm) = await makeViewModel( + historyResponses: [history], + sessionsResponses: [sessions], + modelResponses: [models], + setSessionModelHook: { model in + if model == "openai/gpt-5.4" { + await gate.wait() + } + }) + + try await loadAndWaitBootstrap(vm: vm) + + await MainActor.run { vm.selectModel("openai/gpt-5.4") } + try await waitUntil("model patch started") { + let patched = await transport.patchedModels() + return patched == ["openai/gpt-5.4"] + } + + await sendUserMessage(vm, text: "hello") + try await waitUntil("send entered waiting state") { + await MainActor.run { vm.isSending } + } + #expect(await transport.lastSentRunId() == nil) + + await MainActor.run { vm.selectThinkingLevel("high") } + try await waitUntil("thinking level changed while send is blocked") { + await MainActor.run { vm.thinkingLevel == "high" } + } + + await gate.open() + + try await waitUntil("send released after model patch") { + await transport.lastSentRunId() != nil + } + #expect(await transport.sentThinkingLevels() == ["off"]) + } + + @Test func failedLatestModelSelectionDoesNotReplayAfterOlderCompletionFinishes() async throws { + let now = Date().timeIntervalSince1970 * 1000 + let history = historyPayload() + let sessions = OpenClawChatSessionsListResponse( + ts: now, + path: nil, + count: 1, + defaults: nil, + sessions: [ + sessionEntry(key: "main", updatedAt: now, model: nil), + ]) + let models = [ + modelChoice(id: "gpt-5.4", name: "GPT-5.4", 
provider: "openai"), + modelChoice(id: "gpt-5.4-pro", name: "GPT-5.4 Pro", provider: "openai"), + ] + + let (transport, vm) = await makeViewModel( + historyResponses: [history], + sessionsResponses: [sessions], + modelResponses: [models], + setSessionModelHook: { model in + if model == "openai/gpt-5.4" { + try await Task.sleep(for: .milliseconds(200)) + return + } + if model == "openai/gpt-5.4-pro" { + throw NSError(domain: "test", code: 1, userInfo: [NSLocalizedDescriptionKey: "boom"]) + } + }) + + try await loadAndWaitBootstrap(vm: vm) + + await MainActor.run { + vm.selectModel("openai/gpt-5.4") + vm.selectModel("openai/gpt-5.4-pro") + } + + try await waitUntil("older model completion wins after latest failure") { + await MainActor.run { + vm.sessions.first(where: { $0.key == "main" })?.model == "gpt-5.4" && + vm.sessions.first(where: { $0.key == "main" })?.modelProvider == "openai" + } + } + + #expect(await MainActor.run { vm.modelSelectionID } == "openai/gpt-5.4") + #expect(await MainActor.run { vm.sessions.first(where: { $0.key == "main" })?.model } == "gpt-5.4") + #expect(await MainActor.run { vm.sessions.first(where: { $0.key == "main" })?.modelProvider } == "openai") + #expect(await transport.patchedModels() == ["openai/gpt-5.4", "openai/gpt-5.4-pro"]) + } + + @Test func failedLatestModelSelectionRestoresEarlierSuccessWithoutReplay() async throws { + let now = Date().timeIntervalSince1970 * 1000 + let history = historyPayload() + let sessions = OpenClawChatSessionsListResponse( + ts: now, + path: nil, + count: 1, + defaults: nil, + sessions: [ + sessionEntry(key: "main", updatedAt: now, model: nil), + ]) + let models = [ + modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"), + modelChoice(id: "gpt-5.4-pro", name: "GPT-5.4 Pro", provider: "openai"), + ] + + let (transport, vm) = await makeViewModel( + historyResponses: [history], + sessionsResponses: [sessions], + modelResponses: [models], + setSessionModelHook: { model in + if model == 
"openai/gpt-5.4" { + try await Task.sleep(for: .milliseconds(100)) + return + } + if model == "openai/gpt-5.4-pro" { + try await Task.sleep(for: .milliseconds(200)) + throw NSError(domain: "test", code: 1, userInfo: [NSLocalizedDescriptionKey: "boom"]) + } + }) + + try await loadAndWaitBootstrap(vm: vm) + + await MainActor.run { + vm.selectModel("openai/gpt-5.4") + vm.selectModel("openai/gpt-5.4-pro") + } + + try await waitUntil("latest failure restores prior successful model") { + await MainActor.run { + vm.modelSelectionID == "openai/gpt-5.4" && + vm.sessions.first(where: { $0.key == "main" })?.model == "gpt-5.4" && + vm.sessions.first(where: { $0.key == "main" })?.modelProvider == "openai" + } + } + + #expect(await transport.patchedModels() == ["openai/gpt-5.4", "openai/gpt-5.4-pro"]) + } + + @Test func switchingSessionsIgnoresLateModelPatchCompletionFromPreviousSession() async throws { + let now = Date().timeIntervalSince1970 * 1000 + let sessions = OpenClawChatSessionsListResponse( + ts: now, + path: nil, + count: 2, + defaults: nil, + sessions: [ + sessionEntry(key: "main", updatedAt: now, model: nil), + sessionEntry(key: "other", updatedAt: now - 1000, model: nil), + ]) + let models = [ + modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"), + ] + + let (transport, vm) = await makeViewModel( + historyResponses: [ + historyPayload(sessionKey: "main", sessionId: "sess-main"), + historyPayload(sessionKey: "other", sessionId: "sess-other"), + ], + sessionsResponses: [sessions, sessions], + modelResponses: [models, models], + setSessionModelHook: { model in + if model == "openai/gpt-5.4" { + try await Task.sleep(for: .milliseconds(200)) + } + }) + + try await loadAndWaitBootstrap(vm: vm, sessionId: "sess-main") + + await MainActor.run { vm.selectModel("openai/gpt-5.4") } + await MainActor.run { vm.switchSession(to: "other") } + + try await waitUntil("switched sessions") { + await MainActor.run { vm.sessionKey == "other" && vm.sessionId == "sess-other" 
} + } + try await waitUntil("late model patch finished") { + let patched = await transport.patchedModels() + return patched == ["openai/gpt-5.4"] + } + + #expect(await MainActor.run { vm.modelSelectionID } == OpenClawChatViewModel.defaultModelSelectionID) + #expect(await MainActor.run { vm.sessions.first(where: { $0.key == "other" })?.model } == nil) + } + + @Test func lateModelCompletionDoesNotReplayCurrentSessionSelectionIntoPreviousSession() async throws { + let now = Date().timeIntervalSince1970 * 1000 + let initialSessions = OpenClawChatSessionsListResponse( + ts: now, + path: nil, + count: 2, + defaults: nil, + sessions: [ + sessionEntry(key: "main", updatedAt: now, model: nil), + sessionEntry(key: "other", updatedAt: now - 1000, model: nil), + ]) + let sessionsAfterOtherSelection = OpenClawChatSessionsListResponse( + ts: now, + path: nil, + count: 2, + defaults: nil, + sessions: [ + sessionEntry(key: "main", updatedAt: now, model: nil), + sessionEntry(key: "other", updatedAt: now - 1000, model: "openai/gpt-5.4-pro"), + ]) + let models = [ + modelChoice(id: "gpt-5.4", name: "GPT-5.4", provider: "openai"), + modelChoice(id: "gpt-5.4-pro", name: "GPT-5.4 Pro", provider: "openai"), + ] + + let (transport, vm) = await makeViewModel( + historyResponses: [ + historyPayload(sessionKey: "main", sessionId: "sess-main"), + historyPayload(sessionKey: "other", sessionId: "sess-other"), + historyPayload(sessionKey: "main", sessionId: "sess-main"), + ], + sessionsResponses: [initialSessions, initialSessions, sessionsAfterOtherSelection], + modelResponses: [models, models, models], + setSessionModelHook: { model in + if model == "openai/gpt-5.4" { + try await Task.sleep(for: .milliseconds(200)) + } + }) + + try await loadAndWaitBootstrap(vm: vm, sessionId: "sess-main") + + await MainActor.run { vm.selectModel("openai/gpt-5.4") } + await MainActor.run { vm.switchSession(to: "other") } + try await waitUntil("switched to other session") { + await MainActor.run { vm.sessionKey 
== "other" && vm.sessionId == "sess-other" } + } + + await MainActor.run { vm.selectModel("openai/gpt-5.4-pro") } + try await waitUntil("both model patches issued") { + let patched = await transport.patchedModels() + return patched == ["openai/gpt-5.4", "openai/gpt-5.4-pro"] + } + await MainActor.run { vm.switchSession(to: "main") } + try await waitUntil("switched back to main session") { + await MainActor.run { vm.sessionKey == "main" && vm.sessionId == "sess-main" } + } + + try await waitUntil("late model completion updates only the original session") { + await MainActor.run { + vm.sessions.first(where: { $0.key == "main" })?.model == "gpt-5.4" && + vm.sessions.first(where: { $0.key == "main" })?.modelProvider == "openai" + } + } + + #expect(await MainActor.run { vm.modelSelectionID } == "openai/gpt-5.4") + #expect(await MainActor.run { vm.sessions.first(where: { $0.key == "main" })?.model } == "gpt-5.4") + #expect(await MainActor.run { vm.sessions.first(where: { $0.key == "main" })?.modelProvider } == "openai") + #expect(await MainActor.run { vm.sessions.first(where: { $0.key == "other" })?.model } == "openai/gpt-5.4-pro") + #expect(await MainActor.run { vm.sessions.first(where: { $0.key == "other" })?.modelProvider } == nil) + #expect(await transport.patchedModels() == ["openai/gpt-5.4", "openai/gpt-5.4-pro"]) + } + + @Test func explicitThinkingLevelWinsOverHistoryAndPersistsChanges() async throws { + let history = OpenClawChatHistoryPayload( + sessionKey: "main", + sessionId: "sess-main", + messages: [], + thinkingLevel: "off") + let callbackState = await MainActor.run { CallbackBox() } + + let (transport, vm) = await makeViewModel( + historyResponses: [history], + initialThinkingLevel: "high", + onThinkingLevelChanged: { level in + callbackState.values.append(level) + }) + + try await loadAndWaitBootstrap(vm: vm, sessionId: "sess-main") + #expect(await MainActor.run { vm.thinkingLevel } == "high") + + await MainActor.run { vm.selectThinkingLevel("medium") } + 
+ try await waitUntil("thinking level patched") { + let patched = await transport.patchedThinkingLevels() + return patched == ["medium"] + } + + #expect(await MainActor.run { vm.thinkingLevel } == "medium") + #expect(await MainActor.run { callbackState.values } == ["medium"]) + } + + @Test func serverProvidedThinkingLevelsOutsideMenuArePreservedForSend() async throws { + let history = OpenClawChatHistoryPayload( + sessionKey: "main", + sessionId: "sess-main", + messages: [], + thinkingLevel: "xhigh") + + let (transport, vm) = await makeViewModel(historyResponses: [history]) + + try await loadAndWaitBootstrap(vm: vm, sessionId: "sess-main") + #expect(await MainActor.run { vm.thinkingLevel } == "xhigh") + + await sendUserMessage(vm, text: "hello") + try await waitUntil("send uses preserved thinking level") { + await transport.sentThinkingLevels() == ["xhigh"] + } + } + + @Test func staleThinkingPatchCompletionReappliesLatestSelection() async throws { + let history = OpenClawChatHistoryPayload( + sessionKey: "main", + sessionId: "sess-main", + messages: [], + thinkingLevel: "off") + + let (transport, vm) = await makeViewModel( + historyResponses: [history], + setSessionThinkingHook: { level in + if level == "medium" { + try await Task.sleep(for: .milliseconds(200)) + } + }) + + try await loadAndWaitBootstrap(vm: vm, sessionId: "sess-main") + + await MainActor.run { + vm.selectThinkingLevel("medium") + vm.selectThinkingLevel("high") + } + + try await waitUntil("thinking patch replayed latest selection") { + let patched = await transport.patchedThinkingLevels() + return patched == ["medium", "high", "high"] + } + + #expect(await MainActor.run { vm.thinkingLevel } == "high") + } + @Test func clearsStreamingOnExternalErrorEvent() async throws { let sessionId = "sess-main" let history = historyPayload(sessionId: sessionId) diff --git a/apps/shared/OpenClawKit/Tests/OpenClawKitTests/DeepLinksSecurityTests.swift 
b/apps/shared/OpenClawKit/Tests/OpenClawKitTests/DeepLinksSecurityTests.swift index 8bbf4f8a650..79613b310ff 100644 --- a/apps/shared/OpenClawKit/Tests/OpenClawKitTests/DeepLinksSecurityTests.swift +++ b/apps/shared/OpenClawKit/Tests/OpenClawKitTests/DeepLinksSecurityTests.swift @@ -20,11 +20,17 @@ import Testing string: "openclaw://gateway?host=127.0.0.1&port=18789&tls=0&token=abc")! #expect( DeepLinkParser.parse(url) == .gateway( - .init(host: "127.0.0.1", port: 18789, tls: false, token: "abc", password: nil))) + .init( + host: "127.0.0.1", + port: 18789, + tls: false, + bootstrapToken: nil, + token: "abc", + password: nil))) } @Test func setupCodeRejectsInsecureNonLoopbackWs() { - let payload = #"{"url":"ws://attacker.example:18789","token":"tok"}"# + let payload = #"{"url":"ws://attacker.example:18789","bootstrapToken":"tok"}"# let encoded = Data(payload.utf8) .base64EncodedString() .replacingOccurrences(of: "+", with: "-") @@ -34,7 +40,7 @@ import Testing } @Test func setupCodeRejectsInsecurePrefixBypassHost() { - let payload = #"{"url":"ws://127.attacker.example:18789","token":"tok"}"# + let payload = #"{"url":"ws://127.attacker.example:18789","bootstrapToken":"tok"}"# let encoded = Data(payload.utf8) .base64EncodedString() .replacingOccurrences(of: "+", with: "-") @@ -44,7 +50,7 @@ import Testing } @Test func setupCodeAllowsLoopbackWs() { - let payload = #"{"url":"ws://127.0.0.1:18789","token":"tok"}"# + let payload = #"{"url":"ws://127.0.0.1:18789","bootstrapToken":"tok"}"# let encoded = Data(payload.utf8) .base64EncodedString() .replacingOccurrences(of: "+", with: "-") @@ -55,7 +61,8 @@ import Testing host: "127.0.0.1", port: 18789, tls: false, - token: "tok", + bootstrapToken: "tok", + token: nil, password: nil)) } } diff --git a/apps/shared/OpenClawKit/Tests/OpenClawKitTests/GatewayErrorsTests.swift b/apps/shared/OpenClawKit/Tests/OpenClawKitTests/GatewayErrorsTests.swift new file mode 100644 index 00000000000..92d3e1292de --- /dev/null +++ 
b/apps/shared/OpenClawKit/Tests/OpenClawKitTests/GatewayErrorsTests.swift @@ -0,0 +1,14 @@ +import OpenClawKit +import Testing + +@Suite struct GatewayErrorsTests { + @Test func bootstrapTokenInvalidIsNonRecoverable() { + let error = GatewayConnectAuthError( + message: "setup code expired", + detailCode: GatewayConnectAuthDetailCode.authBootstrapTokenInvalid.rawValue, + canRetryWithDeviceToken: false) + + #expect(error.isNonRecoverable) + #expect(error.detail == .authBootstrapTokenInvalid) + } +} diff --git a/apps/shared/OpenClawKit/Tests/OpenClawKitTests/GatewayNodeSessionTests.swift b/apps/shared/OpenClawKit/Tests/OpenClawKitTests/GatewayNodeSessionTests.swift index a48015e1100..183fc385d8c 100644 --- a/apps/shared/OpenClawKit/Tests/OpenClawKitTests/GatewayNodeSessionTests.swift +++ b/apps/shared/OpenClawKit/Tests/OpenClawKitTests/GatewayNodeSessionTests.swift @@ -266,6 +266,7 @@ struct GatewayNodeSessionTests { try await gateway.connect( url: URL(string: "ws://example.invalid")!, token: nil, + bootstrapToken: nil, password: nil, connectOptions: options, sessionBox: WebSocketSessionBox(session: session), diff --git a/changelog/fragments/openai-codex-auth-tests-gpt54.md b/changelog/fragments/openai-codex-auth-tests-gpt54.md new file mode 100644 index 00000000000..ec1cd4b199f --- /dev/null +++ b/changelog/fragments/openai-codex-auth-tests-gpt54.md @@ -0,0 +1 @@ +- tests: align OpenAI Codex auth login expectations with the `gpt-5.4` default model to prevent stale CI failures. (#44367) thanks @jrrcdev diff --git a/changelog/fragments/toolcall-id-malformed-name-inference.md b/changelog/fragments/toolcall-id-malformed-name-inference.md new file mode 100644 index 00000000000..6af2b986f34 --- /dev/null +++ b/changelog/fragments/toolcall-id-malformed-name-inference.md @@ -0,0 +1 @@ +- runner: infer canonical tool names from malformed `toolCallId` variants (e.g. 
`functionsread3`, `functionswrite4`) when allowlist is present, preventing `Tool not found` regressions in strict routers. diff --git a/docker-compose.yml b/docker-compose.yml index cc7169d3a88..c0bffc64458 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -9,6 +9,7 @@ services: CLAUDE_AI_SESSION_KEY: ${CLAUDE_AI_SESSION_KEY:-} CLAUDE_WEB_SESSION_KEY: ${CLAUDE_WEB_SESSION_KEY:-} CLAUDE_WEB_COOKIE: ${CLAUDE_WEB_COOKIE:-} + TZ: ${OPENCLAW_TZ:-UTC} volumes: - ${OPENCLAW_CONFIG_DIR}:/home/node/.openclaw - ${OPENCLAW_WORKSPACE_DIR}:/home/node/.openclaw/workspace @@ -65,6 +66,7 @@ services: CLAUDE_AI_SESSION_KEY: ${CLAUDE_AI_SESSION_KEY:-} CLAUDE_WEB_SESSION_KEY: ${CLAUDE_WEB_SESSION_KEY:-} CLAUDE_WEB_COOKIE: ${CLAUDE_WEB_COOKIE:-} + TZ: ${OPENCLAW_TZ:-UTC} volumes: - ${OPENCLAW_CONFIG_DIR}:/home/node/.openclaw - ${OPENCLAW_WORKSPACE_DIR}:/home/node/.openclaw/workspace diff --git a/docker-setup.sh b/docker-setup.sh index 450c2025ffa..19e5461765b 100755 --- a/docker-setup.sh +++ b/docker-setup.sh @@ -10,6 +10,7 @@ HOME_VOLUME_NAME="${OPENCLAW_HOME_VOLUME:-}" RAW_SANDBOX_SETTING="${OPENCLAW_SANDBOX:-}" SANDBOX_ENABLED="" DOCKER_SOCKET_PATH="${OPENCLAW_DOCKER_SOCKET:-}" +TIMEZONE="${OPENCLAW_TZ:-}" fail() { echo "ERROR: $*" >&2 @@ -135,6 +136,11 @@ contains_disallowed_chars() { [[ "$value" == *$'\n'* || "$value" == *$'\r'* || "$value" == *$'\t'* ]] } +is_valid_timezone() { + local value="$1" + [[ -e "/usr/share/zoneinfo/$value" && ! -d "/usr/share/zoneinfo/$value" ]] +} + validate_mount_path_value() { local label="$1" local value="$2" @@ -202,6 +208,17 @@ fi if [[ -n "$SANDBOX_ENABLED" ]]; then validate_mount_path_value "OPENCLAW_DOCKER_SOCKET" "$DOCKER_SOCKET_PATH" fi +if [[ -n "$TIMEZONE" ]]; then + if contains_disallowed_chars "$TIMEZONE"; then + fail "OPENCLAW_TZ contains unsupported control characters." + fi + if [[ ! "$TIMEZONE" =~ ^[A-Za-z0-9/_+\-]+$ ]]; then + fail "OPENCLAW_TZ must be a valid IANA timezone string (e.g. Asia/Shanghai)." + fi + if ! 
is_valid_timezone "$TIMEZONE"; then + fail "OPENCLAW_TZ must match a timezone in /usr/share/zoneinfo (e.g. Asia/Shanghai)." + fi +fi mkdir -p "$OPENCLAW_CONFIG_DIR" mkdir -p "$OPENCLAW_WORKSPACE_DIR" @@ -224,6 +241,7 @@ export OPENCLAW_HOME_VOLUME="$HOME_VOLUME_NAME" export OPENCLAW_ALLOW_INSECURE_PRIVATE_WS="${OPENCLAW_ALLOW_INSECURE_PRIVATE_WS:-}" export OPENCLAW_SANDBOX="$SANDBOX_ENABLED" export OPENCLAW_DOCKER_SOCKET="$DOCKER_SOCKET_PATH" +export OPENCLAW_TZ="$TIMEZONE" # Detect Docker socket GID for sandbox group_add. DOCKER_GID="" @@ -408,7 +426,8 @@ upsert_env "$ENV_FILE" \ OPENCLAW_DOCKER_SOCKET \ DOCKER_GID \ OPENCLAW_INSTALL_DOCKER_CLI \ - OPENCLAW_ALLOW_INSECURE_PRIVATE_WS + OPENCLAW_ALLOW_INSECURE_PRIVATE_WS \ + OPENCLAW_TZ if [[ "$IMAGE_NAME" == "openclaw:local" ]]; then echo "==> Building Docker image: $IMAGE_NAME" diff --git a/docs.acp.md b/docs.acp.md index cfe7349c341..1e93ee0cf63 100644 --- a/docs.acp.md +++ b/docs.acp.md @@ -17,6 +17,51 @@ Key goals: - Works with existing Gateway session store (list/resolve/reset). - Safe defaults (isolated ACP session keys by default). +## Bridge Scope + +`openclaw acp` is a Gateway-backed ACP bridge, not a full ACP-native editor +runtime. It is designed to route IDE prompts into an existing OpenClaw Gateway +session with predictable session mapping and basic streaming updates. + +## Compatibility Matrix + +| ACP area | Status | Notes | +| --------------------------------------------------------------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `initialize`, `newSession`, `prompt`, `cancel` | Implemented | Core bridge flow over stdio to Gateway chat/send + abort. 
| +| `listSessions`, slash commands | Implemented | Session list works against Gateway session state; commands are advertised via `available_commands_update`. | +| `loadSession` | Partial | Rebinds the ACP session to a Gateway session key and replays stored user/assistant text history. Tool/system history is not reconstructed yet. | +| Prompt content (`text`, embedded `resource`, images) | Partial | Text/resources are flattened into chat input; images become Gateway attachments. | +| Session modes | Partial | `session/set_mode` is supported and the bridge exposes initial Gateway-backed session controls for thought level, tool verbosity, reasoning, usage detail, and elevated actions. Broader ACP-native mode/config surfaces are still out of scope. | +| Session info and usage updates | Partial | The bridge emits `session_info_update` and best-effort `usage_update` notifications from cached Gateway session snapshots. Usage is approximate and only sent when Gateway token totals are marked fresh. | +| Tool streaming | Partial | `tool_call` / `tool_call_update` events include raw I/O, text content, and best-effort file locations when Gateway tool args/results expose them. Embedded terminals and richer diff-native output are still not exposed. | +| Per-session MCP servers (`mcpServers`) | Unsupported | Bridge mode rejects per-session MCP server requests. Configure MCP on the OpenClaw gateway or agent instead. | +| Client filesystem methods (`fs/read_text_file`, `fs/write_text_file`) | Unsupported | The bridge does not call ACP client filesystem methods. | +| Client terminal methods (`terminal/*`) | Unsupported | The bridge does not create ACP client terminals or stream terminal ids through tool calls. | +| Session plans / thought streaming | Unsupported | The bridge currently emits output text and tool status, not ACP plan or thought updates. 
| + +## Known Limitations + +- `loadSession` replays stored user and assistant text history, but it does not + reconstruct historic tool calls, system notices, or richer ACP-native event + types. +- If multiple ACP clients share the same Gateway session key, event and cancel + routing are best-effort rather than strictly isolated per client. Prefer the + default isolated `acp:` sessions when you need clean editor-local + turns. +- Gateway stop states are translated into ACP stop reasons, but that mapping is + less expressive than a fully ACP-native runtime. +- Initial session controls currently surface a focused subset of Gateway knobs: + thought level, tool verbosity, reasoning, usage detail, and elevated + actions. Model selection and exec-host controls are not yet exposed as ACP + config options. +- `session_info_update` and `usage_update` are derived from Gateway session + snapshots, not live ACP-native runtime accounting. Usage is approximate, + carries no cost data, and is only emitted when the Gateway marks total token + data as fresh. +- Tool follow-along data is best-effort. The bridge can surface file paths that + appear in known tool args/results, but it does not yet emit ACP terminals or + structured file diffs. + ## How can I use this Use ACP when an IDE or tooling speaks Agent Client Protocol and you want it to @@ -181,9 +226,11 @@ updates. Terminal Gateway states map to ACP `done` with stop reasons: ## Compatibility -- ACP bridge uses `@agentclientprotocol/sdk` (currently 0.13.x). +- ACP bridge uses `@agentclientprotocol/sdk` (currently 0.15.x). - Works with ACP clients that implement `initialize`, `newSession`, `loadSession`, `prompt`, `cancel`, and `listSessions`. +- Bridge mode rejects per-session `mcpServers` instead of silently ignoring + them. Configure MCP at the Gateway or agent layer. 
## Testing diff --git a/docs/automation/cron-jobs.md b/docs/automation/cron-jobs.md index 47bae78b86f..cb27380416b 100644 --- a/docs/automation/cron-jobs.md +++ b/docs/automation/cron-jobs.md @@ -25,10 +25,13 @@ Troubleshooting: [/automation/troubleshooting](/automation/troubleshooting) - Jobs persist under `~/.openclaw/cron/` so restarts don’t lose schedules. - Two execution styles: - **Main session**: enqueue a system event, then run on the next heartbeat. - - **Isolated**: run a dedicated agent turn in `cron:<jobId>`, with delivery (announce by default or none). + - **Isolated**: run a dedicated agent turn in `cron:<jobId>` or a custom session, with delivery (announce by default or none). + - **Current session**: bind to the session where the cron is created (`sessionTarget: "current"`). + - **Custom session**: run in a persistent named session (`sessionTarget: "session:custom-id"`). - Wakeups are first-class: a job can request “wake now” vs “next heartbeat”. - Webhook posting is per job via `delivery.mode = "webhook"` + `delivery.to = "<url>"`. - Legacy fallback remains for stored jobs with `notify: true` when `cron.webhook` is set, migrate those jobs to webhook delivery mode. +- For upgrades, `openclaw doctor --fix` can normalize legacy cron store fields before the scheduler touches them. ## Quick start (actionable) @@ -85,6 +88,14 @@ Think of a cron job as: **when** to run + **what** to do. 2. **Choose where it runs** - `sessionTarget: "main"` → run during the next heartbeat with main context. - `sessionTarget: "isolated"` → run a dedicated agent turn in `cron:<jobId>`. + - `sessionTarget: "current"` → bind to the current session (resolved at creation time to `session:<sessionId>`). + - `sessionTarget: "session:custom-id"` → run in a persistent named session that maintains context across runs. + + Default behavior (unchanged): + - `systemEvent` payloads default to `main` + - `agentTurn` payloads default to `isolated` + + To use current session binding, explicitly set `sessionTarget: "current"`.
3. **Choose the payload** - Main session → `payload.kind = "systemEvent"` @@ -146,12 +157,13 @@ See [Heartbeat](/gateway/heartbeat). #### Isolated jobs (dedicated cron sessions) -Isolated jobs run a dedicated agent turn in session `cron:`. +Isolated jobs run a dedicated agent turn in session `cron:` or a custom session. Key behaviors: - Prompt is prefixed with `[cron: ]` for traceability. -- Each run starts a **fresh session id** (no prior conversation carry-over). +- Each run starts a **fresh session id** (no prior conversation carry-over), unless using a custom session. +- Custom sessions (`session:xxx`) persist context across runs, enabling workflows like daily standups that build on previous summaries. - Default behavior: if `delivery` is omitted, isolated jobs announce a summary (`delivery.mode = "announce"`). - `delivery.mode` chooses what happens: - `announce`: deliver a summary to the target channel and post a brief summary to the main session. @@ -261,6 +273,7 @@ If `delivery.channel` or `delivery.to` is omitted, cron can fall back to the mai Target format reminders: - Slack/Discord/Mattermost (plugin) targets should use explicit prefixes (e.g. `channel:`, `user:`) to avoid ambiguity. + Mattermost bare 26-char IDs are resolved **user-first** (DM if user exists, channel otherwise) — use `user:` or `channel:` for deterministic routing. - Telegram topics should use the `:topic:` form (see below). #### Telegram delivery targets (topics / forum threads) @@ -319,12 +332,42 @@ Recurring, isolated job with delivery: } ``` +Recurring job bound to current session (auto-resolved at creation): + +```json +{ + "name": "Daily standup", + "schedule": { "kind": "cron", "expr": "0 9 * * *" }, + "sessionTarget": "current", + "payload": { + "kind": "agentTurn", + "message": "Summarize yesterday's progress." 
+ } +} +``` + +Recurring job in a custom persistent session: + +```json +{ + "name": "Project monitor", + "schedule": { "kind": "every", "everyMs": 300000 }, + "sessionTarget": "session:project-alpha-monitor", + "payload": { + "kind": "agentTurn", + "message": "Check project status and update the running log." + } +} +``` + Notes: - `schedule.kind`: `at` (`at`), `every` (`everyMs`), or `cron` (`expr`, optional `tz`). - `schedule.at` accepts ISO 8601 (timezone optional; treated as UTC when omitted). - `everyMs` is milliseconds. -- `sessionTarget` must be `"main"` or `"isolated"` and must match `payload.kind`. +- `sessionTarget`: `"main"`, `"isolated"`, `"current"`, or `"session:<id>"`. +- `"current"` is resolved to `"session:<id>"` at creation time. +- Custom sessions (`session:xxx`) maintain persistent context across runs. - Optional fields: `agentId`, `description`, `enabled`, `deleteAfterRun` (defaults to true for `at`), `delivery`. - `wakeMode` defaults to `"now"` when omitted. diff --git a/docs/automation/cron-vs-heartbeat.md b/docs/automation/cron-vs-heartbeat.md index 9676d960d23..09f9187c368 100644 --- a/docs/automation/cron-vs-heartbeat.md +++ b/docs/automation/cron-vs-heartbeat.md @@ -219,13 +219,13 @@ See [Lobster](/tools/lobster) for full usage and examples.
Both heartbeat and cron can interact with the main session, but differently: -| | Heartbeat | Cron (main) | Cron (isolated) | -| ------- | ------------------------------- | ------------------------ | -------------------------- | -| Session | Main | Main (via system event) | `cron:` | -| History | Shared | Shared | Fresh each run | -| Context | Full | Full | None (starts clean) | -| Model | Main session model | Main session model | Can override | -| Output | Delivered if not `HEARTBEAT_OK` | Heartbeat prompt + event | Announce summary (default) | +| | Heartbeat | Cron (main) | Cron (isolated) | +| ------- | ------------------------------- | ------------------------ | ----------------------------------------------- | +| Session | Main | Main (via system event) | `cron:` or custom session | +| History | Shared | Shared | Fresh each run (isolated) / Persistent (custom) | +| Context | Full | Full | None (isolated) / Cumulative (custom) | +| Model | Main session model | Main session model | Can override | +| Output | Delivered if not `HEARTBEAT_OK` | Heartbeat prompt + event | Announce summary (default) | ### When to use main session cron diff --git a/docs/brave-search.md b/docs/brave-search.md index a8bba5c3e91..4a541690431 100644 --- a/docs/brave-search.md +++ b/docs/brave-search.md @@ -73,7 +73,7 @@ await web_search({ ## Notes - OpenClaw uses the Brave **Search** plan. If you have a legacy subscription (e.g. the original Free plan with 2,000 queries/month), it remains valid but does not include newer features like LLM Context or higher rate limits. -- Each Brave plan includes **$5/month in free credit** (renewing). The Search plan costs $5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans. +- Each Brave plan includes **\$5/month in free credit** (renewing). 
The Search plan costs \$5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans. - The Search plan includes the LLM Context endpoint and AI inference rights. Storing results to train or tune models requires a plan with explicit storage rights. See the Brave [Terms of Service](https://api-dashboard.search.brave.com/terms-of-service). - Results are cached for 15 minutes by default (configurable via `cacheTtlMinutes`). diff --git a/docs/channels/channel-routing.md b/docs/channels/channel-routing.md index 2d824359311..63c5806ebae 100644 --- a/docs/channels/channel-routing.md +++ b/docs/channels/channel-routing.md @@ -118,6 +118,11 @@ Session stores live under the state directory (default `~/.openclaw`): You can override the store path via `session.store` and `{agentId}` templating. +Gateway and ACP session discovery also scans disk-backed agent stores under the +default `agents/` root and under templated `session.store` roots. Discovered +stores must stay inside that resolved agent root and use a regular +`sessions.json` file. Symlinks and out-of-root paths are ignored. + ## WebChat behavior WebChat attaches to the **selected agent** and defaults to the agent’s main diff --git a/docs/channels/discord.md b/docs/channels/discord.md index 994c03391ce..e179417e9b8 100644 --- a/docs/channels/discord.md +++ b/docs/channels/discord.md @@ -168,6 +168,7 @@ openclaw pairing approve discord Token resolution is account-aware. Config token values win over env fallback. `DISCORD_BOT_TOKEN` is only used for the default account. +For advanced outbound calls (message tool/channel actions), an explicit per-call `token` is used for that call. Account policy/retry settings still come from the selected account in the active runtime snapshot. 
## Recommended: Set up a guild workspace @@ -945,7 +946,7 @@ Default slash command settings: Gateway auth for this handler uses the same shared credential resolution contract as other Gateway clients: - env-first local auth (`OPENCLAW_GATEWAY_TOKEN` / `OPENCLAW_GATEWAY_PASSWORD` then `gateway.auth.*`) - - in local mode, `gateway.remote.*` can be used as fallback when `gateway.auth.*` is unset + - in local mode, `gateway.remote.*` can be used as fallback only when `gateway.auth.*` is unset; configured-but-unresolved local SecretRefs fail closed - remote-mode support via `gateway.remote.*` when applicable - URL overrides are override-safe: CLI overrides do not reuse implicit credentials, and env overrides use env credentials only diff --git a/docs/channels/feishu.md b/docs/channels/feishu.md index 67e4fd60379..467fc57c0fe 100644 --- a/docs/channels/feishu.md +++ b/docs/channels/feishu.md @@ -193,16 +193,18 @@ Edit `~/.openclaw/openclaw.json`: } ``` -If you use `connectionMode: "webhook"`, set `verificationToken`. The Feishu webhook server binds to `127.0.0.1` by default; set `webhookHost` only if you intentionally need a different bind address. +If you use `connectionMode: "webhook"`, set both `verificationToken` and `encryptKey`. The Feishu webhook server binds to `127.0.0.1` by default; set `webhookHost` only if you intentionally need a different bind address. -#### Verification Token (webhook mode) +#### Verification Token and Encrypt Key (webhook mode) -When using webhook mode, set `channels.feishu.verificationToken` in your config. To get the value: +When using webhook mode, set both `channels.feishu.verificationToken` and `channels.feishu.encryptKey` in your config. To get the values: 1. In Feishu Open Platform, open your app 2. Go to **Development** → **Events & Callbacks** (开发配置 → 事件与回调) 3. Open the **Encryption** tab (加密策略) -4. Copy **Verification Token** +4. 
Copy **Verification Token** and **Encrypt Key** + +The screenshot below shows where to find the **Verification Token**. The **Encrypt Key** is listed in the same **Encryption** section. ![Verification Token location](../images/feishu-verification-token.png) @@ -600,6 +602,7 @@ Key options: | `channels.feishu.connectionMode` | Event transport mode | `websocket` | | `channels.feishu.defaultAccount` | Default account ID for outbound routing | `default` | | `channels.feishu.verificationToken` | Required for webhook mode | - | +| `channels.feishu.encryptKey` | Required for webhook mode | - | | `channels.feishu.webhookPath` | Webhook route path | `/feishu/events` | | `channels.feishu.webhookHost` | Webhook bind host | `127.0.0.1` | | `channels.feishu.webhookPort` | Webhook bind port | `3000` | diff --git a/docs/channels/googlechat.md b/docs/channels/googlechat.md index 09693589af7..bc9d435f4de 100644 --- a/docs/channels/googlechat.md +++ b/docs/channels/googlechat.md @@ -145,7 +145,7 @@ Configure your tunnel's ingress rules to only route the webhook path: - `audienceType: "app-url"` → audience is your HTTPS webhook URL. - `audienceType: "project-number"` → audience is the Cloud project number. 3. Messages are routed by space: - - DMs use session key `agent::googlechat:dm:`. + - DMs use session key `agent::googlechat:direct:`. - Spaces use session key `agent::googlechat:group:`. 4. DM access is pairing by default. Unknown senders receive a pairing code; approve with: - `openclaw pairing approve googlechat ` diff --git a/docs/channels/line.md b/docs/channels/line.md index 50972d93d21..a965dc6e991 100644 --- a/docs/channels/line.md +++ b/docs/channels/line.md @@ -87,6 +87,8 @@ Token/secret files: } ``` +`tokenFile` and `secretFile` must point to regular files. Symlinks are rejected. 
+ Multiple accounts: ```json5 diff --git a/docs/channels/mattermost.md b/docs/channels/mattermost.md index f9417109a77..1e3e3f4bad2 100644 --- a/docs/channels/mattermost.md +++ b/docs/channels/mattermost.md @@ -129,6 +129,35 @@ Notes: - `onchar` still responds to explicit @mentions. - `channels.mattermost.requireMention` is honored for legacy configs but `chatmode` is preferred. +## Threading and sessions + +Use `channels.mattermost.replyToMode` to control whether channel and group replies stay in the +main channel or start a thread under the triggering post. + +- `off` (default): only reply in a thread when the inbound post is already in one. +- `first`: for top-level channel/group posts, start a thread under that post and route the + conversation to a thread-scoped session. +- `all`: same behavior as `first` for Mattermost today. +- Direct messages ignore this setting and stay non-threaded. + +Config example: + +```json5 +{ + channels: { + mattermost: { + replyToMode: "all", + }, + }, +} +``` + +Notes: + +- Thread-scoped sessions use the triggering post id as the thread root. +- `first` and `all` are currently equivalent because once Mattermost has a thread root, + follow-up chunks and media continue in that same thread. + ## Access control (DMs) - Default: `channels.mattermost.dmPolicy = "pairing"` (unknown senders get a pairing code). @@ -153,7 +182,14 @@ Use these target formats with `openclaw message send` or cron/webhooks: - `user:` for a DM - `@username` for a DM (resolved via the Mattermost API) -Bare IDs are treated as channels. +Bare opaque IDs (like `64ifufp...`) are **ambiguous** in Mattermost (user ID vs channel ID). + +OpenClaw resolves them **user-first**: + +- If the ID exists as a user (`GET /api/v4/users/` succeeds), OpenClaw sends a **DM** by resolving the direct channel via `/api/v4/channels/direct`. +- Otherwise the ID is treated as a **channel ID**. 
+ +If you need deterministic behavior, always use the explicit prefixes (`user:` / `channel:`). ## Reactions (message tool) diff --git a/docs/channels/msteams.md b/docs/channels/msteams.md index 9c4a583e1b5..a24f20c69df 100644 --- a/docs/channels/msteams.md +++ b/docs/channels/msteams.md @@ -114,11 +114,11 @@ Example: **Teams + channel allowlist** - Scope group/channel replies by listing teams and channels under `channels.msteams.teams`. -- Keys can be team IDs or names; channel keys can be conversation IDs or names. +- Keys should use stable team IDs and channel conversation IDs. - When `groupPolicy="allowlist"` and a teams allowlist is present, only listed teams/channels are accepted (mention‑gated). - The configure wizard accepts `Team/Channel` entries and stores them for you. - On startup, OpenClaw resolves team/channel and user allowlist names to IDs (when Graph permissions allow) - and logs the mapping; unresolved entries are kept as typed. + and logs the mapping; unresolved team/channel names are kept as typed but ignored for routing by default unless `channels.msteams.dangerouslyAllowNameMatching: true` is enabled. Example: @@ -457,7 +457,7 @@ Key settings (see `/gateway/configuration` for shared channel patterns): - `channels.msteams.webhook.path` (default `/api/messages`) - `channels.msteams.dmPolicy`: `pairing | allowlist | open | disabled` (default: pairing) - `channels.msteams.allowFrom`: DM allowlist (AAD object IDs recommended). The wizard resolves names to IDs during setup when Graph access is available. -- `channels.msteams.dangerouslyAllowNameMatching`: break-glass toggle to re-enable mutable UPN/display-name matching. +- `channels.msteams.dangerouslyAllowNameMatching`: break-glass toggle to re-enable mutable UPN/display-name matching and direct team/channel name routing. - `channels.msteams.textChunkLimit`: outbound text chunk size. 
- `channels.msteams.chunkMode`: `length` (default) or `newline` to split on blank lines (paragraph boundaries) before length chunking. - `channels.msteams.mediaAllowHosts`: allowlist for inbound attachment hosts (defaults to Microsoft/Teams domains). diff --git a/docs/channels/nextcloud-talk.md b/docs/channels/nextcloud-talk.md index d4ab9e2c397..7797b1276ff 100644 --- a/docs/channels/nextcloud-talk.md +++ b/docs/channels/nextcloud-talk.md @@ -115,7 +115,7 @@ Provider options: - `channels.nextcloud-talk.enabled`: enable/disable channel startup. - `channels.nextcloud-talk.baseUrl`: Nextcloud instance URL. - `channels.nextcloud-talk.botSecret`: bot shared secret. -- `channels.nextcloud-talk.botSecretFile`: secret file path. +- `channels.nextcloud-talk.botSecretFile`: regular-file secret path. Symlinks are rejected. - `channels.nextcloud-talk.apiUser`: API user for room lookups (DM detection). - `channels.nextcloud-talk.apiPassword`: API/app password for room lookups. - `channels.nextcloud-talk.apiPasswordFile`: API password file path. diff --git a/docs/channels/pairing.md b/docs/channels/pairing.md index d402de16662..1ba3c6c92f2 100644 --- a/docs/channels/pairing.md +++ b/docs/channels/pairing.md @@ -72,7 +72,7 @@ If you use the `device-pair` plugin, you can do first-time device pairing entire The setup code is a base64-encoded JSON payload that contains: - `url`: the Gateway WebSocket URL (`ws://...` or `wss://...`) -- `token`: a short-lived pairing token +- `bootstrapToken`: a short-lived single-device bootstrap token used for the initial pairing handshake Treat the setup code like a password while it is valid. diff --git a/docs/channels/signal.md b/docs/channels/signal.md index b216af120ce..cfc050b6e75 100644 --- a/docs/channels/signal.md +++ b/docs/channels/signal.md @@ -195,6 +195,8 @@ Groups: - `channels.signal.groupPolicy = open | allowlist | disabled`. - `channels.signal.groupAllowFrom` controls who can trigger in groups when `allowlist` is set. 
+- `channels.signal.groups["<groupId>" | "*"]` can override group behavior with `requireMention`, `tools`, and `toolsBySender`. +- Use `channels.signal.accounts.<accountId>.groups` for per-account overrides in multi-account setups. - Runtime note: if `channels.signal` is completely missing, runtime falls back to `groupPolicy="allowlist"` for group checks (even if `channels.defaults.groupPolicy` is set). ## How it works (behavior) @@ -312,6 +314,8 @@ Provider options: - `channels.signal.allowFrom`: DM allowlist (E.164 or `uuid:<uuid>`). `open` requires `"*"`. Signal has no usernames; use phone/UUID ids. - `channels.signal.groupPolicy`: `open | allowlist | disabled` (default: allowlist). - `channels.signal.groupAllowFrom`: group sender allowlist. +- `channels.signal.groups`: per-group overrides keyed by Signal group id (or `"*"`). Supported fields: `requireMention`, `tools`, `toolsBySender`. +- `channels.signal.accounts.<accountId>.groups`: per-account version of `channels.signal.groups` for multi-account setups. - `channels.signal.historyLimit`: max group messages to include as context (0 disables). - `channels.signal.dmHistoryLimit`: DM history limit in user turns. Per-user overrides: `channels.signal.dms["<phoneNumber>"].historyLimit`. - `channels.signal.textChunkLimit`: outbound chunk size (chars). diff --git a/docs/channels/slack.md b/docs/channels/slack.md index c099120c699..aa9127ea630 100644 --- a/docs/channels/slack.md +++ b/docs/channels/slack.md @@ -169,15 +169,15 @@ For actions/directory reads, user token can be preferred when configured. For wr - `allowlist` - `disabled` Channel allowlist lives under `channels.slack.channels` and should use stable channel IDs. Runtime note: if `channels.slack` is completely missing (env-only setup), runtime falls back to `groupPolicy="allowlist"` and logs a warning (even if `channels.defaults.groupPolicy` is set).
Name/ID resolution: - channel allowlist entries and DM allowlist entries are resolved at startup when token access allows - - unresolved entries are kept as configured - - inbound authorization matching is ID-first by default; direct username/slug matching requires `channels.slack.dangerouslyAllowNameMatching: true` + - unresolved channel-name entries are kept as configured but ignored for routing by default + - inbound authorization and channel routing are ID-first by default; direct username/slug matching requires `channels.slack.dangerouslyAllowNameMatching: true` @@ -190,7 +190,7 @@ For actions/directory reads, user token can be preferred when configured. For wr - mention regex patterns (`agents.list[].groupChat.mentionPatterns`, fallback `messages.groupChat.mentionPatterns`) - implicit reply-to-bot thread behavior - Per-channel controls (`channels.slack.channels.`): + Per-channel controls (`channels.slack.channels.`; names only via startup resolution or `dangerouslyAllowNameMatching`): - `requireMention` - `users` (allowlist) @@ -218,6 +218,55 @@ For actions/directory reads, user token can be preferred when configured. For wr - if encoded option values exceed Slack limits, the flow falls back to buttons - For long option payloads, Slash command argument menus use a confirm dialog before dispatching a selected value. +## Interactive replies + +Slack can render agent-authored interactive reply controls, but this feature is disabled by default. 
+ +Enable it globally: + +```json5 +{ + channels: { + slack: { + capabilities: { + interactiveReplies: true, + }, + }, + }, +} +``` + +Or enable it for one Slack account only: + +```json5 +{ + channels: { + slack: { + accounts: { + ops: { + capabilities: { + interactiveReplies: true, + }, + }, + }, + }, + }, +} +``` + +When enabled, agents can emit Slack-only reply directives: + +- `[[slack_buttons: Approve:approve, Reject:reject]]` +- `[[slack_select: Choose a target | Canary:canary, Production:production]]` + +These directives compile into Slack Block Kit and route clicks or selections back through the existing Slack interaction event path. + +Notes: + +- This is Slack-specific UI. Other channels do not translate Slack Block Kit directives into their own button systems. +- The interactive callback values are OpenClaw-generated opaque tokens, not raw agent-authored values. +- If generated interactive blocks would exceed Slack Block Kit limits, OpenClaw falls back to the original text reply instead of sending an invalid blocks payload. + Default slash command settings: - `enabled: false` diff --git a/docs/channels/telegram.md b/docs/channels/telegram.md index f49ea5fe3f7..a0c679988d3 100644 --- a/docs/channels/telegram.md +++ b/docs/channels/telegram.md @@ -155,6 +155,7 @@ curl "https://api.telegram.org/bot/getUpdates" `groupAllowFrom` is used for group sender filtering. If not set, Telegram falls back to `allowFrom`. `groupAllowFrom` entries should be numeric Telegram user IDs (`telegram:` / `tg:` prefixes are normalized). + Do not put Telegram group or supergroup chat IDs in `groupAllowFrom`. Negative chat IDs belong under `channels.telegram.groups`. Non-numeric entries are ignored for sender authorization. Security boundary (`2026.2.25+`): group sender auth does **not** inherit DM pairing-store approvals. Pairing stays DM-only. For groups, set `groupAllowFrom` or per-group/per-topic `allowFrom`. 
@@ -177,6 +178,31 @@ curl "https://api.telegram.org/bot/getUpdates" } ``` + Example: allow only specific users inside one specific group: + +```json5 +{ + channels: { + telegram: { + groups: { + "-1001234567890": { + requireMention: true, + allowFrom: ["8734062810", "745123456"], + }, + }, + }, + }, +} +``` + + + Common mistake: `groupAllowFrom` is not a Telegram group allowlist. + + - Put negative Telegram group or supergroup chat IDs like `-1001234567890` under `channels.telegram.groups`. + - Put Telegram user IDs like `8734062810` under `groupAllowFrom` when you want to limit which people inside an allowed group can trigger the bot. + - Use `groupAllowFrom: ["*"]` only when you want any member of an allowed group to be able to talk to the bot. + + @@ -309,9 +335,10 @@ curl "https://api.telegram.org/bot/getUpdates" If native commands are disabled, built-ins are removed. Custom/plugin commands may still register if configured. - Common setup failure: + Common setup failures: - - `setMyCommands failed` usually means outbound DNS/HTTPS to `api.telegram.org` is blocked. + - `setMyCommands failed` with `BOT_COMMANDS_TOO_MUCH` means the Telegram menu still overflowed after trimming; reduce plugin/skill/custom commands or disable `channels.telegram.commands.native`. + - `setMyCommands failed` with network/fetch errors usually means outbound DNS/HTTPS to `api.telegram.org` is blocked. ### Device pairing commands (`device-pair` plugin) @@ -410,6 +437,7 @@ curl "https://api.telegram.org/bot/getUpdates" - `channels.telegram.actions.sticker` (default: disabled) Note: `edit` and `topic-create` are currently enabled by default and do not have separate `channels.telegram.actions.*` toggles. + Runtime sends use the active config/secrets snapshot (startup/reload), so action paths do not perform ad-hoc SecretRef re-resolution per send. 
Reaction removal semantics: [/tools/reactions](/tools/reactions) @@ -760,6 +788,34 @@ openclaw message poll --channel telegram --target -1001234567890:topic:42 \ - `channels.telegram.actions.poll=false` disables Telegram poll creation while leaving regular sends enabled + + + Telegram supports exec approvals in approver DMs and can optionally post approval prompts in the originating chat or topic. + + Config path: + + - `channels.telegram.execApprovals.enabled` + - `channels.telegram.execApprovals.approvers` + - `channels.telegram.execApprovals.target` (`dm` | `channel` | `both`, default: `dm`) + - `agentFilter`, `sessionFilter` + + Approvers must be numeric Telegram user IDs. When `enabled` is false or `approvers` is empty, Telegram does not act as an exec approval client. Approval requests fall back to other configured approval routes or the exec approval fallback policy. + + Delivery rules: + + - `target: "dm"` sends approval prompts only to configured approver DMs + - `target: "channel"` sends the prompt back to the originating Telegram chat/topic + - `target: "both"` sends to approver DMs and the originating chat/topic + + Only configured approvers can approve or deny. Non-approvers cannot use `/approve` and cannot use Telegram approval buttons. + + Channel delivery shows the command text in the chat, so only enable `channel` or `both` in trusted groups/topics. When the prompt lands in a forum topic, OpenClaw preserves the topic for both the approval prompt and the post-approval follow-up. + + Inline approval buttons also depend on `channels.telegram.capabilities.inlineButtons` allowing the target surface (`dm`, `group`, or `all`). 
+ + Related docs: [Exec approvals](/tools/exec-approvals) + + ## Troubleshooting @@ -788,7 +844,8 @@ openclaw message poll --channel telegram --target -1001234567890:topic:42 \ - authorize your sender identity (pairing and/or numeric `allowFrom`) - command authorization still applies even when group policy is `open` - - `setMyCommands failed` usually indicates DNS/HTTPS reachability issues to `api.telegram.org` + - `setMyCommands failed` with `BOT_COMMANDS_TOO_MUCH` means the native menu has too many entries; reduce plugin/skill/custom commands or disable native menus + - `setMyCommands failed` with network/fetch errors usually indicates DNS/HTTPS reachability issues to `api.telegram.org` @@ -837,7 +894,7 @@ Primary reference: - `channels.telegram.enabled`: enable/disable channel startup. - `channels.telegram.botToken`: bot token (BotFather). -- `channels.telegram.tokenFile`: read token from file path. +- `channels.telegram.tokenFile`: read token from a regular file path. Symlinks are rejected. - `channels.telegram.dmPolicy`: `pairing | allowlist | open | disabled` (default: pairing). - `channels.telegram.allowFrom`: DM allowlist (numeric Telegram user IDs). `allowlist` requires at least one sender ID. `open` requires `"*"`. `openclaw doctor --fix` can resolve legacy `@username` entries to IDs and can recover allowlist entries from pairing-store files in allowlist migration flows. - `channels.telegram.actions.poll`: enable or disable Telegram poll creation (default: enabled; still requires `sendMessage`). @@ -859,10 +916,16 @@ Primary reference: - `channels.telegram.groups..enabled`: disable the group when `false`. - `channels.telegram.groups..topics..*`: per-topic overrides (group fields + topic-only `agentId`). - `channels.telegram.groups..topics..agentId`: route this topic to a specific agent (overrides group-level and binding routing). - - `channels.telegram.groups..topics..groupPolicy`: per-topic override for groupPolicy (`open | allowlist | disabled`). 
- - `channels.telegram.groups..topics..requireMention`: per-topic mention gating override. - - top-level `bindings[]` with `type: "acp"` and canonical topic id `chatId:topic:topicId` in `match.peer.id`: persistent ACP topic binding fields (see [ACP Agents](/tools/acp-agents#channel-specific-settings)). - - `channels.telegram.direct..topics..agentId`: route DM topics to a specific agent (same behavior as forum topics). +- `channels.telegram.groups..topics..groupPolicy`: per-topic override for groupPolicy (`open | allowlist | disabled`). +- `channels.telegram.groups..topics..requireMention`: per-topic mention gating override. +- top-level `bindings[]` with `type: "acp"` and canonical topic id `chatId:topic:topicId` in `match.peer.id`: persistent ACP topic binding fields (see [ACP Agents](/tools/acp-agents#channel-specific-settings)). +- `channels.telegram.direct..topics..agentId`: route DM topics to a specific agent (same behavior as forum topics). +- `channels.telegram.execApprovals.enabled`: enable Telegram as a chat-based exec approval client for this account. +- `channels.telegram.execApprovals.approvers`: Telegram user IDs allowed to approve or deny exec requests. Required when exec approvals are enabled. +- `channels.telegram.execApprovals.target`: `dm | channel | both` (default: `dm`). `channel` and `both` preserve the originating Telegram topic when present. +- `channels.telegram.execApprovals.agentFilter`: optional agent ID filter for forwarded approval prompts. +- `channels.telegram.execApprovals.sessionFilter`: optional session key filter (substring or regex) for forwarded approval prompts. +- `channels.telegram.accounts..execApprovals`: per-account override for Telegram exec approval routing and approver authorization. - `channels.telegram.capabilities.inlineButtons`: `off | dm | group | all | allowlist` (default: allowlist). - `channels.telegram.accounts..capabilities.inlineButtons`: per-account override. 
- `channels.telegram.commands.nativeSkills`: enable/disable Telegram native skills commands. @@ -892,8 +955,9 @@ Primary reference: Telegram-specific high-signal fields: -- startup/auth: `enabled`, `botToken`, `tokenFile`, `accounts.*` +- startup/auth: `enabled`, `botToken`, `tokenFile`, `accounts.*` (`tokenFile` must point to a regular file; symlinks are rejected) - access control: `dmPolicy`, `allowFrom`, `groupPolicy`, `groupAllowFrom`, `groups`, `groups.*.topics.*`, top-level `bindings[]` (`type: "acp"`) +- exec approvals: `execApprovals`, `accounts.*.execApprovals` - command/menu: `commands.native`, `commands.nativeSkills`, `customCommands` - threading/replies: `replyToMode` - streaming: `streaming` (preview), `blockStreaming` diff --git a/docs/channels/troubleshooting.md b/docs/channels/troubleshooting.md index 2848947c479..a7850801948 100644 --- a/docs/channels/troubleshooting.md +++ b/docs/channels/troubleshooting.md @@ -44,12 +44,13 @@ Full troubleshooting: [/channels/whatsapp#troubleshooting-quick](/channels/whats ### Telegram failure signatures -| Symptom | Fastest check | Fix | -| --------------------------------- | ----------------------------------------------- | --------------------------------------------------------------------------- | -| `/start` but no usable reply flow | `openclaw pairing list telegram` | Approve pairing or change DM policy. | -| Bot online but group stays silent | Verify mention requirement and bot privacy mode | Disable privacy mode for group visibility or mention bot. | -| Send failures with network errors | Inspect logs for Telegram API call failures | Fix DNS/IPv6/proxy routing to `api.telegram.org`. | -| Upgraded and allowlist blocks you | `openclaw security audit` and config allowlists | Run `openclaw doctor --fix` or replace `@username` with numeric sender IDs. 
| +| Symptom | Fastest check | Fix | +| ----------------------------------- | ----------------------------------------------- | --------------------------------------------------------------------------- | +| `/start` but no usable reply flow | `openclaw pairing list telegram` | Approve pairing or change DM policy. | +| Bot online but group stays silent | Verify mention requirement and bot privacy mode | Disable privacy mode for group visibility or mention bot. | +| Send failures with network errors | Inspect logs for Telegram API call failures | Fix DNS/IPv6/proxy routing to `api.telegram.org`. | +| `setMyCommands` rejected at startup | Inspect logs for `BOT_COMMANDS_TOO_MUCH` | Reduce plugin/skill/custom Telegram commands or disable native menus. | +| Upgraded and allowlist blocks you | `openclaw security audit` and config allowlists | Run `openclaw doctor --fix` or replace `@username` with numeric sender IDs. | Full troubleshooting: [/channels/telegram#troubleshooting](/channels/telegram#troubleshooting) diff --git a/docs/channels/zalo.md b/docs/channels/zalo.md index 8e5d8ab0382..77b288b0ab7 100644 --- a/docs/channels/zalo.md +++ b/docs/channels/zalo.md @@ -179,7 +179,7 @@ Provider options: - `channels.zalo.enabled`: enable/disable channel startup. - `channels.zalo.botToken`: bot token from Zalo Bot Platform. -- `channels.zalo.tokenFile`: read token from file path. +- `channels.zalo.tokenFile`: read token from a regular file path. Symlinks are rejected. - `channels.zalo.dmPolicy`: `pairing | allowlist | open | disabled` (default: pairing). - `channels.zalo.allowFrom`: DM allowlist (user IDs). `open` requires `"*"`. The wizard will ask for numeric IDs. - `channels.zalo.groupPolicy`: `open | allowlist | disabled` (default: allowlist). @@ -193,7 +193,7 @@ Provider options: Multi-account options: - `channels.zalo.accounts..botToken`: per-account token. -- `channels.zalo.accounts..tokenFile`: per-account token file. 
+- `channels.zalo.accounts.<accountId>.tokenFile`: per-account regular token file. Symlinks are rejected.
+- `channels.zalo.accounts.<accountId>.name`: display name.
+- `channels.zalo.accounts.<accountId>.enabled`: enable/disable account.
+- `channels.zalo.accounts.<accountId>.dmPolicy`: per-account DM policy.
diff --git a/docs/channels/zalouser.md b/docs/channels/zalouser.md
index 9b62244e234..58bd2a43923 100644
--- a/docs/channels/zalouser.md
+++ b/docs/channels/zalouser.md
@@ -86,11 +86,13 @@ Approve via:
 - Default: `channels.zalouser.groupPolicy = "open"` (groups allowed). Use `channels.defaults.groupPolicy` to override the default when unset.
 - Restrict to an allowlist with:
   - `channels.zalouser.groupPolicy = "allowlist"`
-  - `channels.zalouser.groups` (keys are group IDs or names; controls which groups are allowed)
+  - `channels.zalouser.groups` (keys should be stable group IDs; names are resolved to IDs on startup when possible)
   - `channels.zalouser.groupAllowFrom` (controls which senders in allowed groups can trigger the bot)
 - Block all groups: `channels.zalouser.groupPolicy = "disabled"`.
 - The configure wizard can prompt for group allowlists.
-- On startup, OpenClaw resolves group/user names in allowlists to IDs and logs the mapping; unresolved entries are kept as typed.
+- On startup, OpenClaw resolves group/user names in allowlists to IDs and logs the mapping.
+- Group allowlist matching is ID-only by default. Unresolved names are ignored for auth unless `channels.zalouser.dangerouslyAllowNameMatching: true` is enabled.
+- `channels.zalouser.dangerouslyAllowNameMatching: true` is a break-glass compatibility mode that re-enables mutable group-name matching.
 - If `groupAllowFrom` is unset, runtime falls back to `allowFrom` for group sender checks.
 - Sender checks apply to both normal group messages and control commands (for example `/new`, `/reset`).
diff --git a/docs/ci.md b/docs/ci.md index 16a7e670964..e8710b87cb1 100644 --- a/docs/ci.md +++ b/docs/ci.md @@ -9,32 +9,32 @@ read_when: # CI Pipeline -The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only docs or native code changed. +The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only unrelated areas changed. ## Job Overview -| Job | Purpose | When it runs | -| ----------------- | ------------------------------------------------------- | ------------------------------------------------- | -| `docs-scope` | Detect docs-only changes | Always | -| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-docs PRs | -| `check` | TypeScript types, lint, format | Push to `main`, or PRs with Node-relevant changes | -| `check-docs` | Markdown lint + broken link check | Docs changed | -| `code-analysis` | LOC threshold check (1000 lines) | PRs only | -| `secrets` | Detect leaked secrets | Always | -| `build-artifacts` | Build dist once, share with other jobs | Non-docs, node changes | -| `release-check` | Validate npm pack contents | After build | -| `checks` | Node/Bun tests + protocol check | Non-docs, node changes | -| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes | -| `macos` | Swift lint/build/test + TS tests | PRs with macos changes | -| `android` | Gradle build + tests | Non-docs, android changes | +| Job | Purpose | When it runs | +| ----------------- | ------------------------------------------------------- | ---------------------------------- | +| `docs-scope` | Detect docs-only changes | Always | +| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-doc changes | +| `check` | TypeScript types, lint, format | Non-docs, node changes | +| `check-docs` | Markdown lint + broken link check | Docs changed | +| `secrets` | Detect leaked secrets | Always | +| 
`build-artifacts` | Build dist once, share with `release-check` | Pushes to `main`, node changes | +| `release-check` | Validate npm pack contents | Pushes to `main` after build | +| `checks` | Node tests + protocol check on PRs; Bun compat on push | Non-docs, node changes | +| `compat-node22` | Minimum supported Node runtime compatibility | Pushes to `main`, node changes | +| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes | +| `macos` | Swift lint/build/test + TS tests | PRs with macos changes | +| `android` | Gradle build + tests | Non-docs, android changes | ## Fail-Fast Order Jobs are ordered so cheap checks fail before expensive ones run: -1. `docs-scope` + `code-analysis` + `check` (parallel, ~1-2 min) -2. `build-artifacts` (blocked on above) -3. `checks`, `checks-windows`, `macos`, `android` (blocked on build) +1. `docs-scope` + `changed-scope` + `check` + `secrets` (parallel, cheap gates first) +2. PRs: `checks` (Linux Node test split into 2 shards), `checks-windows`, `macos`, `android` +3. Pushes to `main`: `build-artifacts` + `release-check` + Bun compat + `compat-node22` Scope logic lives in `scripts/ci-changed-scope.mjs` and is covered by unit tests in `src/scripts/ci-changed-scope.test.ts`. diff --git a/docs/cli/acp.md b/docs/cli/acp.md index 7650390ed55..9e239fc8bdf 100644 --- a/docs/cli/acp.md +++ b/docs/cli/acp.md @@ -13,6 +13,49 @@ Run the [Agent Client Protocol (ACP)](https://agentclientprotocol.com/) bridge t This command speaks ACP over stdio for IDEs and forwards prompts to the Gateway over WebSocket. It keeps ACP sessions mapped to Gateway session keys. +`openclaw acp` is a Gateway-backed ACP bridge, not a full ACP-native editor +runtime. It focuses on session routing, prompt delivery, and basic streaming +updates. 
+ +## Compatibility Matrix + +| ACP area | Status | Notes | +| --------------------------------------------------------------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `initialize`, `newSession`, `prompt`, `cancel` | Implemented | Core bridge flow over stdio to Gateway chat/send + abort. | +| `listSessions`, slash commands | Implemented | Session list works against Gateway session state; commands are advertised via `available_commands_update`. | +| `loadSession` | Partial | Rebinds the ACP session to a Gateway session key and replays stored user/assistant text history. Tool/system history is not reconstructed yet. | +| Prompt content (`text`, embedded `resource`, images) | Partial | Text/resources are flattened into chat input; images become Gateway attachments. | +| Session modes | Partial | `session/set_mode` is supported and the bridge exposes initial Gateway-backed session controls for thought level, tool verbosity, reasoning, usage detail, and elevated actions. Broader ACP-native mode/config surfaces are still out of scope. | +| Session info and usage updates | Partial | The bridge emits `session_info_update` and best-effort `usage_update` notifications from cached Gateway session snapshots. Usage is approximate and only sent when Gateway token totals are marked fresh. | +| Tool streaming | Partial | `tool_call` / `tool_call_update` events include raw I/O, text content, and best-effort file locations when Gateway tool args/results expose them. Embedded terminals and richer diff-native output are still not exposed. | +| Per-session MCP servers (`mcpServers`) | Unsupported | Bridge mode rejects per-session MCP server requests. Configure MCP on the OpenClaw gateway or agent instead. 
| +| Client filesystem methods (`fs/read_text_file`, `fs/write_text_file`) | Unsupported | The bridge does not call ACP client filesystem methods. | +| Client terminal methods (`terminal/*`) | Unsupported | The bridge does not create ACP client terminals or stream terminal ids through tool calls. | +| Session plans / thought streaming | Unsupported | The bridge currently emits output text and tool status, not ACP plan or thought updates. | + +## Known Limitations + +- `loadSession` replays stored user and assistant text history, but it does not + reconstruct historic tool calls, system notices, or richer ACP-native event + types. +- If multiple ACP clients share the same Gateway session key, event and cancel + routing are best-effort rather than strictly isolated per client. Prefer the + default isolated `acp:` sessions when you need clean editor-local + turns. +- Gateway stop states are translated into ACP stop reasons, but that mapping is + less expressive than a fully ACP-native runtime. +- Initial session controls currently surface a focused subset of Gateway knobs: + thought level, tool verbosity, reasoning, usage detail, and elevated + actions. Model selection and exec-host controls are not yet exposed as ACP + config options. +- `session_info_update` and `usage_update` are derived from Gateway session + snapshots, not live ACP-native runtime accounting. Usage is approximate, + carries no cost data, and is only emitted when the Gateway marks total token + data as fresh. +- Tool follow-along data is best-effort. The bridge can surface file paths that + appear in known tool args/results, but it does not yet emit ACP terminals or + structured file diffs. + ## Usage ```bash @@ -96,6 +139,10 @@ Each ACP session maps to a single Gateway session key. One agent can have many sessions; ACP defaults to an isolated `acp:` session unless you override the key or label. +Per-session `mcpServers` are not supported in bridge mode. 
If an ACP client +sends them during `newSession` or `loadSession`, the bridge returns a clear +error instead of silently ignoring them. + ## Use from `acpx` (Codex, Claude, other ACP clients) If you want a coding agent such as Codex or Claude Code to talk to your @@ -226,7 +273,7 @@ Security note: - `--token` and `--password` can be visible in local process listings on some systems. - Prefer `--token-file`/`--password-file` or environment variables (`OPENCLAW_GATEWAY_TOKEN`, `OPENCLAW_GATEWAY_PASSWORD`). - Gateway auth resolution follows the shared contract used by other Gateway clients: - - local mode: env (`OPENCLAW_GATEWAY_*`) -> `gateway.auth.*` -> `gateway.remote.*` fallback when `gateway.auth.*` is unset + - local mode: env (`OPENCLAW_GATEWAY_*`) -> `gateway.auth.*` -> `gateway.remote.*` fallback only when `gateway.auth.*` is unset (configured-but-unresolved local SecretRefs fail closed) - remote mode: `gateway.remote.*` with env/config fallback per remote precedence rules - `--url` is override-safe and does not reuse implicit config/env credentials; pass explicit `--token`/`--password` (or file variants) - ACP runtime backend child processes receive `OPENCLAW_SHELL=acp`, which can be used for context-specific shell/profile rules. diff --git a/docs/cli/agent.md b/docs/cli/agent.md index 93c8d04b41a..430bdf50743 100644 --- a/docs/cli/agent.md +++ b/docs/cli/agent.md @@ -25,4 +25,5 @@ openclaw agent --agent ops --message "Generate report" --deliver --reply-channel ## Notes -- When this command triggers `models.json` regeneration, SecretRef-managed provider credentials are persisted as non-secret markers (for example env var names or `secretref-managed`), not resolved secret plaintext. +- When this command triggers `models.json` regeneration, SecretRef-managed provider credentials are persisted as non-secret markers (for example env var names, `secretref-env:ENV_VAR_NAME`, or `secretref-managed`), not resolved secret plaintext. 
+- Marker writes are source-authoritative: OpenClaw persists markers from the active source config snapshot, not from resolved runtime secret values.
diff --git a/docs/cli/browser.md b/docs/cli/browser.md
index 8e0ddad92ef..f9ddc151717 100644
--- a/docs/cli/browser.md
+++ b/docs/cli/browser.md
@@ -27,7 +27,7 @@ Related:
 ## Quick start (local)
 ```bash
-openclaw browser --browser-profile chrome tabs
+openclaw browser profiles
 openclaw browser --browser-profile openclaw start
 openclaw browser --browser-profile openclaw open https://example.com
 openclaw browser --browser-profile openclaw snapshot
@@ -38,7 +38,8 @@ openclaw browser --browser-profile openclaw snapshot
 Profiles are named browser routing configs. In practice:
 - `openclaw`: launches/attaches to a dedicated OpenClaw-managed Chrome instance (isolated user data dir).
-- `chrome`: controls your existing Chrome tab(s) via the Chrome extension relay.
+- `user`: controls your existing signed-in Chrome session via Chrome DevTools MCP.
+- `chrome-relay`: controls your existing Chrome tab(s) via the Chrome extension relay.
 ```bash
 openclaw browser profiles
diff --git a/docs/cli/cron.md b/docs/cli/cron.md
index 28e61e20c99..6ee25859749 100644
--- a/docs/cli/cron.md
+++ b/docs/cli/cron.md
@@ -30,6 +30,12 @@ Note: retention/pruning is controlled in config:
 - `cron.sessionRetention` (default `24h`) prunes completed isolated run sessions.
 - `cron.runLog.maxBytes` + `cron.runLog.keepLines` prune `~/.openclaw/cron/runs/<jobId>.jsonl`.
+Upgrade note: if you have older cron jobs from before the current delivery/store format, run
+`openclaw doctor --fix`. Doctor now normalizes legacy cron fields (`jobId`, `schedule.cron`,
+top-level delivery fields, payload `provider` delivery aliases) and migrates simple
+`notify: true` webhook fallback jobs to explicit webhook delivery when `cron.webhook` is
+configured.
+ ## Common edits Update delivery settings without changing the message: diff --git a/docs/cli/devices.md b/docs/cli/devices.md index be01e3cc0d5..f73f30dfa1d 100644 --- a/docs/cli/devices.md +++ b/docs/cli/devices.md @@ -92,3 +92,40 @@ Pass `--token` or `--password` explicitly. Missing explicit credentials is an er - These commands require `operator.pairing` (or `operator.admin`) scope. - `devices clear` is intentionally gated by `--yes`. - If pairing scope is unavailable on local loopback (and no explicit `--url` is passed), list/approve can use a local pairing fallback. + +## Token drift recovery checklist + +Use this when Control UI or other clients keep failing with `AUTH_TOKEN_MISMATCH` or `AUTH_DEVICE_TOKEN_MISMATCH`. + +1. Confirm current gateway token source: + +```bash +openclaw config get gateway.auth.token +``` + +2. List paired devices and identify the affected device id: + +```bash +openclaw devices list +``` + +3. Rotate operator token for the affected device: + +```bash +openclaw devices rotate --device --role operator +``` + +4. If rotation is not enough, remove stale pairing and approve again: + +```bash +openclaw devices remove +openclaw devices list +openclaw devices approve +``` + +5. Retry client connection with the current shared token/password. + +Related: + +- [Dashboard auth troubleshooting](/web/dashboard#if-you-see-unauthorized-1008) +- [Gateway troubleshooting](/gateway/troubleshooting#dashboard-control-ui-connectivity) diff --git a/docs/cli/doctor.md b/docs/cli/doctor.md index d53d86452f3..90e5fa7d7a2 100644 --- a/docs/cli/doctor.md +++ b/docs/cli/doctor.md @@ -28,6 +28,7 @@ Notes: - Interactive prompts (like keychain/OAuth fixes) only run when stdin is a TTY and `--non-interactive` is **not** set. Headless runs (cron, Telegram, no terminal) will skip prompts. - `--fix` (alias for `--repair`) writes a backup to `~/.openclaw/openclaw.json.bak` and drops unknown config keys, listing each removal. 
- State integrity checks now detect orphan transcript files in the sessions directory and can archive them as `.deleted.` to reclaim space safely. +- Doctor also scans `~/.openclaw/cron/jobs.json` (or `cron.store`) for legacy cron job shapes and can rewrite them in place before the scheduler has to auto-normalize them at runtime. - Doctor includes a memory-search readiness check and can recommend `openclaw configure --section model` when embedding credentials are missing. - If sandbox mode is enabled but Docker is unavailable, doctor reports a high-signal warning with remediation (`install Docker` or `openclaw config set agents.defaults.sandbox.mode off`). diff --git a/docs/cli/gateway.md b/docs/cli/gateway.md index 95c20e3aa7c..96367774948 100644 --- a/docs/cli/gateway.md +++ b/docs/cli/gateway.md @@ -126,6 +126,23 @@ openclaw gateway probe openclaw gateway probe --json ``` +Interpretation: + +- `Reachable: yes` means at least one target accepted a WebSocket connect. +- `RPC: ok` means detail RPC calls (`health`/`status`/`system-presence`/`config.get`) also succeeded. +- `RPC: limited - missing scope: operator.read` means connect succeeded but detail RPC is scope-limited. This is reported as **degraded** reachability, not full failure. +- Exit code is non-zero only when no probed target is reachable. + +JSON notes (`--json`): + +- Top level: + - `ok`: at least one target is reachable. + - `degraded`: at least one target had scope-limited detail RPC. +- Per target (`targets[].connect`): + - `ok`: reachability after connect + degraded classification. + - `rpcOk`: full detail RPC success. + - `scopeLimited`: detail RPC failed due to missing operator scope. + #### Remote over SSH (Mac app parity) The macOS app “Remote over SSH” mode uses a local port-forward so the remote gateway (which may be bound to loopback only) becomes reachable at `ws://127.0.0.1:`. 
diff --git a/docs/cli/index.md b/docs/cli/index.md index fdee80038c0..2796e7927d2 100644 --- a/docs/cli/index.md +++ b/docs/cli/index.md @@ -337,7 +337,7 @@ Options: - `--non-interactive` - `--mode ` - `--flow ` (manual is an alias for advanced) -- `--auth-choice ` +- `--auth-choice ` - `--token-provider ` (non-interactive; used with `--auth-choice token`) - `--token ` (non-interactive; used with `--auth-choice token`) - `--token-profile-id ` (non-interactive; default: `:manual`) @@ -354,8 +354,9 @@ Options: - `--zai-api-key ` - `--minimax-api-key ` - `--opencode-zen-api-key ` -- `--custom-base-url ` (non-interactive; used with `--auth-choice custom-api-key`) -- `--custom-model-id ` (non-interactive; used with `--auth-choice custom-api-key`) +- `--opencode-go-api-key ` +- `--custom-base-url ` (non-interactive; used with `--auth-choice custom-api-key` or `--auth-choice ollama`) +- `--custom-model-id ` (non-interactive; used with `--auth-choice custom-api-key` or `--auth-choice ollama`) - `--custom-api-key ` (non-interactive; optional; used with `--auth-choice custom-api-key`; falls back to `CUSTOM_API_KEY` when omitted) - `--custom-provider-id ` (non-interactive; optional custom provider id) - `--custom-compatibility ` (non-interactive; optional; default `openai`) @@ -1018,7 +1019,7 @@ Subcommands: Auth notes: -- `node` resolves gateway auth from env/config (no `--token`/`--password` flags): `OPENCLAW_GATEWAY_TOKEN` / `OPENCLAW_GATEWAY_PASSWORD`, then `gateway.auth.*`, with remote-mode support via `gateway.remote.*`. +- `node` resolves gateway auth from env/config (no `--token`/`--password` flags): `OPENCLAW_GATEWAY_TOKEN` / `OPENCLAW_GATEWAY_PASSWORD`, then `gateway.auth.*`. In local mode, node host intentionally ignores `gateway.remote.*`; in `gateway.mode=remote`, `gateway.remote.*` participates per remote precedence rules. - Legacy `CLAWDBOT_GATEWAY_*` env vars are intentionally ignored for node-host auth resolution. 
## Nodes diff --git a/docs/cli/node.md b/docs/cli/node.md index 95f0936065e..baf8c3cd45e 100644 --- a/docs/cli/node.md +++ b/docs/cli/node.md @@ -64,7 +64,8 @@ Options: - `OPENCLAW_GATEWAY_TOKEN` / `OPENCLAW_GATEWAY_PASSWORD` are checked first. - Then local config fallback: `gateway.auth.token` / `gateway.auth.password`. -- In local mode, `gateway.remote.token` / `gateway.remote.password` are also eligible as fallback when `gateway.auth.*` is unset. +- In local mode, node host intentionally does not inherit `gateway.remote.token` / `gateway.remote.password`. +- If `gateway.auth.token` / `gateway.auth.password` is explicitly configured via SecretRef and unresolved, node auth resolution fails closed (no remote fallback masking). - In `gateway.mode=remote`, remote client fields (`gateway.remote.token` / `gateway.remote.password`) are also eligible per remote precedence rules. - Legacy `CLAWDBOT_GATEWAY_*` env vars are ignored for node host auth resolution. diff --git a/docs/cli/onboard.md b/docs/cli/onboard.md index 36629a3bb8d..4b30e0d52b3 100644 --- a/docs/cli/onboard.md +++ b/docs/cli/onboard.md @@ -43,6 +43,18 @@ openclaw onboard --non-interactive \ `--custom-api-key` is optional in non-interactive mode. If omitted, onboarding checks `CUSTOM_API_KEY`. +Non-interactive Ollama: + +```bash +openclaw onboard --non-interactive \ + --auth-choice ollama \ + --custom-base-url "http://ollama-host:11434" \ + --custom-model-id "qwen3.5:27b" \ + --accept-risk +``` + +`--custom-base-url` defaults to `http://127.0.0.1:11434`. `--custom-model-id` is optional; if omitted, onboarding uses Ollama's suggested defaults. Cloud model IDs such as `kimi-k2.5:cloud` also work here. + Store provider keys as refs instead of plaintext: ```bash @@ -83,6 +95,13 @@ openclaw onboard --non-interactive \ --accept-risk ``` +Non-interactive local gateway health: + +- Unless you pass `--skip-health`, onboarding waits for a reachable local gateway before it exits successfully. 
+- `--install-daemon` starts the managed gateway install path first. Without it, you must already have a local gateway running, for example `openclaw gateway run`. +- If you only want config/workspace/bootstrap writes in automation, use `--skip-health`. +- On native Windows, `--install-daemon` tries Scheduled Tasks first and falls back to a per-user Startup-folder login item if task creation is denied. + Interactive onboarding behavior with reference mode: - Choose **Use secret reference** when prompted. diff --git a/docs/cli/qr.md b/docs/cli/qr.md index 2fc070ca1bd..1575b16d029 100644 --- a/docs/cli/qr.md +++ b/docs/cli/qr.md @@ -17,7 +17,7 @@ openclaw qr openclaw qr --setup-code-only openclaw qr --json openclaw qr --remote -openclaw qr --url wss://gateway.example/ws --token '' +openclaw qr --url wss://gateway.example/ws ``` ## Options @@ -25,8 +25,8 @@ openclaw qr --url wss://gateway.example/ws --token '' - `--remote`: use `gateway.remote.url` plus remote token/password from config - `--url `: override gateway URL used in payload - `--public-url `: override public URL used in payload -- `--token `: override gateway token for payload -- `--password `: override gateway password for payload +- `--token `: override which gateway token the bootstrap flow authenticates against +- `--password `: override which gateway password the bootstrap flow authenticates against - `--setup-code-only`: print only setup code - `--no-ascii`: skip ASCII QR rendering - `--json`: emit JSON (`setupCode`, `gatewayUrl`, `auth`, `urlSource`) @@ -34,6 +34,7 @@ openclaw qr --url wss://gateway.example/ws --token '' ## Notes - `--token` and `--password` are mutually exclusive. +- The setup code itself now carries an opaque short-lived `bootstrapToken`, not the shared gateway token/password. - With `--remote`, if effectively active remote credentials are configured as SecretRefs and you do not pass `--token` or `--password`, the command resolves them from the active gateway snapshot. 
If gateway is unavailable, the command fails fast. - Without `--remote`, local gateway auth SecretRefs are resolved when no CLI auth override is passed: - `gateway.auth.token` resolves when token auth can win (explicit `gateway.auth.mode="token"` or inferred mode where no password source wins). diff --git a/docs/cli/sessions.md b/docs/cli/sessions.md index 4ed5ace54ee..6ea2df094f0 100644 --- a/docs/cli/sessions.md +++ b/docs/cli/sessions.md @@ -24,6 +24,12 @@ Scope selection: - `--all-agents`: aggregate all configured agent stores - `--store `: explicit store path (cannot be combined with `--agent` or `--all-agents`) +`openclaw sessions --all-agents` reads configured agent stores. Gateway and ACP +session discovery are broader: they also include disk-only stores found under +the default `agents/` root or a templated `session.store` root. Those +discovered stores must resolve to regular `sessions.json` files inside the +agent root; symlinks and out-of-root paths are skipped. + JSON examples: `openclaw sessions --all-agents --json`: @@ -54,7 +60,7 @@ openclaw sessions cleanup --dry-run openclaw sessions cleanup --agent work --dry-run openclaw sessions cleanup --all-agents --dry-run openclaw sessions cleanup --enforce -openclaw sessions cleanup --enforce --active-key "agent:main:telegram:dm:123" +openclaw sessions cleanup --enforce --active-key "agent:main:telegram:direct:123" openclaw sessions cleanup --json ``` diff --git a/docs/concepts/memory.md b/docs/concepts/memory.md index b3940945249..8ed755b394c 100644 --- a/docs/concepts/memory.md +++ b/docs/concepts/memory.md @@ -284,9 +284,46 @@ Notes: - Paths can be absolute or workspace-relative. - Directories are scanned recursively for `.md` files. -- Only Markdown files are indexed. +- By default, only Markdown files are indexed. +- If `memorySearch.multimodal.enabled = true`, OpenClaw also indexes supported image/audio files under `extraPaths` only. 
Default memory roots (`MEMORY.md`, `memory.md`, `memory/**/*.md`) stay Markdown-only. - Symlinks are ignored (files or directories). +### Multimodal memory files (Gemini image + audio) + +OpenClaw can index image and audio files from `memorySearch.extraPaths` when using Gemini embedding 2: + +```json5 +agents: { + defaults: { + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + extraPaths: ["assets/reference", "voice-notes"], + multimodal: { + enabled: true, + modalities: ["image", "audio"], // or ["all"] + maxFileBytes: 10000000 + }, + remote: { + apiKey: "YOUR_GEMINI_API_KEY" + } + } + } +} +``` + +Notes: + +- Multimodal memory is currently supported only for `gemini-embedding-2-preview`. +- Multimodal indexing applies only to files discovered through `memorySearch.extraPaths`. +- Supported modalities in this phase: image and audio. +- `memorySearch.fallback` must stay `"none"` while multimodal memory is enabled. +- Matching image/audio file bytes are uploaded to the configured Gemini embedding endpoint during indexing. +- Supported image extensions: `.jpg`, `.jpeg`, `.png`, `.webp`, `.gif`, `.heic`, `.heif`. +- Supported audio extensions: `.mp3`, `.wav`, `.ogg`, `.opus`, `.m4a`, `.aac`, `.flac`. +- Search queries remain text, but Gemini can compare those text queries against indexed image/audio embeddings. +- `memory_get` still reads Markdown only; binary files are searchable but not returned as raw file contents. + ### Gemini embeddings (native) Set the provider to `gemini` to use the Gemini embeddings API directly: @@ -310,6 +347,29 @@ Notes: - `remote.baseUrl` is optional (defaults to the Gemini API base URL). - `remote.headers` lets you add extra headers if needed. - Default model: `gemini-embedding-001`. +- `gemini-embedding-2-preview` is also supported: 8192 token limit and configurable dimensions (768 / 1536 / 3072, default 3072). 
+ +#### Gemini Embedding 2 (preview) + +```json5 +agents: { + defaults: { + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + outputDimensionality: 3072, // optional: 768, 1536, or 3072 (default) + remote: { + apiKey: "YOUR_GEMINI_API_KEY" + } + } + } +} +``` + +> **⚠️ Re-index required:** Switching from `gemini-embedding-001` (768 dimensions) +> to `gemini-embedding-2-preview` (3072 dimensions) changes the vector size. The same is true if you +> change `outputDimensionality` between 768, 1536, and 3072. +> OpenClaw will automatically reindex when it detects a model or dimension change. If you want to use a **custom OpenAI-compatible endpoint** (OpenRouter, vLLM, or a proxy), you can use the `remote` configuration with the OpenAI provider: diff --git a/docs/concepts/model-providers.md b/docs/concepts/model-providers.md index 6dd4c2f9c03..a502240226e 100644 --- a/docs/concepts/model-providers.md +++ b/docs/concepts/model-providers.md @@ -47,6 +47,8 @@ OpenClaw ships with the pi‑ai catalog. These providers require **no** - Override per model via `agents.defaults.models["openai/"].params.transport` (`"sse"`, `"websocket"`, or `"auto"`) - OpenAI Responses WebSocket warm-up defaults to enabled via `params.openaiWsWarmup` (`true`/`false`) - OpenAI priority processing can be enabled via `agents.defaults.models["openai/"].params.serviceTier` +- OpenAI fast mode can be enabled per model via `agents.defaults.models["/"].params.fastMode` +- `openai/gpt-5.3-codex-spark` is intentionally suppressed in OpenClaw because the live OpenAI API rejects it; Spark is treated as Codex-only ```json5 { @@ -61,6 +63,7 @@ OpenClaw ships with the pi‑ai catalog. 
These providers require **no** - Optional rotation: `ANTHROPIC_API_KEYS`, `ANTHROPIC_API_KEY_1`, `ANTHROPIC_API_KEY_2`, plus `OPENCLAW_LIVE_ANTHROPIC_KEY` (single override) - Example model: `anthropic/claude-opus-4-6` - CLI: `openclaw onboard --auth-choice token` (paste setup-token) or `openclaw models auth paste-token --provider anthropic` +- Direct API-key models support the shared `/fast` toggle and `params.fastMode`; OpenClaw maps that to Anthropic `service_tier` (`auto` vs `standard_only`) - Policy note: setup-token support is technical compatibility; Anthropic has blocked some subscription usage outside Claude Code in the past. Verify current Anthropic terms and decide based on your risk tolerance. - Recommendation: Anthropic API key auth is the safer, recommended path over subscription setup-token auth. @@ -78,6 +81,8 @@ OpenClaw ships with the pi‑ai catalog. These providers require **no** - CLI: `openclaw onboard --auth-choice openai-codex` or `openclaw models auth login --provider openai-codex` - Default transport is `auto` (WebSocket-first, SSE fallback) - Override per model via `agents.defaults.models["openai-codex/"].params.transport` (`"sse"`, `"websocket"`, or `"auto"`) +- Shares the same `/fast` toggle and `params.fastMode` config as direct `openai/*` +- `openai-codex/gpt-5.3-codex-spark` remains available when the Codex OAuth catalog exposes it; entitlement-dependent - Policy note: OpenAI Codex OAuth is explicitly supported for external tools/workflows like OpenClaw. ```json5 @@ -86,12 +91,13 @@ OpenClaw ships with the pi‑ai catalog. 
These providers require **no** } ``` -### OpenCode Zen +### OpenCode -- Provider: `opencode` - Auth: `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`) -- Example model: `opencode/claude-opus-4-6` -- CLI: `openclaw onboard --auth-choice opencode-zen` +- Zen runtime provider: `opencode` +- Go runtime provider: `opencode-go` +- Example models: `opencode/claude-opus-4-6`, `opencode-go/kimi-k2.5` +- CLI: `openclaw onboard --auth-choice opencode-zen` or `openclaw onboard --auth-choice opencode-go` ```json5 { @@ -104,8 +110,8 @@ OpenClaw ships with the pi‑ai catalog. These providers require **no** - Provider: `google` - Auth: `GEMINI_API_KEY` - Optional rotation: `GEMINI_API_KEYS`, `GEMINI_API_KEY_1`, `GEMINI_API_KEY_2`, `GOOGLE_API_KEY` fallback, and `OPENCLAW_LIVE_GEMINI_KEY` (single override) -- Example models: `google/gemini-3.1-pro-preview`, `google/gemini-3-flash-preview`, `google/gemini-3.1-flash-lite-preview` -- Compatibility: legacy OpenClaw config using `google/gemini-3.1-flash-preview` is normalized to `google/gemini-3-flash-preview`, and bare `google/gemini-3.1-flash-lite` is normalized to `google/gemini-3.1-flash-lite-preview` +- Example models: `google/gemini-3.1-pro-preview`, `google/gemini-3-flash-preview` +- Compatibility: legacy OpenClaw config using `google/gemini-3.1-flash-preview` is normalized to `google/gemini-3-flash-preview` - CLI: `openclaw onboard --auth-choice gemini-api-key` ### Google Vertex, Antigravity, and Gemini CLI @@ -351,12 +357,12 @@ See [/providers/minimax](/providers/minimax) for setup details, model options, a ### Ollama -Ollama is a local LLM runtime that provides an OpenAI-compatible API: +Ollama ships as a bundled provider plugin and uses Ollama's native API: - Provider: `ollama` - Auth: None required (local server) - Example model: `ollama/llama3.3` -- Installation: [https://ollama.ai](https://ollama.ai) +- Installation: [https://ollama.com/download](https://ollama.com/download) ```bash # Install Ollama, then pull a model: @@ 
-371,11 +377,15 @@ ollama pull llama3.3 } ``` -Ollama is automatically detected when running locally at `http://127.0.0.1:11434/v1`. See [/providers/ollama](/providers/ollama) for model recommendations and custom configuration. +Ollama is detected locally at `http://127.0.0.1:11434` when you opt in with +`OLLAMA_API_KEY`, and the bundled provider plugin adds Ollama directly to +`openclaw onboard` and the model picker. See [/providers/ollama](/providers/ollama) +for onboarding, cloud/local mode, and custom configuration. ### vLLM -vLLM is a local (or self-hosted) OpenAI-compatible server: +vLLM ships as a bundled provider plugin for local/self-hosted OpenAI-compatible +servers: - Provider: `vllm` - Auth: Optional (depends on your server) @@ -399,6 +409,34 @@ Then set a model (replace with one of the IDs returned by `/v1/models`): See [/providers/vllm](/providers/vllm) for details. +### SGLang + +SGLang ships as a bundled provider plugin for fast self-hosted +OpenAI-compatible servers: + +- Provider: `sglang` +- Auth: Optional (depends on your server) +- Default base URL: `http://127.0.0.1:30000/v1` + +To opt in to auto-discovery locally (any value works if your server does not +enforce auth): + +```bash +export SGLANG_API_KEY="sglang-local" +``` + +Then set a model (replace with one of the IDs returned by `/v1/models`): + +```json5 +{ + agents: { + defaults: { model: { primary: "sglang/your-model-id" } }, + }, +} +``` + +See [/providers/sglang](/providers/sglang) for details. + ### Local proxies (LM Studio, vLLM, LiteLLM, etc.) Example (OpenAI‑compatible): diff --git a/docs/concepts/models.md b/docs/concepts/models.md index 2ad809d9599..6323feef04e 100644 --- a/docs/concepts/models.md +++ b/docs/concepts/models.md @@ -55,8 +55,8 @@ subscription** (OAuth) and **Anthropic** (API key or `claude setup-token`). Model refs are normalized to lowercase. Provider aliases like `z.ai/*` normalize to `zai/*`. 
-Provider configuration examples (including OpenCode Zen) live in -[/gateway/configuration](/gateway/configuration#opencode-zen-multi-model-proxy). +Provider configuration examples (including OpenCode) live in +[/gateway/configuration](/gateway/configuration#opencode). ## “Model is not allowed” (and why replies stop) @@ -207,7 +207,7 @@ mode, pass `--yes` to accept defaults. ## Models registry (`models.json`) Custom providers in `models.providers` are written into `models.json` under the -agent directory (default `~/.openclaw/agents//models.json`). This file +agent directory (default `~/.openclaw/agents//agent/models.json`). This file is merged by default unless `models.mode` is set to `replace`. Merge mode precedence for matching provider IDs: @@ -215,7 +215,9 @@ Merge mode precedence for matching provider IDs: - Non-empty `baseUrl` already present in the agent `models.json` wins. - Non-empty `apiKey` in the agent `models.json` wins only when that provider is not SecretRef-managed in current config/auth-profile context. - SecretRef-managed provider `apiKey` values are refreshed from source markers (`ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs) instead of persisting resolved secrets. +- SecretRef-managed provider header values are refreshed from source markers (`secretref-env:ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs). - Empty or missing agent `apiKey`/`baseUrl` fall back to config `models.providers`. - Other provider fields are refreshed from config and normalized catalog data. -This marker-based persistence applies whenever OpenClaw regenerates `models.json`, including command-driven paths like `openclaw agent`. +Marker persistence is source-authoritative: OpenClaw writes markers from the active source config snapshot (pre-resolution), not from resolved runtime secret values. +This applies whenever OpenClaw regenerates `models.json`, including command-driven paths like `openclaw agent`. 
diff --git a/docs/concepts/session.md b/docs/concepts/session.md index 6c9010d2c11..2f00325b730 100644 --- a/docs/concepts/session.md +++ b/docs/concepts/session.md @@ -191,16 +191,16 @@ the workspace is writable. See [Memory](/concepts/memory) and - Direct chats follow `session.dmScope` (default `main`). - `main`: `agent::` (continuity across devices/channels). - Multiple phone numbers and channels can map to the same agent main key; they act as transports into one conversation. - - `per-peer`: `agent::dm:`. - - `per-channel-peer`: `agent:::dm:`. - - `per-account-channel-peer`: `agent::::dm:` (accountId defaults to `default`). + - `per-peer`: `agent::direct:`. + - `per-channel-peer`: `agent:::direct:`. + - `per-account-channel-peer`: `agent::::direct:` (accountId defaults to `default`). - If `session.identityLinks` matches a provider-prefixed peer id (for example `telegram:123`), the canonical key replaces `` so the same person shares a session across channels. - Group chats isolate state: `agent:::group:` (rooms/channels use `agent:::channel:`). - Telegram forum topics append `:topic:` to the group id for isolation. - Legacy `group:` keys are still recognized for migration. - Inbound contexts may still use `group:`; the channel is inferred from `Provider` and normalized to the canonical `agent:::group:` form. - Other sources: - - Cron jobs: `cron:` + - Cron jobs: `cron:` (isolated) or custom `session:` (persistent) - Webhooks: `hook:` (unless explicitly set by the hook) - Node runs: `node-` @@ -281,7 +281,7 @@ Runtime override (owner only): - `openclaw status` — shows store path and recent sessions. - `openclaw sessions --json` — dumps every entry (filter with `--active `). - `openclaw gateway call sessions.list --params '{}'` — fetch sessions from the running gateway (use `--url`/`--token` for remote gateway access). 
-- Send `/status` as a standalone message in chat to see whether the agent is reachable, how much of the session context is used, current thinking/verbose toggles, and when your WhatsApp web creds were last refreshed (helps spot relink needs). +- Send `/status` as a standalone message in chat to see whether the agent is reachable, how much of the session context is used, current thinking/fast/verbose toggles, and when your WhatsApp web creds were last refreshed (helps spot relink needs). - Send `/context list` or `/context detail` to see what’s in the system prompt and injected workspace files (and the biggest context contributors). - Send `/stop` (or standalone abort phrases like `stop`, `stop action`, `stop run`, `stop openclaw`) to abort the current run, clear queued followups for that session, and stop any sub-agent runs spawned from it (the reply includes the stopped count). - Send `/compact` (optional instructions) as a standalone message to summarize older context and free up window space. See [/concepts/compaction](/concepts/compaction). diff --git a/docs/concepts/system-prompt.md b/docs/concepts/system-prompt.md index 1a5edfcc6e3..a1d1b482fb2 100644 --- a/docs/concepts/system-prompt.md +++ b/docs/concepts/system-prompt.md @@ -59,7 +59,7 @@ Bootstrap files are trimmed and appended under **Project Context** so the model - `USER.md` - `HEARTBEAT.md` - `BOOTSTRAP.md` (only on brand-new workspaces) -- `MEMORY.md` and/or `memory.md` (when present in the workspace; either or both may be injected) +- `MEMORY.md` when present, otherwise `memory.md` as a lowercase fallback All of these files are **injected into the context window** on every turn, which means they consume tokens. 
Keep them concise — especially `MEMORY.md`, which can diff --git a/docs/docs.json b/docs/docs.json index 8592618cd7d..402d56aa380 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -103,6 +103,10 @@ "source": "/opencode", "destination": "/providers/opencode" }, + { + "source": "/opencode-go", + "destination": "/providers/opencode-go" + }, { "source": "/qianfan", "destination": "/providers/qianfan" @@ -872,6 +876,7 @@ "group": "Hosting and deployment", "pages": [ "vps", + "install/kubernetes", "install/fly", "install/hetzner", "install/gcp", @@ -1013,8 +1018,7 @@ "tools/browser", "tools/browser-login", "tools/chrome-extension", - "tools/browser-linux-troubleshooting", - "tools/browser-wsl2-windows-remote-cdp-troubleshooting" + "tools/browser-linux-troubleshooting" ] }, { @@ -1112,6 +1116,7 @@ "providers/nvidia", "providers/ollama", "providers/openai", + "providers/opencode-go", "providers/opencode", "providers/openrouter", "providers/qianfan", diff --git a/docs/gateway/configuration-reference.md b/docs/gateway/configuration-reference.md index 538b80f6138..658a3084437 100644 --- a/docs/gateway/configuration-reference.md +++ b/docs/gateway/configuration-reference.md @@ -203,7 +203,7 @@ WhatsApp runs through the gateway's web channel (Baileys Web). It starts automat } ``` -- Bot token: `channels.telegram.botToken` or `channels.telegram.tokenFile`, with `TELEGRAM_BOT_TOKEN` as fallback for the default account. +- Bot token: `channels.telegram.botToken` or `channels.telegram.tokenFile` (regular file only; symlinks rejected), with `TELEGRAM_BOT_TOKEN` as fallback for the default account. - Optional `channels.telegram.defaultAccount` overrides default account selection when it matches a configured account id. - In multi-account setups (2+ account ids), set an explicit default (`channels.telegram.defaultAccount` or `channels.telegram.accounts.default`) to avoid fallback routing; `openclaw doctor` warns when this is missing or invalid. 
- `configWrites: false` blocks Telegram-initiated config writes (supergroup ID migrations, `/config set|unset`). @@ -304,6 +304,7 @@ WhatsApp runs through the gateway's web channel (Baileys Web). It starts automat ``` - Token: `channels.discord.token`, with `DISCORD_BOT_TOKEN` as fallback for the default account. +- Direct outbound calls that provide an explicit Discord `token` use that token for the call; account retry/policy settings still come from the selected account in the active runtime snapshot. - Optional `channels.discord.defaultAccount` overrides default account selection when it matches a configured account id. - Use `user:` (DM) or `channel:` (guild channel) for delivery targets; bare numeric IDs are rejected. - Guild slugs are lowercase with spaces replaced by `-`; channel keys use the slugged name (no `#`). Prefer guild IDs. @@ -747,6 +748,7 @@ Include your own number in `allowFrom` to enable self-chat mode (ignores native - `bash: true` enables `! ` for host shell. Requires `tools.elevated.enabled` and sender in `tools.elevated.allowFrom.`. - `config: true` enables `/config` (reads/writes `openclaw.json`). For gateway `chat.send` clients, persistent `/config set|unset` writes also require `operator.admin`; read-only `/config show` stays available to normal write-scoped operator clients. - `channels..configWrites` gates config mutations per channel (default: true). +- For multi-account channels, `channels..accounts..configWrites` also gates writes that target that account (for example `/allowlist --config --account ` or `/config set channels..accounts....`). - `allowFrom` is per-provider. When set, it is the **only** authorization source (channel allowlists/pairing and `useAccessGroups` are ignored). - `useAccessGroups: false` allows commands to bypass access-group policies when `allowFrom` is not set. @@ -2012,9 +2014,11 @@ OpenClaw uses the pi-coding-agent model catalog. 
Add custom providers via `model - Non-empty agent `models.json` `baseUrl` values win. - Non-empty agent `apiKey` values win only when that provider is not SecretRef-managed in current config/auth-profile context. - SecretRef-managed provider `apiKey` values are refreshed from source markers (`ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs) instead of persisting resolved secrets. + - SecretRef-managed provider header values are refreshed from source markers (`secretref-env:ENV_VAR_NAME` for env refs, `secretref-managed` for file/exec refs). - Empty or missing agent `apiKey`/`baseUrl` fall back to `models.providers` in config. - Matching model `contextWindow`/`maxTokens` use the higher value between explicit config and implicit catalog values. - Use `models.mode: "replace"` when you want config to fully rewrite `models.json`. + - Marker persistence is source-authoritative: markers are written from the active source config snapshot (pre-resolution), not from resolved runtime secret values. ### Provider field details @@ -2077,7 +2081,7 @@ Use `cerebras/zai-glm-4.7` for Cerebras; `zai/glm-4.7` for Z.AI direct. - + ```json5 { @@ -2090,7 +2094,7 @@ Use `cerebras/zai-glm-4.7` for Cerebras; `zai/glm-4.7` for Z.AI direct. } ``` -Set `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`). Shortcut: `openclaw onboard --auth-choice opencode-zen`. +Set `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`). Use `opencode/...` refs for the Zen catalog or `opencode-go/...` refs for the Go catalog. Shortcut: `openclaw onboard --auth-choice opencode-zen` or `openclaw onboard --auth-choice opencode-go`. @@ -2194,7 +2198,7 @@ Anthropic-compatible, built-in provider. Shortcut: `openclaw onboard --auth-choi { id: "hf:MiniMaxAI/MiniMax-M2.5", name: "MiniMax M2.5", - reasoning: false, + reasoning: true, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 192000, @@ -2234,7 +2238,7 @@ Base URL should omit `/v1` (Anthropic client appends it). 
Shortcut: `openclaw on { id: "MiniMax-M2.5", name: "MiniMax M2.5", - reasoning: false, + reasoning: true, input: ["text"], cost: { input: 15, output: 60, cacheRead: 2, cacheWrite: 10 }, contextWindow: 200000, @@ -2338,7 +2342,7 @@ See [Plugins](/tools/plugin). browser: { enabled: true, evaluateEnabled: true, - defaultProfile: "chrome", + defaultProfile: "user", ssrfPolicy: { dangerouslyAllowPrivateNetwork: true, // default trusted-network mode // allowPrivateNetwork: true, // legacy alias @@ -2443,6 +2447,14 @@ See [Plugins](/tools/plugin). // Remove tools from the default HTTP deny list allow: ["gateway"], }, + push: { + apns: { + relay: { + baseUrl: "https://relay.example.com", + timeoutMs: 10000, + }, + }, + }, }, } ``` @@ -2468,7 +2480,13 @@ See [Plugins](/tools/plugin). - `remote.transport`: `ssh` (default) or `direct` (ws/wss). For `direct`, `remote.url` must be `ws://` or `wss://`. - `OPENCLAW_ALLOW_INSECURE_PRIVATE_WS=1`: client-side break-glass override that allows plaintext `ws://` to trusted private-network IPs; default remains loopback-only for plaintext. - `gateway.remote.token` / `.password` are remote-client credential fields. They do not configure gateway auth by themselves. -- Local gateway call paths can use `gateway.remote.*` as fallback when `gateway.auth.*` is unset. +- `gateway.push.apns.relay.baseUrl`: base HTTPS URL for the external APNs relay used by official/TestFlight iOS builds after they publish relay-backed registrations to the gateway. This URL must match the relay URL compiled into the iOS build. +- `gateway.push.apns.relay.timeoutMs`: gateway-to-relay send timeout in milliseconds. Defaults to `10000`. +- Relay-backed registrations are delegated to a specific gateway identity. The paired iOS app fetches `gateway.identity.get`, includes that identity in the relay registration, and forwards a registration-scoped send grant to the gateway. Another gateway cannot reuse that stored registration. 
+- `OPENCLAW_APNS_RELAY_BASE_URL` / `OPENCLAW_APNS_RELAY_TIMEOUT_MS`: temporary env overrides for the relay config above. +- `OPENCLAW_APNS_RELAY_ALLOW_HTTP=true`: development-only escape hatch for loopback HTTP relay URLs. Production relay URLs should stay on HTTPS. +- Local gateway call paths can use `gateway.remote.*` as fallback only when `gateway.auth.*` is unset. +- If `gateway.auth.token` / `gateway.auth.password` is explicitly configured via SecretRef and unresolved, resolution fails closed (no remote fallback masking). - `trustedProxies`: reverse proxy IPs that terminate TLS. Only list proxies you control. - `allowRealIpFallback`: when `true`, the gateway accepts `X-Real-IP` if `X-Forwarded-For` is missing. Default `false` for fail-closed behavior. - `gateway.tools.deny`: extra tool names blocked for HTTP `POST /tools/invoke` (extends default deny list). @@ -2712,6 +2730,7 @@ Validation: - `source: "env"` id pattern: `^[A-Z][A-Z0-9_]{0,127}$` - `source: "file"` id: absolute JSON pointer (for example `"/providers/openai/apiKey"`) - `source: "exec"` id pattern: `^[A-Za-z0-9][A-Za-z0-9._:/-]{0,255}$` +- `source: "exec"` ids must not contain `.` or `..` slash-delimited path segments (for example `a/../b` is rejected) ### Supported credential surface diff --git a/docs/gateway/configuration.md b/docs/gateway/configuration.md index ece612d101d..0f1dd65cbbc 100644 --- a/docs/gateway/configuration.md +++ b/docs/gateway/configuration.md @@ -225,6 +225,63 @@ When validation fails: + + Relay-backed push is configured in `openclaw.json`. + + Set this in gateway config: + + ```json5 + { + gateway: { + push: { + apns: { + relay: { + baseUrl: "https://relay.example.com", + // Optional. 
Default: 10000 + timeoutMs: 10000, + }, + }, + }, + }, + } + ``` + + CLI equivalent: + + ```bash + openclaw config set gateway.push.apns.relay.baseUrl https://relay.example.com + ``` + + What this does: + + - Lets the gateway send `push.test`, wake nudges, and reconnect wakes through the external relay. + - Uses a registration-scoped send grant forwarded by the paired iOS app. The gateway does not need a deployment-wide relay token. + - Binds each relay-backed registration to the gateway identity that the iOS app paired with, so another gateway cannot reuse the stored registration. + - Keeps local/manual iOS builds on direct APNs. Relay-backed sends apply only to official distributed builds that registered through the relay. + - Must match the relay base URL baked into the official/TestFlight iOS build, so registration and send traffic reach the same relay deployment. + + End-to-end flow: + + 1. Install an official/TestFlight iOS build that was compiled with the same relay base URL. + 2. Configure `gateway.push.apns.relay.baseUrl` on the gateway. + 3. Pair the iOS app to the gateway and let both node and operator sessions connect. + 4. The iOS app fetches the gateway identity, registers with the relay using App Attest plus the app receipt, and then publishes the relay-backed `push.apns.register` payload to the paired gateway. + 5. The gateway stores the relay handle and send grant, then uses them for `push.test`, wake nudges, and reconnect wakes. + + Operational notes: + + - If you switch the iOS app to a different gateway, reconnect the app so it can publish a new relay registration bound to that gateway. + - If you ship a new iOS build that points at a different relay deployment, the app refreshes its cached relay registration instead of reusing the old relay origin. + + Compatibility note: + + - `OPENCLAW_APNS_RELAY_BASE_URL` and `OPENCLAW_APNS_RELAY_TIMEOUT_MS` still work as temporary env overrides. 
+ - `OPENCLAW_APNS_RELAY_ALLOW_HTTP=true` remains a loopback-only development escape hatch; do not persist HTTP relay URLs in config. + + See [iOS App](/platforms/ios#relay-backed-push-for-official-builds) for the end-to-end flow and [Authentication and trust flow](/platforms/ios#authentication-and-trust-flow) for the relay security model. + + + ```json5 { @@ -415,7 +472,7 @@ Control-plane write RPCs (`config.apply`, `config.patch`, `update.run`) are rate openclaw gateway call config.apply --params '{ "raw": "{ agents: { defaults: { workspace: \"~/.openclaw/workspace\" } } }", "baseHash": "", - "sessionKey": "agent:main:whatsapp:dm:+15555550123" + "sessionKey": "agent:main:whatsapp:direct:+15555550123" }' ``` diff --git a/docs/gateway/doctor.md b/docs/gateway/doctor.md index 2550406f4ff..95027906750 100644 --- a/docs/gateway/doctor.md +++ b/docs/gateway/doctor.md @@ -63,8 +63,9 @@ cat ~/.openclaw/openclaw.json - Health check + restart prompt. - Skills status summary (eligible/missing/blocked). - Config normalization for legacy values. -- OpenCode Zen provider override warnings (`models.providers.opencode`). +- OpenCode provider override warnings (`models.providers.opencode` / `models.providers.opencode-go`). - Legacy on-disk state migration (sessions/agent dir/WhatsApp auth). +- Legacy cron store migration (`jobId`, `schedule.cron`, top-level delivery/payload fields, payload `provider`, simple `notify: true` webhook fallback jobs). - State integrity and permissions checks (sessions, transcripts, state dir). - Config file permission checks (chmod 600) when running locally. - Model auth health: checks OAuth expiry, can refresh expiring tokens, and reports auth-profile cooldown/disabled states. 
@@ -133,12 +134,12 @@ Doctor warnings also include account-default guidance for multi-account channels - If two or more `channels..accounts` entries are configured without `channels..defaultAccount` or `accounts.default`, doctor warns that fallback routing can pick an unexpected account. - If `channels..defaultAccount` is set to an unknown account ID, doctor warns and lists configured account IDs. -### 2b) OpenCode Zen provider overrides +### 2b) OpenCode provider overrides -If you’ve added `models.providers.opencode` (or `opencode-zen`) manually, it -overrides the built-in OpenCode Zen catalog from `@mariozechner/pi-ai`. That can -force every model onto a single API or zero out costs. Doctor warns so you can -remove the override and restore per-model API routing + costs. +If you’ve added `models.providers.opencode`, `opencode-zen`, or `opencode-go` +manually, it overrides the built-in OpenCode catalog from `@mariozechner/pi-ai`. +That can force models onto the wrong API or zero out costs. Doctor warns so you +can remove the override and restore per-model API routing + costs. ### 3) Legacy state migrations (disk layout) @@ -158,6 +159,25 @@ the legacy sessions + agent dir on startup so history/auth/models land in the per-agent path without a manual doctor run. WhatsApp auth is intentionally only migrated via `openclaw doctor`. +### 3b) Legacy cron store migrations + +Doctor also checks the cron job store (`~/.openclaw/cron/jobs.json` by default, +or `cron.store` when overridden) for old job shapes that the scheduler still +accepts for compatibility. + +Current cron cleanups include: + +- `jobId` → `id` +- `schedule.cron` → `schedule.expr` +- top-level payload fields (`message`, `model`, `thinking`, ...) → `payload` +- top-level delivery fields (`deliver`, `channel`, `to`, `provider`, ...) 
→ `delivery` +- payload `provider` delivery aliases → explicit `delivery.channel` +- simple legacy `notify: true` webhook fallback jobs → explicit `delivery.mode="webhook"` with `delivery.to=cron.webhook` + +Doctor only auto-migrates `notify: true` jobs when it can do so without +changing behavior. If a job combines legacy notify fallback with an existing +non-webhook delivery mode, doctor warns and leaves that job for manual review. + ### 4) State integrity checks (session persistence, routing, and safety) The state directory is the operational brainstem. If it vanishes, you lose diff --git a/docs/gateway/local-models.md b/docs/gateway/local-models.md index 8a07a827467..4059f988776 100644 --- a/docs/gateway/local-models.md +++ b/docs/gateway/local-models.md @@ -11,6 +11,8 @@ title: "Local Models" Local is doable, but OpenClaw expects large context + strong defenses against prompt injection. Small cards truncate context and leak safety. Aim high: **≥2 maxed-out Mac Studios or equivalent GPU rig (~$30k+)**. A single **24 GB** GPU works only for lighter prompts with higher latency. Use the **largest / full-size model variant you can run**; aggressively quantized or “small” checkpoints raise prompt-injection risk (see [Security](/gateway/security)). +If you want the lowest-friction local setup, start with [Ollama](/providers/ollama) and `openclaw onboard`. This page is the opinionated guide for higher-end local stacks and custom OpenAI-compatible local servers. + ## Recommended: LM Studio + MiniMax M2.5 (Responses API, full-size) Best current local stack. Load MiniMax M2.5 in LM Studio, enable the local server (default `http://127.0.0.1:1234`), and use Responses API to keep reasoning separate from final text. 
diff --git a/docs/gateway/openresponses-http-api.md b/docs/gateway/openresponses-http-api.md index bcba166db9d..fa86f912ef5 100644 --- a/docs/gateway/openresponses-http-api.md +++ b/docs/gateway/openresponses-http-api.md @@ -18,77 +18,16 @@ This endpoint is **disabled by default**. Enable it in config first. Under the hood, requests are executed as a normal Gateway agent run (same codepath as `openclaw agent`), so routing/permissions/config match your Gateway. -## Authentication +## Authentication, security, and routing -Uses the Gateway auth configuration. Send a bearer token: +Operational behavior matches [OpenAI Chat Completions](/gateway/openai-http-api): -- `Authorization: Bearer ` +- use `Authorization: Bearer ` with the normal Gateway auth config +- treat the endpoint as full operator access for the gateway instance +- select agents with `model: "openclaw:"`, `model: "agent:"`, or `x-openclaw-agent-id` +- use `x-openclaw-session-key` for explicit session routing -Notes: - -- When `gateway.auth.mode="token"`, use `gateway.auth.token` (or `OPENCLAW_GATEWAY_TOKEN`). -- When `gateway.auth.mode="password"`, use `gateway.auth.password` (or `OPENCLAW_GATEWAY_PASSWORD`). -- If `gateway.auth.rateLimit` is configured and too many auth failures occur, the endpoint returns `429` with `Retry-After`. - -## Security boundary (important) - -Treat this endpoint as a **full operator-access** surface for the gateway instance. - -- HTTP bearer auth here is not a narrow per-user scope model. -- A valid Gateway token/password for this endpoint should be treated like an owner/operator credential. -- Requests run through the same control-plane agent path as trusted operator actions. -- There is no separate non-owner/per-user tool boundary on this endpoint; once a caller passes Gateway auth here, OpenClaw treats that caller as a trusted operator for this gateway. -- If the target agent policy allows sensitive tools, this endpoint can use them. 
-- Keep this endpoint on loopback/tailnet/private ingress only; do not expose it directly to the public internet. - -See [Security](/gateway/security) and [Remote access](/gateway/remote). - -## Choosing an agent - -No custom headers required: encode the agent id in the OpenResponses `model` field: - -- `model: "openclaw:"` (example: `"openclaw:main"`, `"openclaw:beta"`) -- `model: "agent:"` (alias) - -Or target a specific OpenClaw agent by header: - -- `x-openclaw-agent-id: ` (default: `main`) - -Advanced: - -- `x-openclaw-session-key: ` to fully control session routing. - -## Enabling the endpoint - -Set `gateway.http.endpoints.responses.enabled` to `true`: - -```json5 -{ - gateway: { - http: { - endpoints: { - responses: { enabled: true }, - }, - }, - }, -} -``` - -## Disabling the endpoint - -Set `gateway.http.endpoints.responses.enabled` to `false`: - -```json5 -{ - gateway: { - http: { - endpoints: { - responses: { enabled: false }, - }, - }, - }, -} -``` +Enable or disable this endpoint with `gateway.http.endpoints.responses.enabled`. ## Session behavior diff --git a/docs/gateway/protocol.md b/docs/gateway/protocol.md index 62a5adb1fef..9c886a31716 100644 --- a/docs/gateway/protocol.md +++ b/docs/gateway/protocol.md @@ -206,6 +206,12 @@ The Gateway treats these as **claims** and enforces server-side allowlists. persisted by the client for future connects. - Device tokens can be rotated/revoked via `device.token.rotate` and `device.token.revoke` (requires `operator.pairing` scope). +- Auth failures include `error.details.code` plus recovery hints: + - `error.details.canRetryWithDeviceToken` (boolean) + - `error.details.recommendedNextStep` (`retry_with_device_token`, `update_auth_configuration`, `update_auth_credentials`, `wait_then_retry`, `review_auth_configuration`) +- Client behavior for `AUTH_TOKEN_MISMATCH`: + - Trusted clients may attempt one bounded retry with a cached per-device token. 
+ - If that retry fails, clients should stop automatic reconnect loops and surface operator action guidance. ## Device identity + pairing @@ -217,8 +223,9 @@ The Gateway treats these as **claims** and enforces server-side allowlists. - **Local** connects include loopback and the gateway host’s own tailnet address (so same‑host tailnet binds can still auto‑approve). - All WS clients must include `device` identity during `connect` (operator + node). - Control UI can omit it **only** when `gateway.controlUi.dangerouslyDisableDeviceAuth` - is enabled for break-glass use. + Control UI can omit it only in these modes: + - `gateway.controlUi.allowInsecureAuth=true` for localhost-only insecure HTTP compatibility. + - `gateway.controlUi.dangerouslyDisableDeviceAuth=true` (break-glass, severe security downgrade). - All connections must sign the server-provided `connect.challenge` nonce. ### Device auth migration diagnostics diff --git a/docs/gateway/remote.md b/docs/gateway/remote.md index a9aadc49dd1..dcbae985b74 100644 --- a/docs/gateway/remote.md +++ b/docs/gateway/remote.md @@ -103,18 +103,19 @@ When the gateway is loopback-only, keep the URL at `ws://127.0.0.1:18789` and op ## Credential precedence -Gateway credential resolution follows one shared contract across call/probe/status paths, Discord exec-approval monitoring, and node-host connections: +Gateway credential resolution follows one shared contract across call/probe/status paths and Discord exec-approval monitoring. Node-host uses the same base contract with one local-mode exception (it intentionally ignores `gateway.remote.*`): - Explicit credentials (`--token`, `--password`, or tool `gatewayToken`) always win on call paths that accept explicit auth. - URL override safety: - CLI URL overrides (`--url`) never reuse implicit config/env credentials. - Env URL overrides (`OPENCLAW_GATEWAY_URL`) may use env credentials only (`OPENCLAW_GATEWAY_TOKEN` / `OPENCLAW_GATEWAY_PASSWORD`). 
- Local mode defaults: - - token: `OPENCLAW_GATEWAY_TOKEN` -> `gateway.auth.token` -> `gateway.remote.token` - - password: `OPENCLAW_GATEWAY_PASSWORD` -> `gateway.auth.password` -> `gateway.remote.password` + - token: `OPENCLAW_GATEWAY_TOKEN` -> `gateway.auth.token` -> `gateway.remote.token` (remote fallback applies only when local auth token input is unset) + - password: `OPENCLAW_GATEWAY_PASSWORD` -> `gateway.auth.password` -> `gateway.remote.password` (remote fallback applies only when local auth password input is unset) - Remote mode defaults: - token: `gateway.remote.token` -> `OPENCLAW_GATEWAY_TOKEN` -> `gateway.auth.token` - password: `OPENCLAW_GATEWAY_PASSWORD` -> `gateway.remote.password` -> `gateway.auth.password` +- Node-host local-mode exception: `gateway.remote.token` / `gateway.remote.password` are ignored. - Remote probe/status token checks are strict by default: they use `gateway.remote.token` only (no local token fallback) when targeting remote mode. - Legacy `CLAWDBOT_GATEWAY_*` env vars are only used by compatibility call paths; probe/status/auth resolution uses `OPENCLAW_GATEWAY_*` only. @@ -140,7 +141,8 @@ Short version: **keep the Gateway loopback-only** unless you’re sure you need set `OPENCLAW_ALLOW_INSECURE_PRIVATE_WS=1` on the client process as break-glass. - **Non-loopback binds** (`lan`/`tailnet`/`custom`, or `auto` when loopback is unavailable) must use auth tokens/passwords. - `gateway.remote.token` / `.password` are client credential sources. They do **not** configure server auth by themselves. -- Local call paths can use `gateway.remote.*` as fallback when `gateway.auth.*` is unset. +- Local call paths can use `gateway.remote.*` as fallback only when `gateway.auth.*` is unset. +- If `gateway.auth.token` / `gateway.auth.password` is explicitly configured via SecretRef and unresolved, resolution fails closed (no remote fallback masking). - `gateway.remote.tlsFingerprint` pins the remote TLS cert when using `wss://`. 
- **Tailscale Serve** can authenticate Control UI/WebSocket traffic via identity headers when `gateway.auth.allowTailscale: true`; HTTP API endpoints still diff --git a/docs/gateway/secrets.md b/docs/gateway/secrets.md index 3ef08267618..93cd508d4f1 100644 --- a/docs/gateway/secrets.md +++ b/docs/gateway/secrets.md @@ -21,6 +21,7 @@ Secrets are resolved into an in-memory runtime snapshot. - Startup fails fast when an effectively active SecretRef cannot be resolved. - Reload uses atomic swap: full success, or keep the last-known-good snapshot. - Runtime requests read from the active in-memory snapshot only. +- Outbound delivery paths also read from that active snapshot (for example Discord reply/thread delivery and Telegram action sends); they do not re-resolve SecretRefs on each send. This keeps secret-provider outages off hot request paths. @@ -38,14 +39,15 @@ Examples of inactive surfaces: - Top-level channel credentials that no enabled account inherits. - Disabled tool/feature surfaces. - Web search provider-specific keys that are not selected by `tools.web.search.provider`. - In auto mode (provider unset), provider-specific keys are also active for provider auto-detection. -- `gateway.remote.token` / `gateway.remote.password` SecretRefs are active (when `gateway.remote.enabled` is not `false`) if one of these is true: + In auto mode (provider unset), keys are consulted by precedence for provider auto-detection until one resolves. + After selection, non-selected provider keys are treated as inactive until selected. +- `gateway.remote.token` / `gateway.remote.password` SecretRefs are active if one of these is true: - `gateway.mode=remote` - `gateway.remote.url` is configured - `gateway.tailscale.mode` is `serve` or `funnel` - In local mode without those remote surfaces: - - `gateway.remote.token` is active when token auth can win and no env/auth token is configured. 
- - `gateway.remote.password` is active only when password auth can win and no env/auth password is configured. + - In local mode without those remote surfaces: + - `gateway.remote.token` is active when token auth can win and no env/auth token is configured. + - `gateway.remote.password` is active only when password auth can win and no env/auth password is configured. - `gateway.auth.token` SecretRef is inactive for startup auth resolution when `OPENCLAW_GATEWAY_TOKEN` (or `CLAWDBOT_GATEWAY_TOKEN`) is set, because env token input wins for that runtime. ## Gateway auth surface diagnostics @@ -112,6 +114,7 @@ Validation: - `provider` must match `^[a-z][a-z0-9_-]{0,63}$` - `id` must match `^[A-Za-z0-9][A-Za-z0-9._:/-]{0,255}$` +- `id` must not contain `.` or `..` as slash-delimited path segments (for example `a/../b` is rejected) ## Provider config @@ -320,6 +323,7 @@ Activation contract: - Success swaps the snapshot atomically. - Startup failure aborts gateway startup. - Runtime reload failure keeps the last-known-good snapshot. +- Providing an explicit per-call channel token to an outbound helper/tool call does not trigger SecretRef activation; activation points remain startup, reload, and explicit `secrets.reload`. ## Degraded and recovered signals diff --git a/docs/gateway/security/index.md b/docs/gateway/security/index.md index c62b77352e8..f7f6583d794 100644 --- a/docs/gateway/security/index.md +++ b/docs/gateway/security/index.md @@ -104,6 +104,7 @@ Treat Gateway and node as one operator trust domain, with different roles: - A caller authenticated to the Gateway is trusted at Gateway scope. After pairing, node actions are trusted operator actions on that node. - `sessionKey` is routing/context selection, not per-user auth. - Exec approvals (allowlist + ask) are guardrails for operator intent, not hostile multi-tenant isolation. 
+- Exec approvals bind exact request context and best-effort direct local file operands; they do not semantically model every runtime/interpreter loader path. Use sandboxing and host isolation for strong boundaries. If you need hostile-user isolation, split trust boundaries by OS user/host and run separate gateways. @@ -199,7 +200,7 @@ If you run `--deep`, OpenClaw also attempts a best-effort live Gateway probe. Use this when auditing access or deciding what to back up: - **WhatsApp**: `~/.openclaw/credentials/whatsapp//creds.json` -- **Telegram bot token**: config/env or `channels.telegram.tokenFile` +- **Telegram bot token**: config/env or `channels.telegram.tokenFile` (regular file only; symlinks rejected) - **Discord bot token**: config/env or SecretRef (env/file/exec providers) - **Slack tokens**: config/env (`channels.slack.*`) - **Pairing allowlists**: @@ -262,9 +263,14 @@ High-signal `checkId` values you will most likely see in real deployments (not e ## Control UI over HTTP The Control UI needs a **secure context** (HTTPS or localhost) to generate device -identity. `gateway.controlUi.allowInsecureAuth` does **not** bypass secure-context, -device-identity, or device-pairing checks. Prefer HTTPS (Tailscale Serve) or open -the UI on `127.0.0.1`. +identity. `gateway.controlUi.allowInsecureAuth` is a local compatibility toggle: + +- On localhost, it allows Control UI auth without device identity when the page + is loaded over non-secure HTTP. +- It does not bypass pairing checks. +- It does not relax remote (non-localhost) device identity requirements. + +Prefer HTTPS (Tailscale Serve) or open the UI on `127.0.0.1`. For break-glass scenarios only, `gateway.controlUi.dangerouslyDisableDeviceAuth` disables device identity checks entirely. 
This is a severe security downgrade; @@ -298,6 +304,7 @@ schema: - `channels.googlechat.dangerouslyAllowNameMatching` - `channels.googlechat.accounts..dangerouslyAllowNameMatching` - `channels.msteams.dangerouslyAllowNameMatching` +- `channels.zalouser.dangerouslyAllowNameMatching` (extension channel) - `channels.irc.dangerouslyAllowNameMatching` (extension channel) - `channels.irc.accounts..dangerouslyAllowNameMatching` (extension channel) - `channels.mattermost.dangerouslyAllowNameMatching` (extension channel) @@ -365,6 +372,7 @@ If a macOS node is paired, the Gateway can invoke `system.run` on that node. Thi - Requires node pairing (approval + token). - Controlled on the Mac via **Settings → Exec approvals** (security + ask + allowlist). +- Approval mode binds exact request context and, when possible, one concrete local script/file operand. If OpenClaw cannot identify exactly one direct local file for an interpreter/runtime command, approval-backed execution is denied rather than promising full semantic coverage. - If you don’t want remote execution, set security to **deny** and remove node pairing for that Mac. ## Dynamic skills (watcher / remote nodes) @@ -747,8 +755,10 @@ Doctor can generate one for you: `openclaw doctor --generate-gateway-token`. Note: `gateway.remote.token` / `.password` are client credential sources. They do **not** protect local WS access by themselves. -Local call paths can use `gateway.remote.*` as fallback when `gateway.auth.*` +Local call paths can use `gateway.remote.*` as fallback only when `gateway.auth.*` is unset. +If `gateway.auth.token` / `gateway.auth.password` is explicitly configured via +SecretRef and unresolved, resolution fails closed (no remote fallback masking). Optional: pin remote TLS with `gateway.remote.tlsFingerprint` when using `wss://`. Plaintext `ws://` is loopback-only by default. For trusted private-network paths, set `OPENCLAW_ALLOW_INSECURE_PRIVATE_WS=1` on the client process as break-glass. 
diff --git a/docs/gateway/troubleshooting.md b/docs/gateway/troubleshooting.md index 46d2c58b966..f5829454e57 100644 --- a/docs/gateway/troubleshooting.md +++ b/docs/gateway/troubleshooting.md @@ -113,9 +113,21 @@ Common signatures: challenge-based device auth flow (`connect.challenge` + `device.nonce`). - `device signature invalid` / `device signature expired` → client signed the wrong payload (or stale timestamp) for the current handshake. -- `unauthorized` / reconnect loop → token/password mismatch. +- `AUTH_TOKEN_MISMATCH` with `canRetryWithDeviceToken=true` → client can do one trusted retry with cached device token. +- repeated `unauthorized` after that retry → shared token/device token drift; refresh token config and re-approve/rotate device token if needed. - `gateway connect failed:` → wrong host/port/url target. +### Auth detail codes quick map + +Use `error.details.code` from the failed `connect` response to pick the next action: + +| Detail code | Meaning | Recommended action | +| ---------------------------- | -------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `AUTH_TOKEN_MISSING` | Client did not send a required shared token. | Paste/set token in the client and retry. For dashboard paths: `openclaw config get gateway.auth.token` then paste into Control UI settings. | +| `AUTH_TOKEN_MISMATCH` | Shared token did not match gateway auth token. | If `canRetryWithDeviceToken=true`, allow one trusted retry. If still failing, run the [token drift recovery checklist](/cli/devices#token-drift-recovery-checklist). | +| `AUTH_DEVICE_TOKEN_MISMATCH` | Cached per-device token is stale or revoked. | Rotate/re-approve device token using [devices CLI](/cli/devices), then reconnect. | +| `PAIRING_REQUIRED` | Device identity is known but not approved for this role. 
| Approve pending request: `openclaw devices list` then `openclaw devices approve `. | + Device auth v2 migration check: ```bash @@ -135,6 +147,7 @@ Related: - [/web/control-ui](/web/control-ui) - [/gateway/authentication](/gateway/authentication) - [/gateway/remote](/gateway/remote) +- [/cli/devices](/cli/devices) ## Gateway service not running @@ -276,7 +289,7 @@ Look for: - Valid browser executable path. - CDP profile reachability. -- Extension relay tab attachment for `profile="chrome"`. +- Extension relay tab attachment for `profile="chrome-relay"`. Common signatures: diff --git a/docs/help/faq.md b/docs/help/faq.md index 7dad0548fd4..37f5f96c815 100644 --- a/docs/help/faq.md +++ b/docs/help/faq.md @@ -179,7 +179,7 @@ Quick answers plus deeper troubleshooting for real-world setups (local dev, VPS, - [I closed my terminal on Windows - how do I restart OpenClaw?](#i-closed-my-terminal-on-windows-how-do-i-restart-openclaw) - [The Gateway is up but replies never arrive. What should I check?](#the-gateway-is-up-but-replies-never-arrive-what-should-i-check) - ["Disconnected from gateway: no reason" - what now?](#disconnected-from-gateway-no-reason-what-now) - - [Telegram setMyCommands fails with network errors. What should I check?](#telegram-setmycommands-fails-with-network-errors-what-should-i-check) + - [Telegram setMyCommands fails. What should I check?](#telegram-setmycommands-fails-what-should-i-check) - [TUI shows no output. What should I check?](#tui-shows-no-output-what-should-i-check) - [How do I completely stop then start the Gateway?](#how-do-i-completely-stop-then-start-the-gateway) - [ELI5: `openclaw gateway restart` vs `openclaw gateway`](#eli5-openclaw-gateway-restart-vs-openclaw-gateway) @@ -1452,7 +1452,8 @@ Non-loopback binds **require auth**. Configure `gateway.auth.mode` + `gateway.au Notes: - `gateway.remote.token` / `.password` do **not** enable local gateway auth by themselves. 
-- Local call paths can use `gateway.remote.*` as fallback when `gateway.auth.*` is unset. +- Local call paths can use `gateway.remote.*` as fallback only when `gateway.auth.*` is unset. +- If `gateway.auth.token` / `gateway.auth.password` is explicitly configured via SecretRef and unresolved, resolution fails closed (no remote fallback masking). - The Control UI authenticates via `connect.params.auth.token` (stored in app/UI settings). Avoid putting tokens in URLs. ### Why do I need a token on localhost now @@ -1489,10 +1490,16 @@ Set `cli.banner.taglineMode` in config: ### How do I enable web search and web fetch -`web_fetch` works without an API key. `web_search` requires a Brave Search API -key. **Recommended:** run `openclaw configure --section web` to store it in -`tools.web.search.apiKey`. Environment alternative: set `BRAVE_API_KEY` for the -Gateway process. +`web_fetch` works without an API key. `web_search` requires a key for your +selected provider (Brave, Gemini, Grok, Kimi, or Perplexity). +**Recommended:** run `openclaw configure --section web` and choose a provider. +Environment alternatives: + +- Brave: `BRAVE_API_KEY` +- Gemini: `GEMINI_API_KEY` +- Grok: `XAI_API_KEY` +- Kimi: `KIMI_API_KEY` or `MOONSHOT_API_KEY` +- Perplexity: `PERPLEXITY_API_KEY` or `OPENROUTER_API_KEY` ```json5 { @@ -1500,6 +1507,7 @@ Gateway process. web: { search: { enabled: true, + provider: "brave", apiKey: "BRAVE_API_KEY_HERE", maxResults: 5, }, @@ -2076,8 +2084,21 @@ More context: [Models](/concepts/models). ### Can I use selfhosted models llamacpp vLLM Ollama -Yes. If your local server exposes an OpenAI-compatible API, you can point a -custom provider at it. Ollama is supported directly and is the easiest path. +Yes. Ollama is the easiest path for local models. + +Quickest setup: + +1. Install Ollama from `https://ollama.com/download` +2. Pull a local model such as `ollama pull glm-4.7-flash` +3. If you want Ollama Cloud too, run `ollama signin` +4. 
Run `openclaw onboard` and choose `Ollama` +5. Pick `Local` or `Cloud + Local` + +Notes: + +- `Cloud + Local` gives you Ollama Cloud models plus your local Ollama models +- cloud models such as `kimi-k2.5:cloud` do not need a local pull +- for manual switching, use `openclaw models list` and `openclaw models set ollama/` Security note: smaller or heavily quantized models are more vulnerable to prompt injection. We strongly recommend **large models** for any bot that can use tools. @@ -2505,6 +2526,7 @@ Your gateway is running with auth enabled (`gateway.auth.*`), but the UI is not Facts (from code): - The Control UI keeps the token in `sessionStorage` for the current browser tab session and selected gateway URL, so same-tab refreshes keep working without restoring long-lived localStorage token persistence. +- On `AUTH_TOKEN_MISMATCH`, trusted clients can attempt one bounded retry with a cached device token when the gateway returns retry hints (`canRetryWithDeviceToken=true`, `recommendedNextStep=retry_with_device_token`). Fix: @@ -2513,6 +2535,9 @@ Fix: - If remote, tunnel first: `ssh -N -L 18789:127.0.0.1:18789 user@host` then open `http://127.0.0.1:18789/`. - Set `gateway.auth.token` (or `OPENCLAW_GATEWAY_TOKEN`) on the gateway host. - In the Control UI settings, paste the same token. +- If mismatch persists after the one retry, rotate/re-approve the paired device token: + - `openclaw devices list` + - `openclaw devices rotate --device --role operator` - Still stuck? Run `openclaw status --all` and follow [Troubleshooting](/gateway/troubleshooting). See [Dashboard](/web/dashboard) for auth details. ### I set gatewaybind tailnet but it can't bind nothing listens @@ -2685,7 +2710,7 @@ openclaw logs --follow Docs: [Dashboard](/web/dashboard), [Remote access](/gateway/remote), [Troubleshooting](/gateway/troubleshooting). 
-### Telegram setMyCommands fails with network errors What should I check +### Telegram setMyCommands fails What should I check Start with logs and channel status: @@ -2694,7 +2719,11 @@ openclaw channels status openclaw channels logs --channel telegram ``` -If you are on a VPS or behind a proxy, confirm outbound HTTPS is allowed and DNS works. +Then match the error: + +- `BOT_COMMANDS_TOO_MUCH`: the Telegram menu has too many entries. OpenClaw already trims to the Telegram limit and retries with fewer commands, but some menu entries still need to be dropped. Reduce plugin/skill/custom commands, or disable `channels.telegram.commands.native` if you do not need the menu. +- `TypeError: fetch failed`, `Network request for 'setMyCommands' failed!`, or similar network errors: if you are on a VPS or behind a proxy, confirm outbound HTTPS is allowed and DNS works for `api.telegram.org`. + If the Gateway is remote, make sure you are looking at logs on the Gateway host. Docs: [Telegram](/channels/telegram), [Channel troubleshooting](/channels/troubleshooting). diff --git a/docs/help/testing.md b/docs/help/testing.md index 9e965b4c769..b2057e8a1da 100644 --- a/docs/help/testing.md +++ b/docs/help/testing.md @@ -53,8 +53,8 @@ Think of the suites as “increasing realism” (and increasing flakiness/cost): - No real keys required - Should be fast and stable - Pool note: - - OpenClaw uses Vitest `vmForks` on Node 22/23 for faster unit shards. - - On Node 24+, OpenClaw automatically falls back to regular `forks` to avoid Node VM linking errors (`ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`). + - OpenClaw uses Vitest `vmForks` on Node 22, 23, and 24 for faster unit shards. + - On Node 25+, OpenClaw automatically falls back to regular `forks` until the repo is re-validated there. - Override manually with `OPENCLAW_TEST_VM_FORKS=0` (force `forks`) or `OPENCLAW_TEST_VM_FORKS=1` (force `vmForks`). 
### E2E (gateway smoke) @@ -311,11 +311,11 @@ Include at least one image-capable model in `OPENCLAW_LIVE_GATEWAY_MODELS` (Clau If you have keys enabled, we also support testing via: - OpenRouter: `openrouter/...` (hundreds of models; use `openclaw models scan` to find tool+image capable candidates) -- OpenCode Zen: `opencode/...` (auth via `OPENCODE_API_KEY` / `OPENCODE_ZEN_API_KEY`) +- OpenCode: `opencode/...` for Zen and `opencode-go/...` for Go (auth via `OPENCODE_API_KEY` / `OPENCODE_ZEN_API_KEY`) More providers you can include in the live matrix (if you have creds/config): -- Built-in: `openai`, `openai-codex`, `anthropic`, `google`, `google-vertex`, `google-antigravity`, `google-gemini-cli`, `zai`, `openrouter`, `opencode`, `xai`, `groq`, `cerebras`, `mistral`, `github-copilot` +- Built-in: `openai`, `openai-codex`, `anthropic`, `google`, `google-vertex`, `google-antigravity`, `google-gemini-cli`, `zai`, `openrouter`, `opencode`, `opencode-go`, `xai`, `groq`, `cerebras`, `mistral`, `github-copilot` - Via `models.providers` (custom endpoints): `minimax` (cloud/API), plus any OpenAI/Anthropic-compatible proxy (LM Studio, vLLM, LiteLLM, etc.) Tip: don’t try to hardcode “all models” in docs. The authoritative list is whatever `discoverModels(...)` returns on your machine + whatever keys are available. @@ -409,3 +409,6 @@ When you fix a provider/model issue discovered in live: - Prefer targeting the smallest layer that catches the bug: - provider request conversion/replay bug → direct models test - gateway session/history/tool pipeline bug → gateway live smoke or CI-safe gateway mock test +- SecretRef traversal guardrail: + - `src/secrets/exec-secret-ref-id-parity.test.ts` derives one sampled target per SecretRef class from registry metadata (`listSecretTargetRegistryEntries()`), then asserts traversal-segment exec ids are rejected. 
+ - If you add a new `includeInPlan` SecretRef target family in `src/secrets/target-registry-data.ts`, update `classifyTargetClass` in that test. The test intentionally fails on unclassified target ids so new classes cannot be skipped silently. diff --git a/docs/help/troubleshooting.md b/docs/help/troubleshooting.md index e051f77f589..a3988c4ea58 100644 --- a/docs/help/troubleshooting.md +++ b/docs/help/troubleshooting.md @@ -28,7 +28,7 @@ Good output in one line: - `openclaw status` → shows configured channels and no obvious auth errors. - `openclaw status --all` → full report is present and shareable. -- `openclaw gateway probe` → expected gateway target is reachable. +- `openclaw gateway probe` → expected gateway target is reachable (`Reachable: yes`). `RPC: limited - missing scope: operator.read` is degraded diagnostics, not a connect failure. - `openclaw gateway status` → `Runtime: running` and `RPC probe: ok`. - `openclaw doctor` → no blocking config/service errors. - `openclaw channels status --probe` → channels report `connected` or `ready`. @@ -136,7 +136,8 @@ flowchart TD Common log signatures: - `device identity required` → HTTP/non-secure context cannot complete device auth. - - `unauthorized` / reconnect loop → wrong token/password or auth mode mismatch. + - `AUTH_TOKEN_MISMATCH` with retry hints (`canRetryWithDeviceToken=true`) → one trusted device-token retry may occur automatically. + - repeated `unauthorized` after that retry → wrong token/password, auth mode mismatch, or stale paired device token. - `gateway connect failed:` → UI is targeting the wrong URL/port or unreachable gateway. 
Deep pages: diff --git a/docs/index.md b/docs/index.md index f838ebf4cab..7c69600f55d 100644 --- a/docs/index.md +++ b/docs/index.md @@ -54,7 +54,7 @@ OpenClaw is a **self-hosted gateway** that connects your favorite chat apps — - **Agent-native**: built for coding agents with tool use, sessions, memory, and multi-agent routing - **Open source**: MIT licensed, community-driven -**What do you need?** Node 22+, an API key from your chosen provider, and 5 minutes. For best quality and security, use the strongest latest-generation model available. +**What do you need?** Node 24 (recommended), or Node 22 LTS (`22.16+`) for compatibility, an API key from your chosen provider, and 5 minutes. For best quality and security, use the strongest latest-generation model available. ## How it works diff --git a/docs/install/ansible.md b/docs/install/ansible.md index be91aedaadd..63c18bec237 100644 --- a/docs/install/ansible.md +++ b/docs/install/ansible.md @@ -46,7 +46,7 @@ The Ansible playbook installs and configures: 1. **Tailscale** (mesh VPN for secure remote access) 2. **UFW firewall** (SSH + Tailscale ports only) 3. **Docker CE + Compose V2** (for agent sandboxes) -4. **Node.js 22.x + pnpm** (runtime dependencies) +4. **Node.js 24 + pnpm** (runtime dependencies; Node 22 LTS, currently `22.16+`, remains supported for compatibility) 5. **OpenClaw** (host-based, not containerized) 6. **Systemd service** (auto-start with security hardening) diff --git a/docs/install/bun.md b/docs/install/bun.md index 9b3dcb2c224..5cbe76ce3ac 100644 --- a/docs/install/bun.md +++ b/docs/install/bun.md @@ -45,7 +45,7 @@ bun run vitest run Bun may block dependency lifecycle scripts unless explicitly trusted (`bun pm untrusted` / `bun pm trust`). For this repo, the commonly blocked scripts are not required: -- `@whiskeysockets/baileys` `preinstall`: checks Node major >= 20 (we run Node 22+). 
+- `@whiskeysockets/baileys` `preinstall`: checks Node major >= 20 (OpenClaw defaults to Node 24 and still supports Node 22 LTS, currently `22.16+`). - `protobufjs` `postinstall`: emits warnings about incompatible version schemes (no build artifacts). If you hit a real runtime issue that requires these scripts, trust them explicitly: diff --git a/docs/install/docker-vm-runtime.md b/docs/install/docker-vm-runtime.md new file mode 100644 index 00000000000..77436f44486 --- /dev/null +++ b/docs/install/docker-vm-runtime.md @@ -0,0 +1,138 @@ +--- +summary: "Shared Docker VM runtime steps for long-lived OpenClaw Gateway hosts" +read_when: + - You are deploying OpenClaw on a cloud VM with Docker + - You need the shared binary bake, persistence, and update flow +title: "Docker VM Runtime" +--- + +# Docker VM Runtime + +Shared runtime steps for VM-based Docker installs such as GCP, Hetzner, and similar VPS providers. + +## Bake required binaries into the image + +Installing binaries inside a running container is a trap. +Anything installed at runtime will be lost on restart. + +All external binaries required by skills must be installed at image build time. + +The examples below show three common binaries only: + +- `gog` for Gmail access +- `goplaces` for Google Places +- `wacli` for WhatsApp + +These are examples, not a complete list. +You may install as many binaries as needed using the same pattern. + +If you add new skills later that depend on additional binaries, you must: + +1. Update the Dockerfile +2. Rebuild the image +3. 
Restart the containers + +**Example Dockerfile** + +```dockerfile +FROM node:24-bookworm + +RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/* + +# Example binary 1: Gmail CLI +RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \ + | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog + +# Example binary 2: Google Places CLI +RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \ + | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces + +# Example binary 3: WhatsApp CLI +RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \ + | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli + +# Add more binaries below using the same pattern + +WORKDIR /app +COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./ +COPY ui/package.json ./ui/package.json +COPY scripts ./scripts + +RUN corepack enable +RUN pnpm install --frozen-lockfile + +COPY . . +RUN pnpm build +RUN pnpm ui:install +RUN pnpm ui:build + +ENV NODE_ENV=production + +CMD ["node","dist/index.js"] +``` + +## Build and launch + +```bash +docker compose build +docker compose up -d openclaw-gateway +``` + +If build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. +Use a larger machine class before retrying. + +Verify binaries: + +```bash +docker compose exec openclaw-gateway which gog +docker compose exec openclaw-gateway which goplaces +docker compose exec openclaw-gateway which wacli +``` + +Expected output: + +``` +/usr/local/bin/gog +/usr/local/bin/goplaces +/usr/local/bin/wacli +``` + +Verify Gateway: + +```bash +docker compose logs -f openclaw-gateway +``` + +Expected output: + +``` +[gateway] listening on ws://0.0.0.0:18789 +``` + +## What persists where + +OpenClaw runs in Docker, but Docker is not the source of truth. 
+All long-lived state must survive restarts, rebuilds, and reboots. + +| Component | Location | Persistence mechanism | Notes | +| ------------------- | --------------------------------- | ---------------------- | -------------------------------- | +| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens | +| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys | +| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state | +| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts | +| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login | +| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` | +| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time | +| Node runtime | Container filesystem | Docker image | Rebuilt every image build | +| OS packages | Container filesystem | Docker image | Do not install at runtime | +| Docker container | Ephemeral | Restartable | Safe to destroy | + +## Updates + +To update OpenClaw on the VM: + +```bash +git pull +docker compose build +docker compose up -d +``` diff --git a/docs/install/docker.md b/docs/install/docker.md index c6337c3db48..a68066dcd57 100644 --- a/docs/install/docker.md +++ b/docs/install/docker.md @@ -165,13 +165,13 @@ Common tags: The main Docker image currently uses: -- `node:22-bookworm` +- `node:24-bookworm` The docker image now publishes OCI base-image annotations (sha256 is an example, and points at the pinned multi-arch manifest list for that tag): -- `org.opencontainers.image.base.name=docker.io/library/node:22-bookworm` -- `org.opencontainers.image.base.digest=sha256:b501c082306a4f528bc4038cbf2fbb58095d583d0419a259b2114b5ac53d12e9` +- `org.opencontainers.image.base.name=docker.io/library/node:24-bookworm` +- 
`org.opencontainers.image.base.digest=sha256:3a09aa6354567619221ef6c45a5051b671f953f0a1924d1f819ffb236e520e6b` - `org.opencontainers.image.source=https://github.com/openclaw/openclaw` - `org.opencontainers.image.url=https://openclaw.ai` - `org.opencontainers.image.documentation=https://docs.openclaw.ai/install/docker` @@ -408,7 +408,7 @@ To speed up rebuilds, order your Dockerfile so dependency layers are cached. This avoids re-running `pnpm install` unless lockfiles change: ```dockerfile -FROM node:22-bookworm +FROM node:24-bookworm # Install Bun (required for build scripts) RUN curl -fsSL https://bun.sh/install | bash diff --git a/docs/install/gcp.md b/docs/install/gcp.md index 2c6bdd8ac1f..7ff4a00d087 100644 --- a/docs/install/gcp.md +++ b/docs/install/gcp.md @@ -281,77 +281,20 @@ services: --- -## 10) Bake required binaries into the image (critical) +## 10) Shared Docker VM runtime steps -Installing binaries inside a running container is a trap. -Anything installed at runtime will be lost on restart. +Use the shared runtime guide for the common Docker host flow: -All external binaries required by skills must be installed at image build time. - -The examples below show three common binaries only: - -- `gog` for Gmail access -- `goplaces` for Google Places -- `wacli` for WhatsApp - -These are examples, not a complete list. -You may install as many binaries as needed using the same pattern. - -If you add new skills later that depend on additional binaries, you must: - -1. Update the Dockerfile -2. Rebuild the image -3. 
Restart the containers - -**Example Dockerfile** - -```dockerfile -FROM node:22-bookworm - -RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/* - -# Example binary 1: Gmail CLI -RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog - -# Example binary 2: Google Places CLI -RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces - -# Example binary 3: WhatsApp CLI -RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli - -# Add more binaries below using the same pattern - -WORKDIR /app -COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./ -COPY ui/package.json ./ui/package.json -COPY scripts ./scripts - -RUN corepack enable -RUN pnpm install --frozen-lockfile - -COPY . . -RUN pnpm build -RUN pnpm ui:install -RUN pnpm ui:build - -ENV NODE_ENV=production - -CMD ["node","dist/index.js"] -``` +- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image) +- [Build and launch](/install/docker-vm-runtime#build-and-launch) +- [What persists where](/install/docker-vm-runtime#what-persists-where) +- [Updates](/install/docker-vm-runtime#updates) --- -## 11) Build and launch +## 11) GCP-specific launch notes -```bash -docker compose build -docker compose up -d openclaw-gateway -``` - -If build fails with `Killed` / `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds. +On GCP, if build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds. 
When binding to LAN (`OPENCLAW_GATEWAY_BIND=lan`), configure a trusted browser origin before continuing: @@ -361,39 +304,7 @@ docker compose run --rm openclaw-cli config set gateway.controlUi.allowedOrigins If you changed the gateway port, replace `18789` with your configured port. -Verify binaries: - -```bash -docker compose exec openclaw-gateway which gog -docker compose exec openclaw-gateway which goplaces -docker compose exec openclaw-gateway which wacli -``` - -Expected output: - -``` -/usr/local/bin/gog -/usr/local/bin/goplaces -/usr/local/bin/wacli -``` - ---- - -## 12) Verify Gateway - -```bash -docker compose logs -f openclaw-gateway -``` - -Success: - -``` -[gateway] listening on ws://0.0.0.0:18789 -``` - ---- - -## 13) Access from your laptop +## 12) Access from your laptop Create an SSH tunnel to forward the Gateway port: @@ -420,38 +331,8 @@ docker compose run --rm openclaw-cli devices list docker compose run --rm openclaw-cli devices approve ``` ---- - -## What persists where (source of truth) - -OpenClaw runs in Docker, but Docker is not the source of truth. -All long-lived state must survive restarts, rebuilds, and reboots. 
- -| Component | Location | Persistence mechanism | Notes | -| ------------------- | --------------------------------- | ---------------------- | -------------------------------- | -| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens | -| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys | -| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state | -| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts | -| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login | -| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` | -| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time | -| Node runtime | Container filesystem | Docker image | Rebuilt every image build | -| OS packages | Container filesystem | Docker image | Do not install at runtime | -| Docker container | Ephemeral | Restartable | Safe to destroy | - ---- - -## Updates - -To update OpenClaw on the VM: - -```bash -cd ~/openclaw -git pull -docker compose build -docker compose up -d -``` +Need the shared persistence and update reference again? +See [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where) and [Docker VM Runtime updates](/install/docker-vm-runtime#updates). --- diff --git a/docs/install/hetzner.md b/docs/install/hetzner.md index 9baf90278b8..46bc76d6243 100644 --- a/docs/install/hetzner.md +++ b/docs/install/hetzner.md @@ -202,107 +202,20 @@ services: --- -## 7) Bake required binaries into the image (critical) +## 7) Shared Docker VM runtime steps -Installing binaries inside a running container is a trap. -Anything installed at runtime will be lost on restart. +Use the shared runtime guide for the common Docker host flow: -All external binaries required by skills must be installed at image build time. 
- -The examples below show three common binaries only: - -- `gog` for Gmail access -- `goplaces` for Google Places -- `wacli` for WhatsApp - -These are examples, not a complete list. -You may install as many binaries as needed using the same pattern. - -If you add new skills later that depend on additional binaries, you must: - -1. Update the Dockerfile -2. Rebuild the image -3. Restart the containers - -**Example Dockerfile** - -```dockerfile -FROM node:22-bookworm - -RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/* - -# Example binary 1: Gmail CLI -RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog - -# Example binary 2: Google Places CLI -RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces - -# Example binary 3: WhatsApp CLI -RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli - -# Add more binaries below using the same pattern - -WORKDIR /app -COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./ -COPY ui/package.json ./ui/package.json -COPY scripts ./scripts - -RUN corepack enable -RUN pnpm install --frozen-lockfile - -COPY . . 
-RUN pnpm build -RUN pnpm ui:install -RUN pnpm ui:build - -ENV NODE_ENV=production - -CMD ["node","dist/index.js"] -``` +- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image) +- [Build and launch](/install/docker-vm-runtime#build-and-launch) +- [What persists where](/install/docker-vm-runtime#what-persists-where) +- [Updates](/install/docker-vm-runtime#updates) --- -## 8) Build and launch +## 8) Hetzner-specific access -```bash -docker compose build -docker compose up -d openclaw-gateway -``` - -Verify binaries: - -```bash -docker compose exec openclaw-gateway which gog -docker compose exec openclaw-gateway which goplaces -docker compose exec openclaw-gateway which wacli -``` - -Expected output: - -``` -/usr/local/bin/gog -/usr/local/bin/goplaces -/usr/local/bin/wacli -``` - ---- - -## 9) Verify Gateway - -```bash -docker compose logs -f openclaw-gateway -``` - -Success: - -``` -[gateway] listening on ws://0.0.0.0:18789 -``` - -From your laptop: +After the shared build and launch steps, tunnel from your laptop: ```bash ssh -N -L 18789:127.0.0.1:18789 root@YOUR_VPS_IP @@ -316,25 +229,7 @@ Paste your gateway token. --- -## What persists where (source of truth) - -OpenClaw runs in Docker, but Docker is not the source of truth. -All long-lived state must survive restarts, rebuilds, and reboots. 
- -| Component | Location | Persistence mechanism | Notes | -| ------------------- | --------------------------------- | ---------------------- | -------------------------------- | -| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens | -| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys | -| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state | -| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts | -| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login | -| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` | -| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time | -| Node runtime | Container filesystem | Docker image | Rebuilt every image build | -| OS packages | Container filesystem | Docker image | Do not install at runtime | -| Docker container | Ephemeral | Restartable | Safe to destroy | - ---- +The shared persistence map lives in [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where). ## Infrastructure as Code (Terraform) diff --git a/docs/install/index.md b/docs/install/index.md index 285324ed6b7..d0f847838d0 100644 --- a/docs/install/index.md +++ b/docs/install/index.md @@ -13,7 +13,7 @@ Already followed [Getting Started](/start/getting-started)? 
You're all set — t ## System requirements -- **[Node 22+](/install/node)** (the [installer script](#install-methods) will install it if missing) +- **[Node 24 (recommended)](/install/node)** (Node 22 LTS, currently `22.16+`, is still supported for compatibility; the [installer script](#install-methods) will install Node 24 if missing) - macOS, Linux, or Windows - `pnpm` only if you build from source @@ -70,7 +70,7 @@ For VPS/cloud hosts, avoid third-party "1-click" marketplace images when possibl - If you already have Node 22+ and prefer to manage the install yourself: + If you already manage Node yourself, we recommend Node 24. OpenClaw still supports Node 22 LTS, currently `22.16+`, for compatibility: diff --git a/docs/install/installer.md b/docs/install/installer.md index 78334681ad4..6317e8e06cc 100644 --- a/docs/install/installer.md +++ b/docs/install/installer.md @@ -70,8 +70,8 @@ Recommended for most interactive installs on macOS/Linux/WSL. Supports macOS and Linux (including WSL). If macOS is detected, installs Homebrew if missing. - - Checks Node version and installs Node 22 if needed (Homebrew on macOS, NodeSource setup scripts on Linux apt/dnf/yum). + + Checks Node version and installs Node 24 if needed (Homebrew on macOS, NodeSource setup scripts on Linux apt/dnf/yum). OpenClaw still supports Node 22 LTS, currently `22.16+`, for compatibility. Installs Git if missing. @@ -175,7 +175,7 @@ Designed for environments where you want everything under a local prefix (defaul - Downloads Node tarball (default `22.22.0`) to `/tools/node-v` and verifies SHA-256. + Downloads a pinned supported Node tarball (currently default `22.22.0`) to `/tools/node-v` and verifies SHA-256. If Git is missing, attempts install via apt/dnf/yum on Linux or Homebrew on macOS. @@ -251,8 +251,8 @@ Designed for environments where you want everything under a local prefix (defaul Requires PowerShell 5+. - - If missing, attempts install via winget, then Chocolatey, then Scoop. 
+ + If missing, attempts install via winget, then Chocolatey, then Scoop. Node 22 LTS, currently `22.16+`, remains supported for compatibility. - `npm` method (default): global npm install using selected `-Tag` diff --git a/docs/install/kubernetes.md b/docs/install/kubernetes.md new file mode 100644 index 00000000000..577ff9d2df5 --- /dev/null +++ b/docs/install/kubernetes.md @@ -0,0 +1,191 @@ +--- +summary: "Deploy OpenClaw Gateway to a Kubernetes cluster with Kustomize" +read_when: + - You want to run OpenClaw on a Kubernetes cluster + - You want to test OpenClaw in a Kubernetes environment +title: "Kubernetes" +--- + +# OpenClaw on Kubernetes + +A minimal starting point for running OpenClaw on Kubernetes — not a production-ready deployment. It covers the core resources and is meant to be adapted to your environment. + +## Why not Helm? + +OpenClaw is a single container with some config files. The interesting customization is in agent content (markdown files, skills, config overrides), not infrastructure templating. Kustomize handles overlays without the overhead of a Helm chart. If your deployment grows more complex, a Helm chart can be layered on top of these manifests. + +## What you need + +- A running Kubernetes cluster (AKS, EKS, GKE, k3s, kind, OpenShift, etc.) +- `kubectl` connected to your cluster +- An API key for at least one model provider + +## Quick start + +```bash +# Replace with your provider: ANTHROPIC, GEMINI, OPENAI, or OPENROUTER +export _API_KEY="..." +./scripts/k8s/deploy.sh + +kubectl port-forward svc/openclaw 18789:18789 -n openclaw +open http://localhost:18789 +``` + +Retrieve the gateway token and paste it into the Control UI: + +```bash +kubectl get secret openclaw-secrets -n openclaw -o jsonpath='{.data.OPENCLAW_GATEWAY_TOKEN}' | base64 -d +``` + +For local debugging, `./scripts/k8s/deploy.sh --show-token` prints the token after deploy. 
+ +## Local testing with Kind + +If you don't have a cluster, create one locally with [Kind](https://kind.sigs.k8s.io/): + +```bash +./scripts/k8s/create-kind.sh # auto-detects docker or podman +./scripts/k8s/create-kind.sh --delete # tear down +``` + +Then deploy as usual with `./scripts/k8s/deploy.sh`. + +## Step by step + +### 1) Deploy + +**Option A** — API key in environment (one step): + +```bash +# Replace with your provider: ANTHROPIC, GEMINI, OPENAI, or OPENROUTER +export _API_KEY="..." +./scripts/k8s/deploy.sh +``` + +The script creates a Kubernetes Secret with the API key and an auto-generated gateway token, then deploys. If the Secret already exists, it preserves the current gateway token and any provider keys not being changed. + +**Option B** — create the secret separately: + +```bash +export _API_KEY="..." +./scripts/k8s/deploy.sh --create-secret +./scripts/k8s/deploy.sh +``` + +Use `--show-token` with either command if you want the token printed to stdout for local testing. + +### 2) Access the gateway + +```bash +kubectl port-forward svc/openclaw 18789:18789 -n openclaw +open http://localhost:18789 +``` + +## What gets deployed + +``` +Namespace: openclaw (configurable via OPENCLAW_NAMESPACE) +├── Deployment/openclaw # Single pod, init container + gateway +├── Service/openclaw # ClusterIP on port 18789 +├── PersistentVolumeClaim # 10Gi for agent state and config +├── ConfigMap/openclaw-config # openclaw.json + AGENTS.md +└── Secret/openclaw-secrets # Gateway token + API keys +``` + +## Customization + +### Agent instructions + +Edit the `AGENTS.md` in `scripts/k8s/manifests/configmap.yaml` and redeploy: + +```bash +./scripts/k8s/deploy.sh +``` + +### Gateway config + +Edit `openclaw.json` in `scripts/k8s/manifests/configmap.yaml`. See [Gateway configuration](/gateway/configuration) for the full reference. + +### Add providers + +Re-run with additional keys exported: + +```bash +export ANTHROPIC_API_KEY="..." +export OPENAI_API_KEY="..." 
+./scripts/k8s/deploy.sh --create-secret
+./scripts/k8s/deploy.sh
+```
+
+Existing provider keys stay in the Secret unless you overwrite them.
+
+Or patch the Secret directly:
+
+```bash
+kubectl patch secret openclaw-secrets -n openclaw \
+  -p '{"stringData":{"<PROVIDER>_API_KEY":"..."}}'
+kubectl rollout restart deployment/openclaw -n openclaw
+```
+
+### Custom namespace
+
+```bash
+OPENCLAW_NAMESPACE=my-namespace ./scripts/k8s/deploy.sh
+```
+
+### Custom image
+
+Edit the `image` field in `scripts/k8s/manifests/deployment.yaml`:
+
+```yaml
+image: ghcr.io/openclaw/openclaw:2026.3.1
+```
+
+### Expose beyond port-forward
+
+The default manifests bind the gateway to loopback inside the pod. That works with `kubectl port-forward`, but it does not work with a Kubernetes `Service` or Ingress path that needs to reach the pod IP.
+
+If you want to expose the gateway through an Ingress or load balancer:
+
+- Change the gateway bind in `scripts/k8s/manifests/configmap.yaml` from `loopback` to a non-loopback bind that matches your deployment model
+- Keep gateway auth enabled and use a proper TLS-terminated entrypoint
+- Configure the Control UI for remote access using the supported web security model (for example HTTPS/Tailscale Serve and explicit allowed origins when needed)
+
+## Re-deploy
+
+```bash
+./scripts/k8s/deploy.sh
+```
+
+This applies all manifests and restarts the pod to pick up any config or secret changes.
+
+## Teardown
+
+```bash
+./scripts/k8s/deploy.sh --delete
+```
+
+This deletes the namespace and all resources in it, including the PVC.
+ +## Architecture notes + +- The gateway binds to loopback inside the pod by default, so the included setup is for `kubectl port-forward` +- No cluster-scoped resources — everything lives in a single namespace +- Security: `readOnlyRootFilesystem`, `drop: ALL` capabilities, non-root user (UID 1000) +- The default config keeps the Control UI on the safer local-access path: loopback bind plus `kubectl port-forward` to `http://127.0.0.1:18789` +- If you move beyond localhost access, use the supported remote model: HTTPS/Tailscale plus the appropriate gateway bind and Control UI origin settings +- Secrets are generated in a temp directory and applied directly to the cluster — no secret material is written to the repo checkout + +## File structure + +``` +scripts/k8s/ +├── deploy.sh # Creates namespace + secret, deploys via kustomize +├── create-kind.sh # Local Kind cluster (auto-detects docker/podman) +└── manifests/ + ├── kustomization.yaml # Kustomize base + ├── configmap.yaml # openclaw.json + AGENTS.md + ├── deployment.yaml # Pod spec with security hardening + ├── pvc.yaml # 10Gi persistent storage + └── service.yaml # ClusterIP on 18789 +``` diff --git a/docs/install/node.md b/docs/install/node.md index 8c57fde4f72..9cf2f59ec77 100644 --- a/docs/install/node.md +++ b/docs/install/node.md @@ -9,7 +9,7 @@ read_when: # Node.js -OpenClaw requires **Node 22 or newer**. The [installer script](/install#install-methods) will detect and install Node automatically — this page is for when you want to set up Node yourself and make sure everything is wired up correctly (versions, PATH, global installs). +OpenClaw requires **Node 22.16 or newer**. **Node 24 is the default and recommended runtime** for installs, CI, and release workflows. Node 22 remains supported via the active LTS line. 
The [installer script](/install#install-methods) will detect and install Node automatically — this page is for when you want to set up Node yourself and make sure everything is wired up correctly (versions, PATH, global installs). ## Check your version @@ -17,7 +17,7 @@ OpenClaw requires **Node 22 or newer**. The [installer script](/install#install- node -v ``` -If this prints `v22.x.x` or higher, you're good. If Node isn't installed or the version is too old, pick an install method below. +If this prints `v24.x.x` or higher, you're on the recommended default. If it prints `v22.16.x` or higher, you're on the supported Node 22 LTS path, but we still recommend upgrading to Node 24 when convenient. If Node isn't installed or the version is too old, pick an install method below. ## Install Node @@ -36,7 +36,7 @@ If this prints `v22.x.x` or higher, you're good. If Node isn't installed or the **Ubuntu / Debian:** ```bash - curl -fsSL https://deb.nodesource.com/setup_22.x | sudo -E bash - + curl -fsSL https://deb.nodesource.com/setup_24.x | sudo -E bash - sudo apt-get install -y nodejs ``` @@ -77,8 +77,8 @@ If this prints `v22.x.x` or higher, you're good. If Node isn't installed or the Example with fnm: ```bash -fnm install 22 -fnm use 22 +fnm install 24 +fnm use 24 ``` diff --git a/docs/nodes/index.md b/docs/nodes/index.md index 1b9b2bfaea2..7c087162c46 100644 --- a/docs/nodes/index.md +++ b/docs/nodes/index.md @@ -54,6 +54,15 @@ forwards `exec` calls to the **node host** when `host=node` is selected. - **Node host**: executes `system.run`/`system.which` on the node machine. - **Approvals**: enforced on the node host via `~/.openclaw/exec-approvals.json`. +Approval note: + +- Approval-backed node runs bind exact request context. +- For direct shell/runtime file executions, OpenClaw also best-effort binds one concrete local + file operand and denies the run if that file changes before execution. 
+- If OpenClaw cannot identify exactly one concrete local file for an interpreter/runtime command, + approval-backed execution is denied instead of pretending full runtime coverage. Use sandboxing, + separate hosts, or an explicit trusted allowlist/full workflow for broader interpreter semantics. + ### Start a node host (foreground) On the node machine: @@ -83,7 +92,10 @@ Notes: - `openclaw node run` supports token or password auth. - Env vars are preferred: `OPENCLAW_GATEWAY_TOKEN` / `OPENCLAW_GATEWAY_PASSWORD`. -- Config fallback is `gateway.auth.token` / `gateway.auth.password`; in remote mode, `gateway.remote.token` / `gateway.remote.password` are also eligible. +- Config fallback is `gateway.auth.token` / `gateway.auth.password`. +- In local mode, node host intentionally ignores `gateway.remote.token` / `gateway.remote.password`. +- In remote mode, `gateway.remote.token` / `gateway.remote.password` are eligible per remote precedence rules. +- If active local `gateway.auth.*` SecretRefs are configured but unresolved, node-host auth fails closed. - Legacy `CLAWDBOT_GATEWAY_*` env vars are intentionally ignored by node-host auth resolution. ### Start a node host (service) diff --git a/docs/perplexity.md b/docs/perplexity.md index bb1acef49c8..f7eccc9453e 100644 --- a/docs/perplexity.md +++ b/docs/perplexity.md @@ -71,11 +71,14 @@ Optional legacy controls: **Via config:** run `openclaw configure --section web`. It stores the key in `~/.openclaw/openclaw.json` under `tools.web.search.perplexity.apiKey`. +That field also accepts SecretRef objects. **Via environment:** set `PERPLEXITY_API_KEY` or `OPENROUTER_API_KEY` in the Gateway process environment. For a gateway install, put it in `~/.openclaw/.env` (or your service environment). See [Env vars](/help/faq#how-does-openclaw-load-environment-variables). +If `provider: "perplexity"` is configured and the Perplexity key SecretRef is unresolved with no env fallback, startup/reload fails fast. 
+ ## Tool parameters These parameters apply to the native Perplexity Search API path. diff --git a/docs/platforms/android.md b/docs/platforms/android.md index 4df71b83e73..6bd5effb361 100644 --- a/docs/platforms/android.md +++ b/docs/platforms/android.md @@ -9,6 +9,8 @@ title: "Android App" # Android App (Node) +> **Note:** The Android app has not been publicly released yet. The source code is available in the [OpenClaw repository](https://github.com/openclaw/openclaw) under `apps/android`. You can build it yourself using Java 17 and the Android SDK (`./gradlew :app:assembleDebug`). See [apps/android/README.md](https://github.com/openclaw/openclaw/blob/main/apps/android/README.md) for build instructions. + ## Support snapshot - Role: companion node app (Android does not host the Gateway). diff --git a/docs/platforms/digitalocean.md b/docs/platforms/digitalocean.md index bddc63b9d1f..cd05587ae76 100644 --- a/docs/platforms/digitalocean.md +++ b/docs/platforms/digitalocean.md @@ -66,8 +66,8 @@ ssh root@YOUR_DROPLET_IP # Update system apt update && apt upgrade -y -# Install Node.js 22 -curl -fsSL https://deb.nodesource.com/setup_22.x | bash - +# Install Node.js 24 +curl -fsSL https://deb.nodesource.com/setup_24.x | bash - apt install -y nodejs # Install OpenClaw diff --git a/docs/platforms/ios.md b/docs/platforms/ios.md index 0a2eb5abae5..f64eba3fed0 100644 --- a/docs/platforms/ios.md +++ b/docs/platforms/ios.md @@ -49,6 +49,114 @@ openclaw nodes status openclaw gateway call node.list --params "{}" ``` +## Relay-backed push for official builds + +Official distributed iOS builds use the external push relay instead of publishing the raw APNs +token to the gateway. + +Gateway-side requirement: + +```json5 +{ + gateway: { + push: { + apns: { + relay: { + baseUrl: "https://relay.example.com", + }, + }, + }, + }, +} +``` + +How the flow works: + +- The iOS app registers with the relay using App Attest and the app receipt. 
+- The relay returns an opaque relay handle plus a registration-scoped send grant. +- The iOS app fetches the paired gateway identity and includes it in relay registration, so the relay-backed registration is delegated to that specific gateway. +- The app forwards that relay-backed registration to the paired gateway with `push.apns.register`. +- The gateway uses that stored relay handle for `push.test`, background wakes, and wake nudges. +- The gateway relay base URL must match the relay URL baked into the official/TestFlight iOS build. +- If the app later connects to a different gateway or a build with a different relay base URL, it refreshes the relay registration instead of reusing the old binding. + +What the gateway does **not** need for this path: + +- No deployment-wide relay token. +- No direct APNs key for official/TestFlight relay-backed sends. + +Expected operator flow: + +1. Install the official/TestFlight iOS build. +2. Set `gateway.push.apns.relay.baseUrl` on the gateway. +3. Pair the app to the gateway and let it finish connecting. +4. The app publishes `push.apns.register` automatically after it has an APNs token, the operator session is connected, and relay registration succeeds. +5. After that, `push.test`, reconnect wakes, and wake nudges can use the stored relay-backed registration. + +Compatibility note: + +- `OPENCLAW_APNS_RELAY_BASE_URL` still works as a temporary env override for the gateway. + +## Authentication and trust flow + +The relay exists to enforce two constraints that direct APNs-on-gateway cannot provide for +official iOS builds: + +- Only genuine OpenClaw iOS builds distributed through Apple can use the hosted relay. +- A gateway can send relay-backed pushes only for iOS devices that paired with that specific + gateway. + +Hop by hop: + +1. `iOS app -> gateway` + - The app first pairs with the gateway through the normal Gateway auth flow. 
+ - That gives the app an authenticated node session plus an authenticated operator session. + - The operator session is used to call `gateway.identity.get`. + +2. `iOS app -> relay` + - The app calls the relay registration endpoints over HTTPS. + - Registration includes App Attest proof plus the app receipt. + - The relay validates the bundle ID, App Attest proof, and Apple receipt, and requires the + official/production distribution path. + - This is what blocks local Xcode/dev builds from using the hosted relay. A local build may be + signed, but it does not satisfy the official Apple distribution proof the relay expects. + +3. `gateway identity delegation` + - Before relay registration, the app fetches the paired gateway identity from + `gateway.identity.get`. + - The app includes that gateway identity in the relay registration payload. + - The relay returns a relay handle and a registration-scoped send grant that are delegated to + that gateway identity. + +4. `gateway -> relay` + - The gateway stores the relay handle and send grant from `push.apns.register`. + - On `push.test`, reconnect wakes, and wake nudges, the gateway signs the send request with its + own device identity. + - The relay verifies both the stored send grant and the gateway signature against the delegated + gateway identity from registration. + - Another gateway cannot reuse that stored registration, even if it somehow obtains the handle. + +5. `relay -> APNs` + - The relay owns the production APNs credentials and the raw APNs token for the official build. + - The gateway never stores the raw APNs token for relay-backed official builds. + - The relay sends the final push to APNs on behalf of the paired gateway. + +Why this design was created: + +- To keep production APNs credentials out of user gateways. +- To avoid storing raw official-build APNs tokens on the gateway. +- To allow hosted relay usage only for official/TestFlight OpenClaw builds. 
+- To prevent one gateway from sending wake pushes to iOS devices owned by a different gateway. + +Local/manual builds remain on direct APNs. If you are testing those builds without the relay, the +gateway still needs direct APNs credentials: + +```bash +export OPENCLAW_APNS_TEAM_ID="TEAMID" +export OPENCLAW_APNS_KEY_ID="KEYID" +export OPENCLAW_APNS_PRIVATE_KEY_P8="$(cat /path/to/AuthKey_KEYID.p8)" +``` + ## Discovery paths ### Bonjour (LAN) diff --git a/docs/platforms/linux.md b/docs/platforms/linux.md index 0cce3a54e75..c03dba6f795 100644 --- a/docs/platforms/linux.md +++ b/docs/platforms/linux.md @@ -15,7 +15,7 @@ Native Linux companion apps are planned. Contributions are welcome if you want t ## Beginner quick path (VPS) -1. Install Node 22+ +1. Install Node 24 (recommended; Node 22 LTS, currently `22.16+`, still works for compatibility) 2. `npm i -g openclaw@latest` 3. `openclaw onboard --install-daemon` 4. From your laptop: `ssh -N -L 18789:127.0.0.1:18789 @` diff --git a/docs/platforms/mac/bundled-gateway.md b/docs/platforms/mac/bundled-gateway.md index 6cb878015fb..e6e57cc1809 100644 --- a/docs/platforms/mac/bundled-gateway.md +++ b/docs/platforms/mac/bundled-gateway.md @@ -16,7 +16,7 @@ running (or attaches to an existing local Gateway if one is already running). ## Install the CLI (required for local mode) -You need Node 22+ on the Mac, then install `openclaw` globally: +Node 24 is the default runtime on the Mac. Node 22 LTS, currently `22.16+`, still works for compatibility. Then install `openclaw` globally: ```bash npm install -g openclaw@ diff --git a/docs/platforms/mac/dev-setup.md b/docs/platforms/mac/dev-setup.md index e50a850086a..982f687049c 100644 --- a/docs/platforms/mac/dev-setup.md +++ b/docs/platforms/mac/dev-setup.md @@ -14,7 +14,7 @@ This guide covers the necessary steps to build and run the OpenClaw macOS applic Before building the app, ensure you have the following installed: 1. **Xcode 26.2+**: Required for Swift development. -2. 
**Node.js 22+ & pnpm**: Required for the gateway, CLI, and packaging scripts. +2. **Node.js 24 & pnpm**: Recommended for the gateway, CLI, and packaging scripts. Node 22 LTS, currently `22.16+`, remains supported for compatibility. ## 1. Install Dependencies diff --git a/docs/platforms/mac/release.md b/docs/platforms/mac/release.md index 180a52075ed..5276d46848e 100644 --- a/docs/platforms/mac/release.md +++ b/docs/platforms/mac/release.md @@ -39,7 +39,7 @@ Notes: # Default is auto-derived from APP_VERSION when omitted. SKIP_NOTARIZE=1 \ BUNDLE_ID=ai.openclaw.mac \ -APP_VERSION=2026.3.9 \ +APP_VERSION=2026.3.13 \ BUILD_CONFIG=release \ SIGN_IDENTITY="Developer ID Application: ()" \ scripts/package-mac-dist.sh @@ -47,10 +47,10 @@ scripts/package-mac-dist.sh # `package-mac-dist.sh` already creates the zip + DMG. # If you used `package-mac-app.sh` directly instead, create them manually: # If you want notarization/stapling in this step, use the NOTARIZE command below. -ditto -c -k --sequesterRsrc --keepParent dist/OpenClaw.app dist/OpenClaw-2026.3.9.zip +ditto -c -k --sequesterRsrc --keepParent dist/OpenClaw.app dist/OpenClaw-2026.3.13.zip # Optional: build a styled DMG for humans (drag to /Applications) -scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.3.9.dmg +scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.3.13.dmg # Recommended: build + notarize/staple zip + DMG # First, create a keychain profile once: @@ -58,13 +58,13 @@ scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.3.9.dmg # --apple-id "" --team-id "" --password "" NOTARIZE=1 NOTARYTOOL_PROFILE=openclaw-notary \ BUNDLE_ID=ai.openclaw.mac \ -APP_VERSION=2026.3.9 \ +APP_VERSION=2026.3.13 \ BUILD_CONFIG=release \ SIGN_IDENTITY="Developer ID Application: ()" \ scripts/package-mac-dist.sh # Optional: ship dSYM alongside the release -ditto -c -k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenClaw-2026.3.9.dSYM.zip +ditto -c -k --keepParent 
apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenClaw-2026.3.13.dSYM.zip ``` ## Appcast entry @@ -72,7 +72,7 @@ ditto -c -k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenCl Use the release note generator so Sparkle renders formatted HTML notes: ```bash -SPARKLE_PRIVATE_KEY_FILE=/path/to/ed25519-private-key scripts/make_appcast.sh dist/OpenClaw-2026.3.9.zip https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml +SPARKLE_PRIVATE_KEY_FILE=/path/to/ed25519-private-key scripts/make_appcast.sh dist/OpenClaw-2026.3.13.zip https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml ``` Generates HTML release notes from `CHANGELOG.md` (via [`scripts/changelog-to-html.sh`](https://github.com/openclaw/openclaw/blob/main/scripts/changelog-to-html.sh)) and embeds them in the appcast entry. @@ -80,7 +80,7 @@ Commit the updated `appcast.xml` alongside the release assets (zip + dSYM) when ## Publish & verify -- Upload `OpenClaw-2026.3.9.zip` (and `OpenClaw-2026.3.9.dSYM.zip`) to the GitHub release for tag `v2026.3.9`. +- Upload `OpenClaw-2026.3.13.zip` (and `OpenClaw-2026.3.13.dSYM.zip`) to the GitHub release for tag `v2026.3.13`. - Ensure the raw appcast URL matches the baked feed: `https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml`. - Sanity checks: - `curl -I https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml` returns 200. diff --git a/docs/platforms/mac/signing.md b/docs/platforms/mac/signing.md index 9927ca5f82b..0feac8cd281 100644 --- a/docs/platforms/mac/signing.md +++ b/docs/platforms/mac/signing.md @@ -14,7 +14,7 @@ This app is usually built from [`scripts/package-mac-app.sh`](https://github.com - calls [`scripts/codesign-mac-app.sh`](https://github.com/openclaw/openclaw/blob/main/scripts/codesign-mac-app.sh) to sign the main binary and app bundle so macOS treats each rebuild as the same signed bundle and keeps TCC permissions (notifications, accessibility, screen recording, mic, speech). 
For stable permissions, use a real signing identity; ad-hoc is opt-in and fragile (see [macOS permissions](/platforms/mac/permissions)). - uses `CODESIGN_TIMESTAMP=auto` by default; it enables trusted timestamps for Developer ID signatures. Set `CODESIGN_TIMESTAMP=off` to skip timestamping (offline debug builds). - inject build metadata into Info.plist: `OpenClawBuildTimestamp` (UTC) and `OpenClawGitCommit` (short hash) so the About pane can show build, git, and debug/release channel. -- **Packaging requires Node 22+**: the script runs TS builds and the Control UI build. +- **Packaging defaults to Node 24**: the script runs TS builds and the Control UI build. Node 22 LTS, currently `22.16+`, remains supported for compatibility. - reads `SIGN_IDENTITY` from the environment. Add `export SIGN_IDENTITY="Apple Development: Your Name (TEAMID)"` (or your Developer ID Application cert) to your shell rc to always sign with your cert. Ad-hoc signing requires explicit opt-in via `ALLOW_ADHOC_SIGNING=1` or `SIGN_IDENTITY="-"` (not recommended for permission testing). - runs a Team ID audit after signing and fails if any Mach-O inside the app bundle is signed by a different Team ID. Set `SKIP_TEAM_ID_CHECK=1` to bypass. 
diff --git a/docs/platforms/raspberry-pi.md b/docs/platforms/raspberry-pi.md index e46076e869d..5e7e35c9544 100644 --- a/docs/platforms/raspberry-pi.md +++ b/docs/platforms/raspberry-pi.md @@ -76,15 +76,15 @@ sudo apt install -y git curl build-essential sudo timedatectl set-timezone America/Chicago # Change to your timezone ``` -## 4) Install Node.js 22 (ARM64) +## 4) Install Node.js 24 (ARM64) ```bash # Install Node.js via NodeSource -curl -fsSL https://deb.nodesource.com/setup_22.x | sudo -E bash - +curl -fsSL https://deb.nodesource.com/setup_24.x | sudo -E bash - sudo apt install -y nodejs # Verify -node --version # Should show v22.x.x +node --version # Should show v24.x.x npm --version ``` @@ -153,30 +153,33 @@ sudo systemctl status openclaw journalctl -u openclaw -f ``` -## 9) Access the Dashboard +## 9) Access the OpenClaw Dashboard -Since the Pi is headless, use an SSH tunnel: +Replace `user@gateway-host` with your Pi username and hostname or IP address. + +On your computer, ask the Pi to print a fresh dashboard URL: ```bash -# From your laptop/desktop -ssh -L 18789:localhost:18789 user@gateway-host - -# Then open in browser -open http://localhost:18789 +ssh user@gateway-host 'openclaw dashboard --no-open' ``` -Or use Tailscale for always-on access: +The command prints `Dashboard URL:`. Depending on how `gateway.auth.token` +is configured, the URL may be a plain `http://127.0.0.1:18789/` link or one +that includes `#token=...`. + +In another terminal on your computer, create the SSH tunnel: ```bash -# On the Pi -curl -fsSL https://tailscale.com/install.sh | sh -sudo tailscale up - -# Update config -openclaw config set gateway.bind tailnet -sudo systemctl restart openclaw +ssh -N -L 18789:127.0.0.1:18789 user@gateway-host ``` +Then open the printed Dashboard URL in your local browser. + +If the UI asks for auth, paste the token from `gateway.auth.token` +(or `OPENCLAW_GATEWAY_TOKEN`) into Control UI settings. 
+ +For always-on remote access, see [Tailscale](/gateway/tailscale). + --- ## Performance Optimizations diff --git a/docs/platforms/windows.md b/docs/platforms/windows.md index 3ab668ea01e..e40d798604d 100644 --- a/docs/platforms/windows.md +++ b/docs/platforms/windows.md @@ -22,6 +22,44 @@ Native Windows companion apps are planned. - [Install & updates](/install/updating) - Official WSL2 guide (Microsoft): [https://learn.microsoft.com/windows/wsl/install](https://learn.microsoft.com/windows/wsl/install) +## Native Windows status + +Native Windows CLI flows are improving, but WSL2 is still the recommended path. + +What works well on native Windows today: + +- website installer via `install.ps1` +- local CLI use such as `openclaw --version`, `openclaw doctor`, and `openclaw plugins list --json` +- embedded local-agent/provider smoke such as: + +```powershell +openclaw agent --local --agent main --thinking low -m "Reply with exactly WINDOWS-HATCH-OK." +``` + +Current caveats: + +- `openclaw onboard --non-interactive` still expects a reachable local gateway unless you pass `--skip-health` +- `openclaw onboard --non-interactive --install-daemon` and `openclaw gateway install` try Windows Scheduled Tasks first +- if Scheduled Task creation is denied, OpenClaw falls back to a per-user Startup-folder login item and starts the gateway immediately +- if `schtasks` itself wedges or stops responding, OpenClaw now aborts that path quickly and falls back instead of hanging forever +- Scheduled Tasks are still preferred when available because they provide better supervisor status + +If you want the native CLI only, without gateway service install, use one of these: + +```powershell +openclaw onboard --non-interactive --skip-health +openclaw gateway run +``` + +If you do want managed startup on native Windows: + +```powershell +openclaw gateway install +openclaw gateway status --json +``` + +If Scheduled Task creation is blocked, the fallback service mode still auto-starts after 
login through the current user's Startup folder. + ## Gateway - [Gateway runbook](/gateway) diff --git a/docs/plugins/voice-call.md b/docs/plugins/voice-call.md index 17263ca0509..14198fdba36 100644 --- a/docs/plugins/voice-call.md +++ b/docs/plugins/voice-call.md @@ -296,6 +296,12 @@ Inbound policy defaults to `disabled`. To enable inbound calls, set: } ``` +`inboundPolicy: "allowlist"` is a low-assurance caller-ID screen. The plugin +normalizes the provider-supplied `From` value and compares it to `allowFrom`. +Webhook verification authenticates provider delivery and payload integrity, but +it does not prove PSTN/VoIP caller-number ownership. Treat `allowFrom` as +caller-ID filtering, not strong caller identity. + Auto-responses use the agent system. Tune with: - `responseModel` diff --git a/docs/providers/anthropic.md b/docs/providers/anthropic.md index de974315273..8974bb2dd61 100644 --- a/docs/providers/anthropic.md +++ b/docs/providers/anthropic.md @@ -44,6 +44,34 @@ openclaw onboard --anthropic-api-key "$ANTHROPIC_API_KEY" - [Adaptive thinking](https://platform.claude.com/docs/en/build-with-claude/adaptive-thinking) - [Extended thinking](https://platform.claude.com/docs/en/build-with-claude/extended-thinking) +## Fast mode (Anthropic API) + +OpenClaw's shared `/fast` toggle also supports direct Anthropic API-key traffic. + +- `/fast on` maps to `service_tier: "auto"` +- `/fast off` maps to `service_tier: "standard_only"` +- Config default: + +```json5 +{ + agents: { + defaults: { + models: { + "anthropic/claude-sonnet-4-5": { + params: { fastMode: true }, + }, + }, + }, + }, +} +``` + +Important limits: + +- This is **API-key only**. Anthropic setup-token / OAuth auth does not honor OpenClaw fast-mode tier injection. +- OpenClaw only injects Anthropic service tiers for direct `api.anthropic.com` requests. If you route `anthropic/*` through a proxy or gateway, `/fast` leaves `service_tier` untouched. 
+- Anthropic reports the effective tier on the response under `usage.service_tier`. On accounts without Priority Tier capacity, `service_tier: "auto"` may still resolve to `standard`. + ## Prompt caching (Anthropic API) OpenClaw supports Anthropic's prompt caching feature. This is **API-only**; subscription auth does not honor cache settings. diff --git a/docs/providers/index.md b/docs/providers/index.md index a4587213832..f68cd0e0b53 100644 --- a/docs/providers/index.md +++ b/docs/providers/index.md @@ -37,9 +37,9 @@ Looking for chat channel docs (WhatsApp/Telegram/Discord/Slack/Mattermost (plugi - [Mistral](/providers/mistral) - [Moonshot AI (Kimi + Kimi Coding)](/providers/moonshot) - [NVIDIA](/providers/nvidia) -- [Ollama (local models)](/providers/ollama) +- [Ollama (cloud + local models)](/providers/ollama) - [OpenAI (API + Codex)](/providers/openai) -- [OpenCode Zen](/providers/opencode) +- [OpenCode (Zen + Go)](/providers/opencode) - [OpenRouter](/providers/openrouter) - [Qianfan](/providers/qianfan) - [Qwen (OAuth)](/providers/qwen) diff --git a/docs/providers/minimax.md b/docs/providers/minimax.md index f060c637de8..8cdc5b028f6 100644 --- a/docs/providers/minimax.md +++ b/docs/providers/minimax.md @@ -151,7 +151,7 @@ Configure manually via `openclaw.json`: { id: "minimax-m2.5-gs32", name: "MiniMax M2.5 GS32", - reasoning: false, + reasoning: true, input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, contextWindow: 196608, diff --git a/docs/providers/models.md b/docs/providers/models.md index 7da741f4077..a117d286051 100644 --- a/docs/providers/models.md +++ b/docs/providers/models.md @@ -32,7 +32,7 @@ model as `provider/model`. 
- [Moonshot AI (Kimi + Kimi Coding)](/providers/moonshot) - [Mistral](/providers/mistral) - [Synthetic](/providers/synthetic) -- [OpenCode Zen](/providers/opencode) +- [OpenCode (Zen + Go)](/providers/opencode) - [Z.AI](/providers/zai) - [GLM models](/providers/glm) - [MiniMax](/providers/minimax) diff --git a/docs/providers/ollama.md b/docs/providers/ollama.md index b82f6411b68..c4604a8e350 100644 --- a/docs/providers/ollama.md +++ b/docs/providers/ollama.md @@ -1,14 +1,14 @@ --- -summary: "Run OpenClaw with Ollama (local LLM runtime)" +summary: "Run OpenClaw with Ollama (cloud and local models)" read_when: - - You want to run OpenClaw with local models via Ollama + - You want to run OpenClaw with cloud or local models via Ollama - You need Ollama setup and configuration guidance title: "Ollama" --- # Ollama -Ollama is a local LLM runtime that makes it easy to run open-source models on your machine. OpenClaw integrates with Ollama's native API (`/api/chat`), supporting streaming and tool calling, and can **auto-discover tool-capable models** when you opt in with `OLLAMA_API_KEY` (or an auth profile) and do not define an explicit `models.providers.ollama` entry. +Ollama is a local LLM runtime that makes it easy to run open-source models on your machine. OpenClaw integrates with Ollama's native API (`/api/chat`), supports streaming and tool calling, and can auto-discover local Ollama models when you opt in with `OLLAMA_API_KEY` (or an auth profile) and do not define an explicit `models.providers.ollama` entry. **Remote Ollama users**: Do not use the `/v1` OpenAI-compatible URL (`http://host:11434/v1`) with OpenClaw. This breaks tool calling and models may output raw tool JSON as plain text. Use the native Ollama API URL instead: `baseUrl: "http://host:11434"` (no `/v1`). @@ -16,21 +16,76 @@ Ollama is a local LLM runtime that makes it easy to run open-source models on yo ## Quick start -1. 
Install Ollama: [https://ollama.ai](https://ollama.ai) +### Onboarding wizard (recommended) -2. Pull a model: +The fastest way to set up Ollama is through the onboarding wizard: ```bash +openclaw onboard +``` + +Select **Ollama** from the provider list. The wizard will: + +1. Ask for the Ollama base URL where your instance can be reached (default `http://127.0.0.1:11434`). +2. Let you choose **Cloud + Local** (cloud models and local models) or **Local** (local models only). +3. Open a browser sign-in flow if you choose **Cloud + Local** and are not signed in to ollama.com. +4. Discover available models and suggest defaults. +5. Auto-pull the selected model if it is not available locally. + +Non-interactive mode is also supported: + +```bash +openclaw onboard --non-interactive \ + --auth-choice ollama \ + --accept-risk +``` + +Optionally specify a custom base URL or model: + +```bash +openclaw onboard --non-interactive \ + --auth-choice ollama \ + --custom-base-url "http://ollama-host:11434" \ + --custom-model-id "qwen3.5:27b" \ + --accept-risk +``` + +### Manual setup + +1. Install Ollama: [https://ollama.com/download](https://ollama.com/download) + +2. Pull a local model if you want local inference: + +```bash +ollama pull glm-4.7-flash +# or ollama pull gpt-oss:20b # or ollama pull llama3.3 -# or -ollama pull qwen2.5-coder:32b -# or -ollama pull deepseek-r1:32b ``` -3. Enable Ollama for OpenClaw (any value works; Ollama doesn't require a real key): +3. If you want cloud models too, sign in: + +```bash +ollama signin +``` + +4. Run onboarding and choose `Ollama`: + +```bash +openclaw onboard +``` + +- `Local`: local models only +- `Cloud + Local`: local models plus cloud models +- Cloud models such as `kimi-k2.5:cloud`, `minimax-m2.5:cloud`, and `glm-5:cloud` do **not** require a local `ollama pull` + +OpenClaw currently suggests: + +- local default: `glm-4.7-flash` +- cloud defaults: `kimi-k2.5:cloud`, `minimax-m2.5:cloud`, `glm-5:cloud` + +5. 
If you prefer manual setup, enable Ollama for OpenClaw directly (any value works; Ollama doesn't require a real key): ```bash # Set environment variable @@ -40,13 +95,20 @@ export OLLAMA_API_KEY="ollama-local" openclaw config set models.providers.ollama.apiKey "ollama-local" ``` -4. Use Ollama models: +6. Inspect or switch models: + +```bash +openclaw models list +openclaw models set ollama/glm-4.7-flash +``` + +7. Or set the default in config: ```json5 { agents: { defaults: { - model: { primary: "ollama/gpt-oss:20b" }, + model: { primary: "ollama/glm-4.7-flash" }, }, }, } @@ -56,14 +118,13 @@ openclaw config set models.providers.ollama.apiKey "ollama-local" When you set `OLLAMA_API_KEY` (or an auth profile) and **do not** define `models.providers.ollama`, OpenClaw discovers models from the local Ollama instance at `http://127.0.0.1:11434`: -- Queries `/api/tags` and `/api/show` -- Keeps only models that report `tools` capability -- Marks `reasoning` when the model reports `thinking` -- Reads `contextWindow` from `model_info[".context_length"]` when available -- Sets `maxTokens` to 10× the context window +- Queries `/api/tags` +- Uses best-effort `/api/show` lookups to read `contextWindow` when available +- Marks `reasoning` with a model-name heuristic (`r1`, `reasoning`, `think`) +- Sets `maxTokens` to the default Ollama max-token cap used by OpenClaw - Sets all costs to `0` -This avoids manual model entries while keeping the catalog aligned with Ollama's capabilities. +This avoids manual model entries while keeping the catalog aligned with the local Ollama instance. To see what models are available: @@ -98,7 +159,7 @@ Use explicit config when: - Ollama runs on another host/port. - You want to force specific context windows or model lists. -- You want to include models that do not report tool support. +- You want fully manual model definitions. 
```json5 { @@ -166,11 +227,19 @@ Once configured, all your Ollama models are available: } ``` +## Cloud models + +Cloud models let you run cloud-hosted models (for example `kimi-k2.5:cloud`, `minimax-m2.5:cloud`, `glm-5:cloud`) alongside your local models. + +To use cloud models, select **Cloud + Local** mode during onboarding. The wizard checks whether you are signed in and opens a browser sign-in flow when needed. If authentication cannot be verified, the wizard falls back to local model defaults. + +You can also sign in directly at [ollama.com/signin](https://ollama.com/signin). + ## Advanced ### Reasoning models -OpenClaw marks models as reasoning-capable when Ollama reports `thinking` in `/api/show`: +OpenClaw treats models with names such as `deepseek-r1`, `reasoning`, or `think` as reasoning-capable by default: ```bash ollama pull deepseek-r1:32b @@ -230,7 +299,7 @@ When `api: "openai-completions"` is used with Ollama, OpenClaw injects `options. ### Context windows -For auto-discovered models, OpenClaw uses the context window reported by Ollama when available, otherwise it defaults to `8192`. You can override `contextWindow` and `maxTokens` in explicit provider config. +For auto-discovered models, OpenClaw uses the context window reported by Ollama when available, otherwise it falls back to the default Ollama context window used by OpenClaw. You can override `contextWindow` and `maxTokens` in explicit provider config. ## Troubleshooting @@ -250,16 +319,17 @@ curl http://localhost:11434/api/tags ### No models available -OpenClaw only auto-discovers models that report tool support. If your model isn't listed, either: +If your model is not listed, either: -- Pull a tool-capable model, or +- Pull the model locally, or - Define the model explicitly in `models.providers.ollama`. 
 To add models:
 
 ```bash
 ollama list # See what's installed
-ollama pull gpt-oss:20b # Pull a tool-capable model
+ollama pull glm-4.7-flash
+ollama pull gpt-oss:20b
 ollama pull llama3.3 # Or another model
 ```
diff --git a/docs/providers/openai.md b/docs/providers/openai.md
index 4683f061546..a6a60f8f2ea 100644
--- a/docs/providers/openai.md
+++ b/docs/providers/openai.md
@@ -36,6 +36,12 @@ openclaw onboard --openai-api-key "$OPENAI_API_KEY"
 
 OpenAI's current API model docs list `gpt-5.4` and `gpt-5.4-pro` for direct
 OpenAI API usage. OpenClaw forwards both through the `openai/*` Responses path.
+OpenClaw does **not** expose `openai/gpt-5.3-codex-spark` on the direct OpenAI
+API path: live OpenAI API requests currently reject that model, so OpenClaw
+intentionally suppresses the stale row.
+
+`pi-ai` still ships a built-in entry for that model, but OpenClaw treats Spark
+as Codex-only. See the Codex section below for `openai-codex/gpt-5.3-codex-spark`.
 
 ## Option B: OpenAI Code (Codex) subscription
 
@@ -63,6 +69,18 @@ openclaw models auth login --provider openai-codex
 OpenAI's current Codex docs list `gpt-5.4` as the current Codex model. OpenClaw
 maps that to `openai-codex/gpt-5.4` for ChatGPT/Codex OAuth usage.
 
+If your Codex account is entitled to Codex Spark, OpenClaw also supports:
+
+- `openai-codex/gpt-5.3-codex-spark`
+
+OpenClaw treats Codex Spark as Codex-only. It does not expose a direct
+`openai/gpt-5.3-codex-spark` API-key path.
+
+OpenClaw also preserves `openai-codex/gpt-5.3-codex-spark` when `pi-ai`
+discovers it. Treat it as entitlement-dependent and experimental: Codex Spark is
+separate from GPT-5.4 `/fast`, and availability depends on the signed-in Codex /
+ChatGPT account.
+
 ### Transport default
 
 OpenClaw uses `pi-ai` for model streaming. For both `openai/*` and
@@ -165,6 +183,46 @@ pass that field through on direct `openai/*` Responses requests.
 
 Supported values are `auto`, `default`, `flex`, and `priority`.
 
+### OpenAI fast mode + +OpenClaw exposes a shared fast-mode toggle for both `openai/*` and +`openai-codex/*` sessions: + +- Chat/UI: `/fast status|on|off` +- Config: `agents.defaults.models["/"].params.fastMode` + +When fast mode is enabled, OpenClaw applies a low-latency OpenAI profile: + +- `reasoning.effort = "low"` when the payload does not already specify reasoning +- `text.verbosity = "low"` when the payload does not already specify verbosity +- `service_tier = "priority"` for direct `openai/*` Responses calls to `api.openai.com` + +Example: + +```json5 +{ + agents: { + defaults: { + models: { + "openai/gpt-5.4": { + params: { + fastMode: true, + }, + }, + "openai-codex/gpt-5.4": { + params: { + fastMode: true, + }, + }, + }, + }, + }, +} +``` + +Session overrides win over config. Clearing the session override in the Sessions UI +returns the session to the configured default. + ### OpenAI Responses server-side compaction For direct OpenAI Responses models (`openai/*` using `api: "openai-responses"` with diff --git a/docs/providers/opencode-go.md b/docs/providers/opencode-go.md new file mode 100644 index 00000000000..4552e916beb --- /dev/null +++ b/docs/providers/opencode-go.md @@ -0,0 +1,45 @@ +--- +summary: "Use the OpenCode Go catalog with the shared OpenCode setup" +read_when: + - You want the OpenCode Go catalog + - You need the runtime model refs for Go-hosted models +title: "OpenCode Go" +--- + +# OpenCode Go + +OpenCode Go is the Go catalog within [OpenCode](/providers/opencode). +It uses the same `OPENCODE_API_KEY` as the Zen catalog, but keeps the runtime +provider id `opencode-go` so upstream per-model routing stays correct. 
+ +## Supported models + +- `opencode-go/kimi-k2.5` +- `opencode-go/glm-5` +- `opencode-go/minimax-m2.5` + +## CLI setup + +```bash +openclaw onboard --auth-choice opencode-go +# or non-interactive +openclaw onboard --opencode-go-api-key "$OPENCODE_API_KEY" +``` + +## Config snippet + +```json5 +{ + env: { OPENCODE_API_KEY: "YOUR_API_KEY_HERE" }, // pragma: allowlist secret + agents: { defaults: { model: { primary: "opencode-go/kimi-k2.5" } } }, +} +``` + +## Routing behavior + +OpenClaw handles per-model routing automatically when the model ref uses `opencode-go/...`. + +## Notes + +- Use [OpenCode](/providers/opencode) for the shared onboarding and catalog overview. +- Runtime refs stay explicit: `opencode/...` for Zen, `opencode-go/...` for Go. diff --git a/docs/providers/opencode.md b/docs/providers/opencode.md index aa0614bff80..bf8d54afc9e 100644 --- a/docs/providers/opencode.md +++ b/docs/providers/opencode.md @@ -1,25 +1,38 @@ --- -summary: "Use OpenCode Zen (curated models) with OpenClaw" +summary: "Use OpenCode Zen and Go catalogs with OpenClaw" read_when: - - You want OpenCode Zen for model access - - You want a curated list of coding-friendly models -title: "OpenCode Zen" + - You want OpenCode-hosted model access + - You want to pick between the Zen and Go catalogs +title: "OpenCode" --- -# OpenCode Zen +# OpenCode -OpenCode Zen is a **curated list of models** recommended by the OpenCode team for coding agents. -It is an optional, hosted model access path that uses an API key and the `opencode` provider. -Zen is currently in beta. +OpenCode exposes two hosted catalogs in OpenClaw: + +- `opencode/...` for the **Zen** catalog +- `opencode-go/...` for the **Go** catalog + +Both catalogs use the same OpenCode API key. OpenClaw keeps the runtime provider ids +split so upstream per-model routing stays correct, but onboarding and docs treat them +as one OpenCode setup. 
## CLI setup +### Zen catalog + ```bash openclaw onboard --auth-choice opencode-zen -# or non-interactive openclaw onboard --opencode-zen-api-key "$OPENCODE_API_KEY" ``` +### Go catalog + +```bash +openclaw onboard --auth-choice opencode-go +openclaw onboard --opencode-go-api-key "$OPENCODE_API_KEY" +``` + ## Config snippet ```json5 @@ -29,8 +42,23 @@ openclaw onboard --opencode-zen-api-key "$OPENCODE_API_KEY" } ``` +## Catalogs + +### Zen + +- Runtime provider: `opencode` +- Example models: `opencode/claude-opus-4-6`, `opencode/gpt-5.2`, `opencode/gemini-3-pro` +- Best when you want the curated OpenCode multi-model proxy + +### Go + +- Runtime provider: `opencode-go` +- Example models: `opencode-go/kimi-k2.5`, `opencode-go/glm-5`, `opencode-go/minimax-m2.5` +- Best when you want the OpenCode-hosted Kimi/GLM/MiniMax lineup + ## Notes - `OPENCODE_ZEN_API_KEY` is also supported. -- You sign in to Zen, add billing details, and copy your API key. -- OpenCode Zen bills per request; check the OpenCode dashboard for details. +- Entering one OpenCode key during onboarding stores credentials for both runtime providers. +- You sign in to OpenCode, add billing details, and copy your API key. +- Billing and catalog availability are managed from the OpenCode dashboard. diff --git a/docs/providers/sglang.md b/docs/providers/sglang.md new file mode 100644 index 00000000000..ce66950c0c3 --- /dev/null +++ b/docs/providers/sglang.md @@ -0,0 +1,104 @@ +--- +summary: "Run OpenClaw with SGLang (OpenAI-compatible self-hosted server)" +read_when: + - You want to run OpenClaw against a local SGLang server + - You want OpenAI-compatible /v1 endpoints with your own models +title: "SGLang" +--- + +# SGLang + +SGLang can serve open-source models via an **OpenAI-compatible** HTTP API. +OpenClaw can connect to SGLang using the `openai-completions` API. 
+ +OpenClaw can also **auto-discover** available models from SGLang when you opt +in with `SGLANG_API_KEY` (any value works if your server does not enforce auth) +and you do not define an explicit `models.providers.sglang` entry. + +## Quick start + +1. Start SGLang with an OpenAI-compatible server. + +Your base URL should expose `/v1` endpoints (for example `/v1/models`, +`/v1/chat/completions`). SGLang commonly runs on: + +- `http://127.0.0.1:30000/v1` + +2. Opt in (any value works if no auth is configured): + +```bash +export SGLANG_API_KEY="sglang-local" +``` + +3. Run onboarding and choose `SGLang`, or set a model directly: + +```bash +openclaw onboard +``` + +```json5 +{ + agents: { + defaults: { + model: { primary: "sglang/your-model-id" }, + }, + }, +} +``` + +## Model discovery (implicit provider) + +When `SGLANG_API_KEY` is set (or an auth profile exists) and you **do not** +define `models.providers.sglang`, OpenClaw will query: + +- `GET http://127.0.0.1:30000/v1/models` + +and convert the returned IDs into model entries. + +If you set `models.providers.sglang` explicitly, auto-discovery is skipped and +you must define models manually. + +## Explicit configuration (manual models) + +Use explicit config when: + +- SGLang runs on a different host/port. +- You want to pin `contextWindow`/`maxTokens` values. +- Your server requires a real API key (or you want to control headers). 
+ +```json5 +{ + models: { + providers: { + sglang: { + baseUrl: "http://127.0.0.1:30000/v1", + apiKey: "${SGLANG_API_KEY}", + api: "openai-completions", + models: [ + { + id: "your-model-id", + name: "Local SGLang Model", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 128000, + maxTokens: 8192, + }, + ], + }, + }, + }, +} +``` + +## Troubleshooting + +- Check the server is reachable: + +```bash +curl http://127.0.0.1:30000/v1/models +``` + +- If requests fail with auth errors, set a real `SGLANG_API_KEY` that matches + your server configuration, or configure the provider explicitly under + `models.providers.sglang`. diff --git a/docs/reference/RELEASING.md b/docs/reference/RELEASING.md index 6b5dc29c9b9..f929d16e5f7 100644 --- a/docs/reference/RELEASING.md +++ b/docs/reference/RELEASING.md @@ -9,7 +9,7 @@ read_when: # Release Checklist (npm + macOS) -Use `pnpm` (Node 22+) from the repo root. Keep the working tree clean before tagging/publishing. +Use `pnpm` from the repo root with Node 24 by default. Node 22 LTS, currently `22.16+`, remains supported for compatibility. Keep the working tree clean before tagging/publishing. ## Operator trigger @@ -19,6 +19,32 @@ When the operator says “release”, immediately do this preflight (no extra qu - Load env from `~/.profile` and confirm `SPARKLE_PRIVATE_KEY_FILE` + App Store Connect vars are set (SPARKLE_PRIVATE_KEY_FILE should live in `~/.profile`). - Use Sparkle keys from `~/Library/CloudStorage/Dropbox/Backup/Sparkle` if needed. +## Versioning + +Current OpenClaw releases use date-based versioning. 
+ +- Stable release version: `YYYY.M.D` + - Git tag: `vYYYY.M.D` + - Examples from repo history: `v2026.2.26`, `v2026.3.8` +- Beta prerelease version: `YYYY.M.D-beta.N` + - Git tag: `vYYYY.M.D-beta.N` + - Examples from repo history: `v2026.2.15-beta.1`, `v2026.3.8-beta.1` +- Use the same version string everywhere, minus the leading `v` where Git tags are not used: + - `package.json`: `2026.3.8` + - Git tag: `v2026.3.8` + - GitHub release title: `openclaw 2026.3.8` +- Do not zero-pad month or day. Use `2026.3.8`, not `2026.03.08`. +- Stable and beta are npm dist-tags, not separate release lines: + - `latest` = stable + - `beta` = prerelease/testing +- Dev is the moving head of `main`, not a normal git-tagged release. +- The release workflow enforces the current stable/beta tag formats and rejects versions whose CalVer date is more than 2 UTC calendar days away from the release date. + +Historical note: + +- Older tags such as `v2026.1.11-1`, `v2026.2.6-3`, and `v2.0.0-beta2` exist in repo history. +- Treat those as legacy tag patterns. New releases should use `vYYYY.M.D` for stable and `vYYYY.M.D-beta.N` for beta. + 1. **Version & metadata** - [ ] Bump `package.json` version (e.g., `2026.1.29`). @@ -67,8 +93,11 @@ When the operator says “release”, immediately do this preflight (no extra qu 6. **Publish (npm)** - [ ] Confirm git status is clean; commit and push as needed. -- [ ] `npm login` (verify 2FA) if needed. -- [ ] `npm publish --access public` (use `--tag beta` for pre-releases). +- [ ] Confirm npm trusted publishing is configured for the `openclaw` package. +- [ ] Push the matching git tag to trigger `.github/workflows/openclaw-npm-release.yml`. + - Stable tags publish to npm `latest`. + - Beta tags publish to npm `beta`. + - The workflow rejects tags that do not match `package.json`, are not on `main`, or whose CalVer date is more than 2 UTC calendar days away from the release date. 
- [ ] Verify the registry: `npm view openclaw version`, `npm view openclaw dist-tags`, and `npx -y openclaw@X.Y.Z --version` (or `--help`). ### Troubleshooting (notes from 2.0.0-beta2 release) @@ -84,6 +113,7 @@ When the operator says “release”, immediately do this preflight (no extra qu 7. **GitHub release + appcast** - [ ] Tag and push: `git tag vX.Y.Z && git push origin vX.Y.Z` (or `git push --tags`). + - Pushing the tag also triggers the npm release workflow. - [ ] Create/refresh the GitHub release for `vX.Y.Z` with **title `openclaw X.Y.Z`** (not just the tag); body should include the **full** changelog section for that version (Highlights + Changes + Fixes), inline (no bare links), and **must not repeat the title inside the body**. - [ ] Attach artifacts: `npm pack` tarball (optional), `OpenClaw-X.Y.Z.zip`, and `OpenClaw-X.Y.Z.dSYM.zip` (if generated). - [ ] Commit the updated `appcast.xml` and push it (Sparkle feeds from main). diff --git a/docs/reference/api-usage-costs.md b/docs/reference/api-usage-costs.md index dba017aacc1..bbb1d90de87 100644 --- a/docs/reference/api-usage-costs.md +++ b/docs/reference/api-usage-costs.md @@ -80,13 +80,13 @@ See [Memory](/concepts/memory). `web_search` uses API keys and may incur usage charges depending on your provider: - **Brave Search API**: `BRAVE_API_KEY` or `tools.web.search.apiKey` -- **Gemini (Google Search)**: `GEMINI_API_KEY` -- **Grok (xAI)**: `XAI_API_KEY` -- **Kimi (Moonshot)**: `KIMI_API_KEY` or `MOONSHOT_API_KEY` -- **Perplexity Search API**: `PERPLEXITY_API_KEY` +- **Gemini (Google Search)**: `GEMINI_API_KEY` or `tools.web.search.gemini.apiKey` +- **Grok (xAI)**: `XAI_API_KEY` or `tools.web.search.grok.apiKey` +- **Kimi (Moonshot)**: `KIMI_API_KEY`, `MOONSHOT_API_KEY`, or `tools.web.search.kimi.apiKey` +- **Perplexity Search API**: `PERPLEXITY_API_KEY`, `OPENROUTER_API_KEY`, or `tools.web.search.perplexity.apiKey` -**Brave Search free credit:** Each Brave plan includes $5/month in renewing -free credit. 
The Search plan costs $5 per 1,000 requests, so the credit covers +**Brave Search free credit:** Each Brave plan includes \$5/month in renewing +free credit. The Search plan costs \$5 per 1,000 requests, so the credit covers 1,000 requests/month at no charge. Set your usage limit in the Brave dashboard to avoid unexpected charges. diff --git a/docs/reference/secretref-credential-surface.md b/docs/reference/secretref-credential-surface.md index dd1b5f1fd2f..9f73c7d0112 100644 --- a/docs/reference/secretref-credential-surface.md +++ b/docs/reference/secretref-credential-surface.md @@ -31,6 +31,7 @@ Scope intent: - `talk.providers.*.apiKey` - `messages.tts.elevenlabs.apiKey` - `messages.tts.openai.apiKey` +- `tools.web.fetch.firecrawl.apiKey` - `tools.web.search.apiKey` - `tools.web.search.gemini.apiKey` - `tools.web.search.grok.apiKey` @@ -68,8 +69,10 @@ Scope intent: - `channels.bluebubbles.password` - `channels.bluebubbles.accounts.*.password` - `channels.feishu.appSecret` +- `channels.feishu.encryptKey` - `channels.feishu.verificationToken` - `channels.feishu.accounts.*.appSecret` +- `channels.feishu.accounts.*.encryptKey` - `channels.feishu.accounts.*.verificationToken` - `channels.msteams.appPassword` - `channels.mattermost.botToken` @@ -100,9 +103,11 @@ Notes: - Plan entries target `profiles.*.key` / `profiles.*.token` and write sibling refs (`keyRef` / `tokenRef`). - Auth-profile refs are included in runtime resolution and audit coverage. - For SecretRef-managed model providers, generated `agents/*/agent/models.json` entries persist non-secret markers (not resolved secret values) for `apiKey`/header surfaces. +- Marker persistence is source-authoritative: OpenClaw writes markers from the active source config snapshot (pre-resolution), not from resolved runtime secret values. - For web search: - In explicit provider mode (`tools.web.search.provider` set), only the selected provider key is active. 
- - In auto mode (`tools.web.search.provider` unset), `tools.web.search.apiKey` and provider-specific keys are active. + - In auto mode (`tools.web.search.provider` unset), only the first provider key that resolves by precedence is active. + - In auto mode, non-selected provider refs are treated as inactive until selected. ## Unsupported credentials diff --git a/docs/reference/secretref-user-supplied-credentials-matrix.json b/docs/reference/secretref-user-supplied-credentials-matrix.json index 773ef8ab162..f72729dbadc 100644 --- a/docs/reference/secretref-user-supplied-credentials-matrix.json +++ b/docs/reference/secretref-user-supplied-credentials-matrix.json @@ -128,6 +128,13 @@ "secretShape": "secret_input", "optIn": true }, + { + "id": "channels.feishu.accounts.*.encryptKey", + "configFile": "openclaw.json", + "path": "channels.feishu.accounts.*.encryptKey", + "secretShape": "secret_input", + "optIn": true + }, { "id": "channels.feishu.accounts.*.verificationToken", "configFile": "openclaw.json", @@ -142,6 +149,13 @@ "secretShape": "secret_input", "optIn": true }, + { + "id": "channels.feishu.encryptKey", + "configFile": "openclaw.json", + "path": "channels.feishu.encryptKey", + "secretShape": "secret_input", + "optIn": true + }, { "id": "channels.feishu.verificationToken", "configFile": "openclaw.json", @@ -454,6 +468,13 @@ "secretShape": "secret_input", "optIn": true }, + { + "id": "tools.web.fetch.firecrawl.apiKey", + "configFile": "openclaw.json", + "path": "tools.web.fetch.firecrawl.apiKey", + "secretShape": "secret_input", + "optIn": true + }, { "id": "tools.web.search.apiKey", "configFile": "openclaw.json", diff --git a/docs/reference/test.md b/docs/reference/test.md index 8d99e674c3f..378789f6d6e 100644 --- a/docs/reference/test.md +++ b/docs/reference/test.md @@ -11,7 +11,7 @@ title: "Tests" - `pnpm test:force`: Kills any lingering gateway process holding the default control port, then runs the full Vitest suite with an isolated gateway port so server 
tests don’t collide with a running instance. Use this when a prior gateway run left port 18789 occupied. - `pnpm test:coverage`: Runs the unit suite with V8 coverage (via `vitest.unit.config.ts`). Global thresholds are 70% lines/branches/functions/statements. Coverage excludes integration-heavy entrypoints (CLI wiring, gateway/telegram bridges, webchat static server) to keep the target focused on unit-testable logic. -- `pnpm test` on Node 24+: OpenClaw auto-disables Vitest `vmForks` and uses `forks` to avoid `ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`. +- `pnpm test` on Node 22, 23, and 24 uses Vitest `vmForks` by default for faster startup. Node 25+ falls back to `forks` until re-validated. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`. - `pnpm test`: runs the fast core unit lane by default for quick local feedback. - `pnpm test:channels`: runs channel-heavy suites. - `pnpm test:extensions`: runs extension/plugin suites. @@ -81,7 +81,7 @@ This script drives the interactive wizard via a pseudo-tty, verifies config/work ## QR import smoke (Docker) -Ensures `qrcode-terminal` loads under Node 22+ in Docker: +Ensures `qrcode-terminal` loads under the supported Docker Node runtimes (Node 24 default, Node 22 compatible): ```bash pnpm test:docker:qr diff --git a/docs/reference/token-use.md b/docs/reference/token-use.md index 9e85c25e687..8493e99f098 100644 --- a/docs/reference/token-use.md +++ b/docs/reference/token-use.md @@ -18,7 +18,7 @@ OpenClaw assembles its own system prompt on every run. It includes: - Tool list + short descriptions - Skills list (only metadata; instructions are loaded on demand with `read`) - Self-update instructions -- Workspace + bootstrap files (`AGENTS.md`, `SOUL.md`, `TOOLS.md`, `IDENTITY.md`, `USER.md`, `HEARTBEAT.md`, `BOOTSTRAP.md` when new, plus `MEMORY.md` and/or `memory.md` when present). 
Large files are truncated by `agents.defaults.bootstrapMaxChars` (default: 20000), and total bootstrap injection is capped by `agents.defaults.bootstrapTotalMaxChars` (default: 150000). `memory/*.md` files are on-demand via memory tools and are not auto-injected. +- Workspace + bootstrap files (`AGENTS.md`, `SOUL.md`, `TOOLS.md`, `IDENTITY.md`, `USER.md`, `HEARTBEAT.md`, `BOOTSTRAP.md` when new, plus `MEMORY.md` when present or `memory.md` as a lowercase fallback). Large files are truncated by `agents.defaults.bootstrapMaxChars` (default: 20000), and total bootstrap injection is capped by `agents.defaults.bootstrapTotalMaxChars` (default: 150000). `memory/*.md` files are on-demand via memory tools and are not auto-injected. - Time (UTC + user timezone) - Reply tags + heartbeat behavior - Runtime metadata (host/OS/model/thinking) diff --git a/docs/reference/wizard.md b/docs/reference/wizard.md index 2e7a43bdecc..bbaebbdc84f 100644 --- a/docs/reference/wizard.md +++ b/docs/reference/wizard.md @@ -38,7 +38,9 @@ For a high-level overview, see [Onboarding Wizard](/start/wizard). - Sets `agents.defaults.model` to `openai-codex/gpt-5.2` when model is unset or `openai/*`. - **OpenAI API key**: uses `OPENAI_API_KEY` if present or prompts for a key, then stores it in auth profiles. - **xAI (Grok) API key**: prompts for `XAI_API_KEY` and configures xAI as a model provider. - - **OpenCode Zen (multi-model proxy)**: prompts for `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`, get it at https://opencode.ai/auth). + - **OpenCode**: prompts for `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`, get it at https://opencode.ai/auth) and lets you pick the Zen or Go catalog. + - **Ollama**: prompts for the Ollama base URL, offers **Cloud + Local** or **Local** mode, discovers available models, and auto-pulls the selected local model when needed. + - More detail: [Ollama](/providers/ollama) - **API key**: stores the key for you. 
- **Vercel AI Gateway (multi-model proxy)**: prompts for `AI_GATEWAY_API_KEY`. - More detail: [Vercel AI Gateway](/providers/vercel-ai-gateway) @@ -165,80 +167,8 @@ openclaw onboard --non-interactive \ `--json` does **not** imply non-interactive mode. Use `--non-interactive` (and `--workspace`) for scripts. - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice gemini-api-key \ - --gemini-api-key "$GEMINI_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice zai-api-key \ - --zai-api-key "$ZAI_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice ai-gateway-api-key \ - --ai-gateway-api-key "$AI_GATEWAY_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice cloudflare-ai-gateway-api-key \ - --cloudflare-ai-gateway-account-id "your-account-id" \ - --cloudflare-ai-gateway-gateway-id "your-gateway-id" \ - --cloudflare-ai-gateway-api-key "$CLOUDFLARE_AI_GATEWAY_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice moonshot-api-key \ - --moonshot-api-key "$MOONSHOT_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice synthetic-api-key \ - --synthetic-api-key "$SYNTHETIC_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice opencode-zen \ - --opencode-zen-api-key "$OPENCODE_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - +Provider-specific command examples live in [CLI 
Automation](/start/wizard-cli-automation#provider-specific-examples). +Use this reference page for flag semantics and step ordering. ### Add agent (non-interactive) diff --git a/docs/start/getting-started.md b/docs/start/getting-started.md index c4bed93d33f..26b54b63f6f 100644 --- a/docs/start/getting-started.md +++ b/docs/start/getting-started.md @@ -19,7 +19,7 @@ Docs: [Dashboard](/web/dashboard) and [Control UI](/web/control-ui). ## Prereqs -- Node 22 or newer +- Node 24 recommended (Node 22 LTS, currently `22.16+`, still supported for compatibility) Check your Node version with `node --version` if you are unsure. diff --git a/docs/start/setup.md b/docs/start/setup.md index 4b6113743f8..205f14d20a5 100644 --- a/docs/start/setup.md +++ b/docs/start/setup.md @@ -127,7 +127,7 @@ openclaw health Use this when debugging auth or deciding what to back up: - **WhatsApp**: `~/.openclaw/credentials/whatsapp//creds.json` -- **Telegram bot token**: config/env or `channels.telegram.tokenFile` +- **Telegram bot token**: config/env or `channels.telegram.tokenFile` (regular file only; symlinks rejected) - **Discord bot token**: config/env or SecretRef (env/file/exec providers) - **Slack tokens**: config/env (`channels.slack.*`) - **Pairing allowlists**: diff --git a/docs/start/wizard-cli-automation.md b/docs/start/wizard-cli-automation.md index 14f4a9d5d32..cd00787c5c7 100644 --- a/docs/start/wizard-cli-automation.md +++ b/docs/start/wizard-cli-automation.md @@ -123,7 +123,7 @@ openclaw onboard --non-interactive \ --gateway-bind loopback ``` - + ```bash openclaw onboard --non-interactive \ --mode local \ @@ -132,6 +132,18 @@ openclaw onboard --non-interactive \ --gateway-port 18789 \ --gateway-bind loopback ``` + Swap to `--auth-choice opencode-go --opencode-go-api-key "$OPENCODE_API_KEY"` for the Go catalog. 
+ + + ```bash + openclaw onboard --non-interactive \ + --mode local \ + --auth-choice ollama \ + --custom-model-id "qwen3.5:27b" \ + --accept-risk \ + --gateway-port 18789 \ + --gateway-bind loopback + ``` ```bash diff --git a/docs/start/wizard-cli-reference.md b/docs/start/wizard-cli-reference.md index 44f470ea73b..5d3e6be6e72 100644 --- a/docs/start/wizard-cli-reference.md +++ b/docs/start/wizard-cli-reference.md @@ -16,7 +16,7 @@ For the short guide, see [Onboarding Wizard (CLI)](/start/wizard). Local mode (default) walks you through: -- Model and auth setup (OpenAI Code subscription OAuth, Anthropic API key or setup token, plus MiniMax, GLM, Moonshot, and AI Gateway options) +- Model and auth setup (OpenAI Code subscription OAuth, Anthropic API key or setup token, plus MiniMax, GLM, Ollama, Moonshot, and AI Gateway options) - Workspace location and bootstrap files - Gateway settings (port, bind, auth, tailscale) - Channels and providers (Telegram, WhatsApp, Discord, Google Chat, Mattermost plugin, Signal) @@ -155,8 +155,8 @@ What you set: Prompts for `XAI_API_KEY` and configures xAI as a model provider. - - Prompts for `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`). + + Prompts for `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`) and lets you choose the Zen or Go catalog. Setup URL: [opencode.ai/auth](https://opencode.ai/auth). @@ -178,6 +178,11 @@ What you set: Prompts for `SYNTHETIC_API_KEY`. More detail: [Synthetic](/providers/synthetic). + + Prompts for base URL (default `http://127.0.0.1:11434`), then offers Cloud + Local or Local mode. + Discovers available models and suggests defaults. + More detail: [Ollama](/providers/ollama). + Moonshot (Kimi K2) and Kimi Coding configs are auto-written. More detail: [Moonshot AI (Kimi + Kimi Coding)](/providers/moonshot). 
diff --git a/docs/start/wizard.md b/docs/start/wizard.md index ef1fc52b31a..05c09ed53fd 100644 --- a/docs/start/wizard.md +++ b/docs/start/wizard.md @@ -111,8 +111,10 @@ Notes: ## Full reference -For detailed step-by-step breakdowns, non-interactive scripting, Signal setup, -RPC API, and a full list of config fields the wizard writes, see the +For detailed step-by-step breakdowns and config outputs, see +[CLI Onboarding Reference](/start/wizard-cli-reference). +For non-interactive examples, see [CLI Automation](/start/wizard-cli-automation). +For the deeper technical reference, including RPC details, see [Wizard Reference](/reference/wizard). ## Related docs diff --git a/docs/tools/acp-agents.md b/docs/tools/acp-agents.md index 74ed73248f1..d8ac5b5f7d3 100644 --- a/docs/tools/acp-agents.md +++ b/docs/tools/acp-agents.md @@ -243,9 +243,76 @@ Interface details: - `mode: "session"` requires `thread: true` - `cwd` (optional): requested runtime working directory (validated by backend/runtime policy). - `label` (optional): operator-facing label used in session/banner text. +- `resumeSessionId` (optional): resume an existing ACP session instead of creating a new one. The agent replays its conversation history via `session/load`. Requires `runtime: "acp"`. - `streamTo` (optional): `"parent"` streams initial ACP run progress summaries back to the requester session as system events. - When available, accepted responses include `streamLogPath` pointing to a session-scoped JSONL log (`.acp-stream.jsonl`) you can tail for full relay history. +### Resume an existing session + +Use `resumeSessionId` to continue a previous ACP session instead of starting fresh. The agent replays its conversation history via `session/load`, so it picks up with full context of what came before. 
+ +```json +{ +  "task": "Continue where we left off — fix the remaining test failures", +  "runtime": "acp", +  "agentId": "codex", +  "resumeSessionId": "<acp-session-id>" +} +``` + +Common use cases: + +- Hand off a Codex session from your laptop to your phone — tell your agent to pick up where you left off +- Continue a coding session you started interactively in the CLI, now headlessly through your agent +- Pick up work that was interrupted by a gateway restart or idle timeout + +Notes: + +- `resumeSessionId` requires `runtime: "acp"` — returns an error if used with the sub-agent runtime. +- `resumeSessionId` restores the upstream ACP conversation history; `thread` and `mode` still apply normally to the new OpenClaw session you are creating, so `mode: "session"` still requires `thread: true`. +- The target agent must support `session/load` (Codex and Claude Code do). +- If the session ID isn't found, the spawn fails with a clear error — no silent fallback to a new session. + +### Operator smoke test + +Use this after a gateway deploy when you want a quick live check that ACP spawn +is actually working end-to-end, not just passing unit tests. + +Recommended gate: + +1. Verify the deployed gateway version/commit on the target host. +2. Confirm the deployed source includes the ACP lineage acceptance in +   `src/gateway/sessions-patch.ts` (`subagent:* or acp:* sessions`). +3. Open a temporary ACPX bridge session to a live agent (for example +   `razor(main)` on `jpclawhq`). +4. Ask that agent to call `sessions_spawn` with: +   - `runtime: "acp"` +   - `agentId: "codex"` +   - `mode: "run"` +   - task: `Reply with exactly LIVE-ACP-SPAWN-OK` +5. Verify the agent reports: +   - `accepted=yes` +   - a real `childSessionKey` +   - no validator error +6. Clean up the temporary ACPX bridge session. + +Example prompt to the live agent: + +```text +Use the sessions_spawn tool now with runtime: "acp", agentId: "codex", and mode: "run". +Set the task to: "Reply with exactly LIVE-ACP-SPAWN-OK". 
+Then report only: accepted=<yes|no>; childSessionKey=<key>; error=<none|message>. +``` + +Notes: + +- Keep this smoke test on `mode: "run"` unless you are intentionally testing +  thread-bound persistent ACP sessions. +- Do not require `streamTo: "parent"` for the basic gate. That path depends on +  requester/session capabilities and is a separate integration check. +- Treat thread-bound `mode: "session"` testing as a second, richer integration +  pass from a real Discord thread or Telegram topic. + ## Sandbox compatibility ACP sessions currently run on the host runtime, not inside the OpenClaw sandbox. @@ -354,6 +421,8 @@ Some controls depend on backend capabilities. If a backend does not support a co | `/acp doctor` | Backend health, capabilities, actionable fixes. | `/acp doctor` | | `/acp install` | Print deterministic install and enable steps. | `/acp install` | +`/acp sessions` reads the store for the current bound or requester session. Commands that accept `session-key`, `session-id`, or `session-label` tokens resolve targets through gateway session discovery, including custom per-agent `session.store` roots. + ## Runtime options mapping `/acp` has convenience commands and a generic setter. diff --git a/docs/tools/browser-linux-troubleshooting.md b/docs/tools/browser-linux-troubleshooting.md index 01e6cbc3ff9..1ab51657044 100644 --- a/docs/tools/browser-linux-troubleshooting.md +++ b/docs/tools/browser-linux-troubleshooting.md @@ -123,7 +123,7 @@ curl -s http://127.0.0.1:18791/tabs ### Problem: "Chrome extension relay is running, but no tab is connected" -You’re using the `chrome` profile (extension relay). It expects the OpenClaw +You’re using the `chrome-relay` profile (extension relay). It expects the OpenClaw browser extension to be attached to a live tab. Fix options: @@ -135,5 +135,5 @@ Fix options: Notes: -- The `chrome` profile uses your **system default Chromium browser** when possible. +- The `chrome-relay` profile uses your **system default Chromium browser** when possible. 
- Local `openclaw` profiles auto-assign `cdpPort`/`cdpUrl`; only set those for remote CDP. diff --git a/docs/tools/browser-login.md b/docs/tools/browser-login.md index 910c21ca218..d570b3b2e87 100644 --- a/docs/tools/browser-login.md +++ b/docs/tools/browser-login.md @@ -20,6 +20,13 @@ Back to the main browser docs: [Browser](/tools/browser). OpenClaw controls a **dedicated Chrome profile** (named `openclaw`, orange‑tinted UI). This is separate from your daily browser profile. +For agent browser tool calls: + +- Default choice: the agent should use its isolated `openclaw` browser. +- Use `profile="user"` only when existing logged-in sessions matter and the user is at the computer to click/approve any attach prompt. +- Use `profile="chrome-relay"` only for the Chrome extension / toolbar-button attach flow. +- If you have multiple user-browser profiles, specify the profile explicitly instead of guessing. + Two easy ways to access it: 1. **Ask the agent to open the browser** and then log in yourself. diff --git a/docs/tools/browser-wsl2-windows-remote-cdp-troubleshooting.md b/docs/tools/browser-wsl2-windows-remote-cdp-troubleshooting.md index d63bb891c48..2e7844860aa 100644 --- a/docs/tools/browser-wsl2-windows-remote-cdp-troubleshooting.md +++ b/docs/tools/browser-wsl2-windows-remote-cdp-troubleshooting.md @@ -33,7 +33,7 @@ Choose this when: ### Option 2: Chrome extension relay -Use the built-in `chrome` profile plus the OpenClaw Chrome extension. +Use the built-in `chrome-relay` profile plus the OpenClaw Chrome extension. 
Choose this when: @@ -155,7 +155,7 @@ Example: { browser: { enabled: true, - defaultProfile: "chrome", + defaultProfile: "chrome-relay", relayBindHost: "0.0.0.0", }, } @@ -197,7 +197,7 @@ openclaw browser tabs --browser-profile remote For the extension relay: ```bash -openclaw browser tabs --browser-profile chrome +openclaw browser tabs --browser-profile chrome-relay ``` Good result: diff --git a/docs/tools/browser.md b/docs/tools/browser.md index d632e713068..ebe352036c5 100644 --- a/docs/tools/browser.md +++ b/docs/tools/browser.md @@ -18,8 +18,8 @@ Beginner view: - Think of it as a **separate, agent-only browser**. - The `openclaw` profile does **not** touch your personal browser profile. - The agent can **open tabs, read pages, click, and type** in a safe lane. -- The default `chrome` profile uses the **system default Chromium browser** via the - extension relay; switch to `openclaw` for the isolated managed browser. +- The built-in `user` profile attaches to your real signed-in Chrome session; + `chrome-relay` is the explicit extension-relay profile. ## What you get @@ -43,11 +43,22 @@ openclaw browser --browser-profile openclaw snapshot If you get “Browser disabled”, enable it in config (see below) and restart the Gateway. -## Profiles: `openclaw` vs `chrome` +## Profiles: `openclaw` vs `user` vs `chrome-relay` - `openclaw`: managed, isolated browser (no extension required). -- `chrome`: extension relay to your **system browser** (requires the OpenClaw - extension to be attached to a tab). +- `user`: built-in Chrome MCP attach profile for your **real signed-in Chrome** + session. +- `chrome-relay`: extension relay to your **system browser** (requires the + OpenClaw extension to be attached to a tab). + +For agent browser tool calls: + +- Default: use the isolated `openclaw` browser. +- Prefer `profile="user"` when existing logged-in sessions matter and the user + is at the computer to click/approve any attach prompt. 
+- Use `profile="chrome-relay"` only when the user explicitly wants the Chrome + extension / toolbar-button attach flow. +- `profile` is the explicit override when you want a specific browser mode. Set `browser.defaultProfile: "openclaw"` if you want managed mode by default. @@ -68,7 +79,7 @@ Browser settings live in `~/.openclaw/openclaw.json`. // cdpUrl: "http://127.0.0.1:18792", // legacy single-profile override remoteCdpTimeoutMs: 1500, // remote CDP HTTP timeout (ms) remoteCdpHandshakeTimeoutMs: 3000, // remote CDP WebSocket handshake timeout (ms) - defaultProfile: "chrome", + defaultProfile: "openclaw", color: "#FF4500", headless: false, noSandbox: false, @@ -77,6 +88,16 @@ Browser settings live in `~/.openclaw/openclaw.json`. profiles: { openclaw: { cdpPort: 18800, color: "#FF4500" }, work: { cdpPort: 18801, color: "#0066CC" }, + user: { + driver: "existing-session", + attachOnly: true, + color: "#00AA00", + }, + "chrome-relay": { + driver: "extension", + cdpUrl: "http://127.0.0.1:18792", + color: "#00AA00", + }, remote: { cdpUrl: "http://10.0.0.42:9222", color: "#00AA00" }, }, }, @@ -97,9 +118,11 @@ Notes: - `browser.ssrfPolicy.allowPrivateNetwork` remains supported as a legacy alias for compatibility. - `attachOnly: true` means “never launch a local browser; only attach if it is already running.” - `color` + per-profile `color` tint the browser UI so you can see which profile is active. -- Default profile is `openclaw` (OpenClaw-managed standalone browser). Use `defaultProfile: "chrome"` to opt into the Chrome extension relay. +- Default profile is `openclaw` (OpenClaw-managed standalone browser). Use `defaultProfile: "user"` to opt into the signed-in user browser, or `defaultProfile: "chrome-relay"` for the extension relay. - Auto-detect order: system default browser if Chromium-based; otherwise Chrome → Brave → Edge → Chromium → Chrome Canary. - Local `openclaw` profiles auto-assign `cdpPort`/`cdpUrl` — set those only for remote CDP. 
+- `driver: "existing-session"` uses Chrome DevTools MCP instead of raw CDP. Do + not set `cdpUrl` for that driver. ## Use Brave (or another Chromium-based browser) @@ -264,11 +287,13 @@ OpenClaw supports multiple named profiles (routing configs). Profiles can be: - **openclaw-managed**: a dedicated Chromium-based browser instance with its own user data directory + CDP port - **remote**: an explicit CDP URL (Chromium-based browser running elsewhere) - **extension relay**: your existing Chrome tab(s) via the local relay + Chrome extension +- **existing session**: your existing Chrome profile via Chrome DevTools MCP auto-connect Defaults: - The `openclaw` profile is auto-created if missing. -- The `chrome` profile is built-in for the Chrome extension relay (points at `http://127.0.0.1:18792` by default). +- The `chrome-relay` profile is built-in for the Chrome extension relay (points at `http://127.0.0.1:18792` by default). +- Existing-session profiles are opt-in; create them with `--driver existing-session`. - Local CDP ports allocate from **18800–18899** by default. - Deleting a profile moves its local data directory to Trash. @@ -311,8 +336,8 @@ openclaw browser extension install 2. Use it: -- CLI: `openclaw browser --browser-profile chrome tabs` -- Agent tool: `browser` with `profile="chrome"` +- CLI: `openclaw browser --browser-profile chrome-relay tabs` +- Agent tool: `browser` with `profile="chrome-relay"` Optional: if you want a different name or relay port, create your own profile: @@ -328,6 +353,81 @@ Notes: - This mode relies on Playwright-on-CDP for most operations (screenshots/snapshots/actions). - Detach by clicking the extension icon again. +- Agent use: prefer `profile="user"` for logged-in sites. Use `profile="chrome-relay"` + only when you specifically want the extension flow. The user must be present + to click the extension and attach the tab. 
+
+## Chrome existing-session via MCP
+
+OpenClaw can also attach to a running Chrome profile through the official
+Chrome DevTools MCP server. This reuses the tabs and login state already open in
+that Chrome profile.
+
+Official background and setup references:
+
+- [Chrome for Developers: Use Chrome DevTools MCP with your browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session)
+- [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp)
+
+Built-in profile:
+
+- `user`
+
+Optional: create your own custom existing-session profile if you want a
+different name or color.
+
+Then in Chrome:
+
+1. Open `chrome://inspect/#remote-debugging`
+2. Enable remote debugging
+3. Keep Chrome running and approve the connection prompt when OpenClaw attaches
+
+Live attach smoke test:
+
+```bash
+openclaw browser --browser-profile user start
+openclaw browser --browser-profile user status
+openclaw browser --browser-profile user tabs
+openclaw browser --browser-profile user snapshot --format ai
+```
+
+What success looks like:
+
+- `status` shows `driver: existing-session`
+- `status` shows `transport: chrome-mcp`
+- `status` shows `running: true`
+- `tabs` lists your already-open Chrome tabs
+- `snapshot` returns refs from the selected live tab
+
+What to check if attach does not work:
+
+- Chrome is version `144+`
+- remote debugging is enabled at `chrome://inspect/#remote-debugging`
+- Chrome showed the attach consent prompt and you accepted it
+
+Agent use:
+
+- Use `profile="user"` when you need the user’s logged-in browser state.
+- If you use a custom existing-session profile, pass that explicit profile name.
+- Prefer `profile="user"` over `profile="chrome-relay"` unless the user
+  explicitly wants the extension / attach-tab flow.
+- Only choose this mode when the user is at the computer to approve the attach
+  prompt.
+- the Gateway or node host can spawn `npx chrome-devtools-mcp@latest --autoConnect` + +Notes: + +- This path is higher-risk than the isolated `openclaw` profile because it can + act inside your signed-in browser session. +- OpenClaw does not launch Chrome for this driver; it attaches to an existing + session only. +- OpenClaw uses the official Chrome DevTools MCP `--autoConnect` flow here, not + the legacy default-profile remote debugging port workflow. +- Existing-session screenshots support page captures and `--ref` element + captures from snapshots, but not CSS `--element` selectors. +- Existing-session `wait --url` supports exact, substring, and glob patterns + like other browser drivers. `wait --load networkidle` is not supported yet. +- Some features still require the extension relay or managed browser path, such + as PDF export and download interception. - Leave the relay loopback-only by default. If the relay must be reachable from a different network namespace (for example Gateway in WSL2, Chrome on Windows), set `browser.relayBindHost` to an explicit bind address such as `0.0.0.0` while keeping the surrounding network private and authenticated. WSL2 / cross-namespace example: @@ -337,7 +437,7 @@ WSL2 / cross-namespace example: browser: { enabled: true, relayBindHost: "0.0.0.0", - defaultProfile: "chrome", + defaultProfile: "chrome-relay", }, } ``` diff --git a/docs/tools/chrome-extension.md b/docs/tools/chrome-extension.md index ce4b271ae9c..91a6c1240f1 100644 --- a/docs/tools/chrome-extension.md +++ b/docs/tools/chrome-extension.md @@ -13,6 +13,13 @@ The OpenClaw Chrome extension lets the agent control your **existing Chrome tabs Attach/detach happens via a **single Chrome toolbar button**. +If you want Chrome’s official DevTools MCP attach flow instead of the OpenClaw +extension relay, use an `existing-session` browser profile instead. See +[Browser](/tools/browser#chrome-existing-session-via-mcp). 
For Chrome’s own +setup docs, see [Chrome for Developers: Use Chrome DevTools MCP with your +browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session) +and the [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp). + ## What it is (concept) There are three parts: @@ -55,7 +62,7 @@ After upgrading OpenClaw: ## Use it (set gateway token once) -OpenClaw ships with a built-in browser profile named `chrome` that targets the extension relay on the default port. +OpenClaw ships with a built-in browser profile named `chrome-relay` that targets the extension relay on the default port. Before first attach, open extension Options and set: @@ -64,8 +71,8 @@ Before first attach, open extension Options and set: Use it: -- CLI: `openclaw browser --browser-profile chrome tabs` -- Agent tool: `browser` with `profile="chrome"` +- CLI: `openclaw browser --browser-profile chrome-relay tabs` +- Agent tool: `browser` with `profile="chrome-relay"` If you want a different name or a different relay port, create your own profile: diff --git a/docs/tools/exec-approvals.md b/docs/tools/exec-approvals.md index d538e411093..830dfa6f159 100644 --- a/docs/tools/exec-approvals.md +++ b/docs/tools/exec-approvals.md @@ -30,9 +30,14 @@ Trust model note: - Gateway-authenticated callers are trusted operators for that Gateway. - Paired nodes extend that trusted operator capability onto the node host. - Exec approvals reduce accidental execution risk, but are not a per-user auth boundary. -- Approved node-host runs also bind canonical execution context: canonical cwd, pinned executable - path when applicable, and interpreter-style script operands. If a bound script changes after - approval but before execution, the run is denied instead of executing drifted content. +- Approved node-host runs bind canonical execution context: canonical cwd, exact argv, env + binding when present, and pinned executable path when applicable. 
+- For shell scripts and direct interpreter/runtime file invocations, OpenClaw also tries to bind + one concrete local file operand. If that bound file changes after approval but before execution, + the run is denied instead of executing drifted content. +- This file binding is intentionally best-effort, not a complete semantic model of every + interpreter/runtime loader path. If approval mode cannot identify exactly one concrete local + file to bind, it refuses to mint an approval-backed run instead of pretending full coverage. macOS split: @@ -259,6 +264,22 @@ For `host=node`, approval requests include a canonical `systemRunPlan` payload. that plan as the authoritative command/cwd/session context when forwarding approved `system.run` requests. +## Interpreter/runtime commands + +Approval-backed interpreter/runtime runs are intentionally conservative: + +- Exact argv/cwd/env context is always bound. +- Direct shell script and direct runtime file forms are best-effort bound to one concrete local + file snapshot. +- Common package-manager wrapper forms that still resolve to one direct local file (for example + `pnpm exec`, `pnpm node`, `npm exec`, `npx`) are unwrapped before binding. +- If OpenClaw cannot identify exactly one concrete local file for an interpreter/runtime command + (for example package scripts, eval forms, runtime-specific loader chains, or ambiguous multi-file + forms), approval-backed execution is denied instead of claiming semantic coverage it does not + have. +- For those workflows, prefer sandboxing, a separate host boundary, or an explicit trusted + allowlist/full workflow where the operator accepts the broader runtime semantics. + When approvals are required, the exec tool returns immediately with an approval id. Use that id to correlate later system events (`Exec finished` / `Exec denied`). If no decision arrives before the timeout, the request is treated as an approval timeout and surfaced as a denial reason. 
@@ -309,6 +330,32 @@ Reply in chat: /approve deny ``` +### Built-in chat approval clients + +Discord and Telegram can also act as explicit exec approval clients with channel-specific config. + +- Discord: `channels.discord.execApprovals.*` +- Telegram: `channels.telegram.execApprovals.*` + +These clients are opt-in. If a channel does not have exec approvals enabled, OpenClaw does not treat +that channel as an approval surface just because the conversation happened there. + +Shared behavior: + +- only configured approvers can approve or deny +- the requester does not need to be an approver +- when channel delivery is enabled, approval prompts include the command text +- if no operator UI or configured approval client can accept the request, the prompt falls back to `askFallback` + +Telegram defaults to approver DMs (`target: "dm"`). You can switch to `channel` or `both` when you +want approval prompts to appear in the originating Telegram chat/topic as well. For Telegram forum +topics, OpenClaw preserves the topic for the approval prompt and the post-approval follow-up. + +See: + +- [Discord](/channels/discord#exec-approvals-in-discord) +- [Telegram](/channels/telegram#exec-approvals-in-telegram) + ### macOS IPC flow ``` diff --git a/docs/tools/firecrawl.md b/docs/tools/firecrawl.md index e859eb2dcb1..2cd90a06bf5 100644 --- a/docs/tools/firecrawl.md +++ b/docs/tools/firecrawl.md @@ -40,7 +40,8 @@ with JS-heavy sites or pages that block plain HTTP fetches. Notes: -- `firecrawl.enabled` defaults to true when an API key is present. +- `firecrawl.enabled` defaults to `true` unless explicitly set to `false`. +- Firecrawl fallback attempts run only when an API key is available (`tools.web.fetch.firecrawl.apiKey` or `FIRECRAWL_API_KEY`). - `maxAgeMs` controls how old cached results can be (ms). Default is 2 days. 
## Stealth / bot circumvention diff --git a/docs/tools/index.md b/docs/tools/index.md index 6552d6f9118..bdd9b78456f 100644 --- a/docs/tools/index.md +++ b/docs/tools/index.md @@ -316,7 +316,11 @@ Common parameters: Notes: - Requires `browser.enabled=true` (default is `true`; set `false` to disable). - All actions accept optional `profile` parameter for multi-instance support. -- When `profile` is omitted, uses `browser.defaultProfile` (defaults to "chrome"). +- Omit `profile` for the safe default: isolated OpenClaw-managed browser (`openclaw`). +- Use `profile="user"` for the real local host browser when existing logins/cookies matter and the user is present to click/approve any attach prompt. +- Use `profile="chrome-relay"` only for the Chrome extension / toolbar-button attach flow. +- `profile="user"` and `profile="chrome-relay"` are host-only; do not combine them with sandbox/node targets. +- When `profile` is omitted, uses `browser.defaultProfile` (defaults to `openclaw`). - Profile names: lowercase alphanumeric + hyphens only (max 64 chars). - Port range: 18800-18899 (~100 profiles max). - Remote profiles are attach-only (no start/stop/reset). diff --git a/docs/tools/llm-task.md b/docs/tools/llm-task.md index e6f574d078e..2626d3237e4 100644 --- a/docs/tools/llm-task.md +++ b/docs/tools/llm-task.md @@ -75,11 +75,14 @@ outside the list is rejected. - `schema` (object, optional JSON Schema) - `provider` (string, optional) - `model` (string, optional) +- `thinking` (string, optional) - `authProfileId` (string, optional) - `temperature` (number, optional) - `maxTokens` (number, optional) - `timeoutMs` (number, optional) +`thinking` accepts the standard OpenClaw reasoning presets, such as `low` or `medium`. 
+ ## Output Returns `details.json` containing the parsed JSON (and validates against @@ -90,6 +93,7 @@ Returns `details.json` containing the parsed JSON (and validates against ```lobster openclaw.invoke --tool llm-task --action json --args-json '{ "prompt": "Given the input email, return intent and draft.", + "thinking": "low", "input": { "subject": "Hello", "body": "Can you help?" diff --git a/docs/tools/lobster.md b/docs/tools/lobster.md index 65ff4f56dfb..5c8a47e4d62 100644 --- a/docs/tools/lobster.md +++ b/docs/tools/lobster.md @@ -106,6 +106,7 @@ Use it in a pipeline: ```lobster openclaw.invoke --tool llm-task --action json --args-json '{ "prompt": "Given the input email, return intent and draft.", + "thinking": "low", "input": { "subject": "Hello", "body": "Can you help?" }, "schema": { "type": "object", diff --git a/docs/tools/plugin.md b/docs/tools/plugin.md index a257d8b7a45..5455bb2b38d 100644 --- a/docs/tools/plugin.md +++ b/docs/tools/plugin.md @@ -43,6 +43,55 @@ prerelease tag such as `@beta`/`@rc` or an exact prerelease version. See [Voice Call](/plugins/voice-call) for a concrete example plugin. Looking for third-party listings? See [Community plugins](/plugins/community). +## Architecture + +OpenClaw's plugin system has four layers: + +1. **Manifest + discovery** + OpenClaw finds candidate plugins from configured paths, workspace roots, + global extension roots, and bundled extensions. Discovery reads + `openclaw.plugin.json` plus package metadata first. +2. **Enablement + validation** + Core decides whether a discovered plugin is enabled, disabled, blocked, or + selected for an exclusive slot such as memory. +3. **Runtime loading** + Enabled plugins are loaded in-process via jiti and register capabilities into + a central registry. +4. **Surface consumption** + The rest of OpenClaw reads the registry to expose tools, channels, provider + setup, hooks, HTTP routes, CLI commands, and services. 
+ +The important design boundary: + +- discovery + config validation should work from **manifest/schema metadata** + without executing plugin code +- runtime behavior comes from the plugin module's `register(api)` path + +That split lets OpenClaw validate config, explain missing/disabled plugins, and +build UI/schema hints before the full runtime is active. + +## Execution model + +Plugins run **in-process** with the Gateway. They are not sandboxed. A loaded +plugin has the same process-level trust boundary as core code. + +Implications: + +- a plugin can register tools, network handlers, hooks, and services +- a plugin bug can crash or destabilize the gateway +- a malicious plugin is equivalent to arbitrary code execution inside the + OpenClaw process + +Use allowlists and explicit install/load paths for non-bundled plugins. Treat +workspace plugins as development-time code, not production defaults. + +Important trust note: + +- `plugins.allow` trusts **plugin ids**, not source provenance. +- A workspace plugin with the same id as a bundled plugin intentionally shadows + the bundled copy when that workspace plugin is enabled/allowlisted. +- This is normal and useful for local development, patch testing, and hotfixes. + ## Available plugins (official) - Microsoft Teams is plugin-only as of 2026.1.15; install `@openclaw/msteams` if you use Teams. @@ -78,6 +127,48 @@ Plugins can register: Plugins run **in‑process** with the Gateway, so treat them as trusted code. Tool authoring guide: [Plugin agent tools](/plugins/agent-tools). +## Load pipeline + +At startup, OpenClaw does roughly this: + +1. discover candidate plugin roots +2. read `openclaw.plugin.json` and package metadata +3. reject unsafe candidates +4. normalize plugin config (`plugins.enabled`, `allow`, `deny`, `entries`, + `slots`, `load.paths`) +5. decide enablement for each candidate +6. load enabled modules via jiti +7. call `register(api)` and collect registrations into the plugin registry +8. 
expose the registry to commands/runtime surfaces
+
+The safety gates happen **before** runtime execution. Candidates are blocked
+when the entry escapes the plugin root, the path is world-writable, or path
+ownership looks suspicious for non-bundled plugins.
+
+### Manifest-first behavior
+
+The manifest is the control-plane source of truth. OpenClaw uses it to:
+
+- identify the plugin
+- discover declared channels/skills/config schema
+- validate `plugins.entries.<id>.config`
+- augment Control UI labels/placeholders
+- show install/catalog metadata
+
+The runtime module is the data-plane part. It registers actual behavior such as
+hooks, tools, commands, or provider flows.
+
+### What the loader caches
+
+OpenClaw keeps short in-process caches for:
+
+- discovery results
+- manifest registry data
+- loaded plugin registries
+
+These caches reduce bursty startup and repeated command overhead. They are safe
+to think of as short-lived performance caches, not persistence.
+
 ## Runtime helpers
 
 Plugins can access selected core helpers via `api.runtime`. For telephony TTS:
@@ -259,6 +350,10 @@ Default-on bundled plugin exceptions:
 
 Installed plugins are enabled by default, but can be disabled the same way.
 
+Workspace plugins are **disabled by default** unless you explicitly enable them
+or allowlist them. This is intentional: a checked-out repo should not silently
+become production gateway code.
+
 Hardening notes:
 
 - If `plugins.allow` is empty and non-bundled plugins are discoverable, OpenClaw logs a startup warning with plugin ids and sources.
@@ -275,6 +370,34 @@ manifest. If multiple plugins resolve to the same id, the first match in the
 order above wins and lower-precedence copies are ignored.
+That means: + +- workspace plugins intentionally shadow bundled plugins with the same id +- `plugins.allow: ["foo"]` authorizes the active `foo` plugin by id, even when + the active copy comes from the workspace instead of the bundled extension root +- if you need stricter provenance control, use explicit install/load paths and + inspect the resolved plugin source before enabling it + +### Enablement rules + +Enablement is resolved after discovery: + +- `plugins.enabled: false` disables all plugins +- `plugins.deny` always wins +- `plugins.entries..enabled: false` disables that plugin +- workspace-origin plugins are disabled by default +- allowlists restrict the active set when `plugins.allow` is non-empty +- allowlists are **id-based**, not source-based +- bundled plugins are disabled by default unless: + - the bundled id is in the built-in default-on set, or + - you explicitly enable it, or + - channel config implicitly enables the bundled channel plugin +- exclusive slots can force-enable the selected plugin for that slot + +In current core, bundled default-on ids include local/provider helpers such as +`ollama`, `sglang`, `vllm`, plus `device-pair`, `phone-control`, and +`talk-voice`. + ### Package packs A plugin directory may include a `package.json` with `openclaw.extensions`: @@ -354,6 +477,34 @@ Default plugin ids: If a plugin exports `id`, OpenClaw uses it but warns when it doesn’t match the configured id. +## Registry model + +Loaded plugins do not directly mutate random core globals. They register into a +central plugin registry. + +The registry tracks: + +- plugin records (identity, source, origin, status, diagnostics) +- tools +- legacy hooks and typed hooks +- channels +- providers +- gateway RPC handlers +- HTTP routes +- CLI registrars +- background services +- plugin-owned commands + +Core features then read from that registry instead of talking to plugin modules +directly. 
This keeps loading one-way: + +- plugin module -> registry registration +- core runtime -> registry consumption + +That separation matters for maintainability. It means most core surfaces only +need one integration point: "read the registry", not "special-case every plugin +module". + ## Config ```json5 @@ -390,6 +541,17 @@ Validation rules (strict): `openclaw.plugin.json` (`configSchema`). - If a plugin is disabled, its config is preserved and a **warning** is emitted. +### Disabled vs missing vs invalid + +These states are intentionally different: + +- **disabled**: plugin exists, but enablement rules turned it off +- **missing**: config references a plugin id that discovery did not find +- **invalid**: plugin exists, but its config does not match the declared schema + +OpenClaw preserves config for disabled plugins so toggling them back on is not +destructive. + ## Plugin slots (exclusive categories) Some plugin categories are **exclusive** (only one active at a time). Use @@ -488,6 +650,19 @@ Plugins export either: - A function: `(api) => { ... }` - An object: `{ id, name, configSchema, register(api) { ... } }` +`register(api)` is where plugins attach behavior. Common registrations include: + +- `registerTool` +- `registerHook` +- `on(...)` for typed lifecycle hooks +- `registerChannel` +- `registerProvider` +- `registerHttpRoute` +- `registerCommand` +- `registerCli` +- `registerContextEngine` +- `registerService` + Context engine plugins can also register a runtime-owned context manager: ```ts @@ -603,13 +778,188 @@ Migration guidance: ## Provider plugins (model auth) -Plugins can register **model provider auth** flows so users can run OAuth or -API-key setup inside OpenClaw (no external scripts needed). +Plugins can register **model providers** so users can run OAuth or API-key +setup inside OpenClaw, surface provider setup in onboarding/model-pickers, and +contribute implicit provider discovery. 
+ +Provider plugins are the modular extension seam for model-provider setup. They +are not just "OAuth helpers" anymore. + +### Provider plugin lifecycle + +A provider plugin can participate in five distinct phases: + +1. **Auth** + `auth[].run(ctx)` performs OAuth, API-key capture, device code, or custom + setup and returns auth profiles plus optional config patches. +2. **Non-interactive setup** + `auth[].runNonInteractive(ctx)` handles `openclaw onboard --non-interactive` + without prompts. Use this when the provider needs custom headless setup + beyond the built-in simple API-key paths. +3. **Wizard integration** + `wizard.onboarding` adds an entry to `openclaw onboard`. + `wizard.modelPicker` adds a setup entry to the model picker. +4. **Implicit discovery** + `discovery.run(ctx)` can contribute provider config automatically during + model resolution/listing. +5. **Post-selection follow-up** + `onModelSelected(ctx)` runs after a model is chosen. Use this for provider- + specific work such as downloading a local model. + +This is the recommended split because these phases have different lifecycle +requirements: + +- auth is interactive and writes credentials/config +- non-interactive setup is flag/env-driven and must not prompt +- wizard metadata is static and UI-facing +- discovery should be safe, quick, and failure-tolerant +- post-select hooks are side effects tied to the chosen model + +### Provider auth contract + +`auth[].run(ctx)` returns: + +- `profiles`: auth profiles to write +- `configPatch`: optional `openclaw.json` changes +- `defaultModel`: optional `provider/model` ref +- `notes`: optional user-facing notes + +Core then: + +1. writes the returned auth profiles +2. applies auth-profile config wiring +3. merges the config patch +4. optionally applies the default model +5. 
runs the provider's `onModelSelected` hook when appropriate + +That means a provider plugin owns the provider-specific setup logic, while core +owns the generic persistence and config-merge path. + +### Provider non-interactive contract + +`auth[].runNonInteractive(ctx)` is optional. Implement it when the provider +needs headless setup that cannot be expressed through the built-in generic +API-key flows. + +The non-interactive context includes: + +- the current and base config +- parsed onboarding CLI options +- runtime logging/error helpers +- agent/workspace dirs +- `resolveApiKey(...)` to read provider keys from flags, env, or existing auth + profiles while honoring `--secret-input-mode` +- `toApiKeyCredential(...)` to convert a resolved key into an auth-profile + credential with the right plaintext vs secret-ref storage + +Use this surface for providers such as: + +- self-hosted OpenAI-compatible runtimes that need `--custom-base-url` + + `--custom-model-id` +- provider-specific non-interactive verification or config synthesis + +Do not prompt from `runNonInteractive`. Reject missing inputs with actionable +errors instead. + +### Provider wizard metadata + +`wizard.onboarding` controls how the provider appears in grouped onboarding: + +- `choiceId`: auth-choice value +- `choiceLabel`: option label +- `choiceHint`: short hint +- `groupId`: group bucket id +- `groupLabel`: group label +- `groupHint`: group hint +- `methodId`: auth method to run + +`wizard.modelPicker` controls how a provider appears as a "set this up now" +entry in model selection: + +- `label` +- `hint` +- `methodId` + +When a provider has multiple auth methods, the wizard can either point at one +explicit method or let OpenClaw synthesize per-method choices. 
+ +OpenClaw validates provider wizard metadata when the plugin registers: + +- duplicate or blank auth-method ids are rejected +- wizard metadata is ignored when the provider has no auth methods +- invalid `methodId` bindings are downgraded to warnings and fall back to the + provider's remaining auth methods + +### Provider discovery contract + +`discovery.run(ctx)` returns one of: + +- `{ provider }` +- `{ providers }` +- `null` + +Use `{ provider }` for the common case where the plugin owns one provider id. +Use `{ providers }` when a plugin discovers multiple provider entries. + +The discovery context includes: + +- the current config +- agent/workspace dirs +- process env +- a helper to resolve the provider API key and a discovery-safe API key value + +Discovery should be: + +- fast +- best-effort +- safe to skip on failure +- careful about side effects + +It should not depend on prompts or long-running setup. + +### Discovery ordering + +Provider discovery runs in ordered phases: + +- `simple` +- `profile` +- `paired` +- `late` + +Use: + +- `simple` for cheap environment-only discovery +- `profile` when discovery depends on auth profiles +- `paired` for providers that need to coordinate with another discovery step +- `late` for expensive or local-network probing + +Most self-hosted providers should use `late`. + +### Good provider-plugin boundaries + +Good fit for provider plugins: + +- local/self-hosted providers with custom setup flows +- provider-specific OAuth/device-code login +- implicit discovery of local model servers +- post-selection side effects such as model pulls + +Less compelling fit: + +- trivial API-key-only providers that differ only by env var, base URL, and one + default model + +Those can still become plugins, but the main modularity payoff comes from +extracting behavior-rich providers first. Register a provider via `api.registerProvider(...)`. Each provider exposes one -or more auth methods (OAuth, API key, device code, etc.). 
These methods power: +or more auth methods (OAuth, API key, device code, etc.). Those methods can +power: - `openclaw models auth login --provider [--method ]` +- `openclaw onboard` +- model-picker “custom provider” setup entries +- implicit provider discovery during model resolution/listing Example: @@ -642,6 +992,31 @@ api.registerProvider({ }, }, ], + wizard: { + onboarding: { + choiceId: "acme", + choiceLabel: "AcmeAI", + groupId: "acme", + groupLabel: "AcmeAI", + methodId: "oauth", + }, + modelPicker: { + label: "AcmeAI (custom)", + hint: "Connect a self-hosted AcmeAI endpoint", + methodId: "oauth", + }, + }, + discovery: { + order: "late", + run: async () => ({ + provider: { + baseUrl: "https://acme.example/v1", + api: "openai-completions", + apiKey: "${ACME_API_KEY}", + models: [], + }, + }), + }, }); ``` @@ -649,8 +1024,19 @@ Notes: - `run` receives a `ProviderAuthContext` with `prompter`, `runtime`, `openUrl`, and `oauth.createVpsAwareHandlers` helpers. +- `runNonInteractive` receives a `ProviderAuthMethodNonInteractiveContext` + with `opts`, `resolveApiKey`, and `toApiKeyCredential` helpers for + headless onboarding. - Return `configPatch` when you need to add default models or provider config. - Return `defaultModel` so `--set-default` can update agent defaults. +- `wizard.onboarding` adds a provider choice to `openclaw onboard`. +- `wizard.modelPicker` adds a “setup this provider” entry to the model picker. +- `discovery.run` returns either `{ provider }` for the plugin’s own provider id + or `{ providers }` for multi-provider discovery. +- `discovery.order` controls when the provider runs relative to built-in + discovery phases: `simple`, `profile`, `paired`, or `late`. +- `onModelSelected` is the post-selection hook for provider-specific follow-up + work such as pulling a local model. ### Register a messaging channel @@ -952,6 +1338,8 @@ Plugins run in-process with the Gateway. Treat them as trusted code: - Only install plugins you trust. 
- Prefer `plugins.allow` allowlists. +- Remember that `plugins.allow` is id-based, so an enabled workspace plugin can + intentionally shadow a bundled plugin with the same id. - Restart the Gateway after changes. ## Testing plugins diff --git a/docs/tools/slash-commands.md b/docs/tools/slash-commands.md index dea4fb0d30f..e0a9f1aa365 100644 --- a/docs/tools/slash-commands.md +++ b/docs/tools/slash-commands.md @@ -14,7 +14,7 @@ The host-only bash chat command uses `! ` (with `/bash ` as an alias). There are two related systems: - **Commands**: standalone `/...` messages. -- **Directives**: `/think`, `/verbose`, `/reasoning`, `/elevated`, `/exec`, `/model`, `/queue`. +- **Directives**: `/think`, `/fast`, `/verbose`, `/reasoning`, `/elevated`, `/exec`, `/model`, `/queue`. - Directives are stripped from the message before the model sees it. - In normal chat messages (not directive-only), they are treated as “inline hints” and do **not** persist session settings. - In directive-only messages (the message contains only directives), they persist to the session and reply with an acknowledgement. @@ -102,6 +102,7 @@ Text + native (when enabled): - `/send on|off|inherit` (owner-only) - `/reset` or `/new [model]` (optional model hint; remainder is passed through) - `/think ` (dynamic choices by model/provider; aliases: `/thinking`, `/t`) +- `/fast status|on|off` (omitting the arg shows the current effective fast-mode state) - `/verbose on|full|off` (alias: `/v`) - `/reasoning on|off|stream` (alias: `/reason`; when on, sends a separate message prefixed `Reasoning:`; `stream` = Telegram draft only) - `/elevated on|off|ask|full` (alias: `/elev`; `full` skips exec approvals) @@ -123,12 +124,14 @@ Notes: - `/new ` accepts a model alias, `provider/model`, or a provider name (fuzzy match); if no match, the text is treated as the message body. - For full provider usage breakdown, use `openclaw status --usage`. 
- `/allowlist add|remove` requires `commands.config=true` and honors channel `configWrites`.
+- In multi-account channels, config-targeted `/allowlist --account <accountId>` and `/config set channels.<channel>.accounts.<accountId>.<key>` also honor the target account's `configWrites`.
- `/usage` controls the per-response usage footer; `/usage cost` prints a local cost summary from OpenClaw session logs.
- `/restart` is enabled by default; set `commands.restart: false` to disable it.
- Discord-only native command: `/vc join|leave|status` controls voice channels (requires `channels.discord.voice` and native commands; not available as text).
- Discord thread-binding commands (`/focus`, `/unfocus`, `/agents`, `/session idle`, `/session max-age`) require effective thread bindings to be enabled (`session.threadBindings.enabled` and/or `channels.discord.threadBindings.enabled`).
- ACP command reference and runtime behavior: [ACP Agents](/tools/acp-agents).
- `/verbose` is meant for debugging and extra visibility; keep it **off** in normal use.
+- `/fast on|off` persists a session override. Use the Sessions UI `inherit` option to clear it and fall back to config defaults.
- Tool failure summaries are still shown when relevant, but detailed failure text is only included when `/verbose` is `on` or `full`.
- `/reasoning` (and `/verbose`) are risky in group settings: they may reveal internal reasoning or tool output you did not intend to expose. Prefer leaving them off, especially in group chats.
- **Fast path:** command-only messages from allowlisted senders are handled immediately (bypass queue + model).
diff --git a/docs/tools/subagents.md b/docs/tools/subagents.md
index d5ec66b884b..dabfc91dfc2 100644
--- a/docs/tools/subagents.md
+++ b/docs/tools/subagents.md
@@ -182,6 +182,7 @@ Each level only sees announces from its direct children.
 
 ### Tool policy by depth
 
+- Role and control scope are written into session metadata at spawn time. 
That keeps flat or restored session keys from accidentally regaining orchestrator privileges. - **Depth 1 (orchestrator, when `maxSpawnDepth >= 2`)**: Gets `sessions_spawn`, `subagents`, `sessions_list`, `sessions_history` so it can manage its children. Other session/system tools remain denied. - **Depth 1 (leaf, when `maxSpawnDepth == 1`)**: No session tools (current default behavior). - **Depth 2 (leaf worker)**: No session tools — `sessions_spawn` is always denied at depth 2. Cannot spawn further children. diff --git a/docs/tools/thinking.md b/docs/tools/thinking.md index 9a2fdc87ea6..045911c92b2 100644 --- a/docs/tools/thinking.md +++ b/docs/tools/thinking.md @@ -1,7 +1,7 @@ --- -summary: "Directive syntax for /think + /verbose and how they affect model reasoning" +summary: "Directive syntax for /think, /fast, /verbose, and reasoning visibility" read_when: - - Adjusting thinking or verbose directive parsing or defaults + - Adjusting thinking, fast-mode, or verbose directive parsing or defaults title: "Thinking Levels" --- @@ -42,6 +42,21 @@ title: "Thinking Levels" - **Embedded Pi**: the resolved level is passed to the in-process Pi agent runtime. +## Fast mode (/fast) + +- Levels: `on|off`. +- Directive-only message toggles a session fast-mode override and replies `Fast mode enabled.` / `Fast mode disabled.`. +- Send `/fast` (or `/fast status`) with no mode to see the current effective fast-mode state. +- OpenClaw resolves fast mode in this order: + 1. Inline/directive-only `/fast on|off` + 2. Session override + 3. Per-model config: `agents.defaults.models["<provider>/<model>"].params.fastMode` + 4. Fallback: `off` +- For `openai/*`, fast mode applies the OpenAI fast profile: `service_tier=priority` when supported, plus low reasoning effort and low text verbosity. +- For `openai-codex/*`, fast mode applies the same low-latency profile on Codex Responses. OpenClaw keeps one shared `/fast` toggle across both auth paths. 
+- For direct `anthropic/*` API-key requests, fast mode maps to Anthropic service tiers: `/fast on` sets `service_tier=auto`, `/fast off` sets `service_tier=standard_only`. +- Anthropic fast mode is API-key only. OpenClaw skips Anthropic service-tier injection for Claude setup-token / OAuth auth and for non-Anthropic proxy base URLs. + ## Verbose directives (/verbose or /v) - Levels: `on` (minimal) | `full` | `off` (default). diff --git a/docs/tools/web.md b/docs/tools/web.md index 1eeb4eba7db..a2aa1d37bfd 100644 --- a/docs/tools/web.md +++ b/docs/tools/web.md @@ -2,7 +2,7 @@ summary: "Web search + fetch tools (Brave, Gemini, Grok, Kimi, and Perplexity providers)" read_when: - You want to enable web_search or web_fetch - - You need Brave or Perplexity Search API key setup + - You need provider API key setup - You want to use Gemini with Google Search grounding title: "Web Tools" --- @@ -49,6 +49,12 @@ The table above is alphabetical. If no `provider` is explicitly set, runtime aut If no keys are found, it falls back to Brave (you'll get a missing-key error prompting you to configure one). +Runtime SecretRef behavior: + +- Web tool SecretRefs are resolved atomically at gateway startup/reload. +- In auto-detect mode, OpenClaw resolves only the selected provider key. Non-selected provider SecretRefs stay inactive until selected. +- If the selected provider SecretRef is unresolved and no provider env fallback exists, startup/reload fails fast. + ## Setting up web search Use `openclaw configure --section web` to set up your API key and choose a provider. @@ -59,8 +65,8 @@ Use `openclaw configure --section web` to set up your API key and choose a provi 2. In the dashboard, choose the **Search** plan and generate an API key. 3. Run `openclaw configure --section web` to store the key in config, or set `BRAVE_API_KEY` in your environment. -Each Brave plan includes **$5/month in free credit** (renewing). 
The Search -plan costs $5 per 1,000 requests, so the credit covers 1,000 queries/month. Set +Each Brave plan includes **\$5/month in free credit** (renewing). The Search +plan costs \$5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans and pricing. @@ -77,9 +83,25 @@ See [Perplexity Search API Docs](https://docs.perplexity.ai/guides/search-quicks ### Where to store the key -**Via config:** run `openclaw configure --section web`. It stores the key under `tools.web.search.apiKey` or `tools.web.search.perplexity.apiKey`, depending on provider. +**Via config:** run `openclaw configure --section web`. It stores the key under the provider-specific config path: -**Via environment:** set `PERPLEXITY_API_KEY`, `OPENROUTER_API_KEY`, or `BRAVE_API_KEY` in the Gateway process environment. For a gateway install, put it in `~/.openclaw/.env` (or your service environment). See [Env vars](/help/faq#how-does-openclaw-load-environment-variables). +- Brave: `tools.web.search.apiKey` +- Gemini: `tools.web.search.gemini.apiKey` +- Grok: `tools.web.search.grok.apiKey` +- Kimi: `tools.web.search.kimi.apiKey` +- Perplexity: `tools.web.search.perplexity.apiKey` + +All of these fields also support SecretRef objects. + +**Via environment:** set provider env vars in the Gateway process environment: + +- Brave: `BRAVE_API_KEY` +- Gemini: `GEMINI_API_KEY` +- Grok: `XAI_API_KEY` +- Kimi: `KIMI_API_KEY` or `MOONSHOT_API_KEY` +- Perplexity: `PERPLEXITY_API_KEY` or `OPENROUTER_API_KEY` + +For a gateway install, put these in `~/.openclaw/.env` (or your service environment). See [Env vars](/help/faq#how-does-openclaw-load-environment-variables). ### Config examples @@ -216,6 +238,7 @@ Search the web using your configured provider. 
- **Grok**: `XAI_API_KEY` or `tools.web.search.grok.apiKey` - **Kimi**: `KIMI_API_KEY`, `MOONSHOT_API_KEY`, or `tools.web.search.kimi.apiKey` - **Perplexity**: `PERPLEXITY_API_KEY`, `OPENROUTER_API_KEY`, or `tools.web.search.perplexity.apiKey` +- All provider key fields above support SecretRef objects. ### Config @@ -310,6 +333,7 @@ Fetch a URL and extract readable content. - `tools.web.fetch.enabled` must not be `false` (default: enabled) - Optional Firecrawl fallback: set `tools.web.fetch.firecrawl.apiKey` or `FIRECRAWL_API_KEY`. +- `tools.web.fetch.firecrawl.apiKey` supports SecretRef objects. ### web_fetch config @@ -351,6 +375,8 @@ Notes: - `web_fetch` uses Readability (main-content extraction) first, then Firecrawl (if configured). If both fail, the tool returns an error. - Firecrawl requests use bot-circumvention mode and cache results by default. +- Firecrawl SecretRefs are resolved only when Firecrawl is active (`tools.web.fetch.enabled !== false` and `tools.web.fetch.firecrawl.enabled !== false`). +- If Firecrawl is active and its SecretRef is unresolved with no `FIRECRAWL_API_KEY` fallback, startup/reload fails fast. - `web_fetch` sends a Chrome-like User-Agent and `Accept-Language` by default; override `userAgent` if needed. - `web_fetch` blocks private/internal hostnames and re-checks redirects (limit with `maxRedirects`). - `maxChars` is clamped to `tools.web.fetch.maxCharsCap`. diff --git a/docs/web/control-ui.md b/docs/web/control-ui.md index c96a91de0ba..73487cc0eae 100644 --- a/docs/web/control-ui.md +++ b/docs/web/control-ui.md @@ -75,7 +75,7 @@ The Control UI can localize itself on first load based on your browser locale, a - Stream tool calls + live tool output cards in Chat (agent events) - Channels: WhatsApp/Telegram/Discord/Slack + plugin channels (Mattermost, etc.) 
status + QR login + per-channel config (`channels.status`, `web.login.*`, `config.patch`) - Instances: presence list + refresh (`system-presence`) -- Sessions: list + per-session thinking/verbose overrides (`sessions.list`, `sessions.patch`) +- Sessions: list + per-session thinking/fast/verbose/reasoning overrides (`sessions.list`, `sessions.patch`) - Cron jobs: list/add/edit/run/enable/disable + run history (`cron.*`) - Skills: status, enable/disable, install, API key updates (`skills.*`) - Nodes: list + caps (`node.list`) @@ -174,7 +174,12 @@ OpenClaw **blocks** Control UI connections without device identity. } ``` -`allowInsecureAuth` does not bypass Control UI device identity or pairing checks. +`allowInsecureAuth` is a local compatibility toggle only: + +- It allows localhost Control UI sessions to proceed without device identity in + non-secure HTTP contexts. +- It does not bypass pairing checks. +- It does not relax remote (non-localhost) device identity requirements. **Break-glass only:** diff --git a/docs/web/dashboard.md b/docs/web/dashboard.md index ab5872a6754..86cd6fffd4e 100644 --- a/docs/web/dashboard.md +++ b/docs/web/dashboard.md @@ -45,6 +45,8 @@ Prefer localhost, Tailscale Serve, or an SSH tunnel. ## If you see “unauthorized” / 1008 - Ensure the gateway is reachable (local: `openclaw status`; remote: SSH tunnel `ssh -N -L 18789:127.0.0.1:18789 user@host` then open `http://127.0.0.1:18789/`). +- For `AUTH_TOKEN_MISMATCH`, clients may do one trusted retry with a cached device token when the gateway returns retry hints. If auth still fails after that retry, resolve token drift manually. +- For token drift repair steps, follow [Token drift recovery checklist](/cli/devices#token-drift-recovery-checklist). 
- Retrieve or supply the token from the gateway host: - Plaintext config: `openclaw config get gateway.auth.token` - SecretRef-managed config: resolve the external secret provider or export `OPENCLAW_GATEWAY_TOKEN` in this shell, then rerun `openclaw dashboard` diff --git a/docs/web/tui.md b/docs/web/tui.md index 0c09cb1f877..d1869821d68 100644 --- a/docs/web/tui.md +++ b/docs/web/tui.md @@ -37,7 +37,7 @@ Use `--password` if your Gateway uses password auth. - Header: connection URL, current agent, current session. - Chat log: user messages, assistant replies, system notices, tool cards. - Status line: connection/run state (connecting, running, streaming, idle, error). -- Footer: connection state + agent + session + model + think/verbose/reasoning + token counts + deliver. +- Footer: connection state + agent + session + model + think/fast/verbose/reasoning + token counts + deliver. - Input: text editor with autocomplete. ## Mental model: agents + sessions @@ -92,6 +92,7 @@ Core: Session controls: - `/think ` +- `/fast ` - `/verbose ` - `/reasoning ` - `/usage ` diff --git a/docs/zh-CN/automation/cron-jobs.md b/docs/zh-CN/automation/cron-jobs.md index 185779a2636..cfdb0c178e1 100644 --- a/docs/zh-CN/automation/cron-jobs.md +++ b/docs/zh-CN/automation/cron-jobs.md @@ -28,7 +28,9 @@ x-i18n: - 任务持久化存储在 `~/.openclaw/cron/` 下,因此重启不会丢失计划。 - 两种执行方式: - **主会话**:入队一个系统事件,然后在下一次心跳时运行。 - - **隔离式**:在 `cron:` 中运行专用智能体轮次,可投递摘要(默认 announce)或不投递。 + - **隔离式**:在 `cron:` 或自定义会话中运行专用智能体轮次,可投递摘要(默认 announce)或不投递。 + - **当前会话**:绑定到创建定时任务时的会话 (`sessionTarget: "current"`)。 + - **自定义会话**:在持久化的命名会话中运行 (`sessionTarget: "session:custom-id"`)。 - 唤醒是一等功能:任务可以请求"立即唤醒"或"下次心跳时"。 ## 快速开始(可操作) @@ -83,6 +85,14 @@ openclaw cron add \ 2. 
**选择运行位置** - `sessionTarget: "main"` → 在下一次心跳时使用主会话上下文运行。 - `sessionTarget: "isolated"` → 在 `cron:` 中运行专用智能体轮次。 + - `sessionTarget: "current"` → 绑定到当前会话(创建时解析为 `session:`)。 + - `sessionTarget: "session:custom-id"` → 在持久化的命名会话中运行,跨运行保持上下文。 + + 默认行为(保持不变): + - `systemEvent` 负载默认使用 `main` + - `agentTurn` 负载默认使用 `isolated` + + 要使用当前会话绑定,需显式设置 `sessionTarget: "current"`。 3. **选择负载** - 主会话 → `payload.kind = "systemEvent"` @@ -129,12 +139,13 @@ Cron 表达式使用 `croner`。如果省略时区,将使用 Gateway网关主 #### 隔离任务(专用定时会话) -隔离任务在会话 `cron:` 中运行专用智能体轮次。 +隔离任务在会话 `cron:` 或自定义会话中运行专用智能体轮次。 关键行为: - 提示以 `[cron: <任务名称>]` 为前缀,便于追踪。 -- 每次运行都会启动一个**全新的会话 ID**(不继承之前的对话)。 +- 每次运行都会启动一个**全新的会话 ID**(不继承之前的对话),除非使用自定义会话。 +- 自定义会话(`session:xxx`)可跨运行保持上下文,适用于如每日站会等需要基于前次摘要的工作流。 - 如果未指定 `delivery`,隔离任务会默认以“announce”方式投递摘要。 - `delivery.mode` 可选 `announce`(投递摘要)或 `none`(内部运行)。 diff --git a/extensions/.npmignore b/extensions/.npmignore new file mode 100644 index 00000000000..7cd53fdbc08 --- /dev/null +++ b/extensions/.npmignore @@ -0,0 +1 @@ +**/node_modules/ diff --git a/extensions/acpx/openclaw.plugin.json b/extensions/acpx/openclaw.plugin.json index 1047c57484d..2dd55faf3d6 100644 --- a/extensions/acpx/openclaw.plugin.json +++ b/extensions/acpx/openclaw.plugin.json @@ -67,7 +67,7 @@ }, "expectedVersion": { "label": "Expected acpx Version", - "help": "Exact version to enforce (for example 0.1.15) or \"any\" to skip strict version matching." + "help": "Exact version to enforce (for example 0.1.16) or \"any\" to skip strict version matching." 
}, "cwd": { "label": "Default Working Directory", diff --git a/extensions/acpx/package.json b/extensions/acpx/package.json index 27d9296a9a2..d3947cc7552 100644 --- a/extensions/acpx/package.json +++ b/extensions/acpx/package.json @@ -1,10 +1,10 @@ { "name": "@openclaw/acpx", - "version": "2026.3.9", + "version": "2026.3.14", "description": "OpenClaw ACP runtime backend via acpx", "type": "module", "dependencies": { - "acpx": "0.1.15" + "acpx": "0.3.0" }, "openclaw": { "extensions": [ diff --git a/extensions/acpx/src/config.test.ts b/extensions/acpx/src/config.test.ts index ef1491d1682..45be08e3edf 100644 --- a/extensions/acpx/src/config.test.ts +++ b/extensions/acpx/src/config.test.ts @@ -5,7 +5,6 @@ import { ACPX_PINNED_VERSION, createAcpxPluginConfigSchema, resolveAcpxPluginConfig, - toAcpMcpServers, } from "./config.js"; describe("acpx plugin config parsing", () => { @@ -20,9 +19,9 @@ describe("acpx plugin config parsing", () => { expect(resolved.command).toBe(ACPX_BUNDLED_BIN); expect(resolved.expectedVersion).toBe(ACPX_PINNED_VERSION); expect(resolved.allowPluginLocalInstall).toBe(true); + expect(resolved.stripProviderAuthEnvVars).toBe(true); expect(resolved.cwd).toBe(path.resolve("/tmp/workspace")); expect(resolved.strictWindowsCmdWrapper).toBe(true); - expect(resolved.mcpServers).toEqual({}); }); it("accepts command override and disables plugin-local auto-install", () => { @@ -37,6 +36,7 @@ describe("acpx plugin config parsing", () => { expect(resolved.command).toBe(path.resolve(command)); expect(resolved.expectedVersion).toBeUndefined(); expect(resolved.allowPluginLocalInstall).toBe(false); + expect(resolved.stripProviderAuthEnvVars).toBe(false); }); it("resolves relative command paths against workspace directory", () => { @@ -50,6 +50,7 @@ describe("acpx plugin config parsing", () => { expect(resolved.command).toBe(path.resolve("/home/user/repos/openclaw", "../acpx/dist/cli.js")); expect(resolved.expectedVersion).toBeUndefined(); 
expect(resolved.allowPluginLocalInstall).toBe(false); + expect(resolved.stripProviderAuthEnvVars).toBe(false); }); it("keeps bare command names as-is", () => { @@ -63,6 +64,7 @@ describe("acpx plugin config parsing", () => { expect(resolved.command).toBe("acpx"); expect(resolved.expectedVersion).toBeUndefined(); expect(resolved.allowPluginLocalInstall).toBe(false); + expect(resolved.stripProviderAuthEnvVars).toBe(false); }); it("accepts exact expectedVersion override", () => { @@ -78,6 +80,7 @@ describe("acpx plugin config parsing", () => { expect(resolved.command).toBe(path.resolve(command)); expect(resolved.expectedVersion).toBe("0.1.99"); expect(resolved.allowPluginLocalInstall).toBe(false); + expect(resolved.stripProviderAuthEnvVars).toBe(false); }); it("treats expectedVersion=any as no version constraint", () => { @@ -134,97 +137,4 @@ describe("acpx plugin config parsing", () => { }), ).toThrow("strictWindowsCmdWrapper must be a boolean"); }); - - it("accepts mcp server maps", () => { - const resolved = resolveAcpxPluginConfig({ - rawConfig: { - mcpServers: { - canva: { - command: "npx", - args: ["-y", "mcp-remote@latest", "https://mcp.canva.com/mcp"], - env: { - CANVA_TOKEN: "secret", - }, - }, - }, - }, - workspaceDir: "/tmp/workspace", - }); - - expect(resolved.mcpServers).toEqual({ - canva: { - command: "npx", - args: ["-y", "mcp-remote@latest", "https://mcp.canva.com/mcp"], - env: { - CANVA_TOKEN: "secret", - }, - }, - }); - }); - - it("rejects invalid mcp server definitions", () => { - expect(() => - resolveAcpxPluginConfig({ - rawConfig: { - mcpServers: { - canva: { - command: "npx", - args: ["-y", 1], - }, - }, - }, - workspaceDir: "/tmp/workspace", - }), - ).toThrow( - "mcpServers.canva must have a command string, optional args array, and optional env object", - ); - }); - - it("schema accepts mcp server config", () => { - const schema = createAcpxPluginConfigSchema(); - if (!schema.safeParse) { - throw new Error("acpx config schema missing 
safeParse"); - } - const parsed = schema.safeParse({ - mcpServers: { - canva: { - command: "npx", - args: ["-y", "mcp-remote@latest"], - env: { - CANVA_TOKEN: "secret", - }, - }, - }, - }); - - expect(parsed.success).toBe(true); - }); -}); - -describe("toAcpMcpServers", () => { - it("converts plugin config maps into ACP stdio MCP entries", () => { - expect( - toAcpMcpServers({ - canva: { - command: "npx", - args: ["-y", "mcp-remote@latest", "https://mcp.canva.com/mcp"], - env: { - CANVA_TOKEN: "secret", - }, - }, - }), - ).toEqual([ - { - name: "canva", - command: "npx", - args: ["-y", "mcp-remote@latest", "https://mcp.canva.com/mcp"], - env: [ - { - name: "CANVA_TOKEN", - value: "secret", - }, - ], - }, - ]); - }); }); diff --git a/extensions/acpx/src/config.ts b/extensions/acpx/src/config.ts index 8866149bea9..ef0207a1365 100644 --- a/extensions/acpx/src/config.ts +++ b/extensions/acpx/src/config.ts @@ -8,7 +8,7 @@ export type AcpxPermissionMode = (typeof ACPX_PERMISSION_MODES)[number]; export const ACPX_NON_INTERACTIVE_POLICIES = ["deny", "fail"] as const; export type AcpxNonInteractivePermissionPolicy = (typeof ACPX_NON_INTERACTIVE_POLICIES)[number]; -export const ACPX_PINNED_VERSION = "0.1.15"; +export const ACPX_PINNED_VERSION = "0.1.16"; export const ACPX_VERSION_ANY = "any"; const ACPX_BIN_NAME = process.platform === "win32" ? 
"acpx.cmd" : "acpx"; export const ACPX_PLUGIN_ROOT = path.resolve(path.dirname(fileURLToPath(import.meta.url)), ".."); @@ -47,6 +47,7 @@ export type ResolvedAcpxPluginConfig = { command: string; expectedVersion?: string; allowPluginLocalInstall: boolean; + stripProviderAuthEnvVars: boolean; installCommand: string; cwd: string; permissionMode: AcpxPermissionMode; @@ -332,6 +333,7 @@ export function resolveAcpxPluginConfig(params: { workspaceDir: params.workspaceDir, }); const allowPluginLocalInstall = command === ACPX_BUNDLED_BIN; + const stripProviderAuthEnvVars = command === ACPX_BUNDLED_BIN; const configuredExpectedVersion = normalized.expectedVersion; const expectedVersion = configuredExpectedVersion === ACPX_VERSION_ANY @@ -343,6 +345,7 @@ export function resolveAcpxPluginConfig(params: { command, expectedVersion, allowPluginLocalInstall, + stripProviderAuthEnvVars, installCommand, cwd, permissionMode: normalized.permissionMode ?? DEFAULT_PERMISSION_MODE, diff --git a/extensions/acpx/src/ensure.test.ts b/extensions/acpx/src/ensure.test.ts index 3bc6f666031..c0bb5469b29 100644 --- a/extensions/acpx/src/ensure.test.ts +++ b/extensions/acpx/src/ensure.test.ts @@ -54,6 +54,49 @@ describe("acpx ensure", () => { } }); + function mockEnsureInstallFlow() { + spawnAndCollectMock + .mockResolvedValueOnce({ + stdout: "acpx 0.0.9\n", + stderr: "", + code: 0, + error: null, + }) + .mockResolvedValueOnce({ + stdout: "added 1 package\n", + stderr: "", + code: 0, + error: null, + }) + .mockResolvedValueOnce({ + stdout: `acpx ${ACPX_PINNED_VERSION}\n`, + stderr: "", + code: 0, + error: null, + }); + } + + function expectEnsureInstallCalls(stripProviderAuthEnvVars?: boolean) { + expect(spawnAndCollectMock.mock.calls[0]?.[0]).toMatchObject({ + command: "/plugin/node_modules/.bin/acpx", + args: ["--version"], + cwd: "/plugin", + stripProviderAuthEnvVars, + }); + expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({ + command: "npm", + args: ["install", "--omit=dev", 
"--no-save", `acpx@${ACPX_PINNED_VERSION}`], + cwd: "/plugin", + stripProviderAuthEnvVars, + }); + expect(spawnAndCollectMock.mock.calls[2]?.[0]).toMatchObject({ + command: "/plugin/node_modules/.bin/acpx", + args: ["--version"], + cwd: "/plugin", + stripProviderAuthEnvVars, + }); + } + it("accepts the pinned acpx version", async () => { spawnAndCollectMock.mockResolvedValueOnce({ stdout: `acpx ${ACPX_PINNED_VERSION}\n`, @@ -77,6 +120,7 @@ describe("acpx ensure", () => { command: "/plugin/node_modules/.bin/acpx", args: ["--version"], cwd: "/plugin", + stripProviderAuthEnvVars: undefined, }); }); @@ -148,29 +192,35 @@ describe("acpx ensure", () => { command: "/custom/acpx", args: ["--help"], cwd: "/custom", + stripProviderAuthEnvVars: undefined, + }); + }); + + it("forwards stripProviderAuthEnvVars to version checks", async () => { + spawnAndCollectMock.mockResolvedValueOnce({ + stdout: "Usage: acpx [options]\n", + stderr: "", + code: 0, + error: null, + }); + + await checkAcpxVersion({ + command: "/plugin/node_modules/.bin/acpx", + cwd: "/plugin", + expectedVersion: undefined, + stripProviderAuthEnvVars: true, + }); + + expect(spawnAndCollectMock).toHaveBeenCalledWith({ + command: "/plugin/node_modules/.bin/acpx", + args: ["--help"], + cwd: "/plugin", + stripProviderAuthEnvVars: true, }); }); it("installs and verifies pinned acpx when precheck fails", async () => { - spawnAndCollectMock - .mockResolvedValueOnce({ - stdout: "acpx 0.0.9\n", - stderr: "", - code: 0, - error: null, - }) - .mockResolvedValueOnce({ - stdout: "added 1 package\n", - stderr: "", - code: 0, - error: null, - }) - .mockResolvedValueOnce({ - stdout: `acpx ${ACPX_PINNED_VERSION}\n`, - stderr: "", - code: 0, - error: null, - }); + mockEnsureInstallFlow(); await ensureAcpx({ command: "/plugin/node_modules/.bin/acpx", @@ -179,11 +229,20 @@ describe("acpx ensure", () => { }); expect(spawnAndCollectMock).toHaveBeenCalledTimes(3); - expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({ - 
command: "npm", - args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`], - cwd: "/plugin", + expectEnsureInstallCalls(); + }); + + it("threads stripProviderAuthEnvVars through version probes and install", async () => { + mockEnsureInstallFlow(); + + await ensureAcpx({ + command: "/plugin/node_modules/.bin/acpx", + pluginRoot: "/plugin", + expectedVersion: ACPX_PINNED_VERSION, + stripProviderAuthEnvVars: true, }); + + expectEnsureInstallCalls(true); }); it("fails with actionable error when npm install fails", async () => { diff --git a/extensions/acpx/src/ensure.ts b/extensions/acpx/src/ensure.ts index 39307db1f4f..9b85d53f618 100644 --- a/extensions/acpx/src/ensure.ts +++ b/extensions/acpx/src/ensure.ts @@ -102,6 +102,7 @@ export async function checkAcpxVersion(params: { command: string; cwd?: string; expectedVersion?: string; + stripProviderAuthEnvVars?: boolean; spawnOptions?: SpawnCommandOptions; }): Promise { const expectedVersion = params.expectedVersion?.trim() || undefined; @@ -113,6 +114,7 @@ export async function checkAcpxVersion(params: { command: params.command, args: probeArgs, cwd, + stripProviderAuthEnvVars: params.stripProviderAuthEnvVars, }; let result: Awaited>; try { @@ -198,6 +200,7 @@ export async function ensureAcpx(params: { pluginRoot?: string; expectedVersion?: string; allowInstall?: boolean; + stripProviderAuthEnvVars?: boolean; spawnOptions?: SpawnCommandOptions; }): Promise { if (pendingEnsure) { @@ -214,6 +217,7 @@ export async function ensureAcpx(params: { command: params.command, cwd: pluginRoot, expectedVersion, + stripProviderAuthEnvVars: params.stripProviderAuthEnvVars, spawnOptions: params.spawnOptions, }); if (precheck.ok) { @@ -231,6 +235,7 @@ export async function ensureAcpx(params: { command: "npm", args: ["install", "--omit=dev", "--no-save", `acpx@${installVersion}`], cwd: pluginRoot, + stripProviderAuthEnvVars: params.stripProviderAuthEnvVars, }); if (install.error) { @@ -252,6 +257,7 @@ export async 
function ensureAcpx(params: { command: params.command, cwd: pluginRoot, expectedVersion, + stripProviderAuthEnvVars: params.stripProviderAuthEnvVars, spawnOptions: params.spawnOptions, }); diff --git a/extensions/acpx/src/runtime-internals/events.ts b/extensions/acpx/src/runtime-internals/events.ts index f83f4ddabb9..f0326bbe938 100644 --- a/extensions/acpx/src/runtime-internals/events.ts +++ b/extensions/acpx/src/runtime-internals/events.ts @@ -162,6 +162,39 @@ function resolveTextChunk(params: { }; } +function createTextDeltaEvent(params: { + content: string | null | undefined; + stream: "output" | "thought"; + tag?: AcpSessionUpdateTag; +}): AcpRuntimeEvent | null { + if (params.content == null || params.content.length === 0) { + return null; + } + return { + type: "text_delta", + text: params.content, + stream: params.stream, + ...(params.tag ? { tag: params.tag } : {}), + }; +} + +function createToolCallEvent(params: { + payload: Record; + tag: AcpSessionUpdateTag; +}): AcpRuntimeEvent { + const title = asTrimmedString(params.payload.title) || "tool call"; + const status = asTrimmedString(params.payload.status); + const toolCallId = asOptionalString(params.payload.toolCallId); + return { + type: "tool_call", + text: status ? `${title} (${status})` : title, + tag: params.tag, + ...(toolCallId ? { toolCallId } : {}), + ...(status ? { status } : {}), + title, + }; +} + export function parsePromptEventLine(line: string): AcpRuntimeEvent | null { const trimmed = line.trim(); if (!trimmed) { @@ -187,57 +220,28 @@ export function parsePromptEventLine(line: string): AcpRuntimeEvent | null { const tag = structured.tag; switch (type) { - case "text": { - const content = asString(payload.content); - if (content == null || content.length === 0) { - return null; - } - return { - type: "text_delta", - text: content, + case "text": + return createTextDeltaEvent({ + content: asString(payload.content), stream: "output", - ...(tag ? 
{ tag } : {}), - }; - } - case "thought": { - const content = asString(payload.content); - if (content == null || content.length === 0) { - return null; - } - return { - type: "text_delta", - text: content, + tag, + }); + case "thought": + return createTextDeltaEvent({ + content: asString(payload.content), stream: "thought", - ...(tag ? { tag } : {}), - }; - } - case "tool_call": { - const title = asTrimmedString(payload.title) || "tool call"; - const status = asTrimmedString(payload.status); - const toolCallId = asOptionalString(payload.toolCallId); - return { - type: "tool_call", - text: status ? `${title} (${status})` : title, + tag, + }); + case "tool_call": + return createToolCallEvent({ + payload, tag: (tag ?? "tool_call") as AcpSessionUpdateTag, - ...(toolCallId ? { toolCallId } : {}), - ...(status ? { status } : {}), - title, - }; - } - case "tool_call_update": { - const title = asTrimmedString(payload.title) || "tool call"; - const status = asTrimmedString(payload.status); - const toolCallId = asOptionalString(payload.toolCallId); - const text = status ? `${title} (${status})` : title; - return { - type: "tool_call", - text, + }); + case "tool_call_update": + return createToolCallEvent({ + payload, tag: (tag ?? "tool_call_update") as AcpSessionUpdateTag, - ...(toolCallId ? { toolCallId } : {}), - ...(status ? 
{ status } : {}), - title, - }; - } + }); case "agent_message_chunk": return resolveTextChunk({ payload, diff --git a/extensions/acpx/src/runtime-internals/mcp-agent-command.test.ts b/extensions/acpx/src/runtime-internals/mcp-agent-command.test.ts new file mode 100644 index 00000000000..5deed2e8f0f --- /dev/null +++ b/extensions/acpx/src/runtime-internals/mcp-agent-command.test.ts @@ -0,0 +1,59 @@ +import { describe, expect, it, vi } from "vitest"; + +const { spawnAndCollectMock } = vi.hoisted(() => ({ + spawnAndCollectMock: vi.fn(), +})); + +vi.mock("./process.js", () => ({ + spawnAndCollect: spawnAndCollectMock, +})); + +import { __testing, resolveAcpxAgentCommand } from "./mcp-agent-command.js"; + +describe("resolveAcpxAgentCommand", () => { + it("threads stripProviderAuthEnvVars through the config show probe", async () => { + spawnAndCollectMock.mockResolvedValueOnce({ + stdout: JSON.stringify({ + agents: { + codex: { + command: "custom-codex", + }, + }, + }), + stderr: "", + code: 0, + error: null, + }); + + const command = await resolveAcpxAgentCommand({ + acpxCommand: "/plugin/node_modules/.bin/acpx", + cwd: "/plugin", + agent: "codex", + stripProviderAuthEnvVars: true, + }); + + expect(command).toBe("custom-codex"); + expect(spawnAndCollectMock).toHaveBeenCalledWith( + { + command: "/plugin/node_modules/.bin/acpx", + args: ["--cwd", "/plugin", "config", "show"], + cwd: "/plugin", + stripProviderAuthEnvVars: true, + }, + undefined, + ); + }); +}); + +describe("buildMcpProxyAgentCommand", () => { + it("escapes Windows-style proxy paths without double-escaping backslashes", () => { + const quoted = __testing.quoteCommandPart( + "C:\\repo\\extensions\\acpx\\src\\runtime-internals\\mcp-proxy.mjs", + ); + + expect(quoted).toBe( + '"C:\\\\repo\\\\extensions\\\\acpx\\\\src\\\\runtime-internals\\\\mcp-proxy.mjs"', + ); + expect(quoted).not.toContain("\\\\\\"); + }); +}); diff --git a/extensions/acpx/src/runtime-internals/mcp-agent-command.ts 
b/extensions/acpx/src/runtime-internals/mcp-agent-command.ts index f494bd3d32b..481c8156aca 100644 --- a/extensions/acpx/src/runtime-internals/mcp-agent-command.ts +++ b/extensions/acpx/src/runtime-internals/mcp-agent-command.ts @@ -37,6 +37,10 @@ function quoteCommandPart(value: string): string { return `"${value.replace(/["\\]/g, "\\$&")}"`; } +export const __testing = { + quoteCommandPart, +}; + function toCommandLine(parts: string[]): string { return parts.map(quoteCommandPart).join(" "); } @@ -62,6 +66,7 @@ function readConfiguredAgentOverrides(value: unknown): Record { async function loadAgentOverrides(params: { acpxCommand: string; cwd: string; + stripProviderAuthEnvVars?: boolean; spawnOptions?: SpawnCommandOptions; }): Promise> { const result = await spawnAndCollect( @@ -69,6 +74,7 @@ async function loadAgentOverrides(params: { command: params.acpxCommand, args: ["--cwd", params.cwd, "config", "show"], cwd: params.cwd, + stripProviderAuthEnvVars: params.stripProviderAuthEnvVars, }, params.spawnOptions, ); @@ -87,12 +93,14 @@ export async function resolveAcpxAgentCommand(params: { acpxCommand: string; cwd: string; agent: string; + stripProviderAuthEnvVars?: boolean; spawnOptions?: SpawnCommandOptions; }): Promise { const normalizedAgent = normalizeAgentName(params.agent); const overrides = await loadAgentOverrides({ acpxCommand: params.acpxCommand, cwd: params.cwd, + stripProviderAuthEnvVars: params.stripProviderAuthEnvVars, spawnOptions: params.spawnOptions, }); return overrides[normalizedAgent] ?? ACPX_BUILTIN_AGENT_COMMANDS[normalizedAgent] ?? 
params.agent; diff --git a/extensions/acpx/src/runtime-internals/process.test.ts b/extensions/acpx/src/runtime-internals/process.test.ts index 0eee162eddf..ef0492308ae 100644 --- a/extensions/acpx/src/runtime-internals/process.test.ts +++ b/extensions/acpx/src/runtime-internals/process.test.ts @@ -2,7 +2,7 @@ import { spawn } from "node:child_process"; import { mkdir, mkdtemp, rm, writeFile } from "node:fs/promises"; import { tmpdir } from "node:os"; import path from "node:path"; -import { afterEach, describe, expect, it } from "vitest"; +import { afterEach, describe, expect, it, vi } from "vitest"; import { createWindowsCmdShimFixture } from "../../../shared/windows-cmd-shim-test-fixtures.js"; import { resolveSpawnCommand, @@ -28,6 +28,7 @@ async function createTempDir(): Promise { } afterEach(async () => { + vi.unstubAllEnvs(); while (tempDirs.length > 0) { const dir = tempDirs.pop(); if (!dir) { @@ -253,6 +254,44 @@ describe("waitForExit", () => { }); describe("spawnAndCollect", () => { + type SpawnedEnvSnapshot = { + openai?: string; + github?: string; + hf?: string; + openclaw?: string; + shell?: string; + }; + + function stubProviderAuthEnv(env: Record) { + for (const [key, value] of Object.entries(env)) { + vi.stubEnv(key, value); + } + } + + async function collectSpawnedEnvSnapshot(options?: { + stripProviderAuthEnvVars?: boolean; + openAiEnvKey?: string; + githubEnvKey?: string; + hfEnvKey?: string; + }): Promise { + const openAiEnvKey = options?.openAiEnvKey ?? "OPENAI_API_KEY"; + const githubEnvKey = options?.githubEnvKey ?? "GITHUB_TOKEN"; + const hfEnvKey = options?.hfEnvKey ?? 
"HF_TOKEN"; + const result = await spawnAndCollect({ + command: process.execPath, + args: [ + "-e", + `process.stdout.write(JSON.stringify({openai:process.env.${openAiEnvKey},github:process.env.${githubEnvKey},hf:process.env.${hfEnvKey},openclaw:process.env.OPENCLAW_API_KEY,shell:process.env.OPENCLAW_SHELL}))`, + ], + cwd: process.cwd(), + stripProviderAuthEnvVars: options?.stripProviderAuthEnvVars, + }); + + expect(result.code).toBe(0); + expect(result.error).toBeNull(); + return JSON.parse(result.stdout) as SpawnedEnvSnapshot; + } + it("returns abort error immediately when signal is already aborted", async () => { const controller = new AbortController(); controller.abort(); @@ -289,4 +328,53 @@ describe("spawnAndCollect", () => { const result = await resultPromise; expect(result.error?.name).toBe("AbortError"); }); + + it("strips shared provider auth env vars from spawned acpx children", async () => { + stubProviderAuthEnv({ + OPENAI_API_KEY: "openai-secret", + GITHUB_TOKEN: "gh-secret", + HF_TOKEN: "hf-secret", + OPENCLAW_API_KEY: "keep-me", + }); + const parsed = await collectSpawnedEnvSnapshot({ + stripProviderAuthEnvVars: true, + }); + expect(parsed.openai).toBeUndefined(); + expect(parsed.github).toBeUndefined(); + expect(parsed.hf).toBeUndefined(); + expect(parsed.openclaw).toBe("keep-me"); + expect(parsed.shell).toBe("acp"); + }); + + it("strips provider auth env vars case-insensitively", async () => { + stubProviderAuthEnv({ + OpenAI_Api_Key: "openai-secret", + Github_Token: "gh-secret", + OPENCLAW_API_KEY: "keep-me", + }); + const parsed = await collectSpawnedEnvSnapshot({ + stripProviderAuthEnvVars: true, + openAiEnvKey: "OpenAI_Api_Key", + githubEnvKey: "Github_Token", + }); + expect(parsed.openai).toBeUndefined(); + expect(parsed.github).toBeUndefined(); + expect(parsed.openclaw).toBe("keep-me"); + expect(parsed.shell).toBe("acp"); + }); + + it("preserves provider auth env vars for explicit custom commands by default", async () => { + 
stubProviderAuthEnv({ + OPENAI_API_KEY: "openai-secret", + GITHUB_TOKEN: "gh-secret", + HF_TOKEN: "hf-secret", + OPENCLAW_API_KEY: "keep-me", + }); + const parsed = await collectSpawnedEnvSnapshot(); + expect(parsed.openai).toBe("openai-secret"); + expect(parsed.github).toBe("gh-secret"); + expect(parsed.hf).toBe("hf-secret"); + expect(parsed.openclaw).toBe("keep-me"); + expect(parsed.shell).toBe("acp"); + }); }); diff --git a/extensions/acpx/src/runtime-internals/process.ts b/extensions/acpx/src/runtime-internals/process.ts index 4df84aece2f..2724f467ab1 100644 --- a/extensions/acpx/src/runtime-internals/process.ts +++ b/extensions/acpx/src/runtime-internals/process.ts @@ -7,7 +7,9 @@ import type { } from "openclaw/plugin-sdk/acpx"; import { applyWindowsSpawnProgramPolicy, + listKnownProviderAuthEnvVarNames, materializeWindowsSpawnProgram, + omitEnvKeysCaseInsensitive, resolveWindowsSpawnProgramCandidate, } from "openclaw/plugin-sdk/acpx"; @@ -125,6 +127,7 @@ export function spawnWithResolvedCommand( command: string; args: string[]; cwd: string; + stripProviderAuthEnvVars?: boolean; }, options?: SpawnCommandOptions, ): ChildProcessWithoutNullStreams { @@ -136,9 +139,15 @@ export function spawnWithResolvedCommand( options, ); + const childEnv = omitEnvKeysCaseInsensitive( + process.env, + params.stripProviderAuthEnvVars ? 
listKnownProviderAuthEnvVarNames() : [], + ); + childEnv.OPENCLAW_SHELL = "acp"; + return spawn(resolved.command, resolved.args, { cwd: params.cwd, - env: { ...process.env, OPENCLAW_SHELL: "acp" }, + env: childEnv, stdio: ["pipe", "pipe", "pipe"], shell: resolved.shell, windowsHide: resolved.windowsHide, @@ -180,6 +189,7 @@ export async function spawnAndCollect( command: string; args: string[]; cwd: string; + stripProviderAuthEnvVars?: boolean; }, options?: SpawnCommandOptions, runtime?: { diff --git a/extensions/acpx/src/runtime.test.ts b/extensions/acpx/src/runtime.test.ts index bb3b94cec9e..198a0367b59 100644 --- a/extensions/acpx/src/runtime.test.ts +++ b/extensions/acpx/src/runtime.test.ts @@ -1,6 +1,6 @@ import os from "node:os"; import path from "node:path"; -import { afterAll, beforeAll, describe, expect, it } from "vitest"; +import { afterAll, beforeAll, describe, expect, it, vi } from "vitest"; import { runAcpRuntimeAdapterContract } from "../../../src/acp/runtime/adapter-contract.testkit.js"; import { AcpxRuntime, decodeAcpxRuntimeHandleState } from "./runtime.js"; import { @@ -19,13 +19,14 @@ beforeAll(async () => { { command: "/definitely/missing/acpx", allowPluginLocalInstall: false, + stripProviderAuthEnvVars: false, installCommand: "n/a", cwd: process.cwd(), - mcpServers: {}, permissionMode: "approve-reads", nonInteractivePermissions: "fail", strictWindowsCmdWrapper: true, queueOwnerTtlSeconds: 0.1, + mcpServers: {}, }, { logger: NOOP_LOGGER }, ); @@ -127,6 +128,99 @@ describe("AcpxRuntime", () => { expect(promptArgs).toContain("--approve-all"); }); + it("uses sessions new with --resume-session when resumeSessionId is provided", async () => { + const { runtime, logPath } = await createMockRuntimeFixture(); + const resumeSessionId = "sid-resume-123"; + const sessionKey = "agent:codex:acp:resume"; + const handle = await runtime.ensureSession({ + sessionKey, + agent: "codex", + mode: "persistent", + resumeSessionId, + }); + + 
expect(handle.backend).toBe("acpx"); + expect(handle.acpxRecordId).toBe("rec-" + sessionKey); + + const logs = await readMockRuntimeLogEntries(logPath); + expect(logs.some((entry) => entry.kind === "ensure")).toBe(false); + const resumeEntry = logs.find( + (entry) => entry.kind === "new" && String(entry.sessionName ?? "") === sessionKey, + ); + expect(resumeEntry).toBeDefined(); + const resumeArgs = (resumeEntry?.args as string[]) ?? []; + const resumeFlagIndex = resumeArgs.indexOf("--resume-session"); + expect(resumeFlagIndex).toBeGreaterThanOrEqual(0); + expect(resumeArgs[resumeFlagIndex + 1]).toBe(resumeSessionId); + }); + + it("serializes text plus image attachments into ACP prompt blocks", async () => { + const { runtime, logPath } = await createMockRuntimeFixture(); + + const handle = await runtime.ensureSession({ + sessionKey: "agent:codex:acp:with-image", + agent: "codex", + mode: "persistent", + }); + + for await (const _event of runtime.runTurn({ + handle, + text: "describe this image", + attachments: [{ mediaType: "image/png", data: "aW1hZ2UtYnl0ZXM=" }], // pragma: allowlist secret + mode: "prompt", + requestId: "req-image", + })) { + // Consume stream to completion so prompt logging is finalized. + } + + const logs = await readMockRuntimeLogEntries(logPath); + const prompt = logs.find( + (entry) => + entry.kind === "prompt" && String(entry.sessionName ?? "") === "agent:codex:acp:with-image", + ); + expect(prompt).toBeDefined(); + + const stdinBlocks = JSON.parse(String(prompt?.stdinText ?? 
"")); + expect(stdinBlocks).toEqual([ + { type: "text", text: "describe this image" }, + { type: "image", mimeType: "image/png", data: "aW1hZ2UtYnl0ZXM=" }, + ]); + }); + + it("preserves provider auth env vars when runtime uses a custom acpx command", async () => { + vi.stubEnv("OPENAI_API_KEY", "openai-secret"); // pragma: allowlist secret + vi.stubEnv("GITHUB_TOKEN", "gh-secret"); // pragma: allowlist secret + + try { + const { runtime, logPath } = await createMockRuntimeFixture(); + const handle = await runtime.ensureSession({ + sessionKey: "agent:codex:acp:custom-env", + agent: "codex", + mode: "persistent", + }); + + for await (const _event of runtime.runTurn({ + handle, + text: "custom-env", + mode: "prompt", + requestId: "req-custom-env", + })) { + // Drain events; assertions inspect the mock runtime log. + } + + const logs = await readMockRuntimeLogEntries(logPath); + const prompt = logs.find( + (entry) => + entry.kind === "prompt" && + String(entry.sessionName ?? "") === "agent:codex:acp:custom-env", + ); + expect(prompt?.openaiApiKey).toBe("openai-secret"); + expect(prompt?.githubToken).toBe("gh-secret"); + } finally { + vi.unstubAllEnvs(); + } + }); + it("preserves leading spaces across streamed text deltas", async () => { const runtime = sharedFixture?.runtime; expect(runtime).toBeDefined(); @@ -336,7 +430,7 @@ describe("AcpxRuntime", () => { command: "npx", args: ["-y", "mcp-remote@latest", "https://mcp.canva.com/mcp"], env: { - CANVA_TOKEN: "secret", + CANVA_TOKEN: "secret", // pragma: allowlist secret }, }, }, diff --git a/extensions/acpx/src/runtime.ts b/extensions/acpx/src/runtime.ts index 5fa56d109e5..e55ef360424 100644 --- a/extensions/acpx/src/runtime.ts +++ b/extensions/acpx/src/runtime.ts @@ -13,7 +13,7 @@ import type { } from "openclaw/plugin-sdk/acpx"; import { AcpRuntimeError } from "openclaw/plugin-sdk/acpx"; import { toAcpMcpServers, type ResolvedAcpxPluginConfig } from "./config.js"; -import { checkAcpxVersion } from "./ensure.js"; 
+import { checkAcpxVersion, type AcpxVersionCheckResult } from "./ensure.js"; import { parseJsonLines, parsePromptEventLine, @@ -51,6 +51,28 @@ const ACPX_CAPABILITIES: AcpRuntimeCapabilities = { controls: ["session/set_mode", "session/set_config_option", "session/status"], }; +type AcpxHealthCheckResult = + | { + ok: true; + versionCheck: Extract; + } + | { + ok: false; + failure: + | { + kind: "version-check"; + versionCheck: Extract; + } + | { + kind: "help-check"; + result: Awaited>; + } + | { + kind: "exception"; + error: unknown; + }; + }; + function formatPermissionModeGuidance(): string { return "Configure plugins.entries.acpx.config.permissionMode to one of: approve-reads, approve-all, deny-all."; } @@ -165,33 +187,71 @@ export class AcpxRuntime implements AcpRuntime { ); } - async probeAvailability(): Promise { - const versionCheck = await checkAcpxVersion({ + private async checkVersion(): Promise { + return await checkAcpxVersion({ command: this.config.command, cwd: this.config.cwd, expectedVersion: this.config.expectedVersion, + stripProviderAuthEnvVars: this.config.stripProviderAuthEnvVars, spawnOptions: this.spawnCommandOptions, }); + } + + private async runHelpCheck(): Promise>> { + return await spawnAndCollect( + { + command: this.config.command, + args: ["--help"], + cwd: this.config.cwd, + stripProviderAuthEnvVars: this.config.stripProviderAuthEnvVars, + }, + this.spawnCommandOptions, + ); + } + + private async checkHealth(): Promise { + const versionCheck = await this.checkVersion(); if (!versionCheck.ok) { - this.healthy = false; - return; + return { + ok: false, + failure: { + kind: "version-check", + versionCheck, + }, + }; } try { - const result = await spawnAndCollect( - { - command: this.config.command, - args: ["--help"], - cwd: this.config.cwd, + const result = await this.runHelpCheck(); + if (result.error != null || (result.code ?? 
0) !== 0) { + return { + ok: false, + failure: { + kind: "help-check", + result, + }, + }; + } + return { + ok: true, + versionCheck, + }; + } catch (error) { + return { + ok: false, + failure: { + kind: "exception", + error, }, - this.spawnCommandOptions, - ); - this.healthy = result.error == null && (result.code ?? 0) === 0; - } catch { - this.healthy = false; + }; } } + async probeAvailability(): Promise { + const result = await this.checkHealth(); + this.healthy = result.ok; + } + async ensureSession(input: AcpRuntimeEnsureInput): Promise { const sessionName = asTrimmedString(input.sessionKey); if (!sessionName) { @@ -203,10 +263,14 @@ export class AcpxRuntime implements AcpRuntime { } const cwd = asTrimmedString(input.cwd) || this.config.cwd; const mode = input.mode; + const resumeSessionId = asTrimmedString(input.resumeSessionId); + const ensureSubcommand = resumeSessionId + ? ["sessions", "new", "--name", sessionName, "--resume-session", resumeSessionId] + : ["sessions", "ensure", "--name", sessionName]; const ensureCommand = await this.buildVerbArgs({ agent, cwd, - command: ["sessions", "ensure", "--name", sessionName], + command: ensureSubcommand, }); let events = await this.runControlCommand({ @@ -221,7 +285,7 @@ export class AcpxRuntime implements AcpRuntime { asOptionalString(event.acpxRecordId), ); - if (!ensuredEvent) { + if (!ensuredEvent && !resumeSessionId) { const newCommand = await this.buildVerbArgs({ agent, cwd, @@ -238,12 +302,14 @@ export class AcpxRuntime implements AcpRuntime { asOptionalString(event.acpxSessionId) || asOptionalString(event.acpxRecordId), ); - if (!ensuredEvent) { - throw new AcpRuntimeError( - "ACP_SESSION_INIT_FAILED", - `ACP session init failed: neither 'sessions ensure' nor 'sessions new' returned valid session identifiers for ${sessionName}.`, - ); - } + } + if (!ensuredEvent) { + throw new AcpRuntimeError( + "ACP_SESSION_INIT_FAILED", + resumeSessionId + ? 
`ACP session init failed: 'sessions new --resume-session' returned no session identifiers for ${sessionName}.` + : `ACP session init failed: neither 'sessions ensure' nor 'sessions new' returned valid session identifiers for ${sessionName}.`, + ); } const acpxRecordId = ensuredEvent ? asOptionalString(ensuredEvent.acpxRecordId) : undefined; @@ -303,6 +369,7 @@ export class AcpxRuntime implements AcpRuntime { command: this.config.command, args, cwd: state.cwd, + stripProviderAuthEnvVars: this.config.stripProviderAuthEnvVars, }, this.spawnCommandOptions, ); @@ -310,7 +377,20 @@ export class AcpxRuntime implements AcpRuntime { // Ignore EPIPE when the child exits before stdin flush completes. }); - child.stdin.end(input.text); + if (input.attachments && input.attachments.length > 0) { + const blocks: unknown[] = []; + if (input.text) { + blocks.push({ type: "text", text: input.text }); + } + for (const attachment of input.attachments) { + if (attachment.mediaType.startsWith("image/")) { + blocks.push({ type: "image", mimeType: attachment.mediaType, data: attachment.data }); + } + } + child.stdin.end(blocks.length > 0 ? JSON.stringify(blocks) : input.text); + } else { + child.stdin.end(input.text); + } let stderr = ""; child.stderr.on("data", (chunk) => { @@ -472,13 +552,9 @@ export class AcpxRuntime implements AcpRuntime { } async doctor(): Promise { - const versionCheck = await checkAcpxVersion({ - command: this.config.command, - cwd: this.config.cwd, - expectedVersion: this.config.expectedVersion, - spawnOptions: this.spawnCommandOptions, - }); - if (!versionCheck.ok) { + const result = await this.checkHealth(); + if (!result.ok && result.failure.kind === "version-check") { + const { versionCheck } = result.failure; this.healthy = false; const details = [ versionCheck.expectedVersion ? 
`expected=${versionCheck.expectedVersion}` : null, @@ -493,19 +569,12 @@ export class AcpxRuntime implements AcpRuntime { }; } - try { - const result = await spawnAndCollect( - { - command: this.config.command, - args: ["--help"], - cwd: this.config.cwd, - }, - this.spawnCommandOptions, - ); - if (result.error) { - const spawnFailure = resolveSpawnFailure(result.error, this.config.cwd); + if (!result.ok && result.failure.kind === "help-check") { + const { result: helpResult } = result.failure; + this.healthy = false; + if (helpResult.error) { + const spawnFailure = resolveSpawnFailure(helpResult.error, this.config.cwd); if (spawnFailure === "missing-command") { - this.healthy = false; return { ok: false, code: "ACP_BACKEND_UNAVAILABLE", @@ -514,42 +583,47 @@ export class AcpxRuntime implements AcpRuntime { }; } if (spawnFailure === "missing-cwd") { - this.healthy = false; return { ok: false, code: "ACP_BACKEND_UNAVAILABLE", message: `ACP runtime working directory does not exist: ${this.config.cwd}`, }; } - this.healthy = false; return { ok: false, code: "ACP_BACKEND_UNAVAILABLE", - message: result.error.message, - details: [String(result.error)], + message: helpResult.error.message, + details: [String(helpResult.error)], }; } - if ((result.code ?? 0) !== 0) { - this.healthy = false; - return { - ok: false, - code: "ACP_BACKEND_UNAVAILABLE", - message: result.stderr.trim() || `acpx exited with code ${result.code ?? "unknown"}`, - }; - } - this.healthy = true; - return { - ok: true, - message: `acpx command available (${this.config.command}, version ${versionCheck.version}${this.config.expectedVersion ? `, expected ${this.config.expectedVersion}` : ""})`, - }; - } catch (error) { - this.healthy = false; return { ok: false, code: "ACP_BACKEND_UNAVAILABLE", - message: error instanceof Error ? error.message : String(error), + message: + helpResult.stderr.trim() || `acpx exited with code ${helpResult.code ?? 
"unknown"}`, }; } + + if (!result.ok) { + this.healthy = false; + const failure = result.failure; + return { + ok: false, + code: "ACP_BACKEND_UNAVAILABLE", + message: + failure.kind === "exception" + ? failure.error instanceof Error + ? failure.error.message + : String(failure.error) + : "acpx backend unavailable", + }; + } + + this.healthy = true; + return { + ok: true, + message: `acpx command available (${this.config.command}, version ${result.versionCheck.version}${this.config.expectedVersion ? `, expected ${this.config.expectedVersion}` : ""})`, + }; } async cancel(input: { handle: AcpRuntimeHandle; reason?: string }): Promise { @@ -664,6 +738,7 @@ export class AcpxRuntime implements AcpRuntime { acpxCommand: this.config.command, cwd: params.cwd, agent: params.agent, + stripProviderAuthEnvVars: this.config.stripProviderAuthEnvVars, spawnOptions: this.spawnCommandOptions, }); const resolved = buildMcpProxyAgentCommand({ @@ -686,6 +761,7 @@ export class AcpxRuntime implements AcpRuntime { command: this.config.command, args: params.args, cwd: params.cwd, + stripProviderAuthEnvVars: this.config.stripProviderAuthEnvVars, }, this.spawnCommandOptions, { diff --git a/extensions/acpx/src/service.test.ts b/extensions/acpx/src/service.test.ts index 402fd9ae67b..a4572bf2c90 100644 --- a/extensions/acpx/src/service.test.ts +++ b/extensions/acpx/src/service.test.ts @@ -89,6 +89,11 @@ describe("createAcpxRuntimeService", () => { await vi.waitFor(() => { expect(ensureAcpxSpy).toHaveBeenCalledOnce(); + expect(ensureAcpxSpy).toHaveBeenCalledWith( + expect.objectContaining({ + stripProviderAuthEnvVars: true, + }), + ); expect(probeAvailabilitySpy).toHaveBeenCalledOnce(); }); diff --git a/extensions/acpx/src/service.ts b/extensions/acpx/src/service.ts index ab57dc8b885..a863546fb30 100644 --- a/extensions/acpx/src/service.ts +++ b/extensions/acpx/src/service.ts @@ -59,9 +59,8 @@ export function createAcpxRuntimeService( }); const expectedVersionLabel = 
pluginConfig.expectedVersion ?? "any"; const installLabel = pluginConfig.allowPluginLocalInstall ? "enabled" : "disabled"; - const mcpServerCount = Object.keys(pluginConfig.mcpServers).length; ctx.logger.info( - `acpx runtime backend registered (command: ${pluginConfig.command}, expectedVersion: ${expectedVersionLabel}, pluginLocalInstall: ${installLabel}${mcpServerCount > 0 ? `, mcpServers: ${mcpServerCount}` : ""})`, + `acpx runtime backend registered (command: ${pluginConfig.command}, expectedVersion: ${expectedVersionLabel}, pluginLocalInstall: ${installLabel})`, ); lifecycleRevision += 1; @@ -73,6 +72,7 @@ export function createAcpxRuntimeService( logger: ctx.logger, expectedVersion: pluginConfig.expectedVersion, allowInstall: pluginConfig.allowPluginLocalInstall, + stripProviderAuthEnvVars: pluginConfig.stripProviderAuthEnvVars, spawnOptions: { strictWindowsCmdWrapper: pluginConfig.strictWindowsCmdWrapper, }, diff --git a/extensions/acpx/src/test-utils/runtime-fixtures.ts b/extensions/acpx/src/test-utils/runtime-fixtures.ts index c99417fbd21..c5cbef83877 100644 --- a/extensions/acpx/src/test-utils/runtime-fixtures.ts +++ b/extensions/acpx/src/test-utils/runtime-fixtures.ts @@ -204,6 +204,8 @@ if (command === "prompt") { sessionName: sessionFromOption, stdinText, openclawShell, + openaiApiKey: process.env.OPENAI_API_KEY || "", + githubToken: process.env.GITHUB_TOKEN || "", }); const requestId = "req-1"; @@ -326,6 +328,7 @@ export async function createMockRuntimeFixture(params?: { const config: ResolvedAcpxPluginConfig = { command: scriptPath, allowPluginLocalInstall: false, + stripProviderAuthEnvVars: false, installCommand: "n/a", cwd: dir, permissionMode: params?.permissionMode ?? 
"approve-all", @@ -378,6 +381,7 @@ export async function readMockRuntimeLogEntries( export async function cleanupMockRuntimeFixtures(): Promise { delete process.env.MOCK_ACPX_LOG; + delete process.env.MOCK_ACPX_CONFIG_SHOW_AGENTS; sharedMockCliScriptPath = null; logFileSequence = 0; while (tempDirs.length > 0) { diff --git a/extensions/bluebubbles/package.json b/extensions/bluebubbles/package.json index 3c8605ef312..67df516b8d7 100644 --- a/extensions/bluebubbles/package.json +++ b/extensions/bluebubbles/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/bluebubbles", - "version": "2026.3.9", + "version": "2026.3.14", "description": "OpenClaw BlueBubbles channel plugin", "type": "module", "dependencies": { diff --git a/extensions/bluebubbles/src/attachments.test.ts b/extensions/bluebubbles/src/attachments.test.ts index 8ef94cf08ae..704b907eb8b 100644 --- a/extensions/bluebubbles/src/attachments.test.ts +++ b/extensions/bluebubbles/src/attachments.test.ts @@ -82,6 +82,15 @@ describe("downloadBlueBubblesAttachment", () => { ).rejects.toThrow("too large"); } + function mockSuccessfulAttachmentDownload(buffer = new Uint8Array([1])) { + mockFetch.mockResolvedValueOnce({ + ok: true, + headers: new Headers(), + arrayBuffer: () => Promise.resolve(buffer.buffer), + }); + return buffer; + } + it("throws when guid is missing", async () => { const attachment: BlueBubblesAttachment = {}; await expect( @@ -159,12 +168,7 @@ describe("downloadBlueBubblesAttachment", () => { }); it("encodes guid in URL", async () => { - const mockBuffer = new Uint8Array([1]); - mockFetch.mockResolvedValueOnce({ - ok: true, - headers: new Headers(), - arrayBuffer: () => Promise.resolve(mockBuffer.buffer), - }); + mockSuccessfulAttachmentDownload(); const attachment: BlueBubblesAttachment = { guid: "att/with/special chars" }; await downloadBlueBubblesAttachment(attachment, { @@ -244,12 +248,7 @@ describe("downloadBlueBubblesAttachment", () => { }); it("resolves credentials from config when opts not 
provided", async () => { - const mockBuffer = new Uint8Array([1]); - mockFetch.mockResolvedValueOnce({ - ok: true, - headers: new Headers(), - arrayBuffer: () => Promise.resolve(mockBuffer.buffer), - }); + mockSuccessfulAttachmentDownload(); const attachment: BlueBubblesAttachment = { guid: "att-config" }; const result = await downloadBlueBubblesAttachment(attachment, { @@ -270,12 +269,7 @@ describe("downloadBlueBubblesAttachment", () => { }); it("passes ssrfPolicy with allowPrivateNetwork when config enables it", async () => { - const mockBuffer = new Uint8Array([1]); - mockFetch.mockResolvedValueOnce({ - ok: true, - headers: new Headers(), - arrayBuffer: () => Promise.resolve(mockBuffer.buffer), - }); + mockSuccessfulAttachmentDownload(); const attachment: BlueBubblesAttachment = { guid: "att-ssrf" }; await downloadBlueBubblesAttachment(attachment, { @@ -295,12 +289,7 @@ describe("downloadBlueBubblesAttachment", () => { }); it("auto-allowlists serverUrl hostname when allowPrivateNetwork is not set", async () => { - const mockBuffer = new Uint8Array([1]); - mockFetch.mockResolvedValueOnce({ - ok: true, - headers: new Headers(), - arrayBuffer: () => Promise.resolve(mockBuffer.buffer), - }); + mockSuccessfulAttachmentDownload(); const attachment: BlueBubblesAttachment = { guid: "att-no-ssrf" }; await downloadBlueBubblesAttachment(attachment, { @@ -313,12 +302,7 @@ describe("downloadBlueBubblesAttachment", () => { }); it("auto-allowlists private IP serverUrl hostname when allowPrivateNetwork is not set", async () => { - const mockBuffer = new Uint8Array([1]); - mockFetch.mockResolvedValueOnce({ - ok: true, - headers: new Headers(), - arrayBuffer: () => Promise.resolve(mockBuffer.buffer), - }); + mockSuccessfulAttachmentDownload(); const attachment: BlueBubblesAttachment = { guid: "att-private-ip" }; await downloadBlueBubblesAttachment(attachment, { @@ -352,6 +336,14 @@ describe("sendBlueBubblesAttachment", () => { return Buffer.from(body).toString("utf8"); } + 
function expectVoiceAttachmentBody() { + const body = mockFetch.mock.calls[0][1]?.body as Uint8Array; + const bodyText = decodeBody(body); + expect(bodyText).toContain('name="isAudioMessage"'); + expect(bodyText).toContain("true"); + return bodyText; + } + it("marks voice memos when asVoice is true and mp3 is provided", async () => { mockFetch.mockResolvedValueOnce({ ok: true, @@ -367,10 +359,7 @@ describe("sendBlueBubblesAttachment", () => { opts: { serverUrl: "http://localhost:1234", password: "test" }, }); - const body = mockFetch.mock.calls[0][1]?.body as Uint8Array; - const bodyText = decodeBody(body); - expect(bodyText).toContain('name="isAudioMessage"'); - expect(bodyText).toContain("true"); + const bodyText = expectVoiceAttachmentBody(); expect(bodyText).toContain('filename="voice.mp3"'); }); @@ -389,8 +378,7 @@ describe("sendBlueBubblesAttachment", () => { opts: { serverUrl: "http://localhost:1234", password: "test" }, }); - const body = mockFetch.mock.calls[0][1]?.body as Uint8Array; - const bodyText = decodeBody(body); + const bodyText = expectVoiceAttachmentBody(); expect(bodyText).toContain('filename="voice.mp3"'); expect(bodyText).toContain('name="voice.mp3"'); }); diff --git a/extensions/bluebubbles/src/attachments.ts b/extensions/bluebubbles/src/attachments.ts index cbd8a74d807..c5392fd2595 100644 --- a/extensions/bluebubbles/src/attachments.ts +++ b/extensions/bluebubbles/src/attachments.ts @@ -2,7 +2,7 @@ import crypto from "node:crypto"; import path from "node:path"; import type { OpenClawConfig } from "openclaw/plugin-sdk/bluebubbles"; import { resolveBlueBubblesServerAccount } from "./account-resolve.js"; -import { postMultipartFormData } from "./multipart.js"; +import { assertMultipartActionOk, postMultipartFormData } from "./multipart.js"; import { getCachedBlueBubblesPrivateApiStatus, isBlueBubblesPrivateApiStatusEnabled, @@ -262,12 +262,7 @@ export async function sendBlueBubblesAttachment(params: { timeoutMs: opts.timeoutMs ?? 
60_000, // longer timeout for file uploads }); - if (!res.ok) { - const errorText = await res.text(); - throw new Error( - `BlueBubbles attachment send failed (${res.status}): ${errorText || "unknown"}`, - ); - } + await assertMultipartActionOk(res, "attachment send"); const responseBody = await res.text(); if (!responseBody) { diff --git a/extensions/bluebubbles/src/channel.ts b/extensions/bluebubbles/src/channel.ts index d0f076f6e84..747fba5b67b 100644 --- a/extensions/bluebubbles/src/channel.ts +++ b/extensions/bluebubbles/src/channel.ts @@ -21,6 +21,7 @@ import { import { buildAccountScopedDmSecurityPolicy, collectOpenGroupPolicyRestrictSendersWarnings, + createAccountStatusSink, formatNormalizedAllowFromEntries, mapAllowFromEntries, } from "openclaw/plugin-sdk/compat"; @@ -369,8 +370,11 @@ export const bluebubblesPlugin: ChannelPlugin = { startAccount: async (ctx) => { const account = ctx.account; const webhookPath = resolveWebhookPathFromConfig(account.config); - ctx.setStatus({ - accountId: account.accountId, + const statusSink = createAccountStatusSink({ + accountId: ctx.accountId, + setStatus: ctx.setStatus, + }); + statusSink({ baseUrl: account.baseUrl, }); ctx.log?.info(`[${account.accountId}] starting provider (webhook=${webhookPath})`); @@ -379,7 +383,7 @@ export const bluebubblesPlugin: ChannelPlugin = { config: ctx.cfg, runtime: ctx.runtime, abortSignal: ctx.abortSignal, - statusSink: (patch) => ctx.setStatus({ accountId: ctx.accountId, ...patch }), + statusSink, webhookPath, }); }, diff --git a/extensions/bluebubbles/src/chat.test.ts b/extensions/bluebubbles/src/chat.test.ts index cc37829bc9d..f8adc9b86fd 100644 --- a/extensions/bluebubbles/src/chat.test.ts +++ b/extensions/bluebubbles/src/chat.test.ts @@ -29,6 +29,11 @@ describe("chat", () => { }); } + function mockTwoOkTextResponses() { + mockOkTextResponse(); + mockOkTextResponse(); + } + async function expectCalledUrlIncludesPassword(params: { password: string; invoke: () => Promise; @@ -198,15 
+203,7 @@ describe("chat", () => { }); it("uses POST for start and DELETE for stop", async () => { - mockFetch - .mockResolvedValueOnce({ - ok: true, - text: () => Promise.resolve(""), - }) - .mockResolvedValueOnce({ - ok: true, - text: () => Promise.resolve(""), - }); + mockTwoOkTextResponses(); await sendBlueBubblesTyping("iMessage;-;+15551234567", true, { serverUrl: "http://localhost:1234", @@ -442,15 +439,7 @@ describe("chat", () => { }); it("adds and removes participant using matching endpoint", async () => { - mockFetch - .mockResolvedValueOnce({ - ok: true, - text: () => Promise.resolve(""), - }) - .mockResolvedValueOnce({ - ok: true, - text: () => Promise.resolve(""), - }); + mockTwoOkTextResponses(); await addBlueBubblesParticipant("chat-guid", "+15551234567", { serverUrl: "http://localhost:1234", diff --git a/extensions/bluebubbles/src/chat.ts b/extensions/bluebubbles/src/chat.ts index b63f09272f2..17340b7f980 100644 --- a/extensions/bluebubbles/src/chat.ts +++ b/extensions/bluebubbles/src/chat.ts @@ -2,7 +2,7 @@ import crypto from "node:crypto"; import path from "node:path"; import type { OpenClawConfig } from "openclaw/plugin-sdk/bluebubbles"; import { resolveBlueBubblesServerAccount } from "./account-resolve.js"; -import { postMultipartFormData } from "./multipart.js"; +import { assertMultipartActionOk, postMultipartFormData } from "./multipart.js"; import { getCachedBlueBubblesPrivateApiStatus } from "./probe.js"; import { blueBubblesFetchWithTimeout, buildBlueBubblesApiUrl } from "./types.js"; @@ -55,12 +55,7 @@ async function sendBlueBubblesChatEndpointRequest(params: { { method: params.method }, params.opts.timeoutMs, ); - if (!res.ok) { - const errorText = await res.text().catch(() => ""); - throw new Error( - `BlueBubbles ${params.action} failed (${res.status}): ${errorText || "unknown"}`, - ); - } + await assertMultipartActionOk(res, params.action); } async function sendPrivateApiJsonRequest(params: { @@ -86,12 +81,7 @@ async function 
sendPrivateApiJsonRequest(params: { } const res = await blueBubblesFetchWithTimeout(url, request, params.opts.timeoutMs); - if (!res.ok) { - const errorText = await res.text().catch(() => ""); - throw new Error( - `BlueBubbles ${params.action} failed (${res.status}): ${errorText || "unknown"}`, - ); - } + await assertMultipartActionOk(res, params.action); } export async function markBlueBubblesChatRead( @@ -329,8 +319,5 @@ export async function setGroupIconBlueBubbles( timeoutMs: opts.timeoutMs ?? 60_000, // longer timeout for file uploads }); - if (!res.ok) { - const errorText = await res.text().catch(() => ""); - throw new Error(`BlueBubbles setGroupIcon failed (${res.status}): ${errorText || "unknown"}`); - } + await assertMultipartActionOk(res, "setGroupIcon"); } diff --git a/extensions/bluebubbles/src/config-schema.ts b/extensions/bluebubbles/src/config-schema.ts index 32e239d3f45..76fe4523f16 100644 --- a/extensions/bluebubbles/src/config-schema.ts +++ b/extensions/bluebubbles/src/config-schema.ts @@ -1,7 +1,9 @@ import { MarkdownConfigSchema, ToolPolicySchema } from "openclaw/plugin-sdk/bluebubbles"; import { - AllowFromEntrySchema, + AllowFromListSchema, buildCatchallMultiAccountChannelSchema, + DmPolicySchema, + GroupPolicySchema, } from "openclaw/plugin-sdk/compat"; import { z } from "zod"; import { buildSecretInputSchema, hasConfiguredSecretInput } from "./secret-input.js"; @@ -35,10 +37,10 @@ const bluebubblesAccountSchema = z serverUrl: z.string().optional(), password: buildSecretInputSchema().optional(), webhookPath: z.string().optional(), - dmPolicy: z.enum(["pairing", "allowlist", "open", "disabled"]).optional(), - allowFrom: z.array(AllowFromEntrySchema).optional(), - groupAllowFrom: z.array(AllowFromEntrySchema).optional(), - groupPolicy: z.enum(["open", "disabled", "allowlist"]).optional(), + dmPolicy: DmPolicySchema.optional(), + allowFrom: AllowFromListSchema, + groupAllowFrom: AllowFromListSchema, + groupPolicy: GroupPolicySchema.optional(), 
historyLimit: z.number().int().min(0).optional(), dmHistoryLimit: z.number().int().min(0).optional(), textChunkLimit: z.number().int().positive().optional(), diff --git a/extensions/bluebubbles/src/media-send.test.ts b/extensions/bluebubbles/src/media-send.test.ts index 9f065599bfb..59fe82cbeae 100644 --- a/extensions/bluebubbles/src/media-send.test.ts +++ b/extensions/bluebubbles/src/media-send.test.ts @@ -70,6 +70,70 @@ async function makeTempDir(): Promise { return dir; } +async function makeTempFile( + fileName: string, + contents: string, + dir?: string, +): Promise<{ dir: string; filePath: string }> { + const resolvedDir = dir ?? (await makeTempDir()); + const filePath = path.join(resolvedDir, fileName); + await fs.writeFile(filePath, contents, "utf8"); + return { dir: resolvedDir, filePath }; +} + +async function sendLocalMedia(params: { + cfg: OpenClawConfig; + mediaPath: string; + accountId?: string; +}) { + return sendBlueBubblesMedia({ + cfg: params.cfg, + to: "chat:123", + accountId: params.accountId, + mediaPath: params.mediaPath, + }); +} + +async function expectRejectedLocalMedia(params: { + cfg: OpenClawConfig; + mediaPath: string; + error: RegExp; + accountId?: string; +}) { + await expect( + sendLocalMedia({ + cfg: params.cfg, + mediaPath: params.mediaPath, + accountId: params.accountId, + }), + ).rejects.toThrow(params.error); + + expect(sendBlueBubblesAttachmentMock).not.toHaveBeenCalled(); +} + +async function expectAllowedLocalMedia(params: { + cfg: OpenClawConfig; + mediaPath: string; + expectedAttachment: Record; + accountId?: string; + expectMimeDetection?: boolean; +}) { + const result = await sendLocalMedia({ + cfg: params.cfg, + mediaPath: params.mediaPath, + accountId: params.accountId, + }); + + expect(result).toEqual({ messageId: "msg-1" }); + expect(sendBlueBubblesAttachmentMock).toHaveBeenCalledTimes(1); + expect(sendBlueBubblesAttachmentMock.mock.calls[0]?.[0]).toEqual( + expect.objectContaining(params.expectedAttachment), + ); + 
if (params.expectMimeDetection) { + expect(runtimeMocks.detectMime).toHaveBeenCalled(); + } +} + beforeEach(() => { const runtime = createMockRuntime(); runtimeMocks = runtime.mocks; @@ -110,57 +174,43 @@ describe("sendBlueBubblesMedia local-path hardening", () => { const outsideFile = path.join(outsideDir, "outside.txt"); await fs.writeFile(outsideFile, "not allowed", "utf8"); - await expect( - sendBlueBubblesMedia({ - cfg: createConfig({ mediaLocalRoots: [allowedRoot] }), - to: "chat:123", - mediaPath: outsideFile, - }), - ).rejects.toThrow(/not under any configured mediaLocalRoots/i); - - expect(sendBlueBubblesAttachmentMock).not.toHaveBeenCalled(); + await expectRejectedLocalMedia({ + cfg: createConfig({ mediaLocalRoots: [allowedRoot] }), + mediaPath: outsideFile, + error: /not under any configured mediaLocalRoots/i, + }); }); it("allows local paths that are explicitly configured", async () => { - const allowedRoot = await makeTempDir(); - const allowedFile = path.join(allowedRoot, "allowed.txt"); - await fs.writeFile(allowedFile, "allowed", "utf8"); + const { dir: allowedRoot, filePath: allowedFile } = await makeTempFile( + "allowed.txt", + "allowed", + ); - const result = await sendBlueBubblesMedia({ + await expectAllowedLocalMedia({ cfg: createConfig({ mediaLocalRoots: [allowedRoot] }), - to: "chat:123", mediaPath: allowedFile, - }); - - expect(result).toEqual({ messageId: "msg-1" }); - expect(sendBlueBubblesAttachmentMock).toHaveBeenCalledTimes(1); - expect(sendBlueBubblesAttachmentMock.mock.calls[0]?.[0]).toEqual( - expect.objectContaining({ + expectedAttachment: { filename: "allowed.txt", contentType: "text/plain", - }), - ); - expect(runtimeMocks.detectMime).toHaveBeenCalled(); + }, + expectMimeDetection: true, + }); }); it("allows file:// media paths and file:// local roots", async () => { - const allowedRoot = await makeTempDir(); - const allowedFile = path.join(allowedRoot, "allowed.txt"); - await fs.writeFile(allowedFile, "allowed", "utf8"); - - 
const result = await sendBlueBubblesMedia({ - cfg: createConfig({ mediaLocalRoots: [pathToFileURL(allowedRoot).toString()] }), - to: "chat:123", - mediaPath: pathToFileURL(allowedFile).toString(), - }); - - expect(result).toEqual({ messageId: "msg-1" }); - expect(sendBlueBubblesAttachmentMock).toHaveBeenCalledTimes(1); - expect(sendBlueBubblesAttachmentMock.mock.calls[0]?.[0]).toEqual( - expect.objectContaining({ - filename: "allowed.txt", - }), + const { dir: allowedRoot, filePath: allowedFile } = await makeTempFile( + "allowed.txt", + "allowed", ); + + await expectAllowedLocalMedia({ + cfg: createConfig({ mediaLocalRoots: [pathToFileURL(allowedRoot).toString()] }), + mediaPath: pathToFileURL(allowedFile).toString(), + expectedAttachment: { + filename: "allowed.txt", + }, + }); }); it("uses account-specific mediaLocalRoots over top-level roots", async () => { @@ -213,15 +263,11 @@ describe("sendBlueBubblesMedia local-path hardening", () => { return; } - await expect( - sendBlueBubblesMedia({ - cfg: createConfig({ mediaLocalRoots: [allowedRoot] }), - to: "chat:123", - mediaPath: linkPath, - }), - ).rejects.toThrow(/not under any configured mediaLocalRoots/i); - - expect(sendBlueBubblesAttachmentMock).not.toHaveBeenCalled(); + await expectRejectedLocalMedia({ + cfg: createConfig({ mediaLocalRoots: [allowedRoot] }), + mediaPath: linkPath, + error: /not under any configured mediaLocalRoots/i, + }); }); it("rejects relative mediaLocalRoots entries", async () => { diff --git a/extensions/bluebubbles/src/monitor-normalize.test.ts b/extensions/bluebubbles/src/monitor-normalize.test.ts index 3986909c259..62651279237 100644 --- a/extensions/bluebubbles/src/monitor-normalize.test.ts +++ b/extensions/bluebubbles/src/monitor-normalize.test.ts @@ -1,23 +1,48 @@ import { describe, expect, it } from "vitest"; import { normalizeWebhookMessage, normalizeWebhookReaction } from "./monitor-normalize.js"; +function createFallbackDmPayload(overrides: Record = {}) { + return { + guid: 
"msg-1", + isGroup: false, + isFromMe: false, + handle: null, + chatGuid: "iMessage;-;+15551234567", + ...overrides, + }; +} + describe("normalizeWebhookMessage", () => { it("falls back to DM chatGuid handle when sender handle is missing", () => { + const result = normalizeWebhookMessage({ + type: "new-message", + data: createFallbackDmPayload({ + text: "hello", + }), + }); + + expect(result).not.toBeNull(); + expect(result?.senderId).toBe("+15551234567"); + expect(result?.senderIdExplicit).toBe(false); + expect(result?.chatGuid).toBe("iMessage;-;+15551234567"); + }); + + it("marks explicit sender handles as explicit identity", () => { const result = normalizeWebhookMessage({ type: "new-message", data: { - guid: "msg-1", + guid: "msg-explicit-1", text: "hello", isGroup: false, - isFromMe: false, - handle: null, + isFromMe: true, + handle: { address: "+15551234567" }, chatGuid: "iMessage;-;+15551234567", }, }); expect(result).not.toBeNull(); expect(result?.senderId).toBe("+15551234567"); - expect(result?.chatGuid).toBe("iMessage;-;+15551234567"); + expect(result?.senderIdExplicit).toBe(true); }); it("does not infer sender from group chatGuid when sender handle is missing", () => { @@ -59,19 +84,16 @@ describe("normalizeWebhookReaction", () => { it("falls back to DM chatGuid handle when reaction sender handle is missing", () => { const result = normalizeWebhookReaction({ type: "updated-message", - data: { + data: createFallbackDmPayload({ guid: "msg-2", associatedMessageGuid: "p:0/msg-1", associatedMessageType: 2000, - isGroup: false, - isFromMe: false, - handle: null, - chatGuid: "iMessage;-;+15551234567", - }, + }), }); expect(result).not.toBeNull(); expect(result?.senderId).toBe("+15551234567"); + expect(result?.senderIdExplicit).toBe(false); expect(result?.messageId).toBe("p:0/msg-1"); expect(result?.action).toBe("added"); }); diff --git a/extensions/bluebubbles/src/monitor-normalize.ts b/extensions/bluebubbles/src/monitor-normalize.ts index 
173ea9c24a6..085bd8923e1 100644 --- a/extensions/bluebubbles/src/monitor-normalize.ts +++ b/extensions/bluebubbles/src/monitor-normalize.ts @@ -191,12 +191,13 @@ function readFirstChatRecord(message: Record): Record): { senderId: string; + senderIdExplicit: boolean; senderName?: string; } { const handleValue = message.handle ?? message.sender; const handle = asRecord(handleValue) ?? (typeof handleValue === "string" ? { address: handleValue } : null); - const senderId = + const senderIdRaw = readString(handle, "address") ?? readString(handle, "handle") ?? readString(handle, "id") ?? @@ -204,13 +205,18 @@ function extractSenderInfo(message: Record): { readString(message, "sender") ?? readString(message, "from") ?? ""; + const senderId = senderIdRaw.trim(); const senderName = readString(handle, "displayName") ?? readString(handle, "name") ?? readString(message, "senderName") ?? undefined; - return { senderId, senderName }; + return { + senderId, + senderIdExplicit: Boolean(senderId), + senderName, + }; } function extractChatContext(message: Record): { @@ -441,6 +447,7 @@ export type BlueBubblesParticipant = { export type NormalizedWebhookMessage = { text: string; senderId: string; + senderIdExplicit: boolean; senderName?: string; messageId?: string; timestamp?: number; @@ -466,6 +473,7 @@ export type NormalizedWebhookReaction = { action: "added" | "removed"; emoji: string; senderId: string; + senderIdExplicit: boolean; senderName?: string; messageId: string; timestamp?: number; @@ -574,6 +582,29 @@ export function parseTapbackText(params: { return null; } + const parseLeadingReactionAction = ( + prefix: "reacted" | "removed", + defaultAction: "added" | "removed", + ) => { + if (!lower.startsWith(prefix)) { + return null; + } + const emoji = extractFirstEmoji(trimmed) ?? 
params.emojiHint; + if (!emoji) { + return null; + } + const quotedText = extractQuotedTapbackText(trimmed); + if (params.requireQuoted && !quotedText) { + return null; + } + const fallback = trimmed.slice(prefix.length).trim(); + return { + emoji, + action: params.actionHint ?? defaultAction, + quotedText: quotedText ?? fallback, + }; + }; + for (const [pattern, { emoji, action }] of TAPBACK_TEXT_MAP) { if (lower.startsWith(pattern)) { // Extract quoted text if present (e.g., 'Loved "hello"' -> "hello") @@ -591,30 +622,14 @@ export function parseTapbackText(params: { } } - if (lower.startsWith("reacted")) { - const emoji = extractFirstEmoji(trimmed) ?? params.emojiHint; - if (!emoji) { - return null; - } - const quotedText = extractQuotedTapbackText(trimmed); - if (params.requireQuoted && !quotedText) { - return null; - } - const fallback = trimmed.slice("reacted".length).trim(); - return { emoji, action: params.actionHint ?? "added", quotedText: quotedText ?? fallback }; + const reacted = parseLeadingReactionAction("reacted", "added"); + if (reacted) { + return reacted; } - if (lower.startsWith("removed")) { - const emoji = extractFirstEmoji(trimmed) ?? params.emojiHint; - if (!emoji) { - return null; - } - const quotedText = extractQuotedTapbackText(trimmed); - if (params.requireQuoted && !quotedText) { - return null; - } - const fallback = trimmed.slice("removed".length).trim(); - return { emoji, action: params.actionHint ?? "removed", quotedText: quotedText ?? fallback }; + const removed = parseLeadingReactionAction("removed", "removed"); + if (removed) { + return removed; } return null; } @@ -672,7 +687,7 @@ export function normalizeWebhookMessage( readString(message, "subject") ?? 
""; - const { senderId, senderName } = extractSenderInfo(message); + const { senderId, senderIdExplicit, senderName } = extractSenderInfo(message); const { chatGuid, chatIdentifier, chatId, chatName, isGroup, participants } = extractChatContext(message); const normalizedParticipants = normalizeParticipantList(participants); @@ -717,7 +732,7 @@ export function normalizeWebhookMessage( // BlueBubbles may omit `handle` in webhook payloads; for DM chat GUIDs we can still infer sender. const senderFallbackFromChatGuid = - !senderId && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null; + !senderIdExplicit && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null; const normalizedSender = normalizeBlueBubblesHandle(senderId || senderFallbackFromChatGuid || ""); if (!normalizedSender) { return null; @@ -727,6 +742,7 @@ export function normalizeWebhookMessage( return { text, senderId: normalizedSender, + senderIdExplicit, senderName, messageId, timestamp, @@ -777,7 +793,7 @@ export function normalizeWebhookReaction( const emoji = (associatedEmoji?.trim() || mapping?.emoji) ?? `reaction:${associatedType}`; const action = mapping?.action ?? resolveTapbackActionHint(associatedType) ?? "added"; - const { senderId, senderName } = extractSenderInfo(message); + const { senderId, senderIdExplicit, senderName } = extractSenderInfo(message); const { chatGuid, chatIdentifier, chatId, chatName, isGroup } = extractChatContext(message); const fromMe = readBoolean(message, "isFromMe") ?? readBoolean(message, "is_from_me"); @@ -793,7 +809,7 @@ export function normalizeWebhookReaction( : undefined; const senderFallbackFromChatGuid = - !senderId && !isGroup && chatGuid ? extractHandleFromChatGuid(chatGuid) : null; + !senderIdExplicit && !isGroup && chatGuid ? 
extractHandleFromChatGuid(chatGuid) : null; const normalizedSender = normalizeBlueBubblesHandle(senderId || senderFallbackFromChatGuid || ""); if (!normalizedSender) { return null; @@ -803,6 +819,7 @@ export function normalizeWebhookReaction( action, emoji, senderId: normalizedSender, + senderIdExplicit, senderName, messageId: associatedGuid, timestamp, diff --git a/extensions/bluebubbles/src/monitor-processing.ts b/extensions/bluebubbles/src/monitor-processing.ts index 6eb2ab08bc0..9cf72ea1efd 100644 --- a/extensions/bluebubbles/src/monitor-processing.ts +++ b/extensions/bluebubbles/src/monitor-processing.ts @@ -38,6 +38,10 @@ import { resolveBlueBubblesMessageId, resolveReplyContextFromCache, } from "./monitor-reply-cache.js"; +import { + hasBlueBubblesSelfChatCopy, + rememberBlueBubblesSelfChatCopy, +} from "./monitor-self-chat-cache.js"; import type { BlueBubblesCoreRuntime, BlueBubblesRuntimeEnv, @@ -47,7 +51,12 @@ import { isBlueBubblesPrivateApiEnabled } from "./probe.js"; import { normalizeBlueBubblesReactionInput, sendBlueBubblesReaction } from "./reactions.js"; import { normalizeSecretInputString } from "./secret-input.js"; import { resolveChatGuidForTarget, sendMessageBlueBubbles } from "./send.js"; -import { formatBlueBubblesChatTarget, isAllowedBlueBubblesSender } from "./targets.js"; +import { + extractHandleFromChatGuid, + formatBlueBubblesChatTarget, + isAllowedBlueBubblesSender, + normalizeBlueBubblesHandle, +} from "./targets.js"; const DEFAULT_TEXT_LIMIT = 4000; const invalidAckReactions = new Set(); @@ -80,6 +89,19 @@ function normalizeSnippet(value: string): string { return stripMarkdown(value).replace(/\s+/g, " ").trim().toLowerCase(); } +function isBlueBubblesSelfChatMessage( + message: NormalizedWebhookMessage, + isGroup: boolean, +): boolean { + if (isGroup || !message.senderIdExplicit) { + return false; + } + const chatHandle = + (message.chatGuid ? extractHandleFromChatGuid(message.chatGuid) : null) ?? 
+ normalizeBlueBubblesHandle(message.chatIdentifier ?? ""); + return Boolean(chatHandle) && chatHandle === message.senderId; +} + function prunePendingOutboundMessageIds(now = Date.now()): void { const cutoff = now - PENDING_OUTBOUND_MESSAGE_ID_TTL_MS; for (let i = pendingOutboundMessageIds.length - 1; i >= 0; i--) { @@ -453,8 +475,27 @@ export async function processMessage( ? `removed ${tapbackParsed.emoji} reaction` : `reacted with ${tapbackParsed.emoji}` : text || placeholder; + const isSelfChatMessage = isBlueBubblesSelfChatMessage(message, isGroup); + const selfChatLookup = { + accountId: account.accountId, + chatGuid: message.chatGuid, + chatIdentifier: message.chatIdentifier, + chatId: message.chatId, + senderId: message.senderId, + body: rawBody, + timestamp: message.timestamp, + }; const cacheMessageId = message.messageId?.trim(); + const confirmedOutboundCacheEntry = cacheMessageId + ? resolveReplyContextFromCache({ + accountId: account.accountId, + replyToId: cacheMessageId, + chatGuid: message.chatGuid, + chatIdentifier: message.chatIdentifier, + chatId: message.chatId, + }) + : null; let messageShortId: string | undefined; const cacheInboundMessage = () => { if (!cacheMessageId) { @@ -476,6 +517,12 @@ export async function processMessage( if (message.fromMe) { // Cache from-me messages so reply context can resolve sender/body. cacheInboundMessage(); + const confirmedAssistantOutbound = + confirmedOutboundCacheEntry?.senderLabel === "me" && + normalizeSnippet(confirmedOutboundCacheEntry.body ?? 
"") === normalizeSnippet(rawBody); + if (isSelfChatMessage && confirmedAssistantOutbound) { + rememberBlueBubblesSelfChatCopy(selfChatLookup); + } if (cacheMessageId) { const pending = consumePendingOutboundMessageId({ accountId: account.accountId, @@ -499,6 +546,11 @@ export async function processMessage( return; } + if (isSelfChatMessage && hasBlueBubblesSelfChatCopy(selfChatLookup)) { + logVerbose(core, runtime, `drop: reflected self-chat duplicate sender=${message.senderId}`); + return; + } + if (!rawBody) { logVerbose(core, runtime, `drop: empty text sender=${message.senderId}`); return; diff --git a/extensions/bluebubbles/src/monitor-self-chat-cache.test.ts b/extensions/bluebubbles/src/monitor-self-chat-cache.test.ts new file mode 100644 index 00000000000..3e843f6943d --- /dev/null +++ b/extensions/bluebubbles/src/monitor-self-chat-cache.test.ts @@ -0,0 +1,190 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { + hasBlueBubblesSelfChatCopy, + rememberBlueBubblesSelfChatCopy, + resetBlueBubblesSelfChatCache, +} from "./monitor-self-chat-cache.js"; + +describe("BlueBubbles self-chat cache", () => { + const directLookup = { + accountId: "default", + chatGuid: "iMessage;-;+15551234567", + senderId: "+15551234567", + } as const; + + afterEach(() => { + resetBlueBubblesSelfChatCache(); + vi.useRealTimers(); + }); + + it("matches repeated lookups for the same scope, timestamp, and text", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + rememberBlueBubblesSelfChatCopy({ + ...directLookup, + body: " hello\r\nworld ", + timestamp: 123, + }); + + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: "hello\nworld", + timestamp: 123, + }), + ).toBe(true); + }); + + it("canonicalizes DM scope across chatIdentifier and chatGuid", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + rememberBlueBubblesSelfChatCopy({ + accountId: "default", + chatIdentifier: 
"+15551234567", + senderId: "+15551234567", + body: "hello", + timestamp: 123, + }); + + expect( + hasBlueBubblesSelfChatCopy({ + accountId: "default", + chatGuid: "iMessage;-;+15551234567", + senderId: "+15551234567", + body: "hello", + timestamp: 123, + }), + ).toBe(true); + + resetBlueBubblesSelfChatCache(); + + rememberBlueBubblesSelfChatCopy({ + accountId: "default", + chatGuid: "iMessage;-;+15551234567", + senderId: "+15551234567", + body: "hello", + timestamp: 123, + }); + + expect( + hasBlueBubblesSelfChatCopy({ + accountId: "default", + chatIdentifier: "+15551234567", + senderId: "+15551234567", + body: "hello", + timestamp: 123, + }), + ).toBe(true); + }); + + it("expires entries after the ttl window", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + rememberBlueBubblesSelfChatCopy({ + ...directLookup, + body: "hello", + timestamp: 123, + }); + + vi.advanceTimersByTime(11_001); + + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: "hello", + timestamp: 123, + }), + ).toBe(false); + }); + + it("evicts older entries when the cache exceeds its cap", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + for (let i = 0; i < 513; i += 1) { + rememberBlueBubblesSelfChatCopy({ + ...directLookup, + body: `message-${i}`, + timestamp: i, + }); + vi.advanceTimersByTime(1_001); + } + + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: "message-0", + timestamp: 0, + }), + ).toBe(false); + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: "message-512", + timestamp: 512, + }), + ).toBe(true); + }); + + it("enforces the cache cap even when cleanup is throttled", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + for (let i = 0; i < 513; i += 1) { + rememberBlueBubblesSelfChatCopy({ + ...directLookup, + body: `burst-${i}`, + timestamp: i, + }); + } + + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: 
"burst-0", + timestamp: 0, + }), + ).toBe(false); + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: "burst-512", + timestamp: 512, + }), + ).toBe(true); + }); + + it("does not collide long texts that differ only in the middle", () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + const prefix = "a".repeat(256); + const suffix = "b".repeat(256); + const longBodyA = `${prefix}${"x".repeat(300)}${suffix}`; + const longBodyB = `${prefix}${"y".repeat(300)}${suffix}`; + + rememberBlueBubblesSelfChatCopy({ + ...directLookup, + body: longBodyA, + timestamp: 123, + }); + + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: longBodyA, + timestamp: 123, + }), + ).toBe(true); + expect( + hasBlueBubblesSelfChatCopy({ + ...directLookup, + body: longBodyB, + timestamp: 123, + }), + ).toBe(false); + }); +}); diff --git a/extensions/bluebubbles/src/monitor-self-chat-cache.ts b/extensions/bluebubbles/src/monitor-self-chat-cache.ts new file mode 100644 index 00000000000..09d7167d769 --- /dev/null +++ b/extensions/bluebubbles/src/monitor-self-chat-cache.ts @@ -0,0 +1,127 @@ +import { createHash } from "node:crypto"; +import { extractHandleFromChatGuid, normalizeBlueBubblesHandle } from "./targets.js"; + +type SelfChatCacheKeyParts = { + accountId: string; + chatGuid?: string; + chatIdentifier?: string; + chatId?: number; + senderId: string; +}; + +type SelfChatLookup = SelfChatCacheKeyParts & { + body?: string; + timestamp?: number; +}; + +const SELF_CHAT_TTL_MS = 10_000; +const MAX_SELF_CHAT_CACHE_ENTRIES = 512; +const CLEANUP_MIN_INTERVAL_MS = 1_000; +const MAX_SELF_CHAT_BODY_CHARS = 32_768; +const cache = new Map(); +let lastCleanupAt = 0; + +function normalizeBody(body: string | undefined): string | null { + if (!body) { + return null; + } + const bounded = + body.length > MAX_SELF_CHAT_BODY_CHARS ? 
body.slice(0, MAX_SELF_CHAT_BODY_CHARS) : body; + const normalized = bounded.replace(/\r\n?/g, "\n").trim(); + return normalized ? normalized : null; +} + +function isUsableTimestamp(timestamp: number | undefined): timestamp is number { + return typeof timestamp === "number" && Number.isFinite(timestamp); +} + +function digestText(text: string): string { + return createHash("sha256").update(text).digest("base64url"); +} + +function trimOrUndefined(value?: string | null): string | undefined { + const trimmed = value?.trim(); + return trimmed ? trimmed : undefined; +} + +function resolveCanonicalChatTarget(parts: SelfChatCacheKeyParts): string | null { + const handleFromGuid = parts.chatGuid ? extractHandleFromChatGuid(parts.chatGuid) : null; + if (handleFromGuid) { + return handleFromGuid; + } + + const normalizedIdentifier = normalizeBlueBubblesHandle(parts.chatIdentifier ?? ""); + if (normalizedIdentifier) { + return normalizedIdentifier; + } + + return ( + trimOrUndefined(parts.chatGuid) ?? + trimOrUndefined(parts.chatIdentifier) ?? + (typeof parts.chatId === "number" ? String(parts.chatId) : null) + ); +} + +function buildScope(parts: SelfChatCacheKeyParts): string { + const target = resolveCanonicalChatTarget(parts) ?? 
parts.senderId; + return `${parts.accountId}:${target}`; +} + +function cleanupExpired(now = Date.now()): void { + if ( + lastCleanupAt !== 0 && + now >= lastCleanupAt && + now - lastCleanupAt < CLEANUP_MIN_INTERVAL_MS + ) { + return; + } + lastCleanupAt = now; + for (const [key, seenAt] of cache.entries()) { + if (now - seenAt > SELF_CHAT_TTL_MS) { + cache.delete(key); + } + } +} + +function enforceSizeCap(): void { + while (cache.size > MAX_SELF_CHAT_CACHE_ENTRIES) { + const oldestKey = cache.keys().next().value; + if (typeof oldestKey !== "string") { + break; + } + cache.delete(oldestKey); + } +} + +function buildKey(lookup: SelfChatLookup): string | null { + const body = normalizeBody(lookup.body); + if (!body || !isUsableTimestamp(lookup.timestamp)) { + return null; + } + return `${buildScope(lookup)}:${lookup.timestamp}:${digestText(body)}`; +} + +export function rememberBlueBubblesSelfChatCopy(lookup: SelfChatLookup): void { + cleanupExpired(); + const key = buildKey(lookup); + if (!key) { + return; + } + cache.set(key, Date.now()); + enforceSizeCap(); +} + +export function hasBlueBubblesSelfChatCopy(lookup: SelfChatLookup): boolean { + cleanupExpired(); + const key = buildKey(lookup); + if (!key) { + return false; + } + const seenAt = cache.get(key); + return typeof seenAt === "number" && Date.now() - seenAt <= SELF_CHAT_TTL_MS; +} + +export function resetBlueBubblesSelfChatCache(): void { + cache.clear(); + lastCleanupAt = 0; +} diff --git a/extensions/bluebubbles/src/monitor.test.ts b/extensions/bluebubbles/src/monitor.test.ts index b02019058b8..1ba2e27f0b6 100644 --- a/extensions/bluebubbles/src/monitor.test.ts +++ b/extensions/bluebubbles/src/monitor.test.ts @@ -5,6 +5,7 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { createPluginRuntimeMock } from "../../test-utils/plugin-runtime-mock.js"; import type { ResolvedBlueBubblesAccount } from "./accounts.js"; import { fetchBlueBubblesHistory } from "./history.js"; 
+import { resetBlueBubblesSelfChatCache } from "./monitor-self-chat-cache.js"; import { handleBlueBubblesWebhookRequest, registerBlueBubblesWebhookTarget, @@ -246,6 +247,7 @@ describe("BlueBubbles webhook monitor", () => { vi.clearAllMocks(); // Reset short ID state between tests for predictable behavior _resetBlueBubblesShortIdState(); + resetBlueBubblesSelfChatCache(); mockFetchBlueBubblesHistory.mockResolvedValue({ entries: [], resolved: true }); mockReadAllowFromStore.mockResolvedValue([]); mockUpsertPairingRequest.mockResolvedValue({ code: "TESTCODE", created: true }); @@ -259,6 +261,7 @@ describe("BlueBubbles webhook monitor", () => { afterEach(() => { unregister?.(); + vi.useRealTimers(); }); describe("DM pairing behavior vs allowFrom", () => { @@ -2676,5 +2679,449 @@ describe("BlueBubbles webhook monitor", () => { expect(mockDispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); }); + + it("drops reflected self-chat duplicates after a confirmed assistant outbound", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + const { sendMessageBlueBubbles } = await import("./send.js"); + vi.mocked(sendMessageBlueBubbles).mockResolvedValueOnce({ messageId: "msg-self-1" }); + + mockDispatchReplyWithBufferedBlockDispatcher.mockImplementationOnce(async (params) => { + await params.dispatcherOptions.deliver({ text: "replying now" }, { kind: "final" }); + return EMPTY_DISPATCH_RESULT; + }); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const inboundPayload = { + type: "new-message", + data: { + text: "hello", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-0", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await 
handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", inboundPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); + mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + + const fromMePayload = { + type: "new-message", + data: { + text: "replying now", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: true, + guid: "msg-self-1", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", fromMePayload), + createMockResponse(), + ); + await flushAsync(); + + const reflectedPayload = { + type: "new-message", + data: { + text: "replying now", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-2", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); + }); + + it("does not drop inbound messages when no fromMe self-chat copy was seen", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const inboundPayload = { + type: "new-message", + data: { + text: "genuinely new message", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-inbound-1", + chatGuid: "iMessage;-;+15551234567", + date: Date.now(), + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", 
inboundPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); + + it("does not drop reflected copies after the self-chat cache TTL expires", async () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-07T00:00:00Z")); + + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const fromMePayload = { + type: "new-message", + data: { + text: "ttl me", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: true, + guid: "msg-self-ttl-1", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", fromMePayload), + createMockResponse(), + ); + await vi.runAllTimersAsync(); + + mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + vi.advanceTimersByTime(10_001); + + const reflectedPayload = { + type: "new-message", + data: { + text: "ttl me", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-ttl-2", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload), + createMockResponse(), + ); + await vi.runAllTimersAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); + + it("does not cache regular fromMe DMs as self-chat reflections", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + unregister = 
registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const fromMePayload = { + type: "new-message", + data: { + text: "shared text", + handle: { address: "+15557654321" }, + isGroup: false, + isFromMe: true, + guid: "msg-normal-fromme", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", fromMePayload), + createMockResponse(), + ); + await flushAsync(); + + mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + + const inboundPayload = { + type: "new-message", + data: { + text: "shared text", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-normal-inbound", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", inboundPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); + + it("does not drop user-authored self-chat prompts without a confirmed assistant outbound", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const fromMePayload = { + type: "new-message", + data: { + text: "user-authored self prompt", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: true, + guid: "msg-self-user-1", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", 
fromMePayload), + createMockResponse(), + ); + await flushAsync(); + + mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + + const reflectedPayload = { + type: "new-message", + data: { + text: "user-authored self prompt", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-user-2", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); + + it("does not treat a pending text-only match as confirmed assistant outbound", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + const { sendMessageBlueBubbles } = await import("./send.js"); + vi.mocked(sendMessageBlueBubbles).mockResolvedValueOnce({ messageId: "ok" }); + + mockDispatchReplyWithBufferedBlockDispatcher.mockImplementationOnce(async (params) => { + await params.dispatcherOptions.deliver({ text: "same text" }, { kind: "final" }); + return EMPTY_DISPATCH_RESULT; + }); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const inboundPayload = { + type: "new-message", + data: { + text: "hello", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-race-0", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", inboundPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); + 
mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + + const fromMePayload = { + type: "new-message", + data: { + text: "same text", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: true, + guid: "msg-self-race-1", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", fromMePayload), + createMockResponse(), + ); + await flushAsync(); + + const reflectedPayload = { + type: "new-message", + data: { + text: "same text", + handle: { address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-self-race-2", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", reflectedPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); + + it("does not treat chatGuid-inferred sender ids as self-chat evidence", async () => { + const account = createMockAccount({ dmPolicy: "open" }); + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + unregister = registerBlueBubblesWebhookTarget({ + account, + config, + runtime: { log: vi.fn(), error: vi.fn() }, + core, + path: "/bluebubbles-webhook", + }); + + const timestamp = Date.now(); + const fromMePayload = { + type: "new-message", + data: { + text: "shared inferred text", + handle: null, + isGroup: false, + isFromMe: true, + guid: "msg-inferred-fromme", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", fromMePayload), + createMockResponse(), + ); + await flushAsync(); + + mockDispatchReplyWithBufferedBlockDispatcher.mockClear(); + + const inboundPayload = { + type: "new-message", + data: { + text: "shared inferred text", + handle: { 
address: "+15551234567" }, + isGroup: false, + isFromMe: false, + guid: "msg-inferred-inbound", + chatGuid: "iMessage;-;+15551234567", + date: timestamp, + }, + }; + + await handleBlueBubblesWebhookRequest( + createMockRequest("POST", "/bluebubbles-webhook", inboundPayload), + createMockResponse(), + ); + await flushAsync(); + + expect(mockDispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalled(); + }); }); }); diff --git a/extensions/bluebubbles/src/monitor.webhook-auth.test.ts b/extensions/bluebubbles/src/monitor.webhook-auth.test.ts index 7a6a29353bd..f6826ac510b 100644 --- a/extensions/bluebubbles/src/monitor.webhook-auth.test.ts +++ b/extensions/bluebubbles/src/monitor.webhook-auth.test.ts @@ -302,65 +302,102 @@ describe("BlueBubbles webhook monitor", () => { }; } - describe("webhook parsing + auth handling", () => { - it("rejects non-POST requests", async () => { - const account = createMockAccount(); - const config: OpenClawConfig = {}; - const core = createMockRuntime(); - setBlueBubblesRuntime(core); + async function dispatchWebhook(req: IncomingMessage) { + const res = createMockResponse(); + const handled = await handleBlueBubblesWebhookRequest(req, res); + return { handled, res }; + } - unregister = registerBlueBubblesWebhookTarget({ + function createWebhookRequestForTest(params?: { + method?: string; + url?: string; + body?: unknown; + headers?: Record; + remoteAddress?: string; + }) { + const req = createMockRequest( + params?.method ?? "POST", + params?.url ?? "/bluebubbles-webhook", + params?.body ?? 
{}, + params?.headers, + ); + if (params?.remoteAddress) { + setRequestRemoteAddress(req, params.remoteAddress); + } + return req; + } + + function createHangingWebhookRequest(url = "/bluebubbles-webhook?password=test-password") { + const req = new EventEmitter() as IncomingMessage; + const destroyMock = vi.fn(); + req.method = "POST"; + req.url = url; + req.headers = {}; + req.destroy = destroyMock as unknown as IncomingMessage["destroy"]; + setRequestRemoteAddress(req, "127.0.0.1"); + return { req, destroyMock }; + } + + function registerWebhookTargets( + params: Array<{ + account: ResolvedBlueBubblesAccount; + statusSink?: (event: unknown) => void; + }>, + ) { + const config: OpenClawConfig = {}; + const core = createMockRuntime(); + setBlueBubblesRuntime(core); + + const unregisterFns = params.map(({ account, statusSink }) => + registerBlueBubblesWebhookTarget({ account, config, runtime: { log: vi.fn(), error: vi.fn() }, core, path: "/bluebubbles-webhook", - }); + statusSink, + }), + ); - const req = createMockRequest("GET", "/bluebubbles-webhook", {}); - const res = createMockResponse(); + unregister = () => { + for (const unregisterFn of unregisterFns) { + unregisterFn(); + } + }; + } - const handled = await handleBlueBubblesWebhookRequest(req, res); + async function expectWebhookStatus( + req: IncomingMessage, + expectedStatus: number, + expectedBody?: string, + ) { + const { handled, res } = await dispatchWebhook(req); + expect(handled).toBe(true); + expect(res.statusCode).toBe(expectedStatus); + if (expectedBody !== undefined) { + expect(res.body).toBe(expectedBody); + } + return res; + } - expect(handled).toBe(true); - expect(res.statusCode).toBe(405); + describe("webhook parsing + auth handling", () => { + it("rejects non-POST requests", async () => { + setupWebhookTarget(); + const req = createWebhookRequestForTest({ method: "GET" }); + await expectWebhookStatus(req, 405); }); it("accepts POST requests with valid JSON payload", async () => { 
setupWebhookTarget(); const payload = createNewMessagePayload({ date: Date.now() }); - - const req = createMockRequest("POST", "/bluebubbles-webhook", payload); - const res = createMockResponse(); - - const handled = await handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(200); - expect(res.body).toBe("ok"); + const req = createWebhookRequestForTest({ body: payload }); + await expectWebhookStatus(req, 200, "ok"); }); it("rejects requests with invalid JSON", async () => { - const account = createMockAccount(); - const config: OpenClawConfig = {}; - const core = createMockRuntime(); - setBlueBubblesRuntime(core); - - unregister = registerBlueBubblesWebhookTarget({ - account, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", - }); - - const req = createMockRequest("POST", "/bluebubbles-webhook", "invalid json {{"); - const res = createMockResponse(); - - const handled = await handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(400); + setupWebhookTarget(); + const req = createWebhookRequestForTest({ body: "invalid json {{" }); + await expectWebhookStatus(req, 400); }); it("accepts URL-encoded payload wrappers", async () => { @@ -369,42 +406,17 @@ describe("BlueBubbles webhook monitor", () => { const encodedBody = new URLSearchParams({ payload: JSON.stringify(payload), }).toString(); - - const req = createMockRequest("POST", "/bluebubbles-webhook", encodedBody); - const res = createMockResponse(); - - const handled = await handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(200); - expect(res.body).toBe("ok"); + const req = createWebhookRequestForTest({ body: encodedBody }); + await expectWebhookStatus(req, 200, "ok"); }); it("returns 408 when request body times out (Slow-Loris protection)", async () => { vi.useFakeTimers(); try { - const account = 
createMockAccount(); - const config: OpenClawConfig = {}; - const core = createMockRuntime(); - setBlueBubblesRuntime(core); - - unregister = registerBlueBubblesWebhookTarget({ - account, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", - }); + setupWebhookTarget(); // Create a request that never sends data or ends (simulates slow-loris) - const req = new EventEmitter() as IncomingMessage; - req.method = "POST"; - req.url = "/bluebubbles-webhook?password=test-password"; - req.headers = {}; - (req as unknown as { socket: { remoteAddress: string } }).socket = { - remoteAddress: "127.0.0.1", - }; - req.destroy = vi.fn(); + const { req, destroyMock } = createHangingWebhookRequest(); const res = createMockResponse(); @@ -416,7 +428,7 @@ describe("BlueBubbles webhook monitor", () => { const handled = await handledPromise; expect(handled).toBe(true); expect(res.statusCode).toBe(408); - expect(req.destroy).toHaveBeenCalled(); + expect(destroyMock).toHaveBeenCalled(); } finally { vi.useRealTimers(); } @@ -424,140 +436,62 @@ describe("BlueBubbles webhook monitor", () => { it("rejects unauthorized requests before reading the body", async () => { const account = createMockAccount({ password: "secret-token" }); - const config: OpenClawConfig = {}; - const core = createMockRuntime(); - setBlueBubblesRuntime(core); - - unregister = registerBlueBubblesWebhookTarget({ - account, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", - }); - - const req = new EventEmitter() as IncomingMessage; - req.method = "POST"; - req.url = "/bluebubbles-webhook?password=wrong-token"; - req.headers = {}; + setupWebhookTarget({ account }); + const { req } = createHangingWebhookRequest("/bluebubbles-webhook?password=wrong-token"); const onSpy = vi.spyOn(req, "on"); - (req as unknown as { socket: { remoteAddress: string } }).socket = { - remoteAddress: "127.0.0.1", - }; - - const res = createMockResponse(); - 
const handled = await handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(401); + await expectWebhookStatus(req, 401); expect(onSpy).not.toHaveBeenCalledWith("data", expect.any(Function)); }); it("authenticates via password query parameter", async () => { const account = createMockAccount({ password: "secret-token" }); - - // Mock non-localhost request - const req = createMockRequest( - "POST", - "/bluebubbles-webhook?password=secret-token", - createNewMessagePayload(), - ); - setRequestRemoteAddress(req, "192.168.1.100"); setupWebhookTarget({ account }); - - const res = createMockResponse(); - const handled = await handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(200); + const req = createWebhookRequestForTest({ + url: "/bluebubbles-webhook?password=secret-token", + body: createNewMessagePayload(), + remoteAddress: "192.168.1.100", + }); + await expectWebhookStatus(req, 200); }); it("authenticates via x-password header", async () => { const account = createMockAccount({ password: "secret-token" }); - - const req = createMockRequest( - "POST", - "/bluebubbles-webhook", - createNewMessagePayload(), - { "x-password": "secret-token" }, // pragma: allowlist secret - ); - setRequestRemoteAddress(req, "192.168.1.100"); setupWebhookTarget({ account }); - - const res = createMockResponse(); - const handled = await handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(200); + const req = createWebhookRequestForTest({ + body: createNewMessagePayload(), + headers: { "x-password": "secret-token" }, // pragma: allowlist secret + remoteAddress: "192.168.1.100", + }); + await expectWebhookStatus(req, 200); }); it("rejects unauthorized requests with wrong password", async () => { const account = createMockAccount({ password: "secret-token" }); - const req = createMockRequest( - "POST", - 
"/bluebubbles-webhook?password=wrong-token", - createNewMessagePayload(), - ); - setRequestRemoteAddress(req, "192.168.1.100"); setupWebhookTarget({ account }); - - const res = createMockResponse(); - const handled = await handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(401); + const req = createWebhookRequestForTest({ + url: "/bluebubbles-webhook?password=wrong-token", + body: createNewMessagePayload(), + remoteAddress: "192.168.1.100", + }); + await expectWebhookStatus(req, 401); }); it("rejects ambiguous routing when multiple targets match the same password", async () => { const accountA = createMockAccount({ password: "secret-token" }); const accountB = createMockAccount({ password: "secret-token" }); - const config: OpenClawConfig = {}; - const core = createMockRuntime(); - setBlueBubblesRuntime(core); - const sinkA = vi.fn(); const sinkB = vi.fn(); + registerWebhookTargets([ + { account: accountA, statusSink: sinkA }, + { account: accountB, statusSink: sinkB }, + ]); - const req = createMockRequest("POST", "/bluebubbles-webhook?password=secret-token", { - type: "new-message", - data: { - text: "hello", - handle: { address: "+15551234567" }, - isGroup: false, - isFromMe: false, - guid: "msg-1", - }, - }); - (req as unknown as { socket: { remoteAddress: string } }).socket = { + const req = createWebhookRequestForTest({ + url: "/bluebubbles-webhook?password=secret-token", + body: createNewMessagePayload(), remoteAddress: "192.168.1.100", - }; - - const unregisterA = registerBlueBubblesWebhookTarget({ - account: accountA, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", - statusSink: sinkA, }); - const unregisterB = registerBlueBubblesWebhookTarget({ - account: accountB, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", - statusSink: sinkB, - }); - unregister = () => { - unregisterA(); - unregisterB(); - }; - - const 
res = createMockResponse(); - const handled = await handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(401); + await expectWebhookStatus(req, 401); expect(sinkA).not.toHaveBeenCalled(); expect(sinkB).not.toHaveBeenCalled(); }); @@ -565,107 +499,38 @@ describe("BlueBubbles webhook monitor", () => { it("ignores targets without passwords when a password-authenticated target matches", async () => { const accountStrict = createMockAccount({ password: "secret-token" }); const accountWithoutPassword = createMockAccount({ password: undefined }); - const config: OpenClawConfig = {}; - const core = createMockRuntime(); - setBlueBubblesRuntime(core); - const sinkStrict = vi.fn(); const sinkWithoutPassword = vi.fn(); + registerWebhookTargets([ + { account: accountStrict, statusSink: sinkStrict }, + { account: accountWithoutPassword, statusSink: sinkWithoutPassword }, + ]); - const req = createMockRequest("POST", "/bluebubbles-webhook?password=secret-token", { - type: "new-message", - data: { - text: "hello", - handle: { address: "+15551234567" }, - isGroup: false, - isFromMe: false, - guid: "msg-1", - }, - }); - (req as unknown as { socket: { remoteAddress: string } }).socket = { + const req = createWebhookRequestForTest({ + url: "/bluebubbles-webhook?password=secret-token", + body: createNewMessagePayload(), remoteAddress: "192.168.1.100", - }; - - const unregisterStrict = registerBlueBubblesWebhookTarget({ - account: accountStrict, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", - statusSink: sinkStrict, }); - const unregisterNoPassword = registerBlueBubblesWebhookTarget({ - account: accountWithoutPassword, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", - statusSink: sinkWithoutPassword, - }); - unregister = () => { - unregisterStrict(); - unregisterNoPassword(); - }; - - const res = createMockResponse(); - const handled = await 
handleBlueBubblesWebhookRequest(req, res); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(200); + await expectWebhookStatus(req, 200); expect(sinkStrict).toHaveBeenCalledTimes(1); expect(sinkWithoutPassword).not.toHaveBeenCalled(); }); it("requires authentication for loopback requests when password is configured", async () => { const account = createMockAccount({ password: "secret-token" }); - const config: OpenClawConfig = {}; - const core = createMockRuntime(); - setBlueBubblesRuntime(core); + setupWebhookTarget({ account }); for (const remoteAddress of ["127.0.0.1", "::1", "::ffff:127.0.0.1"]) { - const req = createMockRequest("POST", "/bluebubbles-webhook", { - type: "new-message", - data: { - text: "hello", - handle: { address: "+15551234567" }, - isGroup: false, - isFromMe: false, - guid: "msg-1", - }, - }); - (req as unknown as { socket: { remoteAddress: string } }).socket = { + const req = createWebhookRequestForTest({ + body: createNewMessagePayload(), remoteAddress, - }; - - const loopbackUnregister = registerBlueBubblesWebhookTarget({ - account, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", }); - - const res = createMockResponse(); - const handled = await handleBlueBubblesWebhookRequest(req, res); - expect(handled).toBe(true); - expect(res.statusCode).toBe(401); - - loopbackUnregister(); + await expectWebhookStatus(req, 401); } }); it("rejects targets without passwords for loopback and proxied-looking requests", async () => { const account = createMockAccount({ password: undefined }); - const config: OpenClawConfig = {}; - const core = createMockRuntime(); - setBlueBubblesRuntime(core); - - unregister = registerBlueBubblesWebhookTarget({ - account, - config, - runtime: { log: vi.fn(), error: vi.fn() }, - core, - path: "/bluebubbles-webhook", - }); + setupWebhookTarget({ account }); const headerVariants: Record[] = [ { host: "localhost" }, @@ -673,28 +538,12 @@ describe("BlueBubbles webhook 
monitor", () => { { host: "localhost", forwarded: "for=203.0.113.10;proto=https;host=example.com" }, ]; for (const headers of headerVariants) { - const req = createMockRequest( - "POST", - "/bluebubbles-webhook", - { - type: "new-message", - data: { - text: "hello", - handle: { address: "+15551234567" }, - isGroup: false, - isFromMe: false, - guid: "msg-1", - }, - }, + const req = createWebhookRequestForTest({ + body: createNewMessagePayload(), headers, - ); - (req as unknown as { socket: { remoteAddress: string } }).socket = { remoteAddress: "127.0.0.1", - }; - const res = createMockResponse(); - const handled = await handleBlueBubblesWebhookRequest(req, res); - expect(handled).toBe(true); - expect(res.statusCode).toBe(401); + }); + await expectWebhookStatus(req, 401); } }); diff --git a/extensions/bluebubbles/src/multipart.ts b/extensions/bluebubbles/src/multipart.ts index 851cca016b7..e7c840745bb 100644 --- a/extensions/bluebubbles/src/multipart.ts +++ b/extensions/bluebubbles/src/multipart.ts @@ -30,3 +30,11 @@ export async function postMultipartFormData(params: { params.timeoutMs, ); } + +export async function assertMultipartActionOk(response: Response, action: string): Promise { + if (response.ok) { + return; + } + const errorText = await response.text().catch(() => ""); + throw new Error(`BlueBubbles ${action} failed (${response.status}): ${errorText || "unknown"}`); +} diff --git a/extensions/bluebubbles/src/onboarding.ts b/extensions/bluebubbles/src/onboarding.ts index 86b9719ae24..eb66afdfe21 100644 --- a/extensions/bluebubbles/src/onboarding.ts +++ b/extensions/bluebubbles/src/onboarding.ts @@ -10,6 +10,7 @@ import { formatDocsLink, mergeAllowFromEntries, normalizeAccountId, + patchScopedAccountConfig, resolveAccountIdForConfigure, setTopLevelChannelDmPolicyWithAllowFrom, } from "openclaw/plugin-sdk/bluebubbles"; @@ -38,34 +39,14 @@ function setBlueBubblesAllowFrom( accountId: string, allowFrom: string[], ): OpenClawConfig { - if (accountId === 
DEFAULT_ACCOUNT_ID) { - return { - ...cfg, - channels: { - ...cfg.channels, - bluebubbles: { - ...cfg.channels?.bluebubbles, - allowFrom, - }, - }, - }; - } - return { - ...cfg, - channels: { - ...cfg.channels, - bluebubbles: { - ...cfg.channels?.bluebubbles, - accounts: { - ...cfg.channels?.bluebubbles?.accounts, - [accountId]: { - ...cfg.channels?.bluebubbles?.accounts?.[accountId], - allowFrom, - }, - }, - }, - }, - }; + return patchScopedAccountConfig({ + cfg, + channelKey: channel, + accountId, + patch: { allowFrom }, + ensureChannelEnabled: false, + ensureAccountEnabled: false, + }); } function parseBlueBubblesAllowFromInput(raw: string): string[] { diff --git a/extensions/bluebubbles/src/reactions.test.ts b/extensions/bluebubbles/src/reactions.test.ts index 419ccc81e45..0b55337b35c 100644 --- a/extensions/bluebubbles/src/reactions.test.ts +++ b/extensions/bluebubbles/src/reactions.test.ts @@ -19,7 +19,7 @@ describe("reactions", () => { }); describe("sendBlueBubblesReaction", () => { - async function expectRemovedReaction(emoji: string) { + async function expectRemovedReaction(emoji: string, expectedReaction = "-love") { mockFetch.mockResolvedValueOnce({ ok: true, text: () => Promise.resolve(""), @@ -37,7 +37,7 @@ describe("reactions", () => { }); const body = JSON.parse(mockFetch.mock.calls[0][1].body); - expect(body.reaction).toBe("-love"); + expect(body.reaction).toBe(expectedReaction); } it("throws when chatGuid is empty", async () => { @@ -327,45 +327,11 @@ describe("reactions", () => { describe("reaction removal aliases", () => { it("handles emoji-based removal", async () => { - mockFetch.mockResolvedValueOnce({ - ok: true, - text: () => Promise.resolve(""), - }); - - await sendBlueBubblesReaction({ - chatGuid: "chat-123", - messageGuid: "msg-123", - emoji: "👍", - remove: true, - opts: { - serverUrl: "http://localhost:1234", - password: "test", - }, - }); - - const body = JSON.parse(mockFetch.mock.calls[0][1].body); - 
expect(body.reaction).toBe("-like"); + await expectRemovedReaction("👍", "-like"); }); it("handles text alias removal", async () => { - mockFetch.mockResolvedValueOnce({ - ok: true, - text: () => Promise.resolve(""), - }); - - await sendBlueBubblesReaction({ - chatGuid: "chat-123", - messageGuid: "msg-123", - emoji: "haha", - remove: true, - opts: { - serverUrl: "http://localhost:1234", - password: "test", - }, - }); - - const body = JSON.parse(mockFetch.mock.calls[0][1].body); - expect(body.reaction).toBe("-laugh"); + await expectRemovedReaction("haha", "-laugh"); }); }); }); diff --git a/extensions/copilot-proxy/package.json b/extensions/copilot-proxy/package.json index e060ddd67f1..fdab55b3da8 100644 --- a/extensions/copilot-proxy/package.json +++ b/extensions/copilot-proxy/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/copilot-proxy", - "version": "2026.3.9", + "version": "2026.3.14", "private": true, "description": "OpenClaw Copilot Proxy provider plugin", "type": "module", diff --git a/extensions/device-pair/index.ts b/extensions/device-pair/index.ts index 7590703a32b..7ba88842a7a 100644 --- a/extensions/device-pair/index.ts +++ b/extensions/device-pair/index.ts @@ -2,6 +2,7 @@ import os from "node:os"; import type { OpenClawPluginApi } from "openclaw/plugin-sdk/device-pair"; import { approveDevicePairing, + issueDeviceBootstrapToken, listDevicePairing, resolveGatewayBindUrl, runPluginCommandWithTimeout, @@ -31,8 +32,7 @@ type DevicePairPluginConfig = { type SetupPayload = { url: string; - token?: string; - password?: string; + bootstrapToken: string; }; type ResolveUrlResult = { @@ -41,10 +41,8 @@ type ResolveUrlResult = { error?: string; }; -type ResolveAuthResult = { - token?: string; - password?: string; - label?: string; +type ResolveAuthLabelResult = { + label?: "token" | "password"; error?: string; }; @@ -110,13 +108,21 @@ function resolveScheme( return cfg.gateway?.tls?.enabled === true ? 
"wss" : "ws"; } -function isPrivateIPv4(address: string): boolean { +function parseIPv4Octets(address: string): [number, number, number, number] | null { const parts = address.split("."); - if (parts.length != 4) { - return false; + if (parts.length !== 4) { + return null; } const octets = parts.map((part) => Number.parseInt(part, 10)); if (octets.some((value) => !Number.isFinite(value) || value < 0 || value > 255)) { + return null; + } + return octets as [number, number, number, number]; +} + +function isPrivateIPv4(address: string): boolean { + const octets = parseIPv4Octets(address); + if (!octets) { return false; } const [a, b] = octets; @@ -133,12 +139,8 @@ function isPrivateIPv4(address: string): boolean { } function isTailnetIPv4(address: string): boolean { - const parts = address.split("."); - if (parts.length !== 4) { - return false; - } - const octets = parts.map((part) => Number.parseInt(part, 10)); - if (octets.some((value) => !Number.isFinite(value) || value < 0 || value > 255)) { + const octets = parseIPv4Octets(address); + if (!octets) { return false; } const [a, b] = octets; @@ -187,7 +189,7 @@ async function resolveTailnetHost(): Promise { ); } -function resolveAuth(cfg: OpenClawPluginApi["config"]): ResolveAuthResult { +function resolveAuthLabel(cfg: OpenClawPluginApi["config"]): ResolveAuthLabelResult { const mode = cfg.gateway?.auth?.mode; const token = pickFirstDefined([ @@ -203,13 +205,13 @@ function resolveAuth(cfg: OpenClawPluginApi["config"]): ResolveAuthResult { ]) ?? undefined; if (mode === "token" || mode === "password") { - return resolveRequiredAuth(mode, { token, password }); + return resolveRequiredAuthLabel(mode, { token, password }); } if (token) { - return { token, label: "token" }; + return { label: "token" }; } if (password) { - return { password, label: "password" }; + return { label: "password" }; } return { error: "Gateway auth is not configured (no token or password)." 
}; } @@ -227,17 +229,17 @@ function pickFirstDefined(candidates: Array): string | null { return null; } -function resolveRequiredAuth( +function resolveRequiredAuthLabel( mode: "token" | "password", values: { token?: string; password?: string }, -): ResolveAuthResult { +): ResolveAuthLabelResult { if (mode === "token") { return values.token - ? { token: values.token, label: "token" } + ? { label: "token" } : { error: "Gateway auth is set to token, but no token is configured." }; } return values.password - ? { password: values.password, label: "password" } + ? { label: "password" } : { error: "Gateway auth is set to password, but no password is configured." }; } @@ -393,9 +395,9 @@ export default function register(api: OpenClawPluginApi) { return { text: `✅ Paired ${label}${platformLabel}.` }; } - const auth = resolveAuth(api.config); - if (auth.error) { - return { text: `Error: ${auth.error}` }; + const authLabelResult = resolveAuthLabel(api.config); + if (authLabelResult.error) { + return { text: `Error: ${authLabelResult.error}` }; } const urlResult = await resolveGatewayUrl(api); @@ -405,14 +407,13 @@ export default function register(api: OpenClawPluginApi) { const payload: SetupPayload = { url: urlResult.url, - token: auth.token, - password: auth.password, + bootstrapToken: (await issueDeviceBootstrapToken()).token, }; if (action === "qr") { const setupCode = encodeSetupCode(payload); const qrAscii = await renderQrAscii(setupCode); - const authLabel = auth.label ?? "auth"; + const authLabel = authLabelResult.label ?? "auth"; const channel = ctx.channel; const target = ctx.senderId?.trim() || ctx.from?.trim() || ctx.to?.trim() || ""; @@ -503,7 +504,7 @@ export default function register(api: OpenClawPluginApi) { const channel = ctx.channel; const target = ctx.senderId?.trim() || ctx.from?.trim() || ctx.to?.trim() || ""; - const authLabel = auth.label ?? "auth"; + const authLabel = authLabelResult.label ?? 
"auth"; if (channel === "telegram" && target) { try { diff --git a/extensions/diagnostics-otel/package.json b/extensions/diagnostics-otel/package.json index 29c9b0ac79b..b51ead550ef 100644 --- a/extensions/diagnostics-otel/package.json +++ b/extensions/diagnostics-otel/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/diagnostics-otel", - "version": "2026.3.9", + "version": "2026.3.14", "description": "OpenClaw diagnostics OpenTelemetry exporter", "type": "module", "dependencies": { diff --git a/extensions/diffs/index.test.ts b/extensions/diffs/index.test.ts index df0a0a79192..c38da12bfcd 100644 --- a/extensions/diffs/index.test.ts +++ b/extensions/diffs/index.test.ts @@ -1,6 +1,8 @@ import type { IncomingMessage } from "node:http"; +import type { OpenClawPluginApi } from "openclaw/plugin-sdk/diffs"; import { describe, expect, it, vi } from "vitest"; import { createMockServerResponse } from "../../src/test-utils/mock-http-response.js"; +import { createTestPluginApi } from "../test-utils/plugin-api.js"; import plugin from "./index.js"; describe("diffs plugin registration", () => { @@ -9,33 +11,19 @@ describe("diffs plugin registration", () => { const registerHttpRoute = vi.fn(); const on = vi.fn(); - plugin.register?.({ - id: "diffs", - name: "Diffs", - description: "Diffs", - source: "test", - config: {}, - runtime: {} as never, - logger: { - info() {}, - warn() {}, - error() {}, - }, - registerTool, - registerHook() {}, - registerHttpRoute, - registerChannel() {}, - registerGatewayMethod() {}, - registerCli() {}, - registerService() {}, - registerProvider() {}, - registerCommand() {}, - registerContextEngine() {}, - resolvePath(input: string) { - return input; - }, - on, - }); + plugin.register?.( + createTestPluginApi({ + id: "diffs", + name: "Diffs", + description: "Diffs", + source: "test", + config: {}, + runtime: {} as never, + registerTool, + registerHttpRoute, + on, + }), + ); expect(registerTool).toHaveBeenCalledTimes(1); 
expect(registerHttpRoute).toHaveBeenCalledTimes(1); @@ -55,17 +43,15 @@ describe("diffs plugin registration", () => { }); it("applies plugin-config defaults through registered tool and viewer handler", async () => { - let registeredTool: - | { execute?: (toolCallId: string, params: Record) => Promise } - | undefined; - let registeredHttpRouteHandler: - | (( - req: IncomingMessage, - res: ReturnType, - ) => Promise) - | undefined; + type RegisteredTool = { + execute?: (toolCallId: string, params: Record) => Promise; + }; + type RegisteredHttpRouteParams = Parameters[0]; - plugin.register?.({ + let registeredTool: RegisteredTool | undefined; + let registeredHttpRouteHandler: RegisteredHttpRouteParams["handler"] | undefined; + + const api = createTestPluginApi({ id: "diffs", name: "Diffs", description: "Diffs", @@ -88,31 +74,16 @@ describe("diffs plugin registration", () => { }, }, runtime: {} as never, - logger: { - info() {}, - warn() {}, - error() {}, - }, - registerTool(tool) { + registerTool(tool: Parameters[0]) { registeredTool = typeof tool === "function" ? 
undefined : tool; }, - registerHook() {}, - registerHttpRoute(params) { - registeredHttpRouteHandler = params.handler as typeof registeredHttpRouteHandler; + registerHttpRoute(params: RegisteredHttpRouteParams) { + registeredHttpRouteHandler = params.handler; }, - registerChannel() {}, - registerGatewayMethod() {}, - registerCli() {}, - registerService() {}, - registerProvider() {}, - registerCommand() {}, - registerContextEngine() {}, - resolvePath(input: string) { - return input; - }, - on() {}, }); + plugin.register?.(api as unknown as OpenClawPluginApi); + const result = await registeredTool?.execute?.("tool-1", { before: "one\n", after: "two\n", diff --git a/extensions/diffs/package.json b/extensions/diffs/package.json index b685f985108..b92b16052b8 100644 --- a/extensions/diffs/package.json +++ b/extensions/diffs/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/diffs", - "version": "2026.3.9", + "version": "2026.3.14", "private": true, "description": "OpenClaw diff viewer plugin", "type": "module", @@ -8,7 +8,7 @@ "build:viewer": "bun build src/viewer-client.ts --target browser --format esm --minify --outfile assets/viewer-runtime.js" }, "dependencies": { - "@pierre/diffs": "1.0.11", + "@pierre/diffs": "1.1.0", "@sinclair/typebox": "0.34.48", "playwright-core": "1.58.2" }, diff --git a/extensions/diffs/src/browser.test.ts b/extensions/diffs/src/browser.test.ts index 9c3cf1365ea..c0b03d62cc0 100644 --- a/extensions/diffs/src/browser.test.ts +++ b/extensions/diffs/src/browser.test.ts @@ -1,8 +1,8 @@ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import type { OpenClawConfig } from "openclaw/plugin-sdk/diffs"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { createTempDiffRoot } from "./test-helpers.js"; const { launchMock } = vi.hoisted(() => ({ launchMock: vi.fn(), @@ -17,10 +17,11 @@ vi.mock("playwright-core", () => ({ describe("PlaywrightDiffScreenshotter", () => { let rootDir: 
string; let outputPath: string; + let cleanupRootDir: () => Promise; beforeEach(async () => { vi.useFakeTimers(); - rootDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-diffs-browser-")); + ({ rootDir, cleanup: cleanupRootDir } = await createTempDiffRoot("openclaw-diffs-browser-")); outputPath = path.join(rootDir, "preview.png"); launchMock.mockReset(); const browserModule = await import("./browser.js"); @@ -31,7 +32,7 @@ describe("PlaywrightDiffScreenshotter", () => { const browserModule = await import("./browser.js"); await browserModule.resetSharedBrowserStateForTests(); vi.useRealTimers(); - await fs.rm(rootDir, { recursive: true, force: true }); + await cleanupRootDir(); }); it("reuses the same browser across renders and closes it after the idle window", async () => { diff --git a/extensions/diffs/src/http.test.ts b/extensions/diffs/src/http.test.ts index 5e8c2927691..a1caef018e4 100644 --- a/extensions/diffs/src/http.test.ts +++ b/extensions/diffs/src/http.test.ts @@ -1,42 +1,38 @@ -import fs from "node:fs/promises"; import type { IncomingMessage } from "node:http"; -import os from "node:os"; -import path from "node:path"; import { afterEach, beforeEach, describe, expect, it } from "vitest"; import { createMockServerResponse } from "../../../src/test-utils/mock-http-response.js"; import { createDiffsHttpHandler } from "./http.js"; import { DiffArtifactStore } from "./store.js"; +import { createDiffStoreHarness } from "./test-helpers.js"; describe("createDiffsHttpHandler", () => { - let rootDir: string; let store: DiffArtifactStore; + let cleanupRootDir: () => Promise; - beforeEach(async () => { - rootDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-diffs-http-")); - store = new DiffArtifactStore({ rootDir }); - }); - - afterEach(async () => { - await fs.rm(rootDir, { recursive: true, force: true }); - }); - - it("serves a stored diff document", async () => { - const artifact = await store.createArtifact({ - html: "viewer", - title: "Demo", - 
inputKind: "before_after", - fileCount: 1, - }); - + async function handleLocalGet(url: string) { const handler = createDiffsHttpHandler({ store }); const res = createMockServerResponse(); const handled = await handler( localReq({ method: "GET", - url: artifact.viewerPath, + url, }), res, ); + return { handled, res }; + } + + beforeEach(async () => { + ({ store, cleanup: cleanupRootDir } = await createDiffStoreHarness("openclaw-diffs-http-")); + }); + + afterEach(async () => { + await cleanupRootDir(); + }); + + it("serves a stored diff document", async () => { + const artifact = await createViewerArtifact(store); + const { handled, res } = await handleLocalGet(artifact.viewerPath); expect(handled).toBe(true); expect(res.statusCode).toBe(200); @@ -45,21 +41,9 @@ describe("createDiffsHttpHandler", () => { }); it("rejects invalid tokens", async () => { - const artifact = await store.createArtifact({ - html: "viewer", - title: "Demo", - inputKind: "before_after", - fileCount: 1, - }); - - const handler = createDiffsHttpHandler({ store }); - const res = createMockServerResponse(); - const handled = await handler( - localReq({ - method: "GET", - url: artifact.viewerPath.replace(artifact.token, "bad-token"), - }), - res, + const artifact = await createViewerArtifact(store); + const { handled, res } = await handleLocalGet( + artifact.viewerPath.replace(artifact.token, "bad-token"), ); expect(handled).toBe(true); @@ -113,96 +97,52 @@ describe("createDiffsHttpHandler", () => { expect(String(res.body)).toContain("openclawDiffsReady"); }); - it("blocks non-loopback viewer access by default", async () => { - const artifact = await store.createArtifact({ - html: "viewer", - title: "Demo", - inputKind: "before_after", - fileCount: 1, - }); + it.each([ + { + name: "blocks non-loopback viewer access by default", + request: remoteReq, + allowRemoteViewer: false, + expectedStatusCode: 404, + }, + { + name: "blocks loopback requests that carry proxy forwarding headers by default", + 
request: localReq, + headers: { "x-forwarded-for": "203.0.113.10" }, + allowRemoteViewer: false, + expectedStatusCode: 404, + }, + { + name: "allows remote access when allowRemoteViewer is enabled", + request: remoteReq, + allowRemoteViewer: true, + expectedStatusCode: 200, + }, + { + name: "allows proxied loopback requests when allowRemoteViewer is enabled", + request: localReq, + headers: { "x-forwarded-for": "203.0.113.10" }, + allowRemoteViewer: true, + expectedStatusCode: 200, + }, + ])("$name", async ({ request, headers, allowRemoteViewer, expectedStatusCode }) => { + const artifact = await createViewerArtifact(store); - const handler = createDiffsHttpHandler({ store }); + const handler = createDiffsHttpHandler({ store, allowRemoteViewer }); const res = createMockServerResponse(); const handled = await handler( - remoteReq({ + request({ method: "GET", url: artifact.viewerPath, + headers, }), res, ); expect(handled).toBe(true); - expect(res.statusCode).toBe(404); - }); - - it("blocks loopback requests that carry proxy forwarding headers by default", async () => { - const artifact = await store.createArtifact({ - html: "viewer", - title: "Demo", - inputKind: "before_after", - fileCount: 1, - }); - - const handler = createDiffsHttpHandler({ store }); - const res = createMockServerResponse(); - const handled = await handler( - localReq({ - method: "GET", - url: artifact.viewerPath, - headers: { "x-forwarded-for": "203.0.113.10" }, - }), - res, - ); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(404); - }); - - it("allows remote access when allowRemoteViewer is enabled", async () => { - const artifact = await store.createArtifact({ - html: "viewer", - title: "Demo", - inputKind: "before_after", - fileCount: 1, - }); - - const handler = createDiffsHttpHandler({ store, allowRemoteViewer: true }); - const res = createMockServerResponse(); - const handled = await handler( - remoteReq({ - method: "GET", - url: artifact.viewerPath, - }), - res, - ); - - 
expect(handled).toBe(true); - expect(res.statusCode).toBe(200); - expect(res.body).toBe("viewer"); - }); - - it("allows proxied loopback requests when allowRemoteViewer is enabled", async () => { - const artifact = await store.createArtifact({ - html: "viewer", - title: "Demo", - inputKind: "before_after", - fileCount: 1, - }); - - const handler = createDiffsHttpHandler({ store, allowRemoteViewer: true }); - const res = createMockServerResponse(); - const handled = await handler( - localReq({ - method: "GET", - url: artifact.viewerPath, - headers: { "x-forwarded-for": "203.0.113.10" }, - }), - res, - ); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(200); - expect(res.body).toBe("viewer"); + expect(res.statusCode).toBe(expectedStatusCode); + if (expectedStatusCode === 200) { + expect(res.body).toBe("viewer"); + } }); it("rate-limits repeated remote misses", async () => { @@ -232,6 +172,15 @@ describe("createDiffsHttpHandler", () => { }); }); +async function createViewerArtifact(store: DiffArtifactStore) { + return await store.createArtifact({ + html: "viewer", + title: "Demo", + inputKind: "before_after", + fileCount: 1, + }); +} + function localReq(input: { method: string; url: string; diff --git a/extensions/diffs/src/render.test.ts b/extensions/diffs/src/render.test.ts index f46a2c9abe9..006b239a39f 100644 --- a/extensions/diffs/src/render.test.ts +++ b/extensions/diffs/src/render.test.ts @@ -23,8 +23,7 @@ describe("renderDiffDocument", () => { expect(rendered.html).toContain("data-openclaw-diff-root"); expect(rendered.html).toContain("src/example.ts"); expect(rendered.html).toContain("/plugins/diffs/assets/viewer.js"); - expect(rendered.imageHtml).not.toContain("/plugins/diffs/assets/viewer.js"); - expect(rendered.imageHtml).toContain('data-openclaw-diffs-ready="true"'); + expect(rendered.imageHtml).toContain("/plugins/diffs/assets/viewer.js"); expect(rendered.imageHtml).toContain("max-width: 960px;"); 
expect(rendered.imageHtml).toContain("--diffs-font-size: 16px;"); expect(rendered.html).toContain("min-height: 100vh;"); diff --git a/extensions/diffs/src/render.ts b/extensions/diffs/src/render.ts index fb3d089c90a..364252c0b3b 100644 --- a/extensions/diffs/src/render.ts +++ b/extensions/diffs/src/render.ts @@ -1,5 +1,12 @@ -import type { FileContents, FileDiffMetadata, SupportedLanguages } from "@pierre/diffs"; -import { parsePatchFiles } from "@pierre/diffs"; +import fs from "node:fs/promises"; +import { createRequire } from "node:module"; +import type { + FileContents, + FileDiffMetadata, + SupportedLanguages, + ThemeRegistrationResolved, +} from "@pierre/diffs"; +import { RegisteredCustomThemes, parsePatchFiles } from "@pierre/diffs"; import { preloadFileDiff, preloadMultiFileDiff } from "@pierre/diffs/ssr"; import type { DiffInput, @@ -13,6 +20,45 @@ import { VIEWER_LOADER_PATH } from "./viewer-assets.js"; const DEFAULT_FILE_NAME = "diff.txt"; const MAX_PATCH_FILE_COUNT = 128; const MAX_PATCH_TOTAL_LINES = 120_000; +const diffsRequire = createRequire(import.meta.resolve("@pierre/diffs")); + +let pierreThemesPatched = false; + +function createThemeLoader( + themeName: "pierre-dark" | "pierre-light", + themePath: string, +): () => Promise { + let cachedTheme: ThemeRegistrationResolved | undefined; + return async () => { + if (cachedTheme) { + return cachedTheme; + } + const raw = await fs.readFile(themePath, "utf8"); + const parsed = JSON.parse(raw) as Record; + cachedTheme = { + ...parsed, + name: themeName, + } as ThemeRegistrationResolved; + return cachedTheme; + }; +} + +function patchPierreThemeLoadersForNode24(): void { + if (pierreThemesPatched) { + return; + } + try { + const darkThemePath = diffsRequire.resolve("@pierre/theme/themes/pierre-dark.json"); + const lightThemePath = diffsRequire.resolve("@pierre/theme/themes/pierre-light.json"); + RegisteredCustomThemes.set("pierre-dark", createThemeLoader("pierre-dark", darkThemePath)); + 
RegisteredCustomThemes.set("pierre-light", createThemeLoader("pierre-light", lightThemePath)); + pierreThemesPatched = true; + } catch { + // Keep upstream loaders if theme files cannot be resolved. + } +} + +patchPierreThemeLoadersForNode24(); function escapeCssString(value: string): string { return value.replaceAll("\\", "\\\\").replaceAll('"', '\\"'); @@ -195,14 +241,6 @@ function renderDiffCard(payload: DiffViewerPayload): string { `; } -function renderStaticDiffCard(prerenderedHTML: string): string { - return `
- - - -
`; -} - function buildHtmlDocument(params: { title: string; bodyHtml: string; @@ -211,7 +249,7 @@ function buildHtmlDocument(params: { runtimeMode: "viewer" | "image"; }): string { return ` - + @@ -303,7 +341,7 @@ function buildHtmlDocument(params: { ${params.bodyHtml} - ${params.runtimeMode === "viewer" ? `` : ""} + `; } @@ -314,16 +352,12 @@ type RenderedSection = { }; function buildRenderedSection(params: { - viewerPrerenderedHtml: string; - imagePrerenderedHtml: string; - payload: Omit; + viewerPayload: DiffViewerPayload; + imagePayload: DiffViewerPayload; }): RenderedSection { return { - viewer: renderDiffCard({ - prerenderedHTML: params.viewerPrerenderedHtml, - ...params.payload, - }), - image: renderStaticDiffCard(params.imagePrerenderedHtml), + viewer: renderDiffCard(params.viewerPayload), + image: renderDiffCard(params.imagePayload), }; } @@ -355,21 +389,20 @@ async function renderBeforeAfterDiff( }; const { viewerOptions, imageOptions } = buildRenderVariants(options); const [viewerResult, imageResult] = await Promise.all([ - preloadMultiFileDiff({ + preloadMultiFileDiffWithFallback({ oldFile, newFile, options: viewerOptions, }), - preloadMultiFileDiff({ + preloadMultiFileDiffWithFallback({ oldFile, newFile, options: imageOptions, }), ]); const section = buildRenderedSection({ - viewerPrerenderedHtml: viewerResult.prerenderedHTML, - imagePrerenderedHtml: imageResult.prerenderedHTML, - payload: { + viewerPayload: { + prerenderedHTML: viewerResult.prerenderedHTML, oldFile: viewerResult.oldFile, newFile: viewerResult.newFile, options: viewerOptions, @@ -378,6 +411,16 @@ async function renderBeforeAfterDiff( newFile: viewerResult.newFile, }), }, + imagePayload: { + prerenderedHTML: imageResult.prerenderedHTML, + oldFile: imageResult.oldFile, + newFile: imageResult.newFile, + options: imageOptions, + langs: buildPayloadLanguages({ + oldFile: imageResult.oldFile, + newFile: imageResult.newFile, + }), + }, }); return { @@ -410,24 +453,29 @@ async function 
renderPatchDiff( const sections = await Promise.all( files.map(async (fileDiff) => { const [viewerResult, imageResult] = await Promise.all([ - preloadFileDiff({ + preloadFileDiffWithFallback({ fileDiff, options: viewerOptions, }), - preloadFileDiff({ + preloadFileDiffWithFallback({ fileDiff, options: imageOptions, }), ]); return buildRenderedSection({ - viewerPrerenderedHtml: viewerResult.prerenderedHTML, - imagePrerenderedHtml: imageResult.prerenderedHTML, - payload: { + viewerPayload: { + prerenderedHTML: viewerResult.prerenderedHTML, fileDiff: viewerResult.fileDiff, options: viewerOptions, langs: buildPayloadLanguages({ fileDiff: viewerResult.fileDiff }), }, + imagePayload: { + prerenderedHTML: imageResult.prerenderedHTML, + fileDiff: imageResult.fileDiff, + options: imageOptions, + langs: buildPayloadLanguages({ fileDiff: imageResult.fileDiff }), + }, }); }), ); @@ -468,3 +516,49 @@ export async function renderDiffDocument( inputKind: input.kind, }; } + +type PreloadedFileDiffResult = Awaited>; +type PreloadedMultiFileDiffResult = Awaited>; + +function shouldFallbackToClientHydration(error: unknown): boolean { + return ( + error instanceof TypeError && + error.message.includes('needs an import attribute of "type: json"') + ); +} + +async function preloadFileDiffWithFallback(params: { + fileDiff: FileDiffMetadata; + options: DiffViewerOptions; +}): Promise { + try { + return await preloadFileDiff(params); + } catch (error) { + if (!shouldFallbackToClientHydration(error)) { + throw error; + } + return { + fileDiff: params.fileDiff, + prerenderedHTML: "", + }; + } +} + +async function preloadMultiFileDiffWithFallback(params: { + oldFile: FileContents; + newFile: FileContents; + options: DiffViewerOptions; +}): Promise { + try { + return await preloadMultiFileDiff(params); + } catch (error) { + if (!shouldFallbackToClientHydration(error)) { + throw error; + } + return { + oldFile: params.oldFile, + newFile: params.newFile, + prerenderedHTML: "", + }; + } +} diff 
--git a/extensions/diffs/src/store.test.ts b/extensions/diffs/src/store.test.ts index d4e6aacd409..8039865b71b 100644 --- a/extensions/diffs/src/store.test.ts +++ b/extensions/diffs/src/store.test.ts @@ -1,21 +1,25 @@ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { DiffArtifactStore } from "./store.js"; +import { createDiffStoreHarness } from "./test-helpers.js"; describe("DiffArtifactStore", () => { let rootDir: string; let store: DiffArtifactStore; + let cleanupRootDir: () => Promise; beforeEach(async () => { - rootDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-diffs-store-")); - store = new DiffArtifactStore({ rootDir }); + ({ + rootDir, + store, + cleanup: cleanupRootDir, + } = await createDiffStoreHarness("openclaw-diffs-store-")); }); afterEach(async () => { vi.useRealTimers(); - await fs.rm(rootDir, { recursive: true, force: true }); + await cleanupRootDir(); }); it("creates and retrieves an artifact", async () => { diff --git a/extensions/diffs/src/test-helpers.ts b/extensions/diffs/src/test-helpers.ts new file mode 100644 index 00000000000..f97ed9573e1 --- /dev/null +++ b/extensions/diffs/src/test-helpers.ts @@ -0,0 +1,30 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { DiffArtifactStore } from "./store.js"; + +export async function createTempDiffRoot(prefix: string): Promise<{ + rootDir: string; + cleanup: () => Promise; +}> { + const rootDir = await fs.mkdtemp(path.join(os.tmpdir(), prefix)); + return { + rootDir, + cleanup: async () => { + await fs.rm(rootDir, { recursive: true, force: true }); + }, + }; +} + +export async function createDiffStoreHarness(prefix: string): Promise<{ + rootDir: string; + store: DiffArtifactStore; + cleanup: () => Promise; +}> { + const { rootDir, cleanup } = await createTempDiffRoot(prefix); + return { + rootDir, + store: new 
DiffArtifactStore({ rootDir }), + cleanup, + }; +} diff --git a/extensions/diffs/src/tool.test.ts b/extensions/diffs/src/tool.test.ts index 97ee6234148..2f845727274 100644 --- a/extensions/diffs/src/tool.test.ts +++ b/extensions/diffs/src/tool.test.ts @@ -1,25 +1,25 @@ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import type { OpenClawPluginApi } from "openclaw/plugin-sdk/diffs"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { createTestPluginApi } from "../../test-utils/plugin-api.js"; import type { DiffScreenshotter } from "./browser.js"; import { DEFAULT_DIFFS_TOOL_DEFAULTS } from "./config.js"; import { DiffArtifactStore } from "./store.js"; +import { createDiffStoreHarness } from "./test-helpers.js"; import { createDiffsTool } from "./tool.js"; import type { DiffRenderOptions } from "./types.js"; describe("diffs tool", () => { - let rootDir: string; let store: DiffArtifactStore; + let cleanupRootDir: () => Promise; beforeEach(async () => { - rootDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-diffs-tool-")); - store = new DiffArtifactStore({ rootDir }); + ({ store, cleanup: cleanupRootDir } = await createDiffStoreHarness("openclaw-diffs-tool-")); }); afterEach(async () => { - await fs.rm(rootDir, { recursive: true, force: true }); + await cleanupRootDir(); }); it("returns a viewer URL in view mode", async () => { @@ -57,7 +57,7 @@ describe("diffs tool", () => { const cleanupSpy = vi.spyOn(store, "scheduleCleanup"); const screenshotter = createPngScreenshotter({ assertHtml: (html) => { - expect(html).not.toContain("/plugins/diffs/assets/viewer.js"); + expect(html).toContain("/plugins/diffs/assets/viewer.js"); }, assertImage: (image) => { expect(image).toMatchObject({ @@ -136,9 +136,7 @@ describe("diffs tool", () => { mode: "file", }); - expect(screenshotter.screenshotHtml).toHaveBeenCalledTimes(1); - expect((result?.details as Record).mode).toBe("file"); - 
expect((result?.details as Record).viewerUrl).toBeUndefined(); + expectArtifactOnlyFileResult(screenshotter, result); }); it("honors ttlSeconds for artifact-only file output", async () => { @@ -228,9 +226,7 @@ describe("diffs tool", () => { after: "two\n", }); - expect(screenshotter.screenshotHtml).toHaveBeenCalledTimes(1); - expect((result?.details as Record).mode).toBe("file"); - expect((result?.details as Record).viewerUrl).toBeUndefined(); + expectArtifactOnlyFileResult(screenshotter, result); }); it("falls back to view output when both mode cannot render an image", async () => { @@ -336,13 +332,13 @@ describe("diffs tool", () => { const html = await store.readHtml(id); expect(html).toContain('body data-theme="light"'); expect(html).toContain("--diffs-font-size: 17px;"); - expect(html).toContain('--diffs-font-family: "JetBrains Mono"'); + expect(html).toContain("JetBrains Mono"); }); it("prefers explicit tool params over configured defaults", async () => { const screenshotter = createPngScreenshotter({ assertHtml: (html) => { - expect(html).not.toContain("/plugins/diffs/assets/viewer.js"); + expect(html).toContain("/plugins/diffs/assets/viewer.js"); }, assertImage: (image) => { expect(image).toMatchObject({ @@ -388,7 +384,7 @@ describe("diffs tool", () => { }); function createApi(): OpenClawPluginApi { - return { + return createTestPluginApi({ id: "diffs", name: "Diffs", description: "Diffs", @@ -400,26 +396,7 @@ function createApi(): OpenClawPluginApi { }, }, runtime: {} as OpenClawPluginApi["runtime"], - logger: { - info() {}, - warn() {}, - error() {}, - }, - registerTool() {}, - registerHook() {}, - registerHttpRoute() {}, - registerChannel() {}, - registerGatewayMethod() {}, - registerCli() {}, - registerService() {}, - registerProvider() {}, - registerCommand() {}, - registerContextEngine() {}, - resolvePath(input: string) { - return input; - }, - on() {}, - }; + }) as OpenClawPluginApi; } function createToolWithScreenshotter( @@ -435,6 +412,15 @@ 
function createToolWithScreenshotter( }); } +function expectArtifactOnlyFileResult( + screenshotter: DiffScreenshotter, + result: { details?: unknown } | null | undefined, +) { + expect(screenshotter.screenshotHtml).toHaveBeenCalledTimes(1); + expect((result?.details as Record).mode).toBe("file"); + expect((result?.details as Record).viewerUrl).toBeUndefined(); +} + function createPngScreenshotter( params: { assertHtml?: (html: string) => void; diff --git a/extensions/discord/package.json b/extensions/discord/package.json index f30f10ade51..a85eb37b85f 100644 --- a/extensions/discord/package.json +++ b/extensions/discord/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/discord", - "version": "2026.3.9", + "version": "2026.3.14", "description": "OpenClaw Discord channel plugin", "type": "module", "openclaw": { diff --git a/extensions/discord/src/subagent-hooks.test.ts b/extensions/discord/src/subagent-hooks.test.ts index d58f07c1314..6d5824f69ae 100644 --- a/extensions/discord/src/subagent-hooks.test.ts +++ b/extensions/discord/src/subagent-hooks.test.ts @@ -75,6 +75,27 @@ function getRequiredHandler( return handler; } +function resolveSubagentDeliveryTargetForTest(requesterOrigin: { + channel: string; + accountId: string; + to: string; + threadId?: string; +}) { + const handlers = registerHandlersForTest(); + const handler = getRequiredHandler(handlers, "subagent_delivery_target"); + return handler( + { + childSessionKey: "agent:main:subagent:child", + requesterSessionKey: "agent:main:main", + requesterOrigin, + childRunId: "run-1", + spawnMode: "session", + expectsCompletionMessage: true, + }, + {}, + ); +} + function createSpawnEvent(overrides?: { childSessionKey?: string; agentId?: string; @@ -324,25 +345,12 @@ describe("discord subagent hook handlers", () => { hookMocks.listThreadBindingsBySessionKey.mockReturnValueOnce([ { accountId: "work", threadId: "777" }, ]); - const handlers = registerHandlersForTest(); - const handler = getRequiredHandler(handlers, 
"subagent_delivery_target"); - - const result = handler( - { - childSessionKey: "agent:main:subagent:child", - requesterSessionKey: "agent:main:main", - requesterOrigin: { - channel: "discord", - accountId: "work", - to: "channel:123", - threadId: "777", - }, - childRunId: "run-1", - spawnMode: "session", - expectsCompletionMessage: true, - }, - {}, - ); + const result = resolveSubagentDeliveryTargetForTest({ + channel: "discord", + accountId: "work", + to: "channel:123", + threadId: "777", + }); expect(hookMocks.listThreadBindingsBySessionKey).toHaveBeenCalledWith({ targetSessionKey: "agent:main:subagent:child", @@ -364,24 +372,11 @@ describe("discord subagent hook handlers", () => { { accountId: "work", threadId: "777" }, { accountId: "work", threadId: "888" }, ]); - const handlers = registerHandlersForTest(); - const handler = getRequiredHandler(handlers, "subagent_delivery_target"); - - const result = handler( - { - childSessionKey: "agent:main:subagent:child", - requesterSessionKey: "agent:main:main", - requesterOrigin: { - channel: "discord", - accountId: "work", - to: "channel:123", - }, - childRunId: "run-1", - spawnMode: "session", - expectsCompletionMessage: true, - }, - {}, - ); + const result = resolveSubagentDeliveryTargetForTest({ + channel: "discord", + accountId: "work", + to: "channel:123", + }); expect(result).toBeUndefined(); }); diff --git a/extensions/feishu/package.json b/extensions/feishu/package.json index fc38816e1bd..805dd389b0a 100644 --- a/extensions/feishu/package.json +++ b/extensions/feishu/package.json @@ -1,12 +1,12 @@ { "name": "@openclaw/feishu", - "version": "2026.3.9", + "version": "2026.3.14", "description": "OpenClaw Feishu/Lark channel plugin (community maintained by @m1heng)", "type": "module", "dependencies": { "@larksuiteoapi/node-sdk": "^1.59.0", "@sinclair/typebox": "0.34.48", - "https-proxy-agent": "^7.0.6", + "https-proxy-agent": "^8.0.0", "zod": "^4.3.6" }, "openclaw": { diff --git 
a/extensions/feishu/src/accounts.test.ts b/extensions/feishu/src/accounts.test.ts index 979f2fa3791..cfe8d0abcdc 100644 --- a/extensions/feishu/src/accounts.test.ts +++ b/extensions/feishu/src/accounts.test.ts @@ -9,6 +9,23 @@ import type { FeishuConfig } from "./types.js"; const asConfig = (value: Partial) => value as FeishuConfig; +function makeDefaultAndRouterAccounts() { + return { + default: { appId: "cli_default", appSecret: "secret_default" }, // pragma: allowlist secret + "router-d": { appId: "cli_router", appSecret: "secret_router" }, // pragma: allowlist secret + }; +} + +function expectExplicitDefaultAccountSelection( + account: ReturnType, + appId: string, +) { + expect(account.accountId).toBe("router-d"); + expect(account.selectionSource).toBe("explicit-default"); + expect(account.configured).toBe(true); + expect(account.appId).toBe(appId); +} + function withEnvVar(key: string, value: string | undefined, run: () => void) { const prev = process.env[key]; if (value === undefined) { @@ -44,10 +61,7 @@ describe("resolveDefaultFeishuAccountId", () => { channels: { feishu: { defaultAccount: "router-d", - accounts: { - default: { appId: "cli_default", appSecret: "secret_default" }, // pragma: allowlist secret - "router-d": { appId: "cli_router", appSecret: "secret_router" }, // pragma: allowlist secret - }, + accounts: makeDefaultAndRouterAccounts(), }, }, }; @@ -241,6 +255,25 @@ describe("resolveFeishuCredentials", () => { domain: "feishu", }); }); + + it("does not resolve encryptKey SecretRefs outside webhook mode", () => { + const creds = resolveFeishuCredentials( + asConfig({ + connectionMode: "websocket", + appId: "cli_123", + appSecret: "secret_456", + encryptKey: { source: "file", provider: "default", id: "path/to/secret" } as never, + }), + ); + + expect(creds).toEqual({ + appId: "cli_123", + appSecret: "secret_456", // pragma: allowlist secret + encryptKey: undefined, + verificationToken: undefined, + domain: "feishu", + }); + }); }); 
describe("resolveFeishuAccount", () => { @@ -259,10 +292,7 @@ describe("resolveFeishuAccount", () => { }; const account = resolveFeishuAccount({ cfg: cfg as never, accountId: undefined }); - expect(account.accountId).toBe("router-d"); - expect(account.selectionSource).toBe("explicit-default"); - expect(account.configured).toBe(true); - expect(account.appId).toBe("top_level_app"); + expectExplicitDefaultAccountSelection(account, "top_level_app"); }); it("uses configured default account when accountId is omitted", () => { @@ -279,10 +309,7 @@ describe("resolveFeishuAccount", () => { }; const account = resolveFeishuAccount({ cfg: cfg as never, accountId: undefined }); - expect(account.accountId).toBe("router-d"); - expect(account.selectionSource).toBe("explicit-default"); - expect(account.configured).toBe(true); - expect(account.appId).toBe("cli_router"); + expectExplicitDefaultAccountSelection(account, "cli_router"); }); it("keeps explicit accountId selection", () => { @@ -290,10 +317,7 @@ describe("resolveFeishuAccount", () => { channels: { feishu: { defaultAccount: "router-d", - accounts: { - default: { appId: "cli_default", appSecret: "secret_default" }, // pragma: allowlist secret - "router-d": { appId: "cli_router", appSecret: "secret_router" }, // pragma: allowlist secret - }, + accounts: makeDefaultAndRouterAccounts(), }, }, }; diff --git a/extensions/feishu/src/accounts.ts b/extensions/feishu/src/accounts.ts index 016bc997458..b528f6ae0e5 100644 --- a/extensions/feishu/src/accounts.ts +++ b/extensions/feishu/src/accounts.ts @@ -169,10 +169,14 @@ export function resolveFeishuCredentials( if (!appId || !appSecret) { return null; } + const connectionMode = cfg?.connectionMode ?? "websocket"; return { appId, appSecret, - encryptKey: normalizeString(cfg?.encryptKey), + encryptKey: + connectionMode === "webhook" + ? 
resolveSecretLike(cfg?.encryptKey, "channels.feishu.encryptKey") + : normalizeString(cfg?.encryptKey), verificationToken: resolveSecretLike( cfg?.verificationToken, "channels.feishu.verificationToken", diff --git a/extensions/feishu/src/bot.ts b/extensions/feishu/src/bot.ts index 13a130b3d79..815f935ed94 100644 --- a/extensions/feishu/src/bot.ts +++ b/extensions/feishu/src/bot.ts @@ -15,7 +15,7 @@ import { } from "openclaw/plugin-sdk/feishu"; import { resolveFeishuAccount } from "./accounts.js"; import { createFeishuClient } from "./client.js"; -import { tryRecordMessage, tryRecordMessagePersistent } from "./dedup.js"; +import { finalizeFeishuMessageProcessing, tryRecordMessagePersistent } from "./dedup.js"; import { maybeCreateDynamicAgent } from "./dynamic-agent.js"; import { normalizeFeishuExternalKey } from "./external-keys.js"; import { downloadMessageResourceFeishu } from "./media.js"; @@ -867,8 +867,18 @@ export async function handleFeishuMessage(params: { runtime?: RuntimeEnv; chatHistories?: Map; accountId?: string; + processingClaimHeld?: boolean; }): Promise { - const { cfg, event, botOpenId, botName, runtime, chatHistories, accountId } = params; + const { + cfg, + event, + botOpenId, + botName, + runtime, + chatHistories, + accountId, + processingClaimHeld = false, + } = params; // Resolve account with merged config const account = resolveFeishuAccount({ cfg, accountId }); @@ -877,16 +887,15 @@ export async function handleFeishuMessage(params: { const log = runtime?.log ?? console.log; const error = runtime?.error ?? console.error; - // Dedup: synchronous memory guard prevents concurrent duplicate dispatch - // before the async persistent check completes. const messageId = event.message.message_id; - const memoryDedupeKey = `${account.accountId}:${messageId}`; - if (!tryRecordMessage(memoryDedupeKey)) { - log(`feishu: skipping duplicate message ${messageId} (memory dedup)`); - return; - } - // Persistent dedup survives restarts and reconnects. 
- if (!(await tryRecordMessagePersistent(messageId, account.accountId, log))) { + if ( + !(await finalizeFeishuMessageProcessing({ + messageId, + namespace: account.accountId, + log, + claimHeld: processingClaimHeld, + })) + ) { log(`feishu: skipping duplicate message ${messageId}`); return; } diff --git a/extensions/feishu/src/channel.ts b/extensions/feishu/src/channel.ts index 7c90136e70f..856941c4b21 100644 --- a/extensions/feishu/src/channel.ts +++ b/extensions/feishu/src/channel.ts @@ -129,7 +129,7 @@ export const feishuPlugin: ChannelPlugin = { defaultAccount: { type: "string" }, appId: { type: "string" }, appSecret: secretInputJsonSchema, - encryptKey: { type: "string" }, + encryptKey: secretInputJsonSchema, verificationToken: secretInputJsonSchema, domain: { oneOf: [ @@ -170,7 +170,7 @@ export const feishuPlugin: ChannelPlugin = { name: { type: "string" }, appId: { type: "string" }, appSecret: secretInputJsonSchema, - encryptKey: { type: "string" }, + encryptKey: secretInputJsonSchema, verificationToken: secretInputJsonSchema, domain: { type: "string", enum: ["feishu", "lark"] }, connectionMode: { type: "string", enum: ["websocket", "webhook"] }, diff --git a/extensions/feishu/src/config-schema.test.ts b/extensions/feishu/src/config-schema.test.ts index cdd4724d3fb..aacbac85062 100644 --- a/extensions/feishu/src/config-schema.test.ts +++ b/extensions/feishu/src/config-schema.test.ts @@ -1,6 +1,16 @@ import { describe, expect, it } from "vitest"; import { FeishuConfigSchema, FeishuGroupSchema } from "./config-schema.js"; +function expectSchemaIssue( + result: ReturnType, + issuePath: string, +) { + expect(result.success).toBe(false); + if (!result.success) { + expect(result.error.issues.some((issue) => issue.path.join(".") === issuePath)).toBe(true); + } +} + describe("FeishuConfigSchema webhook validation", () => { it("applies top-level defaults", () => { const result = FeishuConfigSchema.parse({}); @@ -39,15 +49,10 @@ describe("FeishuConfigSchema webhook 
validation", () => { appSecret: "secret_top", // pragma: allowlist secret }); - expect(result.success).toBe(false); - if (!result.success) { - expect( - result.error.issues.some((issue) => issue.path.join(".") === "verificationToken"), - ).toBe(true); - } + expectSchemaIssue(result, "verificationToken"); }); - it("accepts top-level webhook mode with verificationToken", () => { + it("rejects top-level webhook mode without encryptKey", () => { const result = FeishuConfigSchema.safeParse({ connectionMode: "webhook", verificationToken: "token_top", @@ -55,6 +60,18 @@ describe("FeishuConfigSchema webhook validation", () => { appSecret: "secret_top", // pragma: allowlist secret }); + expectSchemaIssue(result, "encryptKey"); + }); + + it("accepts top-level webhook mode with verificationToken and encryptKey", () => { + const result = FeishuConfigSchema.safeParse({ + connectionMode: "webhook", + verificationToken: "token_top", + encryptKey: "encrypt_top", + appId: "cli_top", + appSecret: "secret_top", // pragma: allowlist secret + }); + expect(result.success).toBe(true); }); @@ -69,19 +86,28 @@ describe("FeishuConfigSchema webhook validation", () => { }, }); - expect(result.success).toBe(false); - if (!result.success) { - expect( - result.error.issues.some( - (issue) => issue.path.join(".") === "accounts.main.verificationToken", - ), - ).toBe(true); - } + expectSchemaIssue(result, "accounts.main.verificationToken"); }); - it("accepts account webhook mode inheriting top-level verificationToken", () => { + it("rejects account webhook mode without encryptKey", () => { + const result = FeishuConfigSchema.safeParse({ + accounts: { + main: { + connectionMode: "webhook", + verificationToken: "token_main", + appId: "cli_main", + appSecret: "secret_main", // pragma: allowlist secret + }, + }, + }); + + expectSchemaIssue(result, "accounts.main.encryptKey"); + }); + + it("accepts account webhook mode inheriting top-level verificationToken and encryptKey", () => { const result = 
FeishuConfigSchema.safeParse({ verificationToken: "token_top", + encryptKey: "encrypt_top", accounts: { main: { connectionMode: "webhook", @@ -102,6 +128,31 @@ describe("FeishuConfigSchema webhook validation", () => { provider: "default", id: "FEISHU_VERIFICATION_TOKEN", }, + encryptKey: "encrypt_top", + appId: "cli_top", + appSecret: { + source: "env", + provider: "default", + id: "FEISHU_APP_SECRET", + }, + }); + + expect(result.success).toBe(true); + }); + + it("accepts SecretRef encryptKey in webhook mode", () => { + const result = FeishuConfigSchema.safeParse({ + connectionMode: "webhook", + verificationToken: { + source: "env", + provider: "default", + id: "FEISHU_VERIFICATION_TOKEN", + }, + encryptKey: { + source: "env", + provider: "default", + id: "FEISHU_ENCRYPT_KEY", + }, appId: "cli_top", appSecret: { source: "env", diff --git a/extensions/feishu/src/config-schema.ts b/extensions/feishu/src/config-schema.ts index 4060e6e2cbb..b78404de6f8 100644 --- a/extensions/feishu/src/config-schema.ts +++ b/extensions/feishu/src/config-schema.ts @@ -186,7 +186,7 @@ export const FeishuAccountConfigSchema = z name: z.string().optional(), // Display name for this account appId: z.string().optional(), appSecret: buildSecretInputSchema().optional(), - encryptKey: z.string().optional(), + encryptKey: buildSecretInputSchema().optional(), verificationToken: buildSecretInputSchema().optional(), domain: FeishuDomainSchema.optional(), connectionMode: FeishuConnectionModeSchema.optional(), @@ -204,7 +204,7 @@ export const FeishuConfigSchema = z // Top-level credentials (backward compatible for single-account mode) appId: z.string().optional(), appSecret: buildSecretInputSchema().optional(), - encryptKey: z.string().optional(), + encryptKey: buildSecretInputSchema().optional(), verificationToken: buildSecretInputSchema().optional(), domain: FeishuDomainSchema.optional().default("feishu"), connectionMode: FeishuConnectionModeSchema.optional().default("websocket"), @@ -240,13 
+240,23 @@ export const FeishuConfigSchema = z const defaultConnectionMode = value.connectionMode ?? "websocket"; const defaultVerificationTokenConfigured = hasConfiguredSecretInput(value.verificationToken); - if (defaultConnectionMode === "webhook" && !defaultVerificationTokenConfigured) { - ctx.addIssue({ - code: z.ZodIssueCode.custom, - path: ["verificationToken"], - message: - 'channels.feishu.connectionMode="webhook" requires channels.feishu.verificationToken', - }); + const defaultEncryptKeyConfigured = hasConfiguredSecretInput(value.encryptKey); + if (defaultConnectionMode === "webhook") { + if (!defaultVerificationTokenConfigured) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + path: ["verificationToken"], + message: + 'channels.feishu.connectionMode="webhook" requires channels.feishu.verificationToken', + }); + } + if (!defaultEncryptKeyConfigured) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + path: ["encryptKey"], + message: 'channels.feishu.connectionMode="webhook" requires channels.feishu.encryptKey', + }); + } } for (const [accountId, account] of Object.entries(value.accounts ?? 
{})) { @@ -259,6 +269,8 @@ export const FeishuConfigSchema = z } const accountVerificationTokenConfigured = hasConfiguredSecretInput(account.verificationToken) || defaultVerificationTokenConfigured; + const accountEncryptKeyConfigured = + hasConfiguredSecretInput(account.encryptKey) || defaultEncryptKeyConfigured; if (!accountVerificationTokenConfigured) { ctx.addIssue({ code: z.ZodIssueCode.custom, @@ -268,6 +280,15 @@ export const FeishuConfigSchema = z "a verificationToken (account-level or top-level)", }); } + if (!accountEncryptKeyConfigured) { + ctx.addIssue({ + code: z.ZodIssueCode.custom, + path: ["accounts", accountId, "encryptKey"], + message: + `channels.feishu.accounts.${accountId}.connectionMode="webhook" requires ` + + "an encryptKey (account-level or top-level)", + }); + } } if (value.dmPolicy === "open") { diff --git a/extensions/feishu/src/dedup.ts b/extensions/feishu/src/dedup.ts index 35f95d5c76b..fc3e9baad65 100644 --- a/extensions/feishu/src/dedup.ts +++ b/extensions/feishu/src/dedup.ts @@ -10,9 +10,15 @@ import { const DEDUP_TTL_MS = 24 * 60 * 60 * 1000; const MEMORY_MAX_SIZE = 1_000; const FILE_MAX_ENTRIES = 10_000; +const EVENT_DEDUP_TTL_MS = 5 * 60 * 1000; +const EVENT_MEMORY_MAX_SIZE = 2_000; type PersistentDedupeData = Record; const memoryDedupe = createDedupeCache({ ttlMs: DEDUP_TTL_MS, maxSize: MEMORY_MAX_SIZE }); +const processingClaims = createDedupeCache({ + ttlMs: EVENT_DEDUP_TTL_MS, + maxSize: EVENT_MEMORY_MAX_SIZE, +}); function resolveStateDirFromEnv(env: NodeJS.ProcessEnv = process.env): string { const stateOverride = env.OPENCLAW_STATE_DIR?.trim() || env.CLAWDBOT_STATE_DIR?.trim(); @@ -37,6 +43,103 @@ const persistentDedupe = createPersistentDedupe({ resolveFilePath: resolveNamespaceFilePath, }); +function resolveEventDedupeKey( + namespace: string, + messageId: string | undefined | null, +): string | null { + const trimmed = messageId?.trim(); + if (!trimmed) { + return null; + } + return `${namespace}:${trimmed}`; +} + 
+function normalizeMessageId(messageId: string | undefined | null): string | null { + const trimmed = messageId?.trim(); + return trimmed ? trimmed : null; +} + +function resolveMemoryDedupeKey( + namespace: string, + messageId: string | undefined | null, +): string | null { + const trimmed = normalizeMessageId(messageId); + if (!trimmed) { + return null; + } + return `${namespace}:${trimmed}`; +} + +export function tryBeginFeishuMessageProcessing( + messageId: string | undefined | null, + namespace = "global", +): boolean { + return !processingClaims.check(resolveEventDedupeKey(namespace, messageId)); +} + +export function releaseFeishuMessageProcessing( + messageId: string | undefined | null, + namespace = "global", +): void { + processingClaims.delete(resolveEventDedupeKey(namespace, messageId)); +} + +export async function finalizeFeishuMessageProcessing(params: { + messageId: string | undefined | null; + namespace?: string; + log?: (...args: unknown[]) => void; + claimHeld?: boolean; +}): Promise { + const { messageId, namespace = "global", log, claimHeld = false } = params; + const normalizedMessageId = normalizeMessageId(messageId); + const memoryKey = resolveMemoryDedupeKey(namespace, messageId); + if (!memoryKey || !normalizedMessageId) { + return false; + } + if (!claimHeld && !tryBeginFeishuMessageProcessing(normalizedMessageId, namespace)) { + return false; + } + if (!tryRecordMessage(memoryKey)) { + releaseFeishuMessageProcessing(normalizedMessageId, namespace); + return false; + } + if (!(await tryRecordMessagePersistent(normalizedMessageId, namespace, log))) { + releaseFeishuMessageProcessing(normalizedMessageId, namespace); + return false; + } + return true; +} + +export async function recordProcessedFeishuMessage( + messageId: string | undefined | null, + namespace = "global", + log?: (...args: unknown[]) => void, +): Promise { + const normalizedMessageId = normalizeMessageId(messageId); + const memoryKey = resolveMemoryDedupeKey(namespace, 
messageId); + if (!memoryKey || !normalizedMessageId) { + return false; + } + tryRecordMessage(memoryKey); + return await tryRecordMessagePersistent(normalizedMessageId, namespace, log); +} + +export async function hasProcessedFeishuMessage( + messageId: string | undefined | null, + namespace = "global", + log?: (...args: unknown[]) => void, +): Promise { + const normalizedMessageId = normalizeMessageId(messageId); + const memoryKey = resolveMemoryDedupeKey(namespace, messageId); + if (!memoryKey || !normalizedMessageId) { + return false; + } + if (hasRecordedMessage(memoryKey)) { + return true; + } + return hasRecordedMessagePersistent(normalizedMessageId, namespace, log); +} + /** * Synchronous dedup — memory only. * Kept for backward compatibility; prefer {@link tryRecordMessagePersistent}. diff --git a/extensions/feishu/src/media.test.ts b/extensions/feishu/src/media.test.ts index 813e5090292..80555c294ae 100644 --- a/extensions/feishu/src/media.test.ts +++ b/extensions/feishu/src/media.test.ts @@ -64,18 +64,21 @@ function expectMediaTimeoutClientConfigured(): void { ); } +function mockResolvedFeishuAccount() { + resolveFeishuAccountMock.mockReturnValue({ + configured: true, + accountId: "main", + config: {}, + appId: "app_id", + appSecret: "app_secret", + domain: "feishu", + }); +} + describe("sendMediaFeishu msg_type routing", () => { beforeEach(() => { vi.clearAllMocks(); - - resolveFeishuAccountMock.mockReturnValue({ - configured: true, - accountId: "main", - config: {}, - appId: "app_id", - appSecret: "app_secret", - domain: "feishu", - }); + mockResolvedFeishuAccount(); normalizeFeishuTargetMock.mockReturnValue("ou_target"); resolveReceiveIdTypeMock.mockReturnValue("open_id"); @@ -381,7 +384,7 @@ describe("sendMediaFeishu msg_type routing", () => { expect(messageResourceGetMock).not.toHaveBeenCalled(); }); - it("encodes Chinese filenames for file uploads", async () => { + it("preserves Chinese filenames for file uploads", async () => { await 
sendMediaFeishu({ cfg: {} as any, to: "user:ou_target", @@ -390,8 +393,7 @@ describe("sendMediaFeishu msg_type routing", () => { }); const createCall = fileCreateMock.mock.calls[0][0]; - expect(createCall.data.file_name).not.toBe("测试文档.pdf"); - expect(createCall.data.file_name).toBe(encodeURIComponent("测试文档") + ".pdf"); + expect(createCall.data.file_name).toBe("测试文档.pdf"); }); it("preserves ASCII filenames unchanged for file uploads", async () => { @@ -406,7 +408,7 @@ describe("sendMediaFeishu msg_type routing", () => { expect(createCall.data.file_name).toBe("report-2026.pdf"); }); - it("encodes special characters (em-dash, full-width brackets) in filenames", async () => { + it("preserves special Unicode characters (em-dash, full-width brackets) in filenames", async () => { await sendMediaFeishu({ cfg: {} as any, to: "user:ou_target", @@ -415,9 +417,7 @@ describe("sendMediaFeishu msg_type routing", () => { }); const createCall = fileCreateMock.mock.calls[0][0]; - expect(createCall.data.file_name).toMatch(/\.md$/); - expect(createCall.data.file_name).not.toContain("—"); - expect(createCall.data.file_name).not.toContain("("); + expect(createCall.data.file_name).toBe("报告—详情(2026).md"); }); }); @@ -427,71 +427,48 @@ describe("sanitizeFileNameForUpload", () => { expect(sanitizeFileNameForUpload("my-file_v2.txt")).toBe("my-file_v2.txt"); }); - it("encodes Chinese characters in basename, preserves extension", () => { - const result = sanitizeFileNameForUpload("测试文件.md"); - expect(result).toBe(encodeURIComponent("测试文件") + ".md"); - expect(result).toMatch(/\.md$/); + it("preserves Chinese characters", () => { + expect(sanitizeFileNameForUpload("测试文件.md")).toBe("测试文件.md"); + expect(sanitizeFileNameForUpload("武汉15座山登山信息汇总.csv")).toBe( + "武汉15座山登山信息汇总.csv", + ); }); - it("encodes em-dash and full-width brackets", () => { - const result = sanitizeFileNameForUpload("文件—说明(v2).pdf"); - expect(result).toMatch(/\.pdf$/); - expect(result).not.toContain("—"); - 
expect(result).not.toContain("("); - expect(result).not.toContain(")"); + it("preserves em-dash and full-width brackets", () => { + expect(sanitizeFileNameForUpload("文件—说明(v2).pdf")).toBe("文件—说明(v2).pdf"); }); - it("encodes single quotes and parentheses per RFC 5987", () => { - const result = sanitizeFileNameForUpload("文件'(test).txt"); - expect(result).toContain("%27"); - expect(result).toContain("%28"); - expect(result).toContain("%29"); - expect(result).toMatch(/\.txt$/); + it("preserves single quotes and parentheses", () => { + expect(sanitizeFileNameForUpload("文件'(test).txt")).toBe("文件'(test).txt"); }); - it("handles filenames without extension", () => { - const result = sanitizeFileNameForUpload("测试文件"); - expect(result).toBe(encodeURIComponent("测试文件")); + it("preserves filenames without extension", () => { + expect(sanitizeFileNameForUpload("测试文件")).toBe("测试文件"); }); - it("handles mixed ASCII and non-ASCII", () => { - const result = sanitizeFileNameForUpload("Report_报告_2026.xlsx"); - expect(result).toMatch(/\.xlsx$/); - expect(result).not.toContain("报告"); + it("preserves mixed ASCII and non-ASCII", () => { + expect(sanitizeFileNameForUpload("Report_报告_2026.xlsx")).toBe("Report_报告_2026.xlsx"); }); - it("encodes non-ASCII extensions", () => { - const result = sanitizeFileNameForUpload("报告.文档"); - expect(result).toContain("%E6%96%87%E6%A1%A3"); - expect(result).not.toContain("文档"); + it("preserves emoji filenames", () => { + expect(sanitizeFileNameForUpload("report_😀.txt")).toBe("report_😀.txt"); }); - it("encodes emoji filenames", () => { - const result = sanitizeFileNameForUpload("report_😀.txt"); - expect(result).toContain("%F0%9F%98%80"); - expect(result).toMatch(/\.txt$/); + it("strips control characters", () => { + expect(sanitizeFileNameForUpload("bad\x00file.txt")).toBe("bad_file.txt"); + expect(sanitizeFileNameForUpload("inject\r\nheader.txt")).toBe("inject__header.txt"); }); - it("encodes mixed ASCII and non-ASCII extensions", () => { - const result = 
sanitizeFileNameForUpload("notes_总结.v测试"); - expect(result).toContain("notes_"); - expect(result).toContain("%E6%B5%8B%E8%AF%95"); - expect(result).not.toContain("测试"); + it("strips quotes and backslashes to prevent header injection", () => { + expect(sanitizeFileNameForUpload('file"name.txt')).toBe("file_name.txt"); + expect(sanitizeFileNameForUpload("file\\name.txt")).toBe("file_name.txt"); }); }); describe("downloadMessageResourceFeishu", () => { beforeEach(() => { vi.clearAllMocks(); - - resolveFeishuAccountMock.mockReturnValue({ - configured: true, - accountId: "main", - config: {}, - appId: "app_id", - appSecret: "app_secret", - domain: "feishu", - }); + mockResolvedFeishuAccount(); createFeishuClientMock.mockReturnValue({ im: { diff --git a/extensions/feishu/src/media.ts b/extensions/feishu/src/media.ts index 4aba038b4a9..45596fe45ed 100644 --- a/extensions/feishu/src/media.ts +++ b/extensions/feishu/src/media.ts @@ -22,6 +22,45 @@ export type DownloadMessageResourceResult = { fileName?: string; }; +function createConfiguredFeishuMediaClient(params: { cfg: ClawdbotConfig; accountId?: string }): { + account: ReturnType; + client: ReturnType; +} { + const account = resolveFeishuAccount({ cfg: params.cfg, accountId: params.accountId }); + if (!account.configured) { + throw new Error(`Feishu account "${account.accountId}" not configured`); + } + + return { + account, + client: createFeishuClient({ + ...account, + httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS, + }), + }; +} + +function extractFeishuUploadKey( + response: unknown, + params: { + key: "image_key" | "file_key"; + errorPrefix: string; + }, +): string { + // SDK v1.30+ returns data directly without code wrapper on success. 
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any -- SDK response type + const responseAny = response as any; + if (responseAny.code !== undefined && responseAny.code !== 0) { + throw new Error(`${params.errorPrefix}: ${responseAny.msg || `code ${responseAny.code}`}`); + } + + const key = responseAny[params.key] ?? responseAny.data?.[params.key]; + if (!key) { + throw new Error(`${params.errorPrefix}: no ${params.key} returned`); + } + return key; +} + async function readFeishuResponseBuffer(params: { response: unknown; tmpDirPrefix: string; @@ -94,15 +133,7 @@ export async function downloadImageFeishu(params: { if (!normalizedImageKey) { throw new Error("Feishu image download failed: invalid image_key"); } - const account = resolveFeishuAccount({ cfg, accountId }); - if (!account.configured) { - throw new Error(`Feishu account "${account.accountId}" not configured`); - } - - const client = createFeishuClient({ - ...account, - httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS, - }); + const { client } = createConfiguredFeishuMediaClient({ cfg, accountId }); const response = await client.im.image.get({ path: { image_key: normalizedImageKey }, @@ -132,15 +163,7 @@ export async function downloadMessageResourceFeishu(params: { if (!normalizedFileKey) { throw new Error("Feishu message resource download failed: invalid file_key"); } - const account = resolveFeishuAccount({ cfg, accountId }); - if (!account.configured) { - throw new Error(`Feishu account "${account.accountId}" not configured`); - } - - const client = createFeishuClient({ - ...account, - httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS, - }); + const { client } = createConfiguredFeishuMediaClient({ cfg, accountId }); const response = await client.im.messageResource.get({ path: { message_id: messageId, file_key: normalizedFileKey }, @@ -179,15 +202,7 @@ export async function uploadImageFeishu(params: { accountId?: string; }): Promise { const { cfg, image, imageType = "message", accountId } = params; - 
const account = resolveFeishuAccount({ cfg, accountId }); - if (!account.configured) { - throw new Error(`Feishu account "${account.accountId}" not configured`); - } - - const client = createFeishuClient({ - ...account, - httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS, - }); + const { client } = createConfiguredFeishuMediaClient({ cfg, accountId }); // SDK accepts Buffer directly or fs.ReadStream for file paths // Using Readable.from(buffer) causes issues with form-data library @@ -202,38 +217,26 @@ export async function uploadImageFeishu(params: { }, }); - // SDK v1.30+ returns data directly without code wrapper on success - // On error, it throws or returns { code, msg } - // eslint-disable-next-line @typescript-eslint/no-explicit-any -- SDK response type - const responseAny = response as any; - if (responseAny.code !== undefined && responseAny.code !== 0) { - throw new Error(`Feishu image upload failed: ${responseAny.msg || `code ${responseAny.code}`}`); - } - - const imageKey = responseAny.image_key ?? responseAny.data?.image_key; - if (!imageKey) { - throw new Error("Feishu image upload failed: no image_key returned"); - } - - return { imageKey }; + return { + imageKey: extractFeishuUploadKey(response, { + key: "image_key", + errorPrefix: "Feishu image upload failed", + }), + }; } /** - * Encode a filename for safe use in Feishu multipart/form-data uploads. - * Non-ASCII characters (Chinese, em-dash, full-width brackets, etc.) cause - * the upload to silently fail when passed raw through the SDK's form-data - * serialization. RFC 5987 percent-encoding keeps headers 7-bit clean while - * Feishu's server decodes and preserves the original display name. + * Sanitize a filename for safe use in Feishu multipart/form-data uploads. + * Strips control characters and multipart-injection vectors (CWE-93) while + * preserving the original UTF-8 display name (Chinese, emoji, etc.). 
+ * + * Previous versions percent-encoded non-ASCII characters, but the Feishu + * `im.file.create` API uses `file_name` as a literal display name — it does + * NOT decode percent-encoding — so encoded filenames appeared as garbled text + * in chat (regression in v2026.3.2). */ export function sanitizeFileNameForUpload(fileName: string): string { - const ASCII_ONLY = /^[\x20-\x7E]+$/; - if (ASCII_ONLY.test(fileName)) { - return fileName; - } - return encodeURIComponent(fileName) - .replace(/'/g, "%27") - .replace(/\(/g, "%28") - .replace(/\)/g, "%29"); + return fileName.replace(/[\x00-\x1F\x7F\r\n"\\]/g, "_"); } /** @@ -249,15 +252,7 @@ export async function uploadFileFeishu(params: { accountId?: string; }): Promise { const { cfg, file, fileName, fileType, duration, accountId } = params; - const account = resolveFeishuAccount({ cfg, accountId }); - if (!account.configured) { - throw new Error(`Feishu account "${account.accountId}" not configured`); - } - - const client = createFeishuClient({ - ...account, - httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS, - }); + const { client } = createConfiguredFeishuMediaClient({ cfg, accountId }); // SDK accepts Buffer directly or fs.ReadStream for file paths // Using Readable.from(buffer) causes issues with form-data library @@ -276,19 +271,12 @@ export async function uploadFileFeishu(params: { }, }); - // SDK v1.30+ returns data directly without code wrapper on success - // eslint-disable-next-line @typescript-eslint/no-explicit-any -- SDK response type - const responseAny = response as any; - if (responseAny.code !== undefined && responseAny.code !== 0) { - throw new Error(`Feishu file upload failed: ${responseAny.msg || `code ${responseAny.code}`}`); - } - - const fileKey = responseAny.file_key ?? 
responseAny.data?.file_key; - if (!fileKey) { - throw new Error("Feishu file upload failed: no file_key returned"); - } - - return { fileKey }; + return { + fileKey: extractFeishuUploadKey(response, { + key: "file_key", + errorPrefix: "Feishu file upload failed", + }), + }; } /** diff --git a/extensions/feishu/src/monitor.account.ts b/extensions/feishu/src/monitor.account.ts index 601f78f0843..3f3cad8ddc3 100644 --- a/extensions/feishu/src/monitor.account.ts +++ b/extensions/feishu/src/monitor.account.ts @@ -12,10 +12,10 @@ import { import { handleFeishuCardAction, type FeishuCardActionEvent } from "./card-action.js"; import { createEventDispatcher } from "./client.js"; import { - hasRecordedMessage, - hasRecordedMessagePersistent, - tryRecordMessage, - tryRecordMessagePersistent, + hasProcessedFeishuMessage, + recordProcessedFeishuMessage, + releaseFeishuMessageProcessing, + tryBeginFeishuMessageProcessing, warmupDedupFromDisk, } from "./dedup.js"; import { isMentionForwardRequest } from "./mention.js"; @@ -24,14 +24,14 @@ import { botNames, botOpenIds } from "./monitor.state.js"; import { monitorWebhook, monitorWebSocket } from "./monitor.transport.js"; import { getFeishuRuntime } from "./runtime.js"; import { getMessageFeishu } from "./send.js"; -import type { ResolvedFeishuAccount } from "./types.js"; +import type { FeishuChatType, ResolvedFeishuAccount } from "./types.js"; const FEISHU_REACTION_VERIFY_TIMEOUT_MS = 1_500; export type FeishuReactionCreatedEvent = { message_id: string; chat_id?: string; - chat_type?: "p2p" | "group" | "private"; + chat_type?: string; reaction_type?: { emoji_type?: string }; operator_type?: string; user_id?: { open_id?: string }; @@ -105,10 +105,19 @@ export async function resolveReactionSyntheticEvent( return null; } + const fallbackChatType = reactedMsg.chatType; + const normalizedEventChatType = normalizeFeishuChatType(event.chat_type); + const resolvedChatType = normalizedEventChatType ?? 
fallbackChatType; + if (!resolvedChatType) { + logger?.( + `feishu[${accountId}]: skipping reaction ${emoji} on ${messageId} without chat type context`, + ); + return null; + } + const syntheticChatIdRaw = event.chat_id ?? reactedMsg.chatId; const syntheticChatId = syntheticChatIdRaw?.trim() ? syntheticChatIdRaw : `p2p:${senderId}`; - const syntheticChatType: "p2p" | "group" | "private" = - event.chat_type === "group" ? "group" : "p2p"; + const syntheticChatType: FeishuChatType = resolvedChatType; return { sender: { sender_id: { open_id: senderId }, @@ -126,6 +135,10 @@ export async function resolveReactionSyntheticEvent( }; } +function normalizeFeishuChatType(value: unknown): FeishuChatType | undefined { + return value === "group" || value === "private" || value === "p2p" ? value : undefined; +} + type RegisterEventHandlersContext = { cfg: ClawdbotConfig; accountId: string; @@ -251,6 +264,7 @@ function registerEventHandlers( runtime, chatHistories, accountId, + processingClaimHeld: true, }); await enqueue(chatId, task); }; @@ -278,10 +292,8 @@ function registerEventHandlers( return; } for (const messageId of suppressedIds) { - // Keep in-memory dedupe in sync with handleFeishuMessage's keying. 
- tryRecordMessage(`${accountId}:${messageId}`); try { - await tryRecordMessagePersistent(messageId, accountId, log); + await recordProcessedFeishuMessage(messageId, accountId, log); } catch (err) { error( `feishu[${accountId}]: failed to record merged dedupe id ${messageId}: ${String(err)}`, @@ -290,15 +302,7 @@ function registerEventHandlers( } }; const isMessageAlreadyProcessed = async (entry: FeishuMessageEvent): Promise => { - const messageId = entry.message.message_id?.trim(); - if (!messageId) { - return false; - } - const memoryKey = `${accountId}:${messageId}`; - if (hasRecordedMessage(memoryKey)) { - return true; - } - return hasRecordedMessagePersistent(messageId, accountId, log); + return await hasProcessedFeishuMessage(entry.message.message_id, accountId, log); }; const inboundDebouncer = core.channel.debounce.createInboundDebouncer({ debounceMs: inboundDebounceMs, @@ -371,19 +375,28 @@ function registerEventHandlers( }, }); }, - onError: (err) => { + onError: (err, entries) => { + for (const entry of entries) { + releaseFeishuMessageProcessing(entry.message.message_id, accountId); + } error(`feishu[${accountId}]: inbound debounce flush failed: ${String(err)}`); }, }); eventDispatcher.register({ "im.message.receive_v1": async (data) => { + const event = data as unknown as FeishuMessageEvent; + const messageId = event.message?.message_id?.trim(); + if (!tryBeginFeishuMessageProcessing(messageId, accountId)) { + log(`feishu[${accountId}]: dropping duplicate event for message ${messageId}`); + return; + } const processMessage = async () => { - const event = data as unknown as FeishuMessageEvent; await inboundDebouncer.enqueue(event); }; if (fireAndForget) { void processMessage().catch((err) => { + releaseFeishuMessageProcessing(messageId, accountId); error(`feishu[${accountId}]: error handling message: ${String(err)}`); }); return; @@ -391,6 +404,7 @@ function registerEventHandlers( try { await processMessage(); } catch (err) { + 
releaseFeishuMessageProcessing(messageId, accountId); error(`feishu[${accountId}]: error handling message: ${String(err)}`); } }, @@ -521,6 +535,9 @@ export async function monitorSingleAccount(params: MonitorSingleAccountParams): if (connectionMode === "webhook" && !account.verificationToken?.trim()) { throw new Error(`Feishu account "${accountId}" webhook mode requires verificationToken`); } + if (connectionMode === "webhook" && !account.encryptKey?.trim()) { + throw new Error(`Feishu account "${accountId}" webhook mode requires encryptKey`); + } const warmupCount = await warmupDedupFromDisk(accountId, log); if (warmupCount > 0) { diff --git a/extensions/feishu/src/monitor.reaction.test.ts b/extensions/feishu/src/monitor.reaction.test.ts index 5537af6b214..49da928ea3b 100644 --- a/extensions/feishu/src/monitor.reaction.test.ts +++ b/extensions/feishu/src/monitor.reaction.test.ts @@ -51,10 +51,11 @@ function makeReactionEvent( }; } -function createFetchedReactionMessage(chatId: string) { +function createFetchedReactionMessage(chatId: string, chatType?: "p2p" | "group" | "private") { return { messageId: "om_msg1", chatId, + chatType, senderOpenId: "ou_bot", content: "hello", contentType: "text", @@ -64,17 +65,38 @@ function createFetchedReactionMessage(chatId: string) { async function resolveReactionWithLookup(params: { event?: FeishuReactionCreatedEvent; lookupChatId: string; + lookupChatType?: "p2p" | "group" | "private"; }) { return await resolveReactionSyntheticEvent({ cfg, accountId: "default", event: params.event ?? makeReactionEvent(), botOpenId: "ou_bot", - fetchMessage: async () => createFetchedReactionMessage(params.lookupChatId), + fetchMessage: async () => + createFetchedReactionMessage(params.lookupChatId, params.lookupChatType), uuid: () => "fixed-uuid", }); } +async function resolveNonBotReaction(params?: { cfg?: ClawdbotConfig; uuid?: () => string }) { + return await resolveReactionSyntheticEvent({ + cfg: params?.cfg ?? 
cfg, + accountId: "default", + event: makeReactionEvent(), + botOpenId: "ou_bot", + fetchMessage: async () => ({ + messageId: "om_msg1", + chatId: "oc_group", + chatType: "group", + senderOpenId: "ou_other", + senderType: "user", + content: "hello", + contentType: "text", + }), + ...(params?.uuid ? { uuid: params.uuid } : {}), + }); +} + type FeishuMention = NonNullable[number]; function buildDebounceConfig(): ClawdbotConfig { @@ -176,11 +198,23 @@ function getFirstDispatchedEvent(): FeishuMessageEvent { return firstParams.event; } +function expectSingleDispatchedEvent(): FeishuMessageEvent { + expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); + return getFirstDispatchedEvent(); +} + +function expectParsedFirstDispatchedEvent(botOpenId = "ou_bot") { + const dispatched = expectSingleDispatchedEvent(); + return { + dispatched, + parsed: parseFeishuMessageEvent(dispatched, botOpenId), + }; +} + function setDedupPassThroughMocks(): void { - vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true); - vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true); - vi.spyOn(dedup, "hasRecordedMessage").mockReturnValue(false); - vi.spyOn(dedup, "hasRecordedMessagePersistent").mockResolvedValue(false); + vi.spyOn(dedup, "tryBeginFeishuMessageProcessing").mockReturnValue(true); + vi.spyOn(dedup, "recordProcessedFeishuMessage").mockResolvedValue(true); + vi.spyOn(dedup, "hasProcessedFeishuMessage").mockResolvedValue(false); } function createMention(params: { openId: string; name: string; key?: string }): FeishuMention { @@ -200,6 +234,12 @@ async function enqueueDebouncedMessage( await Promise.resolve(); } +function setStaleRetryMocks(messageId = "om_old") { + vi.spyOn(dedup, "hasProcessedFeishuMessage").mockImplementation( + async (currentMessageId) => currentMessageId === messageId, + ); +} + describe("resolveReactionSyntheticEvent", () => { it("filters app self-reactions", async () => { const event = makeReactionEvent({ operator_type: "app" }); @@ -259,27 
+299,12 @@ describe("resolveReactionSyntheticEvent", () => { }); it("filters reactions on non-bot messages", async () => { - const event = makeReactionEvent(); - const result = await resolveReactionSyntheticEvent({ - cfg, - accountId: "default", - event, - botOpenId: "ou_bot", - fetchMessage: async () => ({ - messageId: "om_msg1", - chatId: "oc_group", - senderOpenId: "ou_other", - senderType: "user", - content: "hello", - contentType: "text", - }), - }); + const result = await resolveNonBotReaction(); expect(result).toBeNull(); }); it("allows non-bot reactions when reactionNotifications is all", async () => { - const event = makeReactionEvent(); - const result = await resolveReactionSyntheticEvent({ + const result = await resolveNonBotReaction({ cfg: { channels: { feishu: { @@ -287,17 +312,6 @@ describe("resolveReactionSyntheticEvent", () => { }, }, } as ClawdbotConfig, - accountId: "default", - event, - botOpenId: "ou_bot", - fetchMessage: async () => ({ - messageId: "om_msg1", - chatId: "oc_group", - senderOpenId: "ou_other", - senderType: "user", - content: "hello", - contentType: "text", - }), uuid: () => "fixed-uuid", }); expect(result?.message.message_id).toBe("om_msg1:reaction:THUMBSUP:fixed-uuid"); @@ -348,21 +362,43 @@ describe("resolveReactionSyntheticEvent", () => { it("falls back to reacted message chat_id when event chat_id is absent", async () => { const result = await resolveReactionWithLookup({ lookupChatId: "oc_group_from_lookup", + lookupChatType: "group", }); expect(result?.message.chat_id).toBe("oc_group_from_lookup"); - expect(result?.message.chat_type).toBe("p2p"); + expect(result?.message.chat_type).toBe("group"); }); it("falls back to sender p2p chat when lookup returns empty chat_id", async () => { const result = await resolveReactionWithLookup({ lookupChatId: "", + lookupChatType: "p2p", }); expect(result?.message.chat_id).toBe("p2p:ou_user1"); expect(result?.message.chat_type).toBe("p2p"); }); + it("drops reactions without chat context 
when lookup does not provide chat_type", async () => { + const result = await resolveReactionWithLookup({ + lookupChatId: "oc_group_from_lookup", + }); + + expect(result).toBeNull(); + }); + + it("drops reactions when event chat_type is invalid and lookup cannot recover it", async () => { + const result = await resolveReactionWithLookup({ + event: makeReactionEvent({ + chat_id: "oc_group_from_event", + chat_type: "bogus" as "group", + }), + lookupChatId: "oc_group_from_lookup", + }); + + expect(result).toBeNull(); + }); + it("logs and drops reactions when lookup throws", async () => { const log = vi.fn(); const event = makeReactionEvent(); @@ -430,18 +466,16 @@ describe("Feishu inbound debounce regressions", () => { ); await vi.advanceTimersByTimeAsync(25); - expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); - const dispatched = getFirstDispatchedEvent(); + const dispatched = expectSingleDispatchedEvent(); const mergedMentions = dispatched.message.mentions ?? []; expect(mergedMentions.some((mention) => mention.id.open_id === "ou_bot")).toBe(true); expect(mergedMentions.some((mention) => mention.id.open_id === "ou_user_a")).toBe(false); }); it("passes prefetched botName through to handleFeishuMessage", async () => { - vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true); - vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true); - vi.spyOn(dedup, "hasRecordedMessage").mockReturnValue(false); - vi.spyOn(dedup, "hasRecordedMessagePersistent").mockResolvedValue(false); + vi.spyOn(dedup, "tryBeginFeishuMessageProcessing").mockReturnValue(true); + vi.spyOn(dedup, "recordProcessedFeishuMessage").mockResolvedValue(true); + vi.spyOn(dedup, "hasProcessedFeishuMessage").mockResolvedValue(false); const onMessage = await setupDebounceMonitor({ botName: "OpenClaw Bot" }); await onMessage( @@ -490,9 +524,7 @@ describe("Feishu inbound debounce regressions", () => { ); await vi.advanceTimersByTimeAsync(25); - 
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); - const dispatched = getFirstDispatchedEvent(); - const parsed = parseFeishuMessageEvent(dispatched, "ou_bot"); + const { dispatched, parsed } = expectParsedFirstDispatchedEvent(); expect(parsed.mentionedBot).toBe(true); expect(parsed.mentionTargets).toBeUndefined(); const mergedMentions = dispatched.message.mentions ?? []; @@ -520,19 +552,14 @@ describe("Feishu inbound debounce regressions", () => { ); await vi.advanceTimersByTimeAsync(25); - expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); - const dispatched = getFirstDispatchedEvent(); - const parsed = parseFeishuMessageEvent(dispatched, "ou_bot"); + const { parsed } = expectParsedFirstDispatchedEvent(); expect(parsed.mentionedBot).toBe(true); }); it("excludes previously processed retries from combined debounce text", async () => { - vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true); - vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true); - vi.spyOn(dedup, "hasRecordedMessage").mockImplementation((key) => key.endsWith(":om_old")); - vi.spyOn(dedup, "hasRecordedMessagePersistent").mockImplementation( - async (messageId) => messageId === "om_old", - ); + vi.spyOn(dedup, "tryBeginFeishuMessageProcessing").mockReturnValue(true); + vi.spyOn(dedup, "recordProcessedFeishuMessage").mockResolvedValue(true); + setStaleRetryMocks(); const onMessage = await setupDebounceMonitor(); await onMessage(createTextEvent({ messageId: "om_old", text: "stale" })); @@ -549,20 +576,16 @@ describe("Feishu inbound debounce regressions", () => { await Promise.resolve(); await vi.advanceTimersByTimeAsync(25); - expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); - const dispatched = getFirstDispatchedEvent(); + const dispatched = expectSingleDispatchedEvent(); expect(dispatched.message.message_id).toBe("om_new_2"); const combined = JSON.parse(dispatched.message.content) as { text?: string }; expect(combined.text).toBe("first\nsecond"); }); 
it("uses latest fresh message id when debounce batch ends with stale retry", async () => { - const recordSpy = vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true); - vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true); - vi.spyOn(dedup, "hasRecordedMessage").mockImplementation((key) => key.endsWith(":om_old")); - vi.spyOn(dedup, "hasRecordedMessagePersistent").mockImplementation( - async (messageId) => messageId === "om_old", - ); + vi.spyOn(dedup, "tryBeginFeishuMessageProcessing").mockReturnValue(true); + const recordSpy = vi.spyOn(dedup, "recordProcessedFeishuMessage").mockResolvedValue(true); + setStaleRetryMocks(); const onMessage = await setupDebounceMonitor(); await onMessage(createTextEvent({ messageId: "om_new", text: "fresh" })); @@ -573,12 +596,58 @@ describe("Feishu inbound debounce regressions", () => { await Promise.resolve(); await vi.advanceTimersByTimeAsync(25); - expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); - const dispatched = getFirstDispatchedEvent(); + const dispatched = expectSingleDispatchedEvent(); expect(dispatched.message.message_id).toBe("om_new"); const combined = JSON.parse(dispatched.message.content) as { text?: string }; expect(combined.text).toBe("fresh"); - expect(recordSpy).toHaveBeenCalledWith("default:om_old"); - expect(recordSpy).not.toHaveBeenCalledWith("default:om_new"); + expect(recordSpy).toHaveBeenCalledWith("om_old", "default", expect.any(Function)); + expect(recordSpy).not.toHaveBeenCalledWith("om_new", "default", expect.any(Function)); + }); + + it("releases early event dedupe when debounced dispatch fails", async () => { + setDedupPassThroughMocks(); + const enqueueMock = vi.fn(); + setFeishuRuntime( + createPluginRuntimeMock({ + channel: { + debounce: { + createInboundDebouncer: (params: { + onError?: (err: unknown, items: T[]) => void; + }) => ({ + enqueue: async (item: T) => { + enqueueMock(item); + params.onError?.(new Error("dispatch failed"), [item]); + }, + flushKey: async () 
=> {}, + }), + resolveInboundDebounceMs, + }, + text: { + hasControlCommand, + }, + }, + }), + ); + const onMessage = await setupDebounceMonitor(); + const event = createTextEvent({ messageId: "om_retryable", text: "hello" }); + + await enqueueDebouncedMessage(onMessage, event); + expect(enqueueMock).toHaveBeenCalledTimes(1); + + await enqueueDebouncedMessage(onMessage, event); + expect(enqueueMock).toHaveBeenCalledTimes(2); + expect(handleFeishuMessageMock).not.toHaveBeenCalled(); + }); + + it("drops duplicate inbound events before they re-enter the debounce pipeline", async () => { + const onMessage = await setupDebounceMonitor(); + const event = createTextEvent({ messageId: "om_duplicate", text: "hello" }); + + await enqueueDebouncedMessage(onMessage, event); + await vi.advanceTimersByTimeAsync(25); + await enqueueDebouncedMessage(onMessage, event); + await vi.advanceTimersByTimeAsync(25); + + expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1); }); }); diff --git a/extensions/feishu/src/monitor.startup.test.ts b/extensions/feishu/src/monitor.startup.test.ts index f5e19159f0a..96dbd52b8ef 100644 --- a/extensions/feishu/src/monitor.startup.test.ts +++ b/extensions/feishu/src/monitor.startup.test.ts @@ -3,33 +3,19 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import { monitorFeishuProvider, stopFeishuMonitor } from "./monitor.js"; const probeFeishuMock = vi.hoisted(() => vi.fn()); -const feishuClientMockModule = vi.hoisted(() => ({ - createFeishuWSClient: vi.fn(() => ({ start: vi.fn() })), - createEventDispatcher: vi.fn(() => ({ register: vi.fn() })), -})); -const feishuRuntimeMockModule = vi.hoisted(() => ({ - getFeishuRuntime: () => ({ - channel: { - debounce: { - resolveInboundDebounceMs: () => 0, - createInboundDebouncer: () => ({ - enqueue: async () => {}, - flushKey: async () => {}, - }), - }, - text: { - hasControlCommand: () => false, - }, - }, - }), -})); vi.mock("./probe.js", () => ({ probeFeishu: probeFeishuMock, })); 
-vi.mock("./client.js", () => feishuClientMockModule); -vi.mock("./runtime.js", () => feishuRuntimeMockModule); +vi.mock("./client.js", async () => { + const { createFeishuClientMockModule } = await import("./monitor.test-mocks.js"); + return createFeishuClientMockModule(); +}); +vi.mock("./runtime.js", async () => { + const { createFeishuRuntimeMockModule } = await import("./monitor.test-mocks.js"); + return createFeishuRuntimeMockModule(); +}); function buildMultiAccountWebsocketConfig(accountIds: string[]): ClawdbotConfig { return { @@ -52,6 +38,12 @@ function buildMultiAccountWebsocketConfig(accountIds: string[]): ClawdbotConfig } as ClawdbotConfig; } +async function waitForStartedAccount(started: string[], accountId: string) { + for (let i = 0; i < 10 && !started.includes(accountId); i += 1) { + await Promise.resolve(); + } +} + afterEach(() => { stopFeishuMonitor(); }); @@ -116,10 +108,7 @@ describe("Feishu monitor startup preflight", () => { }); try { - for (let i = 0; i < 10 && !started.includes("beta"); i += 1) { - await Promise.resolve(); - } - + await waitForStartedAccount(started, "beta"); expect(started).toEqual(["alpha", "beta"]); expect(started.filter((accountId) => accountId === "alpha")).toHaveLength(1); } finally { @@ -153,10 +142,7 @@ describe("Feishu monitor startup preflight", () => { }); try { - for (let i = 0; i < 10 && !started.includes("beta"); i += 1) { - await Promise.resolve(); - } - + await waitForStartedAccount(started, "beta"); expect(started).toEqual(["alpha", "beta"]); expect(runtime.error).toHaveBeenCalledWith( expect.stringContaining("bot info probe timed out"), diff --git a/extensions/feishu/src/monitor.transport.ts b/extensions/feishu/src/monitor.transport.ts index 49a9130bb61..d619f3cddb3 100644 --- a/extensions/feishu/src/monitor.transport.ts +++ b/extensions/feishu/src/monitor.transport.ts @@ -1,7 +1,9 @@ import * as http from "http"; +import crypto from "node:crypto"; import * as Lark from "@larksuiteoapi/node-sdk"; import { 
applyBasicWebhookRequestGuards, + readJsonBodyWithLimit, type RuntimeEnv, installRequestBodyLimitGuard, } from "openclaw/plugin-sdk/feishu"; @@ -26,6 +28,50 @@ export type MonitorTransportParams = { eventDispatcher: Lark.EventDispatcher; }; +function isFeishuWebhookPayload(value: unknown): value is Record { + return !!value && typeof value === "object" && !Array.isArray(value); +} + +function buildFeishuWebhookEnvelope( + req: http.IncomingMessage, + payload: Record, +): Record { + return Object.assign(Object.create({ headers: req.headers }), payload) as Record; +} + +function isFeishuWebhookSignatureValid(params: { + headers: http.IncomingHttpHeaders; + payload: Record; + encryptKey?: string; +}): boolean { + const encryptKey = params.encryptKey?.trim(); + if (!encryptKey) { + return true; + } + + const timestampHeader = params.headers["x-lark-request-timestamp"]; + const nonceHeader = params.headers["x-lark-request-nonce"]; + const signatureHeader = params.headers["x-lark-signature"]; + const timestamp = Array.isArray(timestampHeader) ? timestampHeader[0] : timestampHeader; + const nonce = Array.isArray(nonceHeader) ? nonceHeader[0] : nonceHeader; + const signature = Array.isArray(signatureHeader) ? 
signatureHeader[0] : signatureHeader; + if (!timestamp || !nonce || !signature) { + return false; + } + + const computedSignature = crypto + .createHash("sha256") + .update(timestamp + nonce + encryptKey + JSON.stringify(params.payload)) + .digest("hex"); + return computedSignature === signature; +} + +function respondText(res: http.ServerResponse, statusCode: number, body: string): void { + res.statusCode = statusCode; + res.setHeader("Content-Type", "text/plain; charset=utf-8"); + res.end(body); +} + export async function monitorWebSocket({ account, accountId, @@ -88,7 +134,6 @@ export async function monitorWebhook({ log(`feishu[${accountId}]: starting Webhook server on ${host}:${port}, path ${path}...`); const server = http.createServer(); - const webhookHandler = Lark.adaptDefault(path, eventDispatcher, { autoChallenge: true }); server.on("request", (req, res) => { res.on("finish", () => { @@ -118,15 +163,68 @@ export async function monitorWebhook({ return; } - void Promise.resolve(webhookHandler(req, res)) - .catch((err) => { + void (async () => { + try { + const bodyResult = await readJsonBodyWithLimit(req, { + maxBytes: FEISHU_WEBHOOK_MAX_BODY_BYTES, + timeoutMs: FEISHU_WEBHOOK_BODY_TIMEOUT_MS, + }); + if (guard.isTripped() || res.writableEnded) { + return; + } + if (!bodyResult.ok) { + if (bodyResult.code === "INVALID_JSON") { + respondText(res, 400, "Invalid JSON"); + } + return; + } + if (!isFeishuWebhookPayload(bodyResult.value)) { + respondText(res, 400, "Invalid JSON"); + return; + } + + // Lark's default adapter drops invalid signatures as an empty 200. Reject here instead. + if ( + !isFeishuWebhookSignatureValid({ + headers: req.headers, + payload: bodyResult.value, + encryptKey: account.encryptKey, + }) + ) { + respondText(res, 401, "Invalid signature"); + return; + } + + const { isChallenge, challenge } = Lark.generateChallenge(bodyResult.value, { + encryptKey: account.encryptKey ?? 
"", + }); + if (isChallenge) { + res.statusCode = 200; + res.setHeader("Content-Type", "application/json; charset=utf-8"); + res.end(JSON.stringify(challenge)); + return; + } + + const value = await eventDispatcher.invoke( + buildFeishuWebhookEnvelope(req, bodyResult.value), + { needCheck: false }, + ); + if (!res.headersSent) { + res.statusCode = 200; + res.setHeader("Content-Type", "application/json; charset=utf-8"); + res.end(JSON.stringify(value)); + } + } catch (err) { if (!guard.isTripped()) { error(`feishu[${accountId}]: webhook handler error: ${String(err)}`); + if (!res.headersSent) { + respondText(res, 500, "Internal Server Error"); + } } - }) - .finally(() => { + } finally { guard.dispose(); - }); + } + })(); }); httpServers.set(accountId, server); diff --git a/extensions/feishu/src/monitor.webhook-e2e.test.ts b/extensions/feishu/src/monitor.webhook-e2e.test.ts new file mode 100644 index 00000000000..a11957e3393 --- /dev/null +++ b/extensions/feishu/src/monitor.webhook-e2e.test.ts @@ -0,0 +1,214 @@ +import crypto from "node:crypto"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import { createFeishuRuntimeMockModule } from "./monitor.test-mocks.js"; +import { withRunningWebhookMonitor } from "./monitor.webhook.test-helpers.js"; + +const probeFeishuMock = vi.hoisted(() => vi.fn()); + +vi.mock("./probe.js", () => ({ + probeFeishu: probeFeishuMock, +})); + +vi.mock("./client.js", async () => { + const actual = await vi.importActual("./client.js"); + return { + ...actual, + createFeishuWSClient: vi.fn(() => ({ start: vi.fn() })), + }; +}); + +vi.mock("./runtime.js", () => createFeishuRuntimeMockModule()); + +import { monitorFeishuProvider, stopFeishuMonitor } from "./monitor.js"; + +function signFeishuPayload(params: { + encryptKey: string; + payload: Record; + timestamp?: string; + nonce?: string; +}): Record { + const timestamp = params.timestamp ?? "1711111111"; + const nonce = params.nonce ?? 
"nonce-test"; + const signature = crypto + .createHash("sha256") + .update(timestamp + nonce + params.encryptKey + JSON.stringify(params.payload)) + .digest("hex"); + return { + "content-type": "application/json", + "x-lark-request-timestamp": timestamp, + "x-lark-request-nonce": nonce, + "x-lark-signature": signature, + }; +} + +function encryptFeishuPayload(encryptKey: string, payload: Record): string { + const iv = crypto.randomBytes(16); + const key = crypto.createHash("sha256").update(encryptKey).digest(); + const cipher = crypto.createCipheriv("aes-256-cbc", key, iv); + const plaintext = Buffer.from(JSON.stringify(payload), "utf8"); + const encrypted = Buffer.concat([cipher.update(plaintext), cipher.final()]); + return Buffer.concat([iv, encrypted]).toString("base64"); +} + +async function postSignedPayload(url: string, payload: Record) { + return await fetch(url, { + method: "POST", + headers: signFeishuPayload({ encryptKey: "encrypt_key", payload }), + body: JSON.stringify(payload), + }); +} + +afterEach(() => { + stopFeishuMonitor(); +}); + +describe("Feishu webhook signed-request e2e", () => { + it("rejects invalid signatures with 401 instead of empty 200", async () => { + probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" }); + + await withRunningWebhookMonitor( + { + accountId: "invalid-signature", + path: "/hook-e2e-invalid-signature", + verificationToken: "verify_token", + encryptKey: "encrypt_key", + }, + monitorFeishuProvider, + async (url) => { + const payload = { type: "url_verification", challenge: "challenge-token" }; + const response = await fetch(url, { + method: "POST", + headers: { + ...signFeishuPayload({ encryptKey: "wrong_key", payload }), + }, + body: JSON.stringify(payload), + }); + + expect(response.status).toBe(401); + expect(await response.text()).toBe("Invalid signature"); + }, + ); + }); + + it("rejects missing signature headers with 401", async () => { + probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: 
"bot_open_id" }); + + await withRunningWebhookMonitor( + { + accountId: "missing-signature", + path: "/hook-e2e-missing-signature", + verificationToken: "verify_token", + encryptKey: "encrypt_key", + }, + monitorFeishuProvider, + async (url) => { + const response = await fetch(url, { + method: "POST", + headers: { "content-type": "application/json" }, + body: JSON.stringify({ type: "url_verification", challenge: "challenge-token" }), + }); + + expect(response.status).toBe(401); + expect(await response.text()).toBe("Invalid signature"); + }, + ); + }); + + it("returns 400 for invalid json before invoking the sdk", async () => { + probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" }); + + await withRunningWebhookMonitor( + { + accountId: "invalid-json", + path: "/hook-e2e-invalid-json", + verificationToken: "verify_token", + encryptKey: "encrypt_key", + }, + monitorFeishuProvider, + async (url) => { + const response = await fetch(url, { + method: "POST", + headers: { "content-type": "application/json" }, + body: "{not-json", + }); + + expect(response.status).toBe(400); + expect(await response.text()).toBe("Invalid JSON"); + }, + ); + }); + + it("accepts signed plaintext url_verification challenges end-to-end", async () => { + probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" }); + + await withRunningWebhookMonitor( + { + accountId: "signed-challenge", + path: "/hook-e2e-signed-challenge", + verificationToken: "verify_token", + encryptKey: "encrypt_key", + }, + monitorFeishuProvider, + async (url) => { + const payload = { type: "url_verification", challenge: "challenge-token" }; + const response = await postSignedPayload(url, payload); + + expect(response.status).toBe(200); + await expect(response.json()).resolves.toEqual({ challenge: "challenge-token" }); + }, + ); + }); + + it("accepts signed non-challenge events and reaches the dispatcher", async () => { + probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: 
"bot_open_id" }); + + await withRunningWebhookMonitor( + { + accountId: "signed-dispatch", + path: "/hook-e2e-signed-dispatch", + verificationToken: "verify_token", + encryptKey: "encrypt_key", + }, + monitorFeishuProvider, + async (url) => { + const payload = { + schema: "2.0", + header: { event_type: "unknown.event" }, + event: {}, + }; + const response = await postSignedPayload(url, payload); + + expect(response.status).toBe(200); + expect(await response.text()).toContain("no unknown.event event handle"); + }, + ); + }); + + it("accepts signed encrypted url_verification challenges end-to-end", async () => { + probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" }); + + await withRunningWebhookMonitor( + { + accountId: "encrypted-challenge", + path: "/hook-e2e-encrypted-challenge", + verificationToken: "verify_token", + encryptKey: "encrypt_key", + }, + monitorFeishuProvider, + async (url) => { + const payload = { + encrypt: encryptFeishuPayload("encrypt_key", { + type: "url_verification", + challenge: "encrypted-challenge-token", + }), + }; + const response = await postSignedPayload(url, payload); + + expect(response.status).toBe(200); + await expect(response.json()).resolves.toEqual({ + challenge: "encrypted-challenge-token", + }); + }, + ); + }); +}); diff --git a/extensions/feishu/src/monitor.webhook-security.test.ts b/extensions/feishu/src/monitor.webhook-security.test.ts index 466b9a4201a..957d874cc3a 100644 --- a/extensions/feishu/src/monitor.webhook-security.test.ts +++ b/extensions/feishu/src/monitor.webhook-security.test.ts @@ -1,11 +1,13 @@ -import { createServer } from "node:http"; -import type { AddressInfo } from "node:net"; -import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu"; import { afterEach, describe, expect, it, vi } from "vitest"; import { createFeishuClientMockModule, createFeishuRuntimeMockModule, } from "./monitor.test-mocks.js"; +import { + buildWebhookConfig, + getFreePort, + withRunningWebhookMonitor, +} 
from "./monitor.webhook.test-helpers.js"; const probeFeishuMock = vi.hoisted(() => vi.fn()); @@ -33,94 +35,6 @@ import { stopFeishuMonitor, } from "./monitor.js"; -async function getFreePort(): Promise { - const server = createServer(); - await new Promise((resolve) => server.listen(0, "127.0.0.1", () => resolve())); - const address = server.address() as AddressInfo | null; - if (!address) { - throw new Error("missing server address"); - } - await new Promise((resolve) => server.close(() => resolve())); - return address.port; -} - -async function waitUntilServerReady(url: string): Promise { - for (let i = 0; i < 50; i += 1) { - try { - const response = await fetch(url, { method: "GET" }); - if (response.status >= 200 && response.status < 500) { - return; - } - } catch { - // retry - } - await new Promise((resolve) => setTimeout(resolve, 20)); - } - throw new Error(`server did not start: ${url}`); -} - -function buildConfig(params: { - accountId: string; - path: string; - port: number; - verificationToken?: string; -}): ClawdbotConfig { - return { - channels: { - feishu: { - enabled: true, - accounts: { - [params.accountId]: { - enabled: true, - appId: "cli_test", - appSecret: "secret_test", // pragma: allowlist secret - connectionMode: "webhook", - webhookHost: "127.0.0.1", - webhookPort: params.port, - webhookPath: params.path, - verificationToken: params.verificationToken, - }, - }, - }, - }, - } as ClawdbotConfig; -} - -async function withRunningWebhookMonitor( - params: { - accountId: string; - path: string; - verificationToken: string; - }, - run: (url: string) => Promise, -) { - const port = await getFreePort(); - const cfg = buildConfig({ - accountId: params.accountId, - path: params.path, - port, - verificationToken: params.verificationToken, - }); - - const abortController = new AbortController(); - const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() }; - const monitorPromise = monitorFeishuProvider({ - config: cfg, - runtime, - abortSignal: 
abortController.signal, - }); - - const url = `http://127.0.0.1:${port}${params.path}`; - await waitUntilServerReady(url); - - try { - await run(url); - } finally { - abortController.abort(); - await monitorPromise; - } -} - afterEach(() => { clearFeishuWebhookRateLimitStateForTest(); stopFeishuMonitor(); @@ -130,7 +44,7 @@ describe("Feishu webhook security hardening", () => { it("rejects webhook mode without verificationToken", async () => { probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" }); - const cfg = buildConfig({ + const cfg = buildWebhookConfig({ accountId: "missing-token", path: "/hook-missing-token", port: await getFreePort(), @@ -141,6 +55,19 @@ describe("Feishu webhook security hardening", () => { ); }); + it("rejects webhook mode without encryptKey", async () => { + probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" }); + + const cfg = buildWebhookConfig({ + accountId: "missing-encrypt-key", + path: "/hook-missing-encrypt", + port: await getFreePort(), + verificationToken: "verify_token", + }); + + await expect(monitorFeishuProvider({ config: cfg })).rejects.toThrow(/requires encryptKey/i); + }); + it("returns 415 for POST requests without json content type", async () => { probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" }); await withRunningWebhookMonitor( @@ -148,7 +75,9 @@ describe("Feishu webhook security hardening", () => { accountId: "content-type", path: "/hook-content-type", verificationToken: "verify_token", + encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { const response = await fetch(url, { method: "POST", @@ -169,7 +98,9 @@ describe("Feishu webhook security hardening", () => { accountId: "rate-limit", path: "/hook-rate-limit", verificationToken: "verify_token", + encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { let saw429 = false; for (let i = 0; i < 130; i += 1) { diff --git 
a/extensions/feishu/src/monitor.webhook.test-helpers.ts b/extensions/feishu/src/monitor.webhook.test-helpers.ts new file mode 100644 index 00000000000..b9de2150bd4 --- /dev/null +++ b/extensions/feishu/src/monitor.webhook.test-helpers.ts @@ -0,0 +1,98 @@ +import { createServer } from "node:http"; +import type { AddressInfo } from "node:net"; +import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu"; +import { vi } from "vitest"; +import type { monitorFeishuProvider } from "./monitor.js"; + +export async function getFreePort(): Promise { + const server = createServer(); + await new Promise((resolve) => server.listen(0, "127.0.0.1", () => resolve())); + const address = server.address() as AddressInfo | null; + if (!address) { + throw new Error("missing server address"); + } + await new Promise((resolve) => server.close(() => resolve())); + return address.port; +} + +async function waitUntilServerReady(url: string): Promise { + for (let i = 0; i < 50; i += 1) { + try { + const response = await fetch(url, { method: "GET" }); + if (response.status >= 200 && response.status < 500) { + return; + } + } catch { + // retry + } + await new Promise((resolve) => setTimeout(resolve, 20)); + } + throw new Error(`server did not start: ${url}`); +} + +export function buildWebhookConfig(params: { + accountId: string; + path: string; + port: number; + verificationToken?: string; + encryptKey?: string; +}): ClawdbotConfig { + return { + channels: { + feishu: { + enabled: true, + accounts: { + [params.accountId]: { + enabled: true, + appId: "cli_test", + appSecret: "secret_test", // pragma: allowlist secret + connectionMode: "webhook", + webhookHost: "127.0.0.1", + webhookPort: params.port, + webhookPath: params.path, + encryptKey: params.encryptKey, + verificationToken: params.verificationToken, + }, + }, + }, + }, + } as ClawdbotConfig; +} + +export async function withRunningWebhookMonitor( + params: { + accountId: string; + path: string; + verificationToken: string; + 
encryptKey: string; + }, + monitor: typeof monitorFeishuProvider, + run: (url: string) => Promise, +) { + const port = await getFreePort(); + const cfg = buildWebhookConfig({ + accountId: params.accountId, + path: params.path, + port, + encryptKey: params.encryptKey, + verificationToken: params.verificationToken, + }); + + const abortController = new AbortController(); + const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() }; + const monitorPromise = monitor({ + config: cfg, + runtime, + abortSignal: abortController.signal, + }); + + const url = `http://127.0.0.1:${port}${params.path}`; + await waitUntilServerReady(url); + + try { + await run(url); + } finally { + abortController.abort(); + await monitorPromise; + } +} diff --git a/extensions/feishu/src/onboarding.ts b/extensions/feishu/src/onboarding.ts index 46ad40d7681..24d3bbcc413 100644 --- a/extensions/feishu/src/onboarding.ts +++ b/extensions/feishu/src/onboarding.ts @@ -370,6 +370,37 @@ export const feishuOnboardingAdapter: ChannelOnboardingAdapter = { }, }; } + const currentEncryptKey = (next.channels?.feishu as FeishuConfig | undefined)?.encryptKey; + const encryptKeyPromptState = buildSingleChannelSecretPromptState({ + accountConfigured: hasConfiguredSecretInput(currentEncryptKey), + hasConfigToken: hasConfiguredSecretInput(currentEncryptKey), + allowEnv: false, + }); + const encryptKeyResult = await promptSingleChannelSecretInput({ + cfg: next, + prompter, + providerHint: "feishu-webhook", + credentialLabel: "encrypt key", + accountConfigured: encryptKeyPromptState.accountConfigured, + canUseEnv: encryptKeyPromptState.canUseEnv, + hasConfigToken: encryptKeyPromptState.hasConfigToken, + envPrompt: "", + keepPrompt: "Feishu encrypt key already configured. 
Keep it?", + inputPrompt: "Enter Feishu encrypt key", + preferredEnvVar: "FEISHU_ENCRYPT_KEY", + }); + if (encryptKeyResult.action === "set") { + next = { + ...next, + channels: { + ...next.channels, + feishu: { + ...next.channels?.feishu, + encryptKey: encryptKeyResult.value, + }, + }, + }; + } const currentWebhookPath = (next.channels?.feishu as FeishuConfig | undefined)?.webhookPath; const webhookPath = String( await prompter.text({ diff --git a/extensions/feishu/src/outbound.test.ts b/extensions/feishu/src/outbound.test.ts index bed44df77a6..39b7c1e4a63 100644 --- a/extensions/feishu/src/outbound.test.ts +++ b/extensions/feishu/src/outbound.test.ts @@ -29,12 +29,16 @@ vi.mock("./runtime.js", () => ({ import { feishuOutbound } from "./outbound.js"; const sendText = feishuOutbound.sendText!; +function resetOutboundMocks() { + vi.clearAllMocks(); + sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" }); + sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" }); + sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" }); +} + describe("feishuOutbound.sendText local-image auto-convert", () => { beforeEach(() => { - vi.clearAllMocks(); - sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" }); - sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" }); - sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" }); + resetOutboundMocks(); }); async function createTmpImage(ext = ".png"): Promise<{ dir: string; file: string }> { @@ -52,6 +56,7 @@ describe("feishuOutbound.sendText local-image auto-convert", () => { to: "chat_1", text: file, accountId: "main", + mediaLocalRoots: [dir], }); expect(sendMediaFeishuMock).toHaveBeenCalledWith( @@ -59,6 +64,7 @@ describe("feishuOutbound.sendText local-image auto-convert", () => { to: "chat_1", mediaUrl: file, accountId: "main", + mediaLocalRoots: [dir], }), ); expect(sendMessageFeishuMock).not.toHaveBeenCalled(); @@ -179,10 +185,7 @@ 
describe("feishuOutbound.sendText local-image auto-convert", () => { describe("feishuOutbound.sendText replyToId forwarding", () => { beforeEach(() => { - vi.clearAllMocks(); - sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" }); - sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" }); - sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" }); + resetOutboundMocks(); }); it("forwards replyToId as replyToMessageId to sendMessageFeishu", async () => { @@ -247,10 +250,7 @@ describe("feishuOutbound.sendText replyToId forwarding", () => { describe("feishuOutbound.sendMedia replyToId forwarding", () => { beforeEach(() => { - vi.clearAllMocks(); - sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" }); - sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" }); - sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" }); + resetOutboundMocks(); }); it("forwards replyToId to sendMediaFeishu", async () => { @@ -290,10 +290,7 @@ describe("feishuOutbound.sendMedia replyToId forwarding", () => { describe("feishuOutbound.sendMedia renderMode", () => { beforeEach(() => { - vi.clearAllMocks(); - sendMessageFeishuMock.mockResolvedValue({ messageId: "text_msg" }); - sendMarkdownCardFeishuMock.mockResolvedValue({ messageId: "card_msg" }); - sendMediaFeishuMock.mockResolvedValue({ messageId: "media_msg" }); + resetOutboundMocks(); }); it("uses markdown cards for captions when renderMode=card", async () => { diff --git a/extensions/feishu/src/outbound.ts b/extensions/feishu/src/outbound.ts index 955777676ef..75e1fa8d42b 100644 --- a/extensions/feishu/src/outbound.ts +++ b/extensions/feishu/src/outbound.ts @@ -81,7 +81,7 @@ export const feishuOutbound: ChannelOutboundAdapter = { chunker: (text, limit) => getFeishuRuntime().channel.text.chunkMarkdownText(text, limit), chunkerMode: "markdown", textChunkLimit: 4000, - sendText: async ({ cfg, to, text, accountId, replyToId, threadId }) => { + sendText: async ({ 
cfg, to, text, accountId, replyToId, threadId, mediaLocalRoots }) => { const replyToMessageId = resolveReplyToMessageId({ replyToId, threadId }); // Scheme A compatibility shim: // when upstream accidentally returns a local image path as plain text, @@ -95,6 +95,7 @@ export const feishuOutbound: ChannelOutboundAdapter = { mediaUrl: localImagePath, accountId: accountId ?? undefined, replyToMessageId, + mediaLocalRoots, }); return { channel: "feishu", ...result }; } catch (err) { diff --git a/extensions/feishu/src/probe.test.ts b/extensions/feishu/src/probe.test.ts index b93935cccc6..bfc270a4459 100644 --- a/extensions/feishu/src/probe.test.ts +++ b/extensions/feishu/src/probe.test.ts @@ -8,6 +8,22 @@ vi.mock("./client.js", () => ({ import { FEISHU_PROBE_REQUEST_TIMEOUT_MS, probeFeishu, clearProbeCache } from "./probe.js"; +const DEFAULT_CREDS = { appId: "cli_123", appSecret: "secret" } as const; // pragma: allowlist secret +const DEFAULT_SUCCESS_RESPONSE = { + code: 0, + bot: { bot_name: "TestBot", open_id: "ou_abc123" }, +} as const; +const DEFAULT_SUCCESS_RESULT = { + ok: true, + appId: "cli_123", + botName: "TestBot", + botOpenId: "ou_abc123", +} as const; +const BOT1_RESPONSE = { + code: 0, + bot: { bot_name: "Bot1", open_id: "ou_1" }, +} as const; + function makeRequestFn(response: Record) { return vi.fn().mockResolvedValue(response); } @@ -18,6 +34,64 @@ function setupClient(response: Record) { return requestFn; } +function setupSuccessClient() { + return setupClient(DEFAULT_SUCCESS_RESPONSE); +} + +async function expectDefaultSuccessResult( + creds = DEFAULT_CREDS, + expected: Awaited> = DEFAULT_SUCCESS_RESULT, +) { + const result = await probeFeishu(creds); + expect(result).toEqual(expected); +} + +async function withFakeTimers(run: () => Promise) { + vi.useFakeTimers(); + try { + await run(); + } finally { + vi.useRealTimers(); + } +} + +async function expectErrorResultCached(params: { + requestFn: ReturnType; + expectedError: string; + ttlMs: number; +}) { 
+ createFeishuClientMock.mockReturnValue({ request: params.requestFn }); + + const first = await probeFeishu(DEFAULT_CREDS); + const second = await probeFeishu(DEFAULT_CREDS); + expect(first).toMatchObject({ ok: false, error: params.expectedError }); + expect(second).toMatchObject({ ok: false, error: params.expectedError }); + expect(params.requestFn).toHaveBeenCalledTimes(1); + + vi.advanceTimersByTime(params.ttlMs + 1); + + await probeFeishu(DEFAULT_CREDS); + expect(params.requestFn).toHaveBeenCalledTimes(2); +} + +async function expectFreshDefaultProbeAfter( + requestFn: ReturnType, + invalidate: () => void, +) { + await probeFeishu(DEFAULT_CREDS); + expect(requestFn).toHaveBeenCalledTimes(1); + + invalidate(); + + await probeFeishu(DEFAULT_CREDS); + expect(requestFn).toHaveBeenCalledTimes(2); +} + +async function readSequentialDefaultProbePair() { + const first = await probeFeishu(DEFAULT_CREDS); + return { first, second: await probeFeishu(DEFAULT_CREDS) }; +} + describe("probeFeishu", () => { beforeEach(() => { clearProbeCache(); @@ -44,28 +118,16 @@ describe("probeFeishu", () => { }); it("returns bot info on successful probe", async () => { - const requestFn = setupClient({ - code: 0, - bot: { bot_name: "TestBot", open_id: "ou_abc123" }, - }); + const requestFn = setupSuccessClient(); - const result = await probeFeishu({ appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret - expect(result).toEqual({ - ok: true, - appId: "cli_123", - botName: "TestBot", - botOpenId: "ou_abc123", - }); + await expectDefaultSuccessResult(); expect(requestFn).toHaveBeenCalledTimes(1); }); it("passes the probe timeout to the Feishu request", async () => { - const requestFn = setupClient({ - code: 0, - bot: { bot_name: "TestBot", open_id: "ou_abc123" }, - }); + const requestFn = setupSuccessClient(); - await probeFeishu({ appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret + await probeFeishu(DEFAULT_CREDS); expect(requestFn).toHaveBeenCalledWith( 
expect.objectContaining({ @@ -77,19 +139,16 @@ describe("probeFeishu", () => { }); it("returns timeout error when request exceeds timeout", async () => { - vi.useFakeTimers(); - try { + await withFakeTimers(async () => { const requestFn = vi.fn().mockImplementation(() => new Promise(() => {})); createFeishuClientMock.mockReturnValue({ request: requestFn }); - const promise = probeFeishu({ appId: "cli_123", appSecret: "secret" }, { timeoutMs: 1_000 }); + const promise = probeFeishu(DEFAULT_CREDS, { timeoutMs: 1_000 }); await vi.advanceTimersByTimeAsync(1_000); const result = await promise; expect(result).toMatchObject({ ok: false, error: "probe timed out after 1000ms" }); - } finally { - vi.useRealTimers(); - } + }); }); it("returns aborted when abort signal is already aborted", async () => { @@ -106,14 +165,9 @@ describe("probeFeishu", () => { expect(createFeishuClientMock).not.toHaveBeenCalled(); }); it("returns cached result on subsequent calls within TTL", async () => { - const requestFn = setupClient({ - code: 0, - bot: { bot_name: "TestBot", open_id: "ou_abc123" }, - }); + const requestFn = setupSuccessClient(); - const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret - const first = await probeFeishu(creds); - const second = await probeFeishu(creds); + const { first, second } = await readSequentialDefaultProbePair(); expect(first).toEqual(second); // Only one API call should have been made @@ -121,76 +175,37 @@ describe("probeFeishu", () => { }); it("makes a fresh API call after cache expires", async () => { - vi.useFakeTimers(); - try { - const requestFn = setupClient({ - code: 0, - bot: { bot_name: "TestBot", open_id: "ou_abc123" }, + await withFakeTimers(async () => { + const requestFn = setupSuccessClient(); + + await expectFreshDefaultProbeAfter(requestFn, () => { + vi.advanceTimersByTime(10 * 60 * 1000 + 1); }); - - const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret - await 
probeFeishu(creds); - expect(requestFn).toHaveBeenCalledTimes(1); - - // Advance time past the success TTL - vi.advanceTimersByTime(10 * 60 * 1000 + 1); - - await probeFeishu(creds); - expect(requestFn).toHaveBeenCalledTimes(2); - } finally { - vi.useRealTimers(); - } + }); }); it("caches failed probe results (API error) for the error TTL", async () => { - vi.useFakeTimers(); - try { - const requestFn = makeRequestFn({ code: 99, msg: "token expired" }); - createFeishuClientMock.mockReturnValue({ request: requestFn }); - - const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret - const first = await probeFeishu(creds); - const second = await probeFeishu(creds); - expect(first).toMatchObject({ ok: false, error: "API error: token expired" }); - expect(second).toMatchObject({ ok: false, error: "API error: token expired" }); - expect(requestFn).toHaveBeenCalledTimes(1); - - vi.advanceTimersByTime(60 * 1000 + 1); - - await probeFeishu(creds); - expect(requestFn).toHaveBeenCalledTimes(2); - } finally { - vi.useRealTimers(); - } + await withFakeTimers(async () => { + await expectErrorResultCached({ + requestFn: makeRequestFn({ code: 99, msg: "token expired" }), + expectedError: "API error: token expired", + ttlMs: 60 * 1000, + }); + }); }); it("caches thrown request errors for the error TTL", async () => { - vi.useFakeTimers(); - try { - const requestFn = vi.fn().mockRejectedValue(new Error("network error")); - createFeishuClientMock.mockReturnValue({ request: requestFn }); - - const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret - const first = await probeFeishu(creds); - const second = await probeFeishu(creds); - expect(first).toMatchObject({ ok: false, error: "network error" }); - expect(second).toMatchObject({ ok: false, error: "network error" }); - expect(requestFn).toHaveBeenCalledTimes(1); - - vi.advanceTimersByTime(60 * 1000 + 1); - - await probeFeishu(creds); - expect(requestFn).toHaveBeenCalledTimes(2); 
- } finally { - vi.useRealTimers(); - } + await withFakeTimers(async () => { + await expectErrorResultCached({ + requestFn: vi.fn().mockRejectedValue(new Error("network error")), + expectedError: "network error", + ttlMs: 60 * 1000, + }); + }); }); it("caches per account independently", async () => { - const requestFn = setupClient({ - code: 0, - bot: { bot_name: "Bot1", open_id: "ou_1" }, - }); + const requestFn = setupClient(BOT1_RESPONSE); await probeFeishu({ appId: "cli_aaa", appSecret: "s1" }); // pragma: allowlist secret expect(requestFn).toHaveBeenCalledTimes(1); @@ -205,10 +220,7 @@ describe("probeFeishu", () => { }); it("does not share cache between accounts with same appId but different appSecret", async () => { - const requestFn = setupClient({ - code: 0, - bot: { bot_name: "Bot1", open_id: "ou_1" }, - }); + const requestFn = setupClient(BOT1_RESPONSE); // First account with appId + secret A await probeFeishu({ appId: "cli_shared", appSecret: "secret_aaa" }); // pragma: allowlist secret @@ -221,10 +233,7 @@ describe("probeFeishu", () => { }); it("uses accountId for cache key when available", async () => { - const requestFn = setupClient({ - code: 0, - bot: { bot_name: "Bot1", open_id: "ou_1" }, - }); + const requestFn = setupClient(BOT1_RESPONSE); // Two accounts with same appId+appSecret but different accountIds are cached separately await probeFeishu({ accountId: "acct-1", appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret @@ -239,19 +248,11 @@ describe("probeFeishu", () => { }); it("clearProbeCache forces fresh API call", async () => { - const requestFn = setupClient({ - code: 0, - bot: { bot_name: "TestBot", open_id: "ou_abc123" }, + const requestFn = setupSuccessClient(); + + await expectFreshDefaultProbeAfter(requestFn, () => { + clearProbeCache(); }); - - const creds = { appId: "cli_123", appSecret: "secret" }; // pragma: allowlist secret - await probeFeishu(creds); - expect(requestFn).toHaveBeenCalledTimes(1); - - 
clearProbeCache(); - - await probeFeishu(creds); - expect(requestFn).toHaveBeenCalledTimes(2); }); it("handles response.data.bot fallback path", async () => { @@ -260,10 +261,8 @@ describe("probeFeishu", () => { data: { bot: { bot_name: "DataBot", open_id: "ou_data" } }, }); - const result = await probeFeishu({ appId: "cli_123", appSecret: "secret" }); // pragma: allowlist secret - expect(result).toEqual({ - ok: true, - appId: "cli_123", + await expectDefaultSuccessResult(DEFAULT_CREDS, { + ...DEFAULT_SUCCESS_RESULT, botName: "DataBot", botOpenId: "ou_data", }); diff --git a/extensions/feishu/src/reactions.ts b/extensions/feishu/src/reactions.ts index d446a674b88..951b3d03c6b 100644 --- a/extensions/feishu/src/reactions.ts +++ b/extensions/feishu/src/reactions.ts @@ -9,6 +9,20 @@ export type FeishuReaction = { operatorId: string; }; +function resolveConfiguredFeishuClient(params: { cfg: ClawdbotConfig; accountId?: string }) { + const account = resolveFeishuAccount(params); + if (!account.configured) { + throw new Error(`Feishu account "${account.accountId}" not configured`); + } + return createFeishuClient(account); +} + +function assertFeishuReactionApiSuccess(response: { code?: number; msg?: string }, action: string) { + if (response.code !== 0) { + throw new Error(`Feishu ${action} failed: ${response.msg || `code ${response.code}`}`); + } +} + /** * Add a reaction (emoji) to a message. 
* @param emojiType - Feishu emoji type, e.g., "SMILE", "THUMBSUP", "HEART" @@ -21,12 +35,7 @@ export async function addReactionFeishu(params: { accountId?: string; }): Promise<{ reactionId: string }> { const { cfg, messageId, emojiType, accountId } = params; - const account = resolveFeishuAccount({ cfg, accountId }); - if (!account.configured) { - throw new Error(`Feishu account "${account.accountId}" not configured`); - } - - const client = createFeishuClient(account); + const client = resolveConfiguredFeishuClient({ cfg, accountId }); const response = (await client.im.messageReaction.create({ path: { message_id: messageId }, @@ -41,9 +50,7 @@ export async function addReactionFeishu(params: { data?: { reaction_id?: string }; }; - if (response.code !== 0) { - throw new Error(`Feishu add reaction failed: ${response.msg || `code ${response.code}`}`); - } + assertFeishuReactionApiSuccess(response, "add reaction"); const reactionId = response.data?.reaction_id; if (!reactionId) { @@ -63,12 +70,7 @@ export async function removeReactionFeishu(params: { accountId?: string; }): Promise { const { cfg, messageId, reactionId, accountId } = params; - const account = resolveFeishuAccount({ cfg, accountId }); - if (!account.configured) { - throw new Error(`Feishu account "${account.accountId}" not configured`); - } - - const client = createFeishuClient(account); + const client = resolveConfiguredFeishuClient({ cfg, accountId }); const response = (await client.im.messageReaction.delete({ path: { @@ -77,9 +79,7 @@ export async function removeReactionFeishu(params: { }, })) as { code?: number; msg?: string }; - if (response.code !== 0) { - throw new Error(`Feishu remove reaction failed: ${response.msg || `code ${response.code}`}`); - } + assertFeishuReactionApiSuccess(response, "remove reaction"); } /** @@ -92,12 +92,7 @@ export async function listReactionsFeishu(params: { accountId?: string; }): Promise { const { cfg, messageId, emojiType, accountId } = params; - const account = 
resolveFeishuAccount({ cfg, accountId }); - if (!account.configured) { - throw new Error(`Feishu account "${account.accountId}" not configured`); - } - - const client = createFeishuClient(account); + const client = resolveConfiguredFeishuClient({ cfg, accountId }); const response = (await client.im.messageReaction.list({ path: { message_id: messageId }, @@ -115,9 +110,7 @@ export async function listReactionsFeishu(params: { }; }; - if (response.code !== 0) { - throw new Error(`Feishu list reactions failed: ${response.msg || `code ${response.code}`}`); - } + assertFeishuReactionApiSuccess(response, "list reactions"); const items = response.data?.items ?? []; return items.map((item) => ({ diff --git a/extensions/feishu/src/reply-dispatcher.test.ts b/extensions/feishu/src/reply-dispatcher.test.ts index 744532320de..10b829857a1 100644 --- a/extensions/feishu/src/reply-dispatcher.test.ts +++ b/extensions/feishu/src/reply-dispatcher.test.ts @@ -25,44 +25,33 @@ vi.mock("./typing.js", () => ({ addTypingIndicator: addTypingIndicatorMock, removeTypingIndicator: removeTypingIndicatorMock, })); -vi.mock("./streaming-card.js", () => ({ - mergeStreamingText: (previousText: string | undefined, nextText: string | undefined) => { - const previous = typeof previousText === "string" ? previousText : ""; - const next = typeof nextText === "string" ? 
nextText : ""; - if (!next) { - return previous; - } - if (!previous || next === previous) { - return next; - } - if (next.startsWith(previous)) { - return next; - } - if (previous.startsWith(next)) { - return previous; - } - return `${previous}${next}`; - }, - FeishuStreamingSession: class { - active = false; - start = vi.fn(async () => { - this.active = true; - }); - update = vi.fn(async () => {}); - close = vi.fn(async () => { - this.active = false; - }); - isActive = vi.fn(() => this.active); +vi.mock("./streaming-card.js", async () => { + const actual = await vi.importActual("./streaming-card.js"); + return { + mergeStreamingText: actual.mergeStreamingText, + FeishuStreamingSession: class { + active = false; + start = vi.fn(async () => { + this.active = true; + }); + update = vi.fn(async () => {}); + close = vi.fn(async () => { + this.active = false; + }); + isActive = vi.fn(() => this.active); - constructor() { - streamingInstances.push(this); - } - }, -})); + constructor() { + streamingInstances.push(this); + } + }, + }; +}); import { createFeishuReplyDispatcher } from "./reply-dispatcher.js"; describe("createFeishuReplyDispatcher streaming behavior", () => { + type ReplyDispatcherArgs = Parameters[0]; + beforeEach(() => { vi.clearAllMocks(); streamingInstances.length = 0; @@ -128,6 +117,25 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { return createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; } + function createRuntimeLogger() { + return { log: vi.fn(), error: vi.fn() } as never; + } + + function createDispatcherHarness(overrides: Partial = {}) { + const result = createFeishuReplyDispatcher({ + cfg: {} as never, + agentId: "agent", + runtime: {} as never, + chatId: "oc_chat", + ...overrides, + }); + + return { + result, + options: createReplyDispatcherWithTypingMock.mock.calls.at(-1)?.[0], + }; + } + it("skips typing indicator when account typingIndicator is disabled", async () => { resolveFeishuAccountMock.mockReturnValue({ 
accountId: "main", @@ -209,14 +217,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("keeps auto mode plain text on non-streaming send path", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: {} as never, - chatId: "oc_chat", - }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; + const { options } = createDispatcherHarness(); await options.deliver({ text: "plain text" }, { kind: "final" }); expect(streamingInstances).toHaveLength(0); @@ -225,14 +226,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("suppresses internal block payload delivery", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: {} as never, - chatId: "oc_chat", - }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; + const { options } = createDispatcherHarness(); await options.deliver({ text: "internal reasoning chunk" }, { kind: "block" }); expect(streamingInstances).toHaveLength(0); @@ -253,15 +247,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("uses streaming session for auto mode markdown payloads", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: { log: vi.fn(), error: vi.fn() } as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ + runtime: createRuntimeLogger(), rootId: "om_root_topic", }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" }); expect(streamingInstances).toHaveLength(1); @@ -277,14 +266,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("closes streaming with block text when final reply is missing", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: { log: vi.fn(), error: vi.fn() } as never, - 
chatId: "oc_chat", + const { options } = createDispatcherHarness({ + runtime: createRuntimeLogger(), }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ text: "```md\npartial answer\n```" }, { kind: "block" }); await options.onIdle?.(); @@ -295,14 +279,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("delivers distinct final payloads after streaming close", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: { log: vi.fn(), error: vi.fn() } as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ + runtime: createRuntimeLogger(), }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ text: "```md\n完整回复第一段\n```" }, { kind: "final" }); await options.deliver({ text: "```md\n完整回复第一段 + 第二段\n```" }, { kind: "final" }); @@ -316,14 +295,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("skips exact duplicate final text after streaming close", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: { log: vi.fn(), error: vi.fn() } as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ + runtime: createRuntimeLogger(), }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ text: "```md\n同一条回复\n```" }, { kind: "final" }); await options.deliver({ text: "```md\n同一条回复\n```" }, { kind: "final" }); @@ -383,14 +357,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }, }); - const result = createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: { log: vi.fn(), error: vi.fn() } as never, - chatId: "oc_chat", + const { result, options } = createDispatcherHarness({ + runtime: createRuntimeLogger(), }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await 
options.onReplyStart?.(); await result.replyOptions.onPartialReply?.({ text: "hello" }); await options.deliver({ text: "lo world" }, { kind: "block" }); @@ -402,14 +371,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("sends media-only payloads as attachments", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: {} as never, - chatId: "oc_chat", - }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; + const { options } = createDispatcherHarness(); await options.deliver({ mediaUrl: "https://example.com/a.png" }, { kind: "final" }); expect(sendMediaFeishuMock).toHaveBeenCalledTimes(1); @@ -424,14 +386,7 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("falls back to legacy mediaUrl when mediaUrls is an empty array", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: {} as never, - chatId: "oc_chat", - }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; + const { options } = createDispatcherHarness(); await options.deliver( { text: "caption", mediaUrl: "https://example.com/a.png", mediaUrls: [] }, { kind: "final" }, @@ -447,14 +402,9 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("sends attachments after streaming final markdown replies", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: { log: vi.fn(), error: vi.fn() } as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ + runtime: createRuntimeLogger(), }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver( { text: "```ts\nconst x = 1\n```", mediaUrls: ["https://example.com/a.png"] }, { kind: "final" }, @@ -472,16 +422,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("passes replyInThread to sendMessageFeishu for plain text", async () => 
{ - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: {} as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ replyToMessageId: "om_msg", replyInThread: true, }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ text: "plain text" }, { kind: "final" }); expect(sendMessageFeishuMock).toHaveBeenCalledWith( @@ -504,16 +448,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }, }); - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: {} as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ replyToMessageId: "om_msg", replyInThread: true, }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ text: "card text" }, { kind: "final" }); expect(sendMarkdownCardFeishuMock).toHaveBeenCalledWith( @@ -525,16 +463,11 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("passes replyToMessageId and replyInThread to streaming.start()", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: { log: vi.fn(), error: vi.fn() } as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ + runtime: createRuntimeLogger(), replyToMessageId: "om_msg", replyInThread: true, }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" }); expect(streamingInstances).toHaveLength(1); @@ -545,18 +478,13 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("disables streaming for thread replies and keeps reply metadata", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: { log: vi.fn(), error: vi.fn() } as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ + runtime: createRuntimeLogger(), 
replyToMessageId: "om_msg", replyInThread: false, threadReply: true, rootId: "om_root_topic", }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ text: "```ts\nconst x = 1\n```" }, { kind: "final" }); expect(streamingInstances).toHaveLength(0); @@ -569,16 +497,10 @@ describe("createFeishuReplyDispatcher streaming behavior", () => { }); it("passes replyInThread to media attachments", async () => { - createFeishuReplyDispatcher({ - cfg: {} as never, - agentId: "agent", - runtime: {} as never, - chatId: "oc_chat", + const { options } = createDispatcherHarness({ replyToMessageId: "om_msg", replyInThread: true, }); - - const options = createReplyDispatcherWithTypingMock.mock.calls[0]?.[0]; await options.deliver({ mediaUrl: "https://example.com/a.png" }, { kind: "final" }); expect(sendMediaFeishuMock).toHaveBeenCalledWith( diff --git a/extensions/feishu/src/reply-dispatcher.ts b/extensions/feishu/src/reply-dispatcher.ts index 3bd1353825d..6f66ffffa58 100644 --- a/extensions/feishu/src/reply-dispatcher.ts +++ b/extensions/feishu/src/reply-dispatcher.ts @@ -224,6 +224,41 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP lastPartial = ""; }; + const sendChunkedTextReply = async (params: { + text: string; + useCard: boolean; + infoKind?: string; + }) => { + let first = true; + const chunkSource = params.useCard + ? params.text + : core.channel.text.convertMarkdownTables(params.text, tableMode); + for (const chunk of core.channel.text.chunkTextWithMode( + chunkSource, + textChunkLimit, + chunkMode, + )) { + const message = { + cfg, + to: chatId, + text: chunk, + replyToMessageId: sendReplyToMessageId, + replyInThread: effectiveReplyInThread, + mentions: first ? 
mentionTargets : undefined, + accountId, + }; + if (params.useCard) { + await sendMarkdownCardFeishu(message); + } else { + await sendMessageFeishu(message); + } + first = false; + } + if (params.infoKind === "final") { + deliveredFinalTexts.add(params.text); + } + }; + const { dispatcher, replyOptions, markDispatchIdle } = core.channel.reply.createReplyDispatcherWithTyping({ responsePrefix: prefixContext.responsePrefix, @@ -303,48 +338,10 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP return; } - let first = true; if (useCard) { - for (const chunk of core.channel.text.chunkTextWithMode( - text, - textChunkLimit, - chunkMode, - )) { - await sendMarkdownCardFeishu({ - cfg, - to: chatId, - text: chunk, - replyToMessageId: sendReplyToMessageId, - replyInThread: effectiveReplyInThread, - mentions: first ? mentionTargets : undefined, - accountId, - }); - first = false; - } - if (info?.kind === "final") { - deliveredFinalTexts.add(text); - } + await sendChunkedTextReply({ text, useCard: true, infoKind: info?.kind }); } else { - const converted = core.channel.text.convertMarkdownTables(text, tableMode); - for (const chunk of core.channel.text.chunkTextWithMode( - converted, - textChunkLimit, - chunkMode, - )) { - await sendMessageFeishu({ - cfg, - to: chatId, - text: chunk, - replyToMessageId: sendReplyToMessageId, - replyInThread: effectiveReplyInThread, - mentions: first ? 
mentionTargets : undefined, - accountId, - }); - first = false; - } - if (info?.kind === "final") { - deliveredFinalTexts.add(text); - } + await sendChunkedTextReply({ text, useCard: false, infoKind: info?.kind }); } } diff --git a/extensions/feishu/src/send.reply-fallback.test.ts b/extensions/feishu/src/send.reply-fallback.test.ts index 75dda353bbe..610ded167fd 100644 --- a/extensions/feishu/src/send.reply-fallback.test.ts +++ b/extensions/feishu/src/send.reply-fallback.test.ts @@ -25,6 +25,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => { const replyMock = vi.fn(); const createMock = vi.fn(); + async function expectFallbackResult( + send: () => Promise<{ messageId?: string }>, + expectedMessageId: string, + ) { + const result = await send(); + expect(replyMock).toHaveBeenCalledTimes(1); + expect(createMock).toHaveBeenCalledTimes(1); + expect(result.messageId).toBe(expectedMessageId); + } + beforeEach(() => { vi.clearAllMocks(); resolveFeishuSendTargetMock.mockReturnValue({ @@ -51,16 +61,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => { data: { message_id: "om_new" }, }); - const result = await sendMessageFeishu({ - cfg: {} as never, - to: "user:ou_target", - text: "hello", - replyToMessageId: "om_parent", - }); - - expect(replyMock).toHaveBeenCalledTimes(1); - expect(createMock).toHaveBeenCalledTimes(1); - expect(result.messageId).toBe("om_new"); + await expectFallbackResult( + () => + sendMessageFeishu({ + cfg: {} as never, + to: "user:ou_target", + text: "hello", + replyToMessageId: "om_parent", + }), + "om_new", + ); }); it("falls back to create for withdrawn card replies", async () => { @@ -73,16 +83,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => { data: { message_id: "om_card_new" }, }); - const result = await sendCardFeishu({ - cfg: {} as never, - to: "user:ou_target", - card: { schema: "2.0" }, - replyToMessageId: "om_parent", - }); - - 
expect(replyMock).toHaveBeenCalledTimes(1); - expect(createMock).toHaveBeenCalledTimes(1); - expect(result.messageId).toBe("om_card_new"); + await expectFallbackResult( + () => + sendCardFeishu({ + cfg: {} as never, + to: "user:ou_target", + card: { schema: "2.0" }, + replyToMessageId: "om_parent", + }), + "om_card_new", + ); }); it("still throws for non-withdrawn reply failures", async () => { @@ -111,16 +121,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => { data: { message_id: "om_thrown_fallback" }, }); - const result = await sendMessageFeishu({ - cfg: {} as never, - to: "user:ou_target", - text: "hello", - replyToMessageId: "om_parent", - }); - - expect(replyMock).toHaveBeenCalledTimes(1); - expect(createMock).toHaveBeenCalledTimes(1); - expect(result.messageId).toBe("om_thrown_fallback"); + await expectFallbackResult( + () => + sendMessageFeishu({ + cfg: {} as never, + to: "user:ou_target", + text: "hello", + replyToMessageId: "om_parent", + }), + "om_thrown_fallback", + ); }); it("falls back to create when card reply throws a not-found AxiosError", async () => { @@ -133,16 +143,16 @@ describe("Feishu reply fallback for withdrawn/deleted targets", () => { data: { message_id: "om_axios_fallback" }, }); - const result = await sendCardFeishu({ - cfg: {} as never, - to: "user:ou_target", - card: { schema: "2.0" }, - replyToMessageId: "om_parent", - }); - - expect(replyMock).toHaveBeenCalledTimes(1); - expect(createMock).toHaveBeenCalledTimes(1); - expect(result.messageId).toBe("om_axios_fallback"); + await expectFallbackResult( + () => + sendCardFeishu({ + cfg: {} as never, + to: "user:ou_target", + card: { schema: "2.0" }, + replyToMessageId: "om_parent", + }), + "om_axios_fallback", + ); }); it("re-throws non-withdrawn thrown errors for text messages", async () => { diff --git a/extensions/feishu/src/send.ts b/extensions/feishu/src/send.ts index 928ef07f949..5692edd32ff 100644 --- a/extensions/feishu/src/send.ts +++ 
b/extensions/feishu/src/send.ts @@ -7,7 +7,7 @@ import { parsePostContent } from "./post.js"; import { getFeishuRuntime } from "./runtime.js"; import { assertFeishuMessageApiSuccess, toFeishuSendResult } from "./send-result.js"; import { resolveFeishuSendTarget } from "./send-target.js"; -import type { FeishuSendResult } from "./types.js"; +import type { FeishuChatType, FeishuMessageInfo, FeishuSendResult } from "./types.js"; const WITHDRAWN_REPLY_ERROR_CODES = new Set([230011, 231003]); @@ -43,6 +43,10 @@ function isWithdrawnReplyError(err: unknown): boolean { type FeishuCreateMessageClient = { im: { message: { + reply: (opts: { + path: { message_id: string }; + data: { content: string; msg_type: string; reply_in_thread?: true }; + }) => Promise<{ code?: number; msg?: string; data?: { message_id?: string } }>; create: (opts: { params: { receive_id_type: "chat_id" | "email" | "open_id" | "union_id" | "user_id" }; data: { receive_id: string; content: string; msg_type: string }; @@ -51,6 +55,30 @@ type FeishuCreateMessageClient = { }; }; +type FeishuMessageSender = { + id?: string; + id_type?: string; + sender_type?: string; +}; + +type FeishuMessageGetItem = { + message_id?: string; + chat_id?: string; + chat_type?: FeishuChatType; + msg_type?: string; + body?: { content?: string }; + sender?: FeishuMessageSender; + create_time?: string; +}; + +type FeishuGetMessageResponse = { + code?: number; + msg?: string; + data?: FeishuMessageGetItem & { + items?: FeishuMessageGetItem[]; + }; +}; + /** Send a direct message as a fallback when a reply target is unavailable. 
*/ async function sendFallbackDirect( client: FeishuCreateMessageClient, @@ -74,16 +102,49 @@ async function sendFallbackDirect( return toFeishuSendResult(response, params.receiveId); } -export type FeishuMessageInfo = { - messageId: string; - chatId: string; - senderId?: string; - senderOpenId?: string; - senderType?: string; - content: string; - contentType: string; - createTime?: number; -}; +async function sendReplyOrFallbackDirect( + client: FeishuCreateMessageClient, + params: { + replyToMessageId?: string; + replyInThread?: boolean; + content: string; + msgType: string; + directParams: { + receiveId: string; + receiveIdType: "chat_id" | "email" | "open_id" | "union_id" | "user_id"; + content: string; + msgType: string; + }; + directErrorPrefix: string; + replyErrorPrefix: string; + }, +): Promise { + if (!params.replyToMessageId) { + return sendFallbackDirect(client, params.directParams, params.directErrorPrefix); + } + + let response: { code?: number; msg?: string; data?: { message_id?: string } }; + try { + response = await client.im.message.reply({ + path: { message_id: params.replyToMessageId }, + data: { + content: params.content, + msg_type: params.msgType, + ...(params.replyInThread ? 
{ reply_in_thread: true } : {}), + }, + }); + } catch (err) { + if (!isWithdrawnReplyError(err)) { + throw err; + } + return sendFallbackDirect(client, params.directParams, params.directErrorPrefix); + } + if (shouldFallbackFromReplyTarget(response)) { + return sendFallbackDirect(client, params.directParams, params.directErrorPrefix); + } + assertFeishuMessageApiSuccess(response, params.replyErrorPrefix); + return toFeishuSendResult(response, params.directParams.receiveId); +} function parseInteractiveCardContent(parsed: unknown): string { if (!parsed || typeof parsed !== "object") { @@ -177,34 +238,7 @@ export async function getMessageFeishu(params: { try { const response = (await client.im.message.get({ path: { message_id: messageId }, - })) as { - code?: number; - msg?: string; - data?: { - items?: Array<{ - message_id?: string; - chat_id?: string; - msg_type?: string; - body?: { content?: string }; - sender?: { - id?: string; - id_type?: string; - sender_type?: string; - }; - create_time?: string; - }>; - message_id?: string; - chat_id?: string; - msg_type?: string; - body?: { content?: string }; - sender?: { - id?: string; - id_type?: string; - sender_type?: string; - }; - create_time?: string; - }; - }; + })) as FeishuGetMessageResponse; if (response.code !== 0) { return null; @@ -228,6 +262,10 @@ export async function getMessageFeishu(params: { return { messageId: item.message_id ?? messageId, chatId: item.chat_id ?? "", + chatType: + item.chat_type === "group" || item.chat_type === "private" || item.chat_type === "p2p" + ? item.chat_type + : undefined, senderId: item.sender?.id, senderOpenId: item.sender?.id_type === "open_id" ? 
item.sender?.id : undefined, senderType: item.sender?.sender_type, @@ -295,32 +333,15 @@ export async function sendMessageFeishu( const { content, msgType } = buildFeishuPostMessagePayload({ messageText }); const directParams = { receiveId, receiveIdType, content, msgType }; - - if (replyToMessageId) { - let response: { code?: number; msg?: string; data?: { message_id?: string } }; - try { - response = await client.im.message.reply({ - path: { message_id: replyToMessageId }, - data: { - content, - msg_type: msgType, - ...(replyInThread ? { reply_in_thread: true } : {}), - }, - }); - } catch (err) { - if (!isWithdrawnReplyError(err)) { - throw err; - } - return sendFallbackDirect(client, directParams, "Feishu send failed"); - } - if (shouldFallbackFromReplyTarget(response)) { - return sendFallbackDirect(client, directParams, "Feishu send failed"); - } - assertFeishuMessageApiSuccess(response, "Feishu reply failed"); - return toFeishuSendResult(response, receiveId); - } - - return sendFallbackDirect(client, directParams, "Feishu send failed"); + return sendReplyOrFallbackDirect(client, { + replyToMessageId, + replyInThread, + content, + msgType, + directParams, + directErrorPrefix: "Feishu send failed", + replyErrorPrefix: "Feishu reply failed", + }); } export type SendFeishuCardParams = { @@ -339,32 +360,15 @@ export async function sendCardFeishu(params: SendFeishuCardParams): Promise & { appId?: string; botName?: string; diff --git a/extensions/google-gemini-cli-auth/oauth.test.ts b/extensions/google-gemini-cli-auth/oauth.test.ts index 1471f804771..02100b73b1f 100644 --- a/extensions/google-gemini-cli-auth/oauth.test.ts +++ b/extensions/google-gemini-cli-auth/oauth.test.ts @@ -144,6 +144,13 @@ describe("extractGeminiCliCredentials", () => { } } + function expectFakeCliCredentials(result: unknown) { + expect(result).toEqual({ + clientId: FAKE_CLIENT_ID, + clientSecret: FAKE_CLIENT_SECRET, + }); + } + beforeEach(async () => { vi.clearAllMocks(); originalPath = 
process.env.PATH; @@ -169,10 +176,7 @@ describe("extractGeminiCliCredentials", () => { clearCredentialsCache(); const result = extractGeminiCliCredentials(); - expect(result).toEqual({ - clientId: FAKE_CLIENT_ID, - clientSecret: FAKE_CLIENT_SECRET, - }); + expectFakeCliCredentials(result); }); it("extracts credentials when PATH entry is an npm global shim", async () => { @@ -182,10 +186,7 @@ describe("extractGeminiCliCredentials", () => { clearCredentialsCache(); const result = extractGeminiCliCredentials(); - expect(result).toEqual({ - clientId: FAKE_CLIENT_ID, - clientSecret: FAKE_CLIENT_SECRET, - }); + expectFakeCliCredentials(result); }); it("returns null when oauth2.js cannot be found", async () => { @@ -274,16 +275,16 @@ describe("loginGeminiCliOAuth", () => { }); } - async function runRemoteLoginWithCapturedAuthUrl( - loginGeminiCliOAuth: (options: { - isRemote: boolean; - openUrl: () => Promise; - log: (msg: string) => void; - note: () => Promise; - prompt: () => Promise; - progress: { update: () => void; stop: () => void }; - }) => Promise<{ projectId: string }>, - ) { + type LoginGeminiCliOAuthFn = (options: { + isRemote: boolean; + openUrl: () => Promise; + log: (msg: string) => void; + note: () => Promise; + prompt: () => Promise; + progress: { update: () => void; stop: () => void }; + }) => Promise<{ projectId: string }>; + + async function runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth: LoginGeminiCliOAuthFn) { let authUrl = ""; const result = await loginGeminiCliOAuth({ isRemote: true, @@ -304,6 +305,14 @@ describe("loginGeminiCliOAuth", () => { return { result, authUrl }; } + async function runRemoteLoginExpectingProjectId( + loginGeminiCliOAuth: LoginGeminiCliOAuthFn, + projectId: string, + ) { + const { result } = await runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth); + expect(result.projectId).toBe(projectId); + } + let envSnapshot: Partial>; beforeEach(() => { envSnapshot = Object.fromEntries(ENV_KEYS.map((key) => [key, 
process.env[key]])); @@ -357,9 +366,7 @@ describe("loginGeminiCliOAuth", () => { vi.stubGlobal("fetch", fetchMock); const { loginGeminiCliOAuth } = await import("./oauth.js"); - const { result } = await runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth); - - expect(result.projectId).toBe("daily-project"); + await runRemoteLoginExpectingProjectId(loginGeminiCliOAuth, "daily-project"); const loadRequests = requests.filter((request) => request.url.includes("v1internal:loadCodeAssist"), ); @@ -414,9 +421,7 @@ describe("loginGeminiCliOAuth", () => { vi.stubGlobal("fetch", fetchMock); const { loginGeminiCliOAuth } = await import("./oauth.js"); - const { result } = await runRemoteLoginWithCapturedAuthUrl(loginGeminiCliOAuth); - - expect(result.projectId).toBe("env-project"); + await runRemoteLoginExpectingProjectId(loginGeminiCliOAuth, "env-project"); expect(requests.filter((url) => url.includes("v1internal:loadCodeAssist"))).toHaveLength(3); expect(requests.some((url) => url.includes("v1internal:onboardUser"))).toBe(false); }); diff --git a/extensions/google-gemini-cli-auth/package.json b/extensions/google-gemini-cli-auth/package.json index 2ab1c6a6ca8..61ae5be803c 100644 --- a/extensions/google-gemini-cli-auth/package.json +++ b/extensions/google-gemini-cli-auth/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/google-gemini-cli-auth", - "version": "2026.3.9", + "version": "2026.3.14", "private": true, "description": "OpenClaw Gemini CLI OAuth provider plugin", "type": "module", diff --git a/extensions/googlechat/package.json b/extensions/googlechat/package.json index 2abe2abbe38..3514ac52b90 100644 --- a/extensions/googlechat/package.json +++ b/extensions/googlechat/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/googlechat", - "version": "2026.3.9", + "version": "2026.3.14", "private": true, "description": "OpenClaw Google Chat channel plugin", "type": "module", @@ -8,7 +8,7 @@ "google-auth-library": "^10.6.1" }, "peerDependencies": { - "openclaw": ">=2026.3.2" 
+ "openclaw": ">=2026.3.11" }, "peerDependenciesMeta": { "openclaw": { diff --git a/extensions/googlechat/src/api.test.ts b/extensions/googlechat/src/api.test.ts index fc011268ec2..81312d39820 100644 --- a/extensions/googlechat/src/api.test.ts +++ b/extensions/googlechat/src/api.test.ts @@ -13,6 +13,21 @@ const account = { config: {}, } as ResolvedGoogleChatAccount; +function stubSuccessfulSend(name: string) { + const fetchMock = vi + .fn() + .mockResolvedValue(new Response(JSON.stringify({ name }), { status: 200 })); + vi.stubGlobal("fetch", fetchMock); + return fetchMock; +} + +async function expectDownloadToRejectForResponse(response: Response) { + vi.stubGlobal("fetch", vi.fn().mockResolvedValue(response)); + await expect( + downloadGoogleChatMedia({ account, resourceName: "media/123", maxBytes: 10 }), + ).rejects.toThrow(/max bytes/i); +} + describe("downloadGoogleChatMedia", () => { afterEach(() => { vi.unstubAllGlobals(); @@ -29,11 +44,7 @@ describe("downloadGoogleChatMedia", () => { status: 200, headers: { "content-length": "50", "content-type": "application/octet-stream" }, }); - vi.stubGlobal("fetch", vi.fn().mockResolvedValue(response)); - - await expect( - downloadGoogleChatMedia({ account, resourceName: "media/123", maxBytes: 10 }), - ).rejects.toThrow(/max bytes/i); + await expectDownloadToRejectForResponse(response); }); it("rejects when streamed payload exceeds max bytes", async () => { @@ -52,11 +63,7 @@ describe("downloadGoogleChatMedia", () => { status: 200, headers: { "content-type": "application/octet-stream" }, }); - vi.stubGlobal("fetch", vi.fn().mockResolvedValue(response)); - - await expect( - downloadGoogleChatMedia({ account, resourceName: "media/123", maxBytes: 10 }), - ).rejects.toThrow(/max bytes/i); + await expectDownloadToRejectForResponse(response); }); }); @@ -66,12 +73,7 @@ describe("sendGoogleChatMessage", () => { }); it("adds messageReplyOption when sending to an existing thread", async () => { - const fetchMock = vi - .fn() - 
.mockResolvedValue( - new Response(JSON.stringify({ name: "spaces/AAA/messages/123" }), { status: 200 }), - ); - vi.stubGlobal("fetch", fetchMock); + const fetchMock = stubSuccessfulSend("spaces/AAA/messages/123"); await sendGoogleChatMessage({ account, @@ -89,12 +91,7 @@ describe("sendGoogleChatMessage", () => { }); it("does not set messageReplyOption for non-thread sends", async () => { - const fetchMock = vi - .fn() - .mockResolvedValue( - new Response(JSON.stringify({ name: "spaces/AAA/messages/124" }), { status: 200 }), - ); - vi.stubGlobal("fetch", fetchMock); + const fetchMock = stubSuccessfulSend("spaces/AAA/messages/124"); await sendGoogleChatMessage({ account, diff --git a/extensions/googlechat/src/api.ts b/extensions/googlechat/src/api.ts index 7c4f26b8db9..d9c7b666ff0 100644 --- a/extensions/googlechat/src/api.ts +++ b/extensions/googlechat/src/api.ts @@ -14,70 +14,24 @@ const headersToObject = (headers?: HeadersInit): Record => ? Object.fromEntries(headers) : headers || {}; -async function fetchJson( - account: ResolvedGoogleChatAccount, - url: string, - init: RequestInit, -): Promise { - const token = await getGoogleChatAccessToken(account); - const { response: res, release } = await fetchWithSsrFGuard({ +async function withGoogleChatResponse(params: { + account: ResolvedGoogleChatAccount; + url: string; + init?: RequestInit; + auditContext: string; + errorPrefix?: string; + handleResponse: (response: Response) => Promise; +}): Promise { + const { + account, url, - init: { - ...init, - headers: { - ...headersToObject(init.headers), - Authorization: `Bearer ${token}`, - "Content-Type": "application/json", - }, - }, - auditContext: "googlechat.api.json", - }); - try { - if (!res.ok) { - const text = await res.text().catch(() => ""); - throw new Error(`Google Chat API ${res.status}: ${text || res.statusText}`); - } - return (await res.json()) as T; - } finally { - await release(); - } -} - -async function fetchOk( - account: ResolvedGoogleChatAccount, - 
url: string, - init: RequestInit, -): Promise { + init, + auditContext, + errorPrefix = "Google Chat API", + handleResponse, + } = params; const token = await getGoogleChatAccessToken(account); - const { response: res, release } = await fetchWithSsrFGuard({ - url, - init: { - ...init, - headers: { - ...headersToObject(init.headers), - Authorization: `Bearer ${token}`, - }, - }, - auditContext: "googlechat.api.ok", - }); - try { - if (!res.ok) { - const text = await res.text().catch(() => ""); - throw new Error(`Google Chat API ${res.status}: ${text || res.statusText}`); - } - } finally { - await release(); - } -} - -async function fetchBuffer( - account: ResolvedGoogleChatAccount, - url: string, - init?: RequestInit, - options?: { maxBytes?: number }, -): Promise<{ buffer: Buffer; contentType?: string }> { - const token = await getGoogleChatAccessToken(account); - const { response: res, release } = await fetchWithSsrFGuard({ + const { response, release } = await fetchWithSsrFGuard({ url, init: { ...init, @@ -86,52 +40,103 @@ async function fetchBuffer( Authorization: `Bearer ${token}`, }, }, - auditContext: "googlechat.api.buffer", + auditContext, }); try { - if (!res.ok) { - const text = await res.text().catch(() => ""); - throw new Error(`Google Chat API ${res.status}: ${text || res.statusText}`); + if (!response.ok) { + const text = await response.text().catch(() => ""); + throw new Error(`${errorPrefix} ${response.status}: ${text || response.statusText}`); } - const maxBytes = options?.maxBytes; - const lengthHeader = res.headers.get("content-length"); - if (maxBytes && lengthHeader) { - const length = Number(lengthHeader); - if (Number.isFinite(length) && length > maxBytes) { - throw new Error(`Google Chat media exceeds max bytes (${maxBytes})`); - } - } - if (!maxBytes || !res.body) { - const buffer = Buffer.from(await res.arrayBuffer()); - const contentType = res.headers.get("content-type") ?? 
undefined; - return { buffer, contentType }; - } - const reader = res.body.getReader(); - const chunks: Buffer[] = []; - let total = 0; - while (true) { - const { done, value } = await reader.read(); - if (done) { - break; - } - if (!value) { - continue; - } - total += value.length; - if (total > maxBytes) { - await reader.cancel(); - throw new Error(`Google Chat media exceeds max bytes (${maxBytes})`); - } - chunks.push(Buffer.from(value)); - } - const buffer = Buffer.concat(chunks, total); - const contentType = res.headers.get("content-type") ?? undefined; - return { buffer, contentType }; + return await handleResponse(response); } finally { await release(); } } +async function fetchJson( + account: ResolvedGoogleChatAccount, + url: string, + init: RequestInit, +): Promise { + return await withGoogleChatResponse({ + account, + url, + init: { + ...init, + headers: { + ...headersToObject(init.headers), + "Content-Type": "application/json", + }, + }, + auditContext: "googlechat.api.json", + handleResponse: async (response) => (await response.json()) as T, + }); +} + +async function fetchOk( + account: ResolvedGoogleChatAccount, + url: string, + init: RequestInit, +): Promise { + await withGoogleChatResponse({ + account, + url, + init, + auditContext: "googlechat.api.ok", + handleResponse: async () => undefined, + }); +} + +async function fetchBuffer( + account: ResolvedGoogleChatAccount, + url: string, + init?: RequestInit, + options?: { maxBytes?: number }, +): Promise<{ buffer: Buffer; contentType?: string }> { + return await withGoogleChatResponse({ + account, + url, + init, + auditContext: "googlechat.api.buffer", + handleResponse: async (res) => { + const maxBytes = options?.maxBytes; + const lengthHeader = res.headers.get("content-length"); + if (maxBytes && lengthHeader) { + const length = Number(lengthHeader); + if (Number.isFinite(length) && length > maxBytes) { + throw new Error(`Google Chat media exceeds max bytes (${maxBytes})`); + } + } + if (!maxBytes 
|| !res.body) { + const buffer = Buffer.from(await res.arrayBuffer()); + const contentType = res.headers.get("content-type") ?? undefined; + return { buffer, contentType }; + } + const reader = res.body.getReader(); + const chunks: Buffer[] = []; + let total = 0; + while (true) { + const { done, value } = await reader.read(); + if (done) { + break; + } + if (!value) { + continue; + } + total += value.length; + if (total > maxBytes) { + await reader.cancel(); + throw new Error(`Google Chat media exceeds max bytes (${maxBytes})`); + } + chunks.push(Buffer.from(value)); + } + const buffer = Buffer.concat(chunks, total); + const contentType = res.headers.get("content-type") ?? undefined; + return { buffer, contentType }; + }, + }); +} + export async function sendGoogleChatMessage(params: { account: ResolvedGoogleChatAccount; space: string; @@ -208,34 +213,29 @@ export async function uploadGoogleChatAttachment(params: { Buffer.from(footer, "utf8"), ]); - const token = await getGoogleChatAccessToken(account); const url = `${CHAT_UPLOAD_BASE}/${space}/attachments:upload?uploadType=multipart`; - const { response: res, release } = await fetchWithSsrFGuard({ + const payload = await withGoogleChatResponse<{ + attachmentDataRef?: { attachmentUploadToken?: string }; + }>({ + account, url, init: { method: "POST", headers: { - Authorization: `Bearer ${token}`, "Content-Type": `multipart/related; boundary=${boundary}`, }, body, }, auditContext: "googlechat.upload", + errorPrefix: "Google Chat upload", + handleResponse: async (response) => + (await response.json()) as { + attachmentDataRef?: { attachmentUploadToken?: string }; + }, }); - try { - if (!res.ok) { - const text = await res.text().catch(() => ""); - throw new Error(`Google Chat upload ${res.status}: ${text || res.statusText}`); - } - const payload = (await res.json()) as { - attachmentDataRef?: { attachmentUploadToken?: string }; - }; - return { - attachmentUploadToken: payload.attachmentDataRef?.attachmentUploadToken, - 
}; - } finally { - await release(); - } + return { + attachmentUploadToken: payload.attachmentDataRef?.attachmentUploadToken, + }; } export async function downloadGoogleChatMedia(params: { diff --git a/extensions/googlechat/src/channel.startup.test.ts b/extensions/googlechat/src/channel.startup.test.ts index 521cbb94c5f..11c46aa663a 100644 --- a/extensions/googlechat/src/channel.startup.test.ts +++ b/extensions/googlechat/src/channel.startup.test.ts @@ -1,6 +1,10 @@ import type { ChannelAccountSnapshot } from "openclaw/plugin-sdk/googlechat"; import { afterEach, describe, expect, it, vi } from "vitest"; -import { createStartAccountContext } from "../../test-utils/start-account-context.js"; +import { + abortStartedAccount, + expectPendingUntilAbort, + startAccountAndTrackLifecycle, +} from "../../test-utils/start-account-lifecycle.js"; import type { ResolvedGoogleChatAccount } from "./accounts.js"; const hoisted = vi.hoisted(() => ({ @@ -39,29 +43,25 @@ describe("googlechatPlugin gateway.startAccount", () => { }, }; - const patches: ChannelAccountSnapshot[] = []; - const abort = new AbortController(); - const task = googlechatPlugin.gateway!.startAccount!( - createStartAccountContext({ - account, - abortSignal: abort.signal, - statusPatchSink: (next) => patches.push({ ...next }), - }), - ); - let settled = false; - void task.then(() => { - settled = true; + const { abort, patches, task, isSettled } = startAccountAndTrackLifecycle({ + startAccount: googlechatPlugin.gateway!.startAccount!, + account, }); - await vi.waitFor(() => { - expect(hoisted.startGoogleChatMonitor).toHaveBeenCalledOnce(); + await expectPendingUntilAbort({ + waitForStarted: () => + vi.waitFor(() => { + expect(hoisted.startGoogleChatMonitor).toHaveBeenCalledOnce(); + }), + isSettled, + abort, + task, + assertBeforeAbort: () => { + expect(unregister).not.toHaveBeenCalled(); + }, + assertAfterAbort: () => { + expect(unregister).toHaveBeenCalledOnce(); + }, }); - expect(settled).toBe(false); - 
expect(unregister).not.toHaveBeenCalled(); - - abort.abort(); - await task; - - expect(unregister).toHaveBeenCalledOnce(); expect(patches.some((entry) => entry.running === true)).toBe(true); expect(patches.some((entry) => entry.running === false)).toBe(true); }); diff --git a/extensions/googlechat/src/channel.ts b/extensions/googlechat/src/channel.ts index 2be9ae3335b..3ae992d3e9e 100644 --- a/extensions/googlechat/src/channel.ts +++ b/extensions/googlechat/src/channel.ts @@ -1,9 +1,9 @@ import { createScopedChannelConfigBase } from "openclaw/plugin-sdk/compat"; import { - buildAccountScopedDmSecurityPolicy, buildOpenGroupPolicyConfigureRouteAllowlistWarning, collectAllowlistProviderGroupPolicyWarnings, createScopedAccountConfigAccessors, + createScopedDmSecurityResolver, formatNormalizedAllowFromEntries, } from "openclaw/plugin-sdk/compat"; import { @@ -12,6 +12,7 @@ import { buildComputedAccountStatusSnapshot, buildChannelConfigSchema, DEFAULT_ACCOUNT_ID, + createAccountStatusSink, getChatChannelMeta, listDirectoryGroupEntriesFromMapKeys, listDirectoryUserEntriesFromAllowFrom, @@ -21,6 +22,7 @@ import { PAIRING_APPROVED_MESSAGE, resolveChannelMediaMaxBytes, resolveGoogleChatGroupRequireMention, + runPassiveAccountLifecycle, type ChannelDock, type ChannelMessageActionAdapter, type ChannelPlugin, @@ -28,6 +30,7 @@ import { type OpenClawConfig, } from "openclaw/plugin-sdk/googlechat"; import { GoogleChatConfigSchema } from "openclaw/plugin-sdk/googlechat"; +import { buildPassiveProbedChannelStatusSummary } from "../../shared/channel-status-summary.js"; import { listGoogleChatAccountIds, resolveDefaultGoogleChatAccountId, @@ -84,6 +87,14 @@ const googleChatConfigBase = createScopedChannelConfigBase({ + channelKey: "googlechat", + resolvePolicy: (account) => account.config.dm?.policy, + resolveAllowFrom: (account) => account.config.dm?.allowFrom, + allowFromPathSuffix: "dm.", + normalizeEntry: (raw) => formatAllowFromEntry(raw), +}); + export const googlechatDock: 
ChannelDock = { id: "googlechat", capabilities: { @@ -170,18 +181,7 @@ export const googlechatPlugin: ChannelPlugin = { ...googleChatConfigAccessors, }, security: { - resolveDmPolicy: ({ cfg, accountId, account }) => { - return buildAccountScopedDmSecurityPolicy({ - cfg, - channelKey: "googlechat", - accountId, - fallbackAccountId: account.accountId ?? DEFAULT_ACCOUNT_ID, - policy: account.config.dm?.policy, - allowFrom: account.config.dm?.allowFrom ?? [], - allowFromPathSuffix: "dm.", - normalizeEntry: (raw) => formatAllowFromEntry(raw), - }); - }, + resolveDmPolicy: resolveGoogleChatDmPolicy, collectWarnings: ({ account, cfg }) => { const warnings = collectAllowlistProviderGroupPolicyWarnings({ cfg, @@ -474,20 +474,14 @@ export const googlechatPlugin: ChannelPlugin = { } return issues; }), - buildChannelSummary: ({ snapshot }) => ({ - configured: snapshot.configured ?? false, - credentialSource: snapshot.credentialSource ?? "none", - audienceType: snapshot.audienceType ?? null, - audience: snapshot.audience ?? null, - webhookPath: snapshot.webhookPath ?? null, - webhookUrl: snapshot.webhookUrl ?? null, - running: snapshot.running ?? false, - lastStartAt: snapshot.lastStartAt ?? null, - lastStopAt: snapshot.lastStopAt ?? null, - lastError: snapshot.lastError ?? null, - probe: snapshot.probe, - lastProbeAt: snapshot.lastProbeAt ?? null, - }), + buildChannelSummary: ({ snapshot }) => + buildPassiveProbedChannelStatusSummary(snapshot, { + credentialSource: snapshot.credentialSource ?? "none", + audienceType: snapshot.audienceType ?? null, + audience: snapshot.audience ?? null, + webhookPath: snapshot.webhookPath ?? null, + webhookUrl: snapshot.webhookUrl ?? 
null, + }), probeAccount: async ({ account }) => probeGoogleChat(account), buildAccountSnapshot: ({ account, runtime, probe }) => { const base = buildComputedAccountStatusSnapshot({ @@ -512,37 +506,39 @@ export const googlechatPlugin: ChannelPlugin = { gateway: { startAccount: async (ctx) => { const account = ctx.account; - ctx.log?.info(`[${account.accountId}] starting Google Chat webhook`); - ctx.setStatus({ + const statusSink = createAccountStatusSink({ accountId: account.accountId, + setStatus: ctx.setStatus, + }); + ctx.log?.info(`[${account.accountId}] starting Google Chat webhook`); + statusSink({ running: true, lastStartAt: Date.now(), webhookPath: resolveGoogleChatWebhookPath({ account }), audienceType: account.config.audienceType, audience: account.config.audience, }); - const unregister = await startGoogleChatMonitor({ - account, - config: ctx.cfg, - runtime: ctx.runtime, + await runPassiveAccountLifecycle({ abortSignal: ctx.abortSignal, - webhookPath: account.config.webhookPath, - webhookUrl: account.config.webhookUrl, - statusSink: (patch) => ctx.setStatus({ accountId: account.accountId, ...patch }), - }); - // Keep the promise pending until abort (webhook mode is passive). 
- await new Promise((resolve) => { - if (ctx.abortSignal.aborted) { - resolve(); - return; - } - ctx.abortSignal.addEventListener("abort", () => resolve(), { once: true }); - }); - unregister?.(); - ctx.setStatus({ - accountId: account.accountId, - running: false, - lastStopAt: Date.now(), + start: async () => + await startGoogleChatMonitor({ + account, + config: ctx.cfg, + runtime: ctx.runtime, + abortSignal: ctx.abortSignal, + webhookPath: account.config.webhookPath, + webhookUrl: account.config.webhookUrl, + statusSink, + }), + stop: async (unregister) => { + unregister?.(); + }, + onStop: async () => { + statusSink({ + running: false, + lastStopAt: Date.now(), + }); + }, }); }, }, diff --git a/extensions/googlechat/src/monitor.webhook-routing.test.ts b/extensions/googlechat/src/monitor.webhook-routing.test.ts index 812883f1b4c..9896efce645 100644 --- a/extensions/googlechat/src/monitor.webhook-routing.test.ts +++ b/extensions/googlechat/src/monitor.webhook-routing.test.ts @@ -117,6 +117,34 @@ function registerTwoTargets() { }; } +async function dispatchWebhookRequest(req: IncomingMessage) { + const res = createMockServerResponse(); + const handled = await handleGoogleChatWebhookRequest(req, res); + expect(handled).toBe(true); + return res; +} + +async function expectVerifiedRoute(params: { + request: IncomingMessage; + expectedStatus: number; + sinkA: ReturnType; + sinkB: ReturnType; + expectedSink: "none" | "A" | "B"; +}) { + const res = await dispatchWebhookRequest(params.request); + expect(res.statusCode).toBe(params.expectedStatus); + const expectedCounts = + params.expectedSink === "A" ? [1, 0] : params.expectedSink === "B" ? 
[0, 1] : [0, 0]; + expect(params.sinkA).toHaveBeenCalledTimes(expectedCounts[0]); + expect(params.sinkB).toHaveBeenCalledTimes(expectedCounts[1]); +} + +function mockSecondVerifierSuccess() { + vi.mocked(verifyGoogleChatRequest) + .mockResolvedValueOnce({ ok: false, reason: "invalid" }) + .mockResolvedValueOnce({ ok: true }); +} + describe("Google Chat webhook routing", () => { afterEach(() => { setActivePluginRegistry(createEmptyPluginRegistry()); @@ -165,45 +193,37 @@ describe("Google Chat webhook routing", () => { const { sinkA, sinkB, unregister } = registerTwoTargets(); try { - const res = createMockServerResponse(); - const handled = await handleGoogleChatWebhookRequest( - createWebhookRequest({ + await expectVerifiedRoute({ + request: createWebhookRequest({ authorization: "Bearer test-token", payload: { type: "ADDED_TO_SPACE", space: { name: "spaces/AAA" } }, }), - res, - ); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(401); - expect(sinkA).not.toHaveBeenCalled(); - expect(sinkB).not.toHaveBeenCalled(); + expectedStatus: 401, + sinkA, + sinkB, + expectedSink: "none", + }); } finally { unregister(); } }); it("routes to the single verified target when earlier targets fail verification", async () => { - vi.mocked(verifyGoogleChatRequest) - .mockResolvedValueOnce({ ok: false, reason: "invalid" }) - .mockResolvedValueOnce({ ok: true }); + mockSecondVerifierSuccess(); const { sinkA, sinkB, unregister } = registerTwoTargets(); try { - const res = createMockServerResponse(); - const handled = await handleGoogleChatWebhookRequest( - createWebhookRequest({ + await expectVerifiedRoute({ + request: createWebhookRequest({ authorization: "Bearer test-token", payload: { type: "ADDED_TO_SPACE", space: { name: "spaces/BBB" } }, }), - res, - ); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(200); - expect(sinkA).not.toHaveBeenCalled(); - expect(sinkB).toHaveBeenCalledTimes(1); + expectedStatus: 200, + sinkA, + sinkB, + expectedSink: "B", + }); 
} finally { unregister(); } @@ -218,10 +238,7 @@ describe("Google Chat webhook routing", () => { authorization: "Bearer invalid-token", }); const onSpy = vi.spyOn(req, "on"); - const res = createMockServerResponse(); - const handled = await handleGoogleChatWebhookRequest(req, res); - - expect(handled).toBe(true); + const res = await dispatchWebhookRequest(req); expect(res.statusCode).toBe(401); expect(onSpy).not.toHaveBeenCalledWith("data", expect.any(Function)); } finally { @@ -230,15 +247,12 @@ describe("Google Chat webhook routing", () => { }); it("supports add-on requests that provide systemIdToken in the body", async () => { - vi.mocked(verifyGoogleChatRequest) - .mockResolvedValueOnce({ ok: false, reason: "invalid" }) - .mockResolvedValueOnce({ ok: true }); + mockSecondVerifierSuccess(); const { sinkA, sinkB, unregister } = registerTwoTargets(); try { - const res = createMockServerResponse(); - const handled = await handleGoogleChatWebhookRequest( - createWebhookRequest({ + await expectVerifiedRoute({ + request: createWebhookRequest({ payload: { commonEventObject: { hostApp: "CHAT" }, authorizationEventObject: { systemIdToken: "addon-token" }, @@ -252,13 +266,11 @@ describe("Google Chat webhook routing", () => { }, }, }), - res, - ); - - expect(handled).toBe(true); - expect(res.statusCode).toBe(200); - expect(sinkA).not.toHaveBeenCalled(); - expect(sinkB).toHaveBeenCalledTimes(1); + expectedStatus: 200, + sinkA, + sinkB, + expectedSink: "B", + }); } finally { unregister(); } diff --git a/extensions/googlechat/src/onboarding.ts b/extensions/googlechat/src/onboarding.ts index 2fadfe7661a..f7708dd30b9 100644 --- a/extensions/googlechat/src/onboarding.ts +++ b/extensions/googlechat/src/onboarding.ts @@ -1,5 +1,7 @@ import type { OpenClawConfig, DmPolicy } from "openclaw/plugin-sdk/googlechat"; import { + DEFAULT_ACCOUNT_ID, + applySetupAccountConfigPatch, addWildcardAllowFrom, formatDocsLink, mergeAllowFromEntries, @@ -8,7 +10,6 @@ import { type 
ChannelOnboardingAdapter, type ChannelOnboardingDmPolicy, type WizardPrompter, - DEFAULT_ACCOUNT_ID, migrateBaseNameToDefaultAccount, } from "openclaw/plugin-sdk/googlechat"; import { @@ -83,45 +84,6 @@ const dmPolicy: ChannelOnboardingDmPolicy = { promptAllowFrom, }; -function applyAccountConfig(params: { - cfg: OpenClawConfig; - accountId: string; - patch: Record; -}): OpenClawConfig { - const { cfg, accountId, patch } = params; - if (accountId === DEFAULT_ACCOUNT_ID) { - return { - ...cfg, - channels: { - ...cfg.channels, - googlechat: { - ...cfg.channels?.["googlechat"], - enabled: true, - ...patch, - }, - }, - }; - } - return { - ...cfg, - channels: { - ...cfg.channels, - googlechat: { - ...cfg.channels?.["googlechat"], - enabled: true, - accounts: { - ...cfg.channels?.["googlechat"]?.accounts, - [accountId]: { - ...cfg.channels?.["googlechat"]?.accounts?.[accountId], - enabled: true, - ...patch, - }, - }, - }, - }, - }; -} - async function promptCredentials(params: { cfg: OpenClawConfig; prompter: WizardPrompter; @@ -137,7 +99,7 @@ async function promptCredentials(params: { initialValue: true, }); if (useEnv) { - return applyAccountConfig({ cfg, accountId, patch: {} }); + return applySetupAccountConfigPatch({ cfg, channelKey: channel, accountId, patch: {} }); } } @@ -156,8 +118,9 @@ async function promptCredentials(params: { placeholder: "/path/to/service-account.json", validate: (value) => (String(value ?? "").trim() ? undefined : "Required"), }); - return applyAccountConfig({ + return applySetupAccountConfigPatch({ cfg, + channelKey: channel, accountId, patch: { serviceAccountFile: String(path).trim() }, }); @@ -168,8 +131,9 @@ async function promptCredentials(params: { placeholder: '{"type":"service_account", ... }', validate: (value) => (String(value ?? "").trim() ? 
undefined : "Required"), }); - return applyAccountConfig({ + return applySetupAccountConfigPatch({ cfg, + channelKey: channel, accountId, patch: { serviceAccount: String(json).trim() }, }); @@ -200,8 +164,9 @@ async function promptAudience(params: { initialValue: currentAudience || undefined, validate: (value) => (String(value ?? "").trim() ? undefined : "Required"), }); - return applyAccountConfig({ + return applySetupAccountConfigPatch({ cfg: params.cfg, + channelKey: channel, accountId: params.accountId, patch: { audienceType, audience: String(audience).trim() }, }); diff --git a/extensions/imessage/package.json b/extensions/imessage/package.json index 3f38e01efe1..c0988ee601c 100644 --- a/extensions/imessage/package.json +++ b/extensions/imessage/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/imessage", - "version": "2026.3.9", + "version": "2026.3.14", "private": true, "description": "OpenClaw iMessage channel plugin", "type": "module", diff --git a/extensions/imessage/src/channel.ts b/extensions/imessage/src/channel.ts index 22c45cf6072..17023599eb1 100644 --- a/extensions/imessage/src/channel.ts +++ b/extensions/imessage/src/channel.ts @@ -29,6 +29,7 @@ import { type ChannelPlugin, type ResolvedIMessageAccount, } from "openclaw/plugin-sdk/imessage"; +import { buildPassiveProbedChannelStatusSummary } from "../../shared/channel-status-summary.js"; import { getIMessageRuntime } from "./runtime.js"; const meta = getChatChannelMeta("imessage"); @@ -264,17 +265,11 @@ export const imessagePlugin: ChannelPlugin = { dbPath: null, }, collectStatusIssues: (accounts) => collectStatusIssuesFromLastError("imessage", accounts), - buildChannelSummary: ({ snapshot }) => ({ - configured: snapshot.configured ?? false, - running: snapshot.running ?? false, - lastStartAt: snapshot.lastStartAt ?? null, - lastStopAt: snapshot.lastStopAt ?? null, - lastError: snapshot.lastError ?? null, - cliPath: snapshot.cliPath ?? null, - dbPath: snapshot.dbPath ?? 
null, - probe: snapshot.probe, - lastProbeAt: snapshot.lastProbeAt ?? null, - }), + buildChannelSummary: ({ snapshot }) => + buildPassiveProbedChannelStatusSummary(snapshot, { + cliPath: snapshot.cliPath ?? null, + dbPath: snapshot.dbPath ?? null, + }), probeAccount: async ({ timeoutMs }) => getIMessageRuntime().channel.imessage.probeIMessage(timeoutMs), buildAccountSnapshot: ({ account, runtime, probe }) => ({ diff --git a/extensions/irc/package.json b/extensions/irc/package.json index 34c7de1dcfb..8d162b9ac20 100644 --- a/extensions/irc/package.json +++ b/extensions/irc/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/irc", - "version": "2026.3.9", + "version": "2026.3.14", "description": "OpenClaw IRC channel plugin", "type": "module", "dependencies": { diff --git a/extensions/irc/src/accounts.test.ts b/extensions/irc/src/accounts.test.ts index 59a72d7cbcb..5b4685795c6 100644 --- a/extensions/irc/src/accounts.test.ts +++ b/extensions/irc/src/accounts.test.ts @@ -1,5 +1,8 @@ +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; import { describe, expect, it } from "vitest"; -import { listIrcAccountIds, resolveDefaultIrcAccountId } from "./accounts.js"; +import { listIrcAccountIds, resolveDefaultIrcAccountId, resolveIrcAccount } from "./accounts.js"; import type { CoreConfig } from "./types.js"; function asConfig(value: unknown): CoreConfig { @@ -76,3 +79,54 @@ describe("resolveDefaultIrcAccountId", () => { expect(resolveDefaultIrcAccountId(cfg)).toBe("aaa"); }); }); + +describe("resolveIrcAccount", () => { + it("parses delimited IRC_CHANNELS env values for the default account", () => { + const previousChannels = process.env.IRC_CHANNELS; + process.env.IRC_CHANNELS = "alpha, beta\ngamma; delta"; + + try { + const account = resolveIrcAccount({ + cfg: asConfig({ + channels: { + irc: { + host: "irc.example.com", + nick: "claw", + }, + }, + }), + }); + + expect(account.config.channels).toEqual(["alpha", "beta", "gamma", "delta"]); + } 
finally { + if (previousChannels === undefined) { + delete process.env.IRC_CHANNELS; + } else { + process.env.IRC_CHANNELS = previousChannels; + } + } + }); + + it.runIf(process.platform !== "win32")("rejects symlinked password files", () => { + const dir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-irc-account-")); + const passwordFile = path.join(dir, "password.txt"); + const passwordLink = path.join(dir, "password-link.txt"); + fs.writeFileSync(passwordFile, "secret-pass\n", "utf8"); + fs.symlinkSync(passwordFile, passwordLink); + + const cfg = asConfig({ + channels: { + irc: { + host: "irc.example.com", + nick: "claw", + passwordFile: passwordLink, + }, + }, + }); + + const account = resolveIrcAccount({ cfg }); + expect(account.password).toBe(""); + expect(account.passwordSource).toBe("none"); + fs.rmSync(dir, { recursive: true, force: true }); + }); +}); diff --git a/extensions/irc/src/accounts.ts b/extensions/irc/src/accounts.ts index d61499c4d39..9367a7d2123 100644 --- a/extensions/irc/src/accounts.ts +++ b/extensions/irc/src/accounts.ts @@ -1,8 +1,9 @@ -import { readFileSync } from "node:fs"; import { DEFAULT_ACCOUNT_ID, normalizeAccountId } from "openclaw/plugin-sdk/account-id"; +import { tryReadSecretFileSync } from "openclaw/plugin-sdk/core"; import { createAccountListHelpers, normalizeResolvedSecretInputString, + parseOptionalDelimitedEntries, } from "openclaw/plugin-sdk/irc"; import type { CoreConfig, IrcAccountConfig, IrcNickServConfig } from "./types.js"; @@ -42,17 +43,6 @@ function parseIntEnv(value?: string): number | undefined { return parsed; } -function parseListEnv(value?: string): string[] | undefined { - if (!value?.trim()) { - return undefined; - } - const parsed = value - .split(/[\n,;]+/g) - .map((entry) => entry.trim()) - .filter(Boolean); - return parsed.length > 0 ? 
parsed : undefined; -} - const { listAccountIds: listIrcAccountIds, resolveDefaultAccountId: resolveDefaultIrcAccountId } = createAccountListHelpers("irc", { normalizeAccountId }); export { listIrcAccountIds, resolveDefaultIrcAccountId }; @@ -100,13 +90,11 @@ function resolvePassword(accountId: string, merged: IrcAccountConfig) { } if (merged.passwordFile?.trim()) { - try { - const filePassword = readFileSync(merged.passwordFile.trim(), "utf-8").trim(); - if (filePassword) { - return { password: filePassword, source: "passwordFile" as const }; - } - } catch { - // Ignore unreadable files here; status will still surface missing configuration. + const filePassword = tryReadSecretFileSync(merged.passwordFile, "IRC password file", { + rejectSymlink: true, + }); + if (filePassword) { + return { password: filePassword, source: "passwordFile" as const }; } } @@ -137,11 +125,10 @@ function resolveNickServConfig(accountId: string, nickserv?: IrcNickServConfig): envPassword || ""; if (!resolvedPassword && passwordFile) { - try { - resolvedPassword = readFileSync(passwordFile, "utf-8").trim(); - } catch { - // Ignore unreadable files; monitor/probe status will surface failures. - } + resolvedPassword = + tryReadSecretFileSync(passwordFile, "IRC NickServ password file", { + rejectSymlink: true, + }) ?? ""; } const merged: IrcNickServConfig = { @@ -177,7 +164,9 @@ export function resolveIrcAccount(params: { accountId === DEFAULT_ACCOUNT_ID ? parseIntEnv(process.env.IRC_PORT) : undefined; const port = merged.port ?? envPort ?? (tls ? 6697 : 6667); const envChannels = - accountId === DEFAULT_ACCOUNT_ID ? parseListEnv(process.env.IRC_CHANNELS) : undefined; + accountId === DEFAULT_ACCOUNT_ID + ? 
parseOptionalDelimitedEntries(process.env.IRC_CHANNELS) + : undefined; const host = ( merged.host?.trim() || diff --git a/extensions/irc/src/channel.startup.test.ts b/extensions/irc/src/channel.startup.test.ts new file mode 100644 index 00000000000..7b4416d1892 --- /dev/null +++ b/extensions/irc/src/channel.startup.test.ts @@ -0,0 +1,63 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { + expectStopPendingUntilAbort, + startAccountAndTrackLifecycle, +} from "../../test-utils/start-account-lifecycle.js"; +import type { ResolvedIrcAccount } from "./accounts.js"; + +const hoisted = vi.hoisted(() => ({ + monitorIrcProvider: vi.fn(), +})); + +vi.mock("./monitor.js", async () => { + const actual = await vi.importActual("./monitor.js"); + return { + ...actual, + monitorIrcProvider: hoisted.monitorIrcProvider, + }; +}); + +import { ircPlugin } from "./channel.js"; + +describe("ircPlugin gateway.startAccount", () => { + afterEach(() => { + vi.clearAllMocks(); + }); + + it("keeps startAccount pending until abort, then stops the monitor", async () => { + const stop = vi.fn(); + hoisted.monitorIrcProvider.mockResolvedValue({ stop }); + + const account: ResolvedIrcAccount = { + accountId: "default", + enabled: true, + name: "default", + configured: true, + host: "irc.example.com", + port: 6697, + tls: true, + nick: "openclaw", + username: "openclaw", + realname: "OpenClaw", + password: "", + passwordSource: "none", + config: {} as ResolvedIrcAccount["config"], + }; + + const { abort, task, isSettled } = startAccountAndTrackLifecycle({ + startAccount: ircPlugin.gateway!.startAccount!, + account, + }); + + await expectStopPendingUntilAbort({ + waitForStarted: () => + vi.waitFor(() => { + expect(hoisted.monitorIrcProvider).toHaveBeenCalledOnce(); + }), + isSettled, + abort, + task, + stop, + }); + }); +}); diff --git a/extensions/irc/src/channel.ts b/extensions/irc/src/channel.ts index 03d86da4c54..62d64fb0866 100644 --- a/extensions/irc/src/channel.ts +++ 
b/extensions/irc/src/channel.ts @@ -9,6 +9,7 @@ import { buildBaseAccountStatusSnapshot, buildBaseChannelStatusSummary, buildChannelConfigSchema, + createAccountStatusSink, DEFAULT_ACCOUNT_ID, deleteAccountFromConfigSection, getChatChannelMeta, @@ -16,6 +17,7 @@ import { setAccountEnabledInConfigSection, type ChannelPlugin, } from "openclaw/plugin-sdk/irc"; +import { runStoppablePassiveMonitor } from "../../shared/passive-monitor.js"; import { listIrcAccountIds, resolveDefaultIrcAccountId, @@ -353,6 +355,10 @@ export const ircPlugin: ChannelPlugin = { gateway: { startAccount: async (ctx) => { const account = ctx.account; + const statusSink = createAccountStatusSink({ + accountId: ctx.accountId, + setStatus: ctx.setStatus, + }); if (!account.configured) { throw new Error( `IRC is not configured for account "${account.accountId}" (need host and nick in channels.irc).`, @@ -361,14 +367,17 @@ export const ircPlugin: ChannelPlugin = { ctx.log?.info( `[${account.accountId}] starting IRC provider (${account.host}:${account.port}${account.tls ? 
" tls" : ""})`, ); - const { stop } = await monitorIrcProvider({ - accountId: account.accountId, - config: ctx.cfg as CoreConfig, - runtime: ctx.runtime, + await runStoppablePassiveMonitor({ abortSignal: ctx.abortSignal, - statusSink: (patch) => ctx.setStatus({ accountId: ctx.accountId, ...patch }), + start: async () => + await monitorIrcProvider({ + accountId: account.accountId, + config: ctx.cfg as CoreConfig, + runtime: ctx.runtime, + abortSignal: ctx.abortSignal, + statusSink, + }), }); - return { stop }; }, }, }; diff --git a/extensions/irc/src/config-schema.ts b/extensions/irc/src/config-schema.ts index aa37b596cd1..8b9625b5bc4 100644 --- a/extensions/irc/src/config-schema.ts +++ b/extensions/irc/src/config-schema.ts @@ -9,6 +9,7 @@ import { requireOpenAllowFrom, } from "openclaw/plugin-sdk/irc"; import { z } from "zod"; +import { requireChannelOpenAllowFrom } from "../../shared/config-schema-helpers.js"; const IrcGroupSchema = z .object({ @@ -69,12 +70,12 @@ export const IrcAccountSchemaBase = z .strict(); export const IrcAccountSchema = IrcAccountSchemaBase.superRefine((value, ctx) => { - requireOpenAllowFrom({ + requireChannelOpenAllowFrom({ + channel: "irc", policy: value.dmPolicy, allowFrom: value.allowFrom, ctx, - path: ["allowFrom"], - message: 'channels.irc.dmPolicy="open" requires channels.irc.allowFrom to include "*"', + requireOpenAllowFrom, }); }); @@ -82,11 +83,11 @@ export const IrcConfigSchema = IrcAccountSchemaBase.extend({ accounts: z.record(z.string(), IrcAccountSchema.optional()).optional(), defaultAccount: z.string().optional(), }).superRefine((value, ctx) => { - requireOpenAllowFrom({ + requireChannelOpenAllowFrom({ + channel: "irc", policy: value.dmPolicy, allowFrom: value.allowFrom, ctx, - path: ["allowFrom"], - message: 'channels.irc.dmPolicy="open" requires channels.irc.allowFrom to include "*"', + requireOpenAllowFrom, }); }); diff --git a/extensions/irc/src/monitor.ts b/extensions/irc/src/monitor.ts index e416d95f8eb..2eec74a73d4 
100644 --- a/extensions/irc/src/monitor.ts +++ b/extensions/irc/src/monitor.ts @@ -1,4 +1,5 @@ -import { createLoggerBackedRuntime, type RuntimeEnv } from "openclaw/plugin-sdk/irc"; +import type { RuntimeEnv } from "openclaw/plugin-sdk/irc"; +import { resolveLoggerBackedRuntime } from "../../shared/runtime.js"; import { resolveIrcAccount } from "./accounts.js"; import { connectIrcClient, type IrcClient } from "./client.js"; import { buildIrcConnectOptions } from "./connect-options.js"; @@ -39,12 +40,10 @@ export async function monitorIrcProvider(opts: IrcMonitorOptions): Promise<{ sto accountId: opts.accountId, }); - const runtime: RuntimeEnv = - opts.runtime ?? - createLoggerBackedRuntime({ - logger: core.logging.getChildLogger(), - exitError: () => new Error("Runtime exit not available"), - }); + const runtime: RuntimeEnv = resolveLoggerBackedRuntime( + opts.runtime, + core.logging.getChildLogger(), + ); if (!account.configured) { throw new Error( diff --git a/extensions/irc/src/onboarding.test.ts b/extensions/irc/src/onboarding.test.ts index 21f3e978c1a..613503700f3 100644 --- a/extensions/irc/src/onboarding.test.ts +++ b/extensions/irc/src/onboarding.test.ts @@ -1,5 +1,6 @@ import type { RuntimeEnv, WizardPrompter } from "openclaw/plugin-sdk/irc"; import { describe, expect, it, vi } from "vitest"; +import { createRuntimeEnv } from "../../test-utils/runtime-env.js"; import { ircOnboardingAdapter } from "./onboarding.js"; import type { CoreConfig } from "./types.js"; @@ -63,13 +64,7 @@ describe("irc onboarding", () => { }), }); - const runtime: RuntimeEnv = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn((code: number): never => { - throw new Error(`exit ${code}`); - }), - }; + const runtime: RuntimeEnv = createRuntimeEnv(); const result = await ircOnboardingAdapter.configure({ cfg: {} as CoreConfig, diff --git a/extensions/irc/src/onboarding.ts b/extensions/irc/src/onboarding.ts index d7d7b7f79a9..5e7c80c94d7 100644 --- a/extensions/irc/src/onboarding.ts +++ 
b/extensions/irc/src/onboarding.ts @@ -1,6 +1,7 @@ import { DEFAULT_ACCOUNT_ID, formatDocsLink, + patchScopedAccountConfig, promptChannelAccessConfig, resolveAccountIdForConfigure, setTopLevelChannelAllowFrom, @@ -59,35 +60,14 @@ function updateIrcAccountConfig( accountId: string, patch: Partial, ): CoreConfig { - const current = cfg.channels?.irc ?? {}; - if (accountId === DEFAULT_ACCOUNT_ID) { - return { - ...cfg, - channels: { - ...cfg.channels, - irc: { - ...current, - ...patch, - }, - }, - }; - } - return { - ...cfg, - channels: { - ...cfg.channels, - irc: { - ...current, - accounts: { - ...current.accounts, - [accountId]: { - ...current.accounts?.[accountId], - ...patch, - }, - }, - }, - }, - }; + return patchScopedAccountConfig({ + cfg, + channelKey: channel, + accountId, + patch, + ensureChannelEnabled: false, + ensureAccountEnabled: false, + }) as CoreConfig; } function setIrcDmPolicy(cfg: CoreConfig, dmPolicy: DmPolicy): CoreConfig { diff --git a/extensions/irc/src/send.test.ts b/extensions/irc/src/send.test.ts index df7b5e60ddd..8fbe58e7f22 100644 --- a/extensions/irc/src/send.test.ts +++ b/extensions/irc/src/send.test.ts @@ -1,4 +1,9 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; +import { + createSendCfgThreadingRuntime, + expectProvidedCfgSkipsRuntimeLoad, + expectRuntimeCfgFallback, +} from "../../test-utils/send-config.js"; import type { IrcClient } from "./client.js"; import type { CoreConfig } from "./types.js"; @@ -27,20 +32,7 @@ const hoisted = vi.hoisted(() => { }); vi.mock("./runtime.js", () => ({ - getIrcRuntime: () => ({ - config: { - loadConfig: hoisted.loadConfig, - }, - channel: { - text: { - resolveMarkdownTableMode: hoisted.resolveMarkdownTableMode, - convertMarkdownTables: hoisted.convertMarkdownTables, - }, - activity: { - record: hoisted.record, - }, - }, - }), + getIrcRuntime: () => createSendCfgThreadingRuntime(hoisted), })); vi.mock("./accounts.js", () => ({ @@ -87,8 +79,9 @@ describe("sendMessageIrc cfg 
threading", () => { accountId: "work", }); - expect(hoisted.loadConfig).not.toHaveBeenCalled(); - expect(hoisted.resolveIrcAccount).toHaveBeenCalledWith({ + expectProvidedCfgSkipsRuntimeLoad({ + loadConfig: hoisted.loadConfig, + resolveAccount: hoisted.resolveIrcAccount, cfg: providedCfg, accountId: "work", }); @@ -106,8 +99,9 @@ describe("sendMessageIrc cfg threading", () => { await sendMessageIrc("#ops", "ping", { client }); - expect(hoisted.loadConfig).toHaveBeenCalledTimes(1); - expect(hoisted.resolveIrcAccount).toHaveBeenCalledWith({ + expectRuntimeCfgFallback({ + loadConfig: hoisted.loadConfig, + resolveAccount: hoisted.resolveIrcAccount, cfg: runtimeCfg, accountId: undefined, }); diff --git a/extensions/line/package.json b/extensions/line/package.json index 9ec37f833e7..85bfac7f0ac 100644 --- a/extensions/line/package.json +++ b/extensions/line/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/line", - "version": "2026.3.9", + "version": "2026.3.14", "private": true, "description": "OpenClaw LINE channel plugin", "type": "module", diff --git a/extensions/line/src/channel.ts b/extensions/line/src/channel.ts index 37fbda8c54a..d5f5ef43d03 100644 --- a/extensions/line/src/channel.ts +++ b/extensions/line/src/channel.ts @@ -1,7 +1,8 @@ import { - buildAccountScopedDmSecurityPolicy, - createScopedAccountConfigAccessors, collectAllowlistProviderRestrictSendersWarnings, + createScopedAccountConfigAccessors, + createScopedChannelConfigBase, + createScopedDmSecurityResolver, } from "openclaw/plugin-sdk/compat"; import { buildChannelConfigSchema, @@ -43,6 +44,24 @@ const lineConfigAccessors = createScopedAccountConfigAccessors({ .map((entry) => entry.replace(/^line:(?:user:)?/i, "")), }); +const lineConfigBase = createScopedChannelConfigBase({ + sectionKey: "line", + listAccountIds: (cfg) => getLineRuntime().channel.line.listLineAccountIds(cfg), + resolveAccount: (cfg, accountId) => + getLineRuntime().channel.line.resolveLineAccount({ cfg, accountId: accountId ?? 
undefined }), + defaultAccountId: (cfg) => getLineRuntime().channel.line.resolveDefaultLineAccountId(cfg), + clearBaseFields: ["channelSecret", "tokenFile", "secretFile"], +}); + +const resolveLineDmPolicy = createScopedDmSecurityResolver({ + channelKey: "line", + resolvePolicy: (account) => account.config.dmPolicy, + resolveAllowFrom: (account) => account.config.allowFrom, + policyPathSuffix: "dmPolicy", + approveHint: "openclaw pairing approve line ", + normalizeEntry: (raw) => raw.replace(/^line:(?:user:)?/i, ""), +}); + function patchLineAccountConfig( cfg: OpenClawConfig, lineConfig: LineConfig, @@ -113,40 +132,7 @@ export const linePlugin: ChannelPlugin = { reload: { configPrefixes: ["channels.line"] }, configSchema: buildChannelConfigSchema(LineConfigSchema), config: { - listAccountIds: (cfg) => getLineRuntime().channel.line.listLineAccountIds(cfg), - resolveAccount: (cfg, accountId) => - getLineRuntime().channel.line.resolveLineAccount({ cfg, accountId: accountId ?? undefined }), - defaultAccountId: (cfg) => getLineRuntime().channel.line.resolveDefaultLineAccountId(cfg), - setAccountEnabled: ({ cfg, accountId, enabled }) => { - const lineConfig = (cfg.channels?.line ?? {}) as LineConfig; - return patchLineAccountConfig(cfg, lineConfig, accountId, { enabled }); - }, - deleteAccount: ({ cfg, accountId }) => { - const lineConfig = (cfg.channels?.line ?? {}) as LineConfig; - if (accountId === DEFAULT_ACCOUNT_ID) { - // oxlint-disable-next-line no-unused-vars - const { channelSecret, tokenFile, secretFile, ...rest } = lineConfig; - return { - ...cfg, - channels: { - ...cfg.channels, - line: rest, - }, - }; - } - const accounts = { ...lineConfig.accounts }; - delete accounts[accountId]; - return { - ...cfg, - channels: { - ...cfg.channels, - line: { - ...lineConfig, - accounts: Object.keys(accounts).length > 0 ? 
accounts : undefined, - }, - }, - }; - }, + ...lineConfigBase, isConfigured: (account) => Boolean(account.channelAccessToken?.trim() && account.channelSecret?.trim()), describeAccount: (account) => ({ @@ -159,19 +145,7 @@ export const linePlugin: ChannelPlugin = { ...lineConfigAccessors, }, security: { - resolveDmPolicy: ({ cfg, accountId, account }) => { - return buildAccountScopedDmSecurityPolicy({ - cfg, - channelKey: "line", - accountId, - fallbackAccountId: account.accountId ?? DEFAULT_ACCOUNT_ID, - policy: account.config.dmPolicy, - allowFrom: account.config.allowFrom ?? [], - policyPathSuffix: "dmPolicy", - approveHint: "openclaw pairing approve line ", - normalizeEntry: (raw) => raw.replace(/^line:(?:user:)?/i, ""), - }); - }, + resolveDmPolicy: resolveLineDmPolicy, collectWarnings: ({ account, cfg }) => { return collectAllowlistProviderRestrictSendersWarnings({ cfg, @@ -373,6 +347,16 @@ export const linePlugin: ChannelPlugin = { : []; const mediaUrls = payload.mediaUrls ?? (payload.mediaUrl ? [payload.mediaUrl] : []); const shouldSendQuickRepliesInline = chunks.length === 0 && hasQuickReplies; + const sendMediaMessages = async () => { + for (const url of mediaUrls) { + lastResult = await runtime.channel.line.sendMessageLine(to, "", { + verbose: false, + mediaUrl: url, + cfg, + accountId: accountId ?? undefined, + }); + } + }; if (!shouldSendQuickRepliesInline) { if (lineData.flexMessage) { @@ -417,14 +401,7 @@ export const linePlugin: ChannelPlugin = { const sendMediaAfterText = !(hasQuickReplies && chunks.length > 0); if (mediaUrls.length > 0 && !shouldSendQuickRepliesInline && !sendMediaAfterText) { - for (const url of mediaUrls) { - lastResult = await runtime.channel.line.sendMessageLine(to, "", { - verbose: false, - mediaUrl: url, - cfg, - accountId: accountId ?? 
undefined, - }); - } + await sendMediaMessages(); } if (chunks.length > 0) { @@ -497,14 +474,7 @@ export const linePlugin: ChannelPlugin = { } if (mediaUrls.length > 0 && !shouldSendQuickRepliesInline && sendMediaAfterText) { - for (const url of mediaUrls) { - lastResult = await runtime.channel.line.sendMessageLine(to, "", { - verbose: false, - mediaUrl: url, - cfg, - accountId: accountId ?? undefined, - }); - } + await sendMediaMessages(); } if (lastResult) { diff --git a/extensions/llm-task/README.md b/extensions/llm-task/README.md index d8e5dadc6fb..738208f3d60 100644 --- a/extensions/llm-task/README.md +++ b/extensions/llm-task/README.md @@ -69,6 +69,7 @@ outside the list is rejected. - `schema` (object, optional JSON Schema) - `provider` (string, optional) - `model` (string, optional) +- `thinking` (string, optional) - `authProfileId` (string, optional) - `temperature` (number, optional) - `maxTokens` (number, optional) diff --git a/extensions/llm-task/package.json b/extensions/llm-task/package.json index 8a74b2ead7e..6b19e5cb4b2 100644 --- a/extensions/llm-task/package.json +++ b/extensions/llm-task/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/llm-task", - "version": "2026.3.9", + "version": "2026.3.14", "private": true, "description": "OpenClaw JSON-only LLM task plugin", "type": "module", diff --git a/extensions/llm-task/src/llm-task-tool.test.ts b/extensions/llm-task/src/llm-task-tool.test.ts index fea135e8be5..2bf0cb655aa 100644 --- a/extensions/llm-task/src/llm-task-tool.test.ts +++ b/extensions/llm-task/src/llm-task-tool.test.ts @@ -29,6 +29,21 @@ function fakeApi(overrides: any = {}) { }; } +function mockEmbeddedRunJson(payload: unknown) { + // oxlint-disable-next-line typescript/no-explicit-any + (runEmbeddedPiAgent as any).mockResolvedValueOnce({ + meta: {}, + payloads: [{ text: JSON.stringify(payload) }], + }); +} + +async function executeEmbeddedRun(input: Record) { + const tool = createLlmTaskTool(fakeApi()); + await tool.execute("id", 
input); + // oxlint-disable-next-line typescript/no-explicit-any + return (runEmbeddedPiAgent as any).mock.calls[0]?.[0]; +} + describe("llm-task tool (json-only)", () => { beforeEach(() => vi.clearAllMocks()); @@ -96,25 +111,50 @@ describe("llm-task tool (json-only)", () => { }); it("passes provider/model overrides to embedded runner", async () => { - // oxlint-disable-next-line typescript/no-explicit-any - (runEmbeddedPiAgent as any).mockResolvedValueOnce({ - meta: {}, - payloads: [{ text: JSON.stringify({ ok: true }) }], + mockEmbeddedRunJson({ ok: true }); + const call = await executeEmbeddedRun({ + prompt: "x", + provider: "anthropic", + model: "claude-4-sonnet", }); - const tool = createLlmTaskTool(fakeApi()); - await tool.execute("id", { prompt: "x", provider: "anthropic", model: "claude-4-sonnet" }); - // oxlint-disable-next-line typescript/no-explicit-any - const call = (runEmbeddedPiAgent as any).mock.calls[0]?.[0]; expect(call.provider).toBe("anthropic"); expect(call.model).toBe("claude-4-sonnet"); }); + it("passes thinking override to embedded runner", async () => { + mockEmbeddedRunJson({ ok: true }); + const call = await executeEmbeddedRun({ prompt: "x", thinking: "high" }); + expect(call.thinkLevel).toBe("high"); + }); + + it("normalizes thinking aliases", async () => { + mockEmbeddedRunJson({ ok: true }); + const call = await executeEmbeddedRun({ prompt: "x", thinking: "on" }); + expect(call.thinkLevel).toBe("low"); + }); + + it("throws on invalid thinking level", async () => { + const tool = createLlmTaskTool(fakeApi()); + await expect(tool.execute("id", { prompt: "x", thinking: "banana" })).rejects.toThrow( + /invalid thinking level/i, + ); + }); + + it("throws on unsupported xhigh thinking level", async () => { + const tool = createLlmTaskTool(fakeApi()); + await expect(tool.execute("id", { prompt: "x", thinking: "xhigh" })).rejects.toThrow( + /only supported/i, + ); + }); + + it("does not pass thinkLevel when thinking is omitted", async () => { 
+ mockEmbeddedRunJson({ ok: true }); + const call = await executeEmbeddedRun({ prompt: "x" }); + expect(call.thinkLevel).toBeUndefined(); + }); + it("enforces allowedModels", async () => { - // oxlint-disable-next-line typescript/no-explicit-any - (runEmbeddedPiAgent as any).mockResolvedValueOnce({ - meta: {}, - payloads: [{ text: JSON.stringify({ ok: true }) }], - }); + mockEmbeddedRunJson({ ok: true }); const tool = createLlmTaskTool( fakeApi({ pluginConfig: { allowedModels: ["openai-codex/gpt-5.2"] } }), ); @@ -124,15 +164,8 @@ describe("llm-task tool (json-only)", () => { }); it("disables tools for embedded run", async () => { - // oxlint-disable-next-line typescript/no-explicit-any - (runEmbeddedPiAgent as any).mockResolvedValueOnce({ - meta: {}, - payloads: [{ text: JSON.stringify({ ok: true }) }], - }); - const tool = createLlmTaskTool(fakeApi()); - await tool.execute("id", { prompt: "x" }); - // oxlint-disable-next-line typescript/no-explicit-any - const call = (runEmbeddedPiAgent as any).mock.calls[0]?.[0]; + mockEmbeddedRunJson({ ok: true }); + const call = await executeEmbeddedRun({ prompt: "x" }); expect(call.disableTools).toBe(true); }); }); diff --git a/extensions/llm-task/src/llm-task-tool.ts b/extensions/llm-task/src/llm-task-tool.ts index 3a2e42c7223..ff2037e534a 100644 --- a/extensions/llm-task/src/llm-task-tool.ts +++ b/extensions/llm-task/src/llm-task-tool.ts @@ -2,7 +2,13 @@ import fs from "node:fs/promises"; import path from "node:path"; import { Type } from "@sinclair/typebox"; import Ajv from "ajv"; -import { resolvePreferredOpenClawTmpDir } from "openclaw/plugin-sdk/llm-task"; +import { + formatThinkingLevels, + formatXHighModelHint, + normalizeThinkLevel, + resolvePreferredOpenClawTmpDir, + supportsXHighThinking, +} from "openclaw/plugin-sdk/llm-task"; // NOTE: This extension is intended to be bundled with OpenClaw. // When running from source (tests/dev), OpenClaw internals live under src/. 
// When running from a built install, internals live under dist/ (no src/ tree). @@ -86,6 +92,7 @@ export function createLlmTaskTool(api: OpenClawPluginApi) { Type.String({ description: "Provider override (e.g. openai-codex, anthropic)." }), ), model: Type.Optional(Type.String({ description: "Model id override." })), + thinking: Type.Optional(Type.String({ description: "Thinking level override." })), authProfileId: Type.Optional(Type.String({ description: "Auth profile override." })), temperature: Type.Optional(Type.Number({ description: "Best-effort temperature override." })), maxTokens: Type.Optional(Type.Number({ description: "Best-effort maxTokens override." })), @@ -144,6 +151,18 @@ export function createLlmTaskTool(api: OpenClawPluginApi) { ); } + const thinkingRaw = + typeof params.thinking === "string" && params.thinking.trim() ? params.thinking : undefined; + const thinkLevel = thinkingRaw ? normalizeThinkLevel(thinkingRaw) : undefined; + if (thinkingRaw && !thinkLevel) { + throw new Error( + `Invalid thinking level "${thinkingRaw}". Use one of: ${formatThinkingLevels(provider, model)}.`, + ); + } + if (thinkLevel === "xhigh" && !supportsXHighThinking(provider, model)) { + throw new Error(`Thinking level "xhigh" is only supported for ${formatXHighModelHint()}.`); + } + const timeoutMs = (typeof params.timeoutMs === "number" && params.timeoutMs > 0 ? params.timeoutMs @@ -204,6 +223,7 @@ export function createLlmTaskTool(api: OpenClawPluginApi) { model, authProfileId, authProfileIdSource: authProfileId ? 
"user" : "auto", + thinkLevel, streamParams, disableTools: true, }); diff --git a/extensions/lobster/package.json b/extensions/lobster/package.json index 4c137401fbb..915e5d5c3de 100644 --- a/extensions/lobster/package.json +++ b/extensions/lobster/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/lobster", - "version": "2026.3.9", + "version": "2026.3.14", "description": "Lobster workflow tool plugin (typed pipelines + resumable approvals)", "type": "module", "dependencies": { diff --git a/extensions/lobster/src/windows-spawn.test.ts b/extensions/lobster/src/windows-spawn.test.ts index e3d791e36e4..48e6ddc9a54 100644 --- a/extensions/lobster/src/windows-spawn.test.ts +++ b/extensions/lobster/src/windows-spawn.test.ts @@ -14,6 +14,19 @@ describe("resolveWindowsLobsterSpawn", () => { let tempDir = ""; const originalProcessState = snapshotPlatformPathEnv(); + async function expectUnwrappedShim(params: { + scriptPath: string; + shimPath: string; + shimLine: string; + }) { + await createWindowsCmdShimFixture(params); + + const target = resolveWindowsLobsterSpawn(params.shimPath, ["run", "noop"], process.env); + expect(target.command).toBe(process.execPath); + expect(target.argv).toEqual([params.scriptPath, "run", "noop"]); + expect(target.windowsHide).toBe(true); + } + beforeEach(async () => { tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-lobster-win-spawn-")); setProcessPlatform("win32"); @@ -30,31 +43,21 @@ describe("resolveWindowsLobsterSpawn", () => { it("unwraps cmd shim with %dp0% token", async () => { const scriptPath = path.join(tempDir, "shim-dist", "lobster-cli.cjs"); const shimPath = path.join(tempDir, "shim", "lobster.cmd"); - await createWindowsCmdShimFixture({ + await expectUnwrappedShim({ shimPath, scriptPath, shimLine: `"%dp0%\\..\\shim-dist\\lobster-cli.cjs" %*`, }); - - const target = resolveWindowsLobsterSpawn(shimPath, ["run", "noop"], process.env); - expect(target.command).toBe(process.execPath); - 
expect(target.argv).toEqual([scriptPath, "run", "noop"]); - expect(target.windowsHide).toBe(true); }); it("unwraps cmd shim with %~dp0% token", async () => { const scriptPath = path.join(tempDir, "shim-dist", "lobster-cli.cjs"); const shimPath = path.join(tempDir, "shim", "lobster.cmd"); - await createWindowsCmdShimFixture({ + await expectUnwrappedShim({ shimPath, scriptPath, shimLine: `"%~dp0%\\..\\shim-dist\\lobster-cli.cjs" %*`, }); - - const target = resolveWindowsLobsterSpawn(shimPath, ["run", "noop"], process.env); - expect(target.command).toBe(process.execPath); - expect(target.argv).toEqual([scriptPath, "run", "noop"]); - expect(target.windowsHide).toBe(true); }); it("ignores node.exe shim entries and picks lobster script", async () => { diff --git a/extensions/matrix/CHANGELOG.md b/extensions/matrix/CHANGELOG.md index a3b32a18c85..5e6a7ed5327 100644 --- a/extensions/matrix/CHANGELOG.md +++ b/extensions/matrix/CHANGELOG.md @@ -1,5 +1,35 @@ # Changelog +## 2026.3.14 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.13 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.12 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.3.9 ### Changes diff --git a/extensions/matrix/package.json b/extensions/matrix/package.json index c1b5859b43e..5b973b88635 100644 --- a/extensions/matrix/package.json +++ b/extensions/matrix/package.json @@ -1,14 +1,14 @@ { "name": "@openclaw/matrix", - "version": "2026.3.9", + "version": "2026.3.14", "description": "OpenClaw Matrix channel plugin", "type": "module", "dependencies": { - "@mariozechner/pi-agent-core": "0.57.1", + "@mariozechner/pi-agent-core": "0.58.0", "@matrix-org/matrix-sdk-crypto-nodejs": "^0.4.0", "@vector-im/matrix-bot-sdk": "0.8.0-element.3", "markdown-it": "14.1.1", - "music-metadata": "^11.12.1", + "music-metadata": "^11.12.3", "zod": "^4.3.6" }, "openclaw": { diff --git a/extensions/matrix/src/channel.directory.test.ts b/extensions/matrix/src/channel.directory.test.ts index 51c781c0b75..2c5bc9533f3 100644 --- a/extensions/matrix/src/channel.directory.test.ts +++ b/extensions/matrix/src/channel.directory.test.ts @@ -1,36 +1,17 @@ import type { PluginRuntime, RuntimeEnv } from "openclaw/plugin-sdk/matrix"; import { beforeEach, describe, expect, it, vi } from "vitest"; +import { createRuntimeEnv } from "../../test-utils/runtime-env.js"; import { matrixPlugin } from "./channel.js"; import { setMatrixRuntime } from "./runtime.js"; +import { createMatrixBotSdkMock } from "./test-mocks.js"; import type { CoreConfig } from "./types.js"; -vi.mock("@vector-im/matrix-bot-sdk", () => ({ - ConsoleLogger: class { - trace = vi.fn(); - debug = vi.fn(); - info = vi.fn(); - warn = vi.fn(); - error = vi.fn(); - }, - MatrixClient: class {}, - LogService: { - setLogger: vi.fn(), - warn: vi.fn(), - info: vi.fn(), - debug: vi.fn(), - }, - SimpleFsStorageProvider: class {}, - RustSdkCryptoStorageProvider: class {}, -})); +vi.mock("@vector-im/matrix-bot-sdk", () => + createMatrixBotSdkMock({ includeVerboseLogService: true }), +); describe("matrix directory", () => { - const runtimeEnv: RuntimeEnv = { - log: vi.fn(), - error: vi.fn(), - exit: 
vi.fn((code: number): never => { - throw new Error(`exit ${code}`); - }), - }; + const runtimeEnv: RuntimeEnv = createRuntimeEnv(); beforeEach(() => { setMatrixRuntime({ diff --git a/extensions/matrix/src/channel.ts b/extensions/matrix/src/channel.ts index c33c85ebe05..bad3322f8d0 100644 --- a/extensions/matrix/src/channel.ts +++ b/extensions/matrix/src/channel.ts @@ -1,8 +1,9 @@ import { - buildAccountScopedDmSecurityPolicy, buildOpenGroupPolicyWarning, collectAllowlistProviderGroupPolicyWarnings, createScopedAccountConfigAccessors, + createScopedChannelConfigBase, + createScopedDmSecurityResolver, } from "openclaw/plugin-sdk/compat"; import { applyAccountNameToChannelSection, @@ -10,12 +11,11 @@ import { buildProbeChannelStatusSummary, collectStatusIssuesFromLastError, DEFAULT_ACCOUNT_ID, - deleteAccountFromConfigSection, normalizeAccountId, PAIRING_APPROVED_MESSAGE, - setAccountEnabledInConfigSection, type ChannelPlugin, } from "openclaw/plugin-sdk/matrix"; +import { buildTrafficStatusSummary } from "../../shared/channel-status-summary.js"; import { matrixMessageActions } from "./actions.js"; import { MatrixConfigSchema } from "./config-schema.js"; import { listMatrixDirectoryGroupsLive, listMatrixDirectoryPeersLive } from "./directory-live.js"; @@ -106,6 +106,30 @@ const matrixConfigAccessors = createScopedAccountConfigAccessors({ formatAllowFrom: (allowFrom) => normalizeMatrixAllowList(allowFrom), }); +const matrixConfigBase = createScopedChannelConfigBase({ + sectionKey: "matrix", + listAccountIds: listMatrixAccountIds, + resolveAccount: (cfg, accountId) => resolveMatrixAccount({ cfg, accountId }), + defaultAccountId: resolveDefaultMatrixAccountId, + clearBaseFields: [ + "name", + "homeserver", + "userId", + "accessToken", + "password", + "deviceName", + "initialSyncLimit", + ], +}); + +const resolveMatrixDmPolicy = createScopedDmSecurityResolver({ + channelKey: "matrix", + resolvePolicy: (account) => account.config.dm?.policy, + resolveAllowFrom: (account) 
=> account.config.dm?.allowFrom, + allowFromPathSuffix: "dm.", + normalizeEntry: (raw) => normalizeMatrixUserId(raw), +}); + export const matrixPlugin: ChannelPlugin = { id: "matrix", meta, @@ -127,32 +151,7 @@ export const matrixPlugin: ChannelPlugin = { reload: { configPrefixes: ["channels.matrix"] }, configSchema: buildChannelConfigSchema(MatrixConfigSchema), config: { - listAccountIds: (cfg) => listMatrixAccountIds(cfg as CoreConfig), - resolveAccount: (cfg, accountId) => resolveMatrixAccount({ cfg: cfg as CoreConfig, accountId }), - defaultAccountId: (cfg) => resolveDefaultMatrixAccountId(cfg as CoreConfig), - setAccountEnabled: ({ cfg, accountId, enabled }) => - setAccountEnabledInConfigSection({ - cfg: cfg as CoreConfig, - sectionKey: "matrix", - accountId, - enabled, - allowTopLevel: true, - }), - deleteAccount: ({ cfg, accountId }) => - deleteAccountFromConfigSection({ - cfg: cfg as CoreConfig, - sectionKey: "matrix", - accountId, - clearBaseFields: [ - "name", - "homeserver", - "userId", - "accessToken", - "password", - "deviceName", - "initialSyncLimit", - ], - }), + ...matrixConfigBase, isConfigured: (account) => account.configured, describeAccount: (account) => ({ accountId: account.accountId, @@ -164,18 +163,7 @@ export const matrixPlugin: ChannelPlugin = { ...matrixConfigAccessors, }, security: { - resolveDmPolicy: ({ cfg, accountId, account }) => { - return buildAccountScopedDmSecurityPolicy({ - cfg: cfg as CoreConfig, - channelKey: "matrix", - accountId, - fallbackAccountId: account.accountId ?? DEFAULT_ACCOUNT_ID, - policy: account.config.dm?.policy, - allowFrom: account.config.dm?.allowFrom ?? [], - allowFromPathSuffix: "dm.", - normalizeEntry: (raw) => normalizeMatrixUserId(raw), - }); - }, + resolveDmPolicy: resolveMatrixDmPolicy, collectWarnings: ({ account, cfg }) => { return collectAllowlistProviderGroupPolicyWarnings({ cfg: cfg as CoreConfig, @@ -423,8 +411,7 @@ export const matrixPlugin: ChannelPlugin = { lastError: runtime?.lastError ?? 
null, probe, lastProbeAt: runtime?.lastProbeAt ?? null, - lastInboundAt: runtime?.lastInboundAt ?? null, - lastOutboundAt: runtime?.lastOutboundAt ?? null, + ...buildTrafficStatusSummary(runtime), }), }, gateway: { diff --git a/extensions/matrix/src/config-schema.ts b/extensions/matrix/src/config-schema.ts index cd1c89fbdb6..a95d2fbda96 100644 --- a/extensions/matrix/src/config-schema.ts +++ b/extensions/matrix/src/config-schema.ts @@ -1,9 +1,13 @@ +import { + AllowFromListSchema, + buildNestedDmConfigSchema, + DmPolicySchema, + GroupPolicySchema, +} from "openclaw/plugin-sdk/compat"; import { MarkdownConfigSchema, ToolPolicySchema } from "openclaw/plugin-sdk/matrix"; import { z } from "zod"; import { buildSecretInputSchema } from "./secret-input.js"; -const allowFromEntry = z.union([z.string(), z.number()]); - const matrixActionSchema = z .object({ reactions: z.boolean().optional(), @@ -14,14 +18,6 @@ const matrixActionSchema = z }) .optional(); -const matrixDmSchema = z - .object({ - enabled: z.boolean().optional(), - policy: z.enum(["pairing", "allowlist", "open", "disabled"]).optional(), - allowFrom: z.array(allowFromEntry).optional(), - }) - .optional(); - const matrixRoomSchema = z .object({ enabled: z.boolean().optional(), @@ -29,7 +25,7 @@ const matrixRoomSchema = z requireMention: z.boolean().optional(), tools: ToolPolicySchema, autoReply: z.boolean().optional(), - users: z.array(allowFromEntry).optional(), + users: AllowFromListSchema, skills: z.array(z.string()).optional(), systemPrompt: z.string().optional(), }) @@ -49,7 +45,7 @@ export const MatrixConfigSchema = z.object({ initialSyncLimit: z.number().optional(), encryption: z.boolean().optional(), allowlistOnly: z.boolean().optional(), - groupPolicy: z.enum(["open", "disabled", "allowlist"]).optional(), + groupPolicy: GroupPolicySchema.optional(), replyToMode: z.enum(["off", "first", "all"]).optional(), threadReplies: z.enum(["off", "inbound", "always"]).optional(), textChunkLimit: 
z.number().optional(), @@ -57,9 +53,9 @@ export const MatrixConfigSchema = z.object({ responsePrefix: z.string().optional(), mediaMaxMb: z.number().optional(), autoJoin: z.enum(["always", "allowlist", "off"]).optional(), - autoJoinAllowlist: z.array(allowFromEntry).optional(), - groupAllowFrom: z.array(allowFromEntry).optional(), - dm: matrixDmSchema, + autoJoinAllowlist: AllowFromListSchema, + groupAllowFrom: AllowFromListSchema, + dm: buildNestedDmConfigSchema(), groups: z.object({}).catchall(matrixRoomSchema).optional(), rooms: z.object({}).catchall(matrixRoomSchema).optional(), actions: matrixActionSchema, diff --git a/extensions/matrix/src/matrix/monitor/allowlist.ts b/extensions/matrix/src/matrix/monitor/allowlist.ts index e9402c38362..a48fe63bdb0 100644 --- a/extensions/matrix/src/matrix/monitor/allowlist.ts +++ b/extensions/matrix/src/matrix/monitor/allowlist.ts @@ -1,6 +1,7 @@ import { + compileAllowlist, normalizeStringEntries, - resolveAllowlistMatchByCandidates, + resolveCompiledAllowlistMatch, type AllowlistMatch, } from "openclaw/plugin-sdk/matrix"; @@ -75,20 +76,17 @@ export function resolveMatrixAllowListMatch(params: { allowList: string[]; userId?: string; }): MatrixAllowListMatch { - const allowList = params.allowList; - if (allowList.length === 0) { - return { allowed: false }; - } - if (allowList.includes("*")) { - return { allowed: true, matchKey: "*", matchSource: "wildcard" }; - } + const compiledAllowList = compileAllowlist(params.allowList); const userId = normalizeMatrixUser(params.userId); const candidates: Array<{ value?: string; source: MatrixAllowListSource }> = [ { value: userId, source: "id" }, { value: userId ? `matrix:${userId}` : "", source: "prefixed-id" }, { value: userId ? 
`user:${userId}` : "", source: "prefixed-user" }, ]; - return resolveAllowlistMatchByCandidates({ allowList, candidates }); + return resolveCompiledAllowlistMatch({ + compiledAllowlist: compiledAllowList, + candidates, + }); } export function resolveMatrixAllowListMatches(params: { allowList: string[]; userId?: string }) { diff --git a/extensions/matrix/src/matrix/monitor/direct.test.ts b/extensions/matrix/src/matrix/monitor/direct.test.ts index 298b3996837..6688f76e649 100644 --- a/extensions/matrix/src/matrix/monitor/direct.test.ts +++ b/extensions/matrix/src/matrix/monitor/direct.test.ts @@ -7,6 +7,8 @@ import { createDirectRoomTracker } from "./direct.js"; type StateEvent = Record; type DmMap = Record; +const brokenDmRoomId = "!broken-dm:example.org"; +const defaultBrokenDmMembers = ["@alice:example.org", "@bot:example.org"]; function createMockClient(opts: { dmRooms?: DmMap; @@ -50,6 +52,21 @@ function createMockClient(opts: { }; } +function createBrokenDmClient(roomNameEvent?: StateEvent) { + return createMockClient({ + dmRooms: {}, + membersByRoom: { + [brokenDmRoomId]: defaultBrokenDmMembers, + }, + stateEvents: { + // is_direct not set on either member (e.g. Continuwuity bug) + [`${brokenDmRoomId}|m.room.member|@alice:example.org`]: {}, + [`${brokenDmRoomId}|m.room.member|@bot:example.org`]: {}, + ...(roomNameEvent ? 
{ [`${brokenDmRoomId}|m.room.name|`]: roomNameEvent } : {}), + }, + }); +} + // --------------------------------------------------------------------------- // Tests -- isDirectMessage // --------------------------------------------------------------------------- @@ -131,22 +148,11 @@ describe("createDirectRoomTracker", () => { describe("conservative fallback (memberCount + room name)", () => { it("returns true for 2-member room WITHOUT a room name (broken flags)", async () => { - const client = createMockClient({ - dmRooms: {}, - membersByRoom: { - "!broken-dm:example.org": ["@alice:example.org", "@bot:example.org"], - }, - stateEvents: { - // is_direct not set on either member (e.g. Continuwuity bug) - "!broken-dm:example.org|m.room.member|@alice:example.org": {}, - "!broken-dm:example.org|m.room.member|@bot:example.org": {}, - // No m.room.name -> getRoomStateEvent will throw (event not found) - }, - }); + const client = createBrokenDmClient(); const tracker = createDirectRoomTracker(client as never); const result = await tracker.isDirectMessage({ - roomId: "!broken-dm:example.org", + roomId: brokenDmRoomId, senderId: "@alice:example.org", }); @@ -154,21 +160,11 @@ describe("createDirectRoomTracker", () => { }); it("returns true for 2-member room with empty room name", async () => { - const client = createMockClient({ - dmRooms: {}, - membersByRoom: { - "!broken-dm:example.org": ["@alice:example.org", "@bot:example.org"], - }, - stateEvents: { - "!broken-dm:example.org|m.room.member|@alice:example.org": {}, - "!broken-dm:example.org|m.room.member|@bot:example.org": {}, - "!broken-dm:example.org|m.room.name|": { name: "" }, - }, - }); + const client = createBrokenDmClient({ name: "" }); const tracker = createDirectRoomTracker(client as never); const result = await tracker.isDirectMessage({ - roomId: "!broken-dm:example.org", + roomId: brokenDmRoomId, senderId: "@alice:example.org", }); diff --git a/extensions/matrix/src/matrix/monitor/events.test.ts 
b/extensions/matrix/src/matrix/monitor/events.test.ts index 9179cf69ee3..6dac0db59fc 100644 --- a/extensions/matrix/src/matrix/monitor/events.test.ts +++ b/extensions/matrix/src/matrix/monitor/events.test.ts @@ -12,6 +12,19 @@ vi.mock("../send.js", () => ({ })); describe("registerMatrixMonitorEvents", () => { + const roomId = "!room:example.org"; + + function makeEvent(overrides: Partial): MatrixRawEvent { + return { + event_id: "$event", + sender: "@alice:example.org", + type: "m.room.message", + origin_server_ts: 0, + content: {}, + ...overrides, + }; + } + beforeEach(() => { sendReadReceiptMatrixMock.mockClear(); }); @@ -53,12 +66,22 @@ describe("registerMatrixMonitorEvents", () => { return { client, getUserId, onRoomMessage, roomMessageHandler, logVerboseMessage }; } + async function expectForwardedWithoutReadReceipt(event: MatrixRawEvent) { + const { onRoomMessage, roomMessageHandler } = createHarness(); + + roomMessageHandler(roomId, event); + await vi.waitFor(() => { + expect(onRoomMessage).toHaveBeenCalledWith(roomId, event); + }); + expect(sendReadReceiptMatrixMock).not.toHaveBeenCalled(); + } + it("sends read receipt immediately for non-self messages", async () => { const { client, onRoomMessage, roomMessageHandler } = createHarness(); - const event = { + const event = makeEvent({ event_id: "$e1", sender: "@alice:example.org", - } as MatrixRawEvent; + }); roomMessageHandler("!room:example.org", event); @@ -69,36 +92,27 @@ describe("registerMatrixMonitorEvents", () => { }); it("does not send read receipts for self messages", async () => { - const { onRoomMessage, roomMessageHandler } = createHarness(); - const event = { - event_id: "$e2", - sender: "@bot:example.org", - } as MatrixRawEvent; - - roomMessageHandler("!room:example.org", event); - await vi.waitFor(() => { - expect(onRoomMessage).toHaveBeenCalledWith("!room:example.org", event); - }); - expect(sendReadReceiptMatrixMock).not.toHaveBeenCalled(); + await expectForwardedWithoutReadReceipt( + 
makeEvent({ + event_id: "$e2", + sender: "@bot:example.org", + }), + ); }); it("skips receipt when message lacks sender or event id", async () => { - const { onRoomMessage, roomMessageHandler } = createHarness(); - const event = { - sender: "@alice:example.org", - } as MatrixRawEvent; - - roomMessageHandler("!room:example.org", event); - await vi.waitFor(() => { - expect(onRoomMessage).toHaveBeenCalledWith("!room:example.org", event); - }); - expect(sendReadReceiptMatrixMock).not.toHaveBeenCalled(); + await expectForwardedWithoutReadReceipt( + makeEvent({ + sender: "@alice:example.org", + event_id: "", + }), + ); }); it("caches self user id across messages", async () => { const { getUserId, roomMessageHandler } = createHarness(); - const first = { event_id: "$e3", sender: "@alice:example.org" } as MatrixRawEvent; - const second = { event_id: "$e4", sender: "@bob:example.org" } as MatrixRawEvent; + const first = makeEvent({ event_id: "$e3", sender: "@alice:example.org" }); + const second = makeEvent({ event_id: "$e4", sender: "@bob:example.org" }); roomMessageHandler("!room:example.org", first); roomMessageHandler("!room:example.org", second); @@ -112,7 +126,7 @@ describe("registerMatrixMonitorEvents", () => { it("logs and continues when sending read receipt fails", async () => { sendReadReceiptMatrixMock.mockRejectedValueOnce(new Error("network boom")); const { roomMessageHandler, onRoomMessage, logVerboseMessage } = createHarness(); - const event = { event_id: "$e5", sender: "@alice:example.org" } as MatrixRawEvent; + const event = makeEvent({ event_id: "$e5", sender: "@alice:example.org" }); roomMessageHandler("!room:example.org", event); @@ -128,7 +142,7 @@ describe("registerMatrixMonitorEvents", () => { const { roomMessageHandler, onRoomMessage, getUserId } = createHarness({ getUserId: vi.fn().mockRejectedValue(new Error("cannot resolve self")), }); - const event = { event_id: "$e6", sender: "@alice:example.org" } as MatrixRawEvent; + const event = makeEvent({ 
event_id: "$e6", sender: "@alice:example.org" }); roomMessageHandler("!room:example.org", event); diff --git a/extensions/matrix/src/matrix/monitor/handler.ts b/extensions/matrix/src/matrix/monitor/handler.ts index 0adc9fa2886..22ee16275cf 100644 --- a/extensions/matrix/src/matrix/monitor/handler.ts +++ b/extensions/matrix/src/matrix/monitor/handler.ts @@ -686,6 +686,7 @@ export function createMatrixRoomMessageHandler(params: MatrixMonitorHandlerParam channel: "matrix", accountId: route.accountId, }); + const humanDelay = core.channel.reply.resolveHumanDelayConfig(cfg, route.agentId); const typingCallbacks = createTypingCallbacks({ start: () => sendTypingMatrix(roomId, true, undefined, client), stop: () => sendTypingMatrix(roomId, false, undefined, client), @@ -711,7 +712,7 @@ export function createMatrixRoomMessageHandler(params: MatrixMonitorHandlerParam const { dispatcher, replyOptions, markDispatchIdle } = core.channel.reply.createReplyDispatcherWithTyping({ ...prefixOptions, - humanDelay: core.channel.reply.resolveHumanDelayConfig(cfg, route.agentId), + humanDelay, typingCallbacks, deliver: async (payload) => { await deliverMatrixReplies({ diff --git a/extensions/matrix/src/matrix/send-queue.test.ts b/extensions/matrix/src/matrix/send-queue.test.ts index aa4765eaab3..240dd8ee71d 100644 --- a/extensions/matrix/src/matrix/send-queue.test.ts +++ b/extensions/matrix/src/matrix/send-queue.test.ts @@ -1,16 +1,7 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { createDeferred } from "../../../shared/deferred.js"; import { DEFAULT_SEND_GAP_MS, enqueueSend } from "./send-queue.js"; -function deferred() { - let resolve!: (value: T | PromiseLike) => void; - let reject!: (reason?: unknown) => void; - const promise = new Promise((res, rej) => { - resolve = res; - reject = rej; - }); - return { promise, resolve, reject }; -} - describe("enqueueSend", () => { beforeEach(() => { vi.useFakeTimers(); @@ -21,7 +12,7 @@ describe("enqueueSend", 
() => { }); it("serializes sends per room", async () => { - const gate = deferred(); + const gate = createDeferred(); const events: string[] = []; const first = enqueueSend("!room:example.org", async () => { @@ -91,7 +82,7 @@ describe("enqueueSend", () => { }); it("continues queued work when the head task fails", async () => { - const gate = deferred(); + const gate = createDeferred(); const events: string[] = []; const first = enqueueSend("!room:example.org", async () => { diff --git a/extensions/matrix/src/matrix/send.test.ts b/extensions/matrix/src/matrix/send.test.ts index dabe915b388..2bf21023909 100644 --- a/extensions/matrix/src/matrix/send.test.ts +++ b/extensions/matrix/src/matrix/send.test.ts @@ -1,6 +1,7 @@ import type { PluginRuntime } from "openclaw/plugin-sdk/matrix"; import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; import { setMatrixRuntime } from "../runtime.js"; +import { createMatrixBotSdkMock } from "../test-mocks.js"; vi.mock("music-metadata", () => ({ // `resolveMediaDurationMs` lazily imports `music-metadata`; in tests we don't @@ -8,21 +9,13 @@ vi.mock("music-metadata", () => ({ parseBuffer: vi.fn().mockResolvedValue({ format: {} }), })); -vi.mock("@vector-im/matrix-bot-sdk", () => ({ - ConsoleLogger: class { - trace = vi.fn(); - debug = vi.fn(); - info = vi.fn(); - warn = vi.fn(); - error = vi.fn(); - }, - LogService: { - setLogger: vi.fn(), - }, - MatrixClient: vi.fn(), - SimpleFsStorageProvider: vi.fn(), - RustSdkCryptoStorageProvider: vi.fn(), -})); +vi.mock("@vector-im/matrix-bot-sdk", () => + createMatrixBotSdkMock({ + matrixClient: vi.fn(), + simpleFsStorageProvider: vi.fn(), + rustSdkCryptoStorageProvider: vi.fn(), + }), +); vi.mock("./send-queue.js", () => ({ enqueueSend: async (_roomId: string, fn: () => Promise) => await fn(), diff --git a/extensions/matrix/src/resolve-targets.test.ts b/extensions/matrix/src/resolve-targets.test.ts index 10dff313a2e..02a5088e8ae 100644 --- 
a/extensions/matrix/src/resolve-targets.test.ts +++ b/extensions/matrix/src/resolve-targets.test.ts @@ -8,6 +8,15 @@ vi.mock("./directory-live.js", () => ({ listMatrixDirectoryGroupsLive: vi.fn(), })); +async function resolveUserTarget(input = "Alice") { + const [result] = await resolveMatrixTargets({ + cfg: {}, + inputs: [input], + kind: "user", + }); + return result; +} + describe("resolveMatrixTargets (users)", () => { beforeEach(() => { vi.mocked(listMatrixDirectoryPeersLive).mockReset(); @@ -20,11 +29,7 @@ describe("resolveMatrixTargets (users)", () => { ]; vi.mocked(listMatrixDirectoryPeersLive).mockResolvedValue(matches); - const [result] = await resolveMatrixTargets({ - cfg: {}, - inputs: ["Alice"], - kind: "user", - }); + const result = await resolveUserTarget(); expect(result?.resolved).toBe(true); expect(result?.id).toBe("@alice:example.org"); @@ -37,11 +42,7 @@ describe("resolveMatrixTargets (users)", () => { ]; vi.mocked(listMatrixDirectoryPeersLive).mockResolvedValue(matches); - const [result] = await resolveMatrixTargets({ - cfg: {}, - inputs: ["Alice"], - kind: "user", - }); + const result = await resolveUserTarget(); expect(result?.resolved).toBe(false); expect(result?.note).toMatch(/use full Matrix ID/i); diff --git a/extensions/matrix/src/test-mocks.ts b/extensions/matrix/src/test-mocks.ts new file mode 100644 index 00000000000..687b94459ea --- /dev/null +++ b/extensions/matrix/src/test-mocks.ts @@ -0,0 +1,53 @@ +import type { Mock } from "vitest"; +import { vi } from "vitest"; + +type MatrixBotSdkMockParams = { + matrixClient?: unknown; + simpleFsStorageProvider?: unknown; + rustSdkCryptoStorageProvider?: unknown; + includeVerboseLogService?: boolean; +}; + +type MatrixBotSdkMock = { + ConsoleLogger: new () => { + trace: Mock<() => void>; + debug: Mock<() => void>; + info: Mock<() => void>; + warn: Mock<() => void>; + error: Mock<() => void>; + }; + MatrixClient: unknown; + LogService: { + setLogger: Mock<() => void>; + warn?: Mock<() => void>; 
+ info?: Mock<() => void>; + debug?: Mock<() => void>; + }; + SimpleFsStorageProvider: unknown; + RustSdkCryptoStorageProvider: unknown; +}; + +export function createMatrixBotSdkMock(params: MatrixBotSdkMockParams = {}): MatrixBotSdkMock { + return { + ConsoleLogger: class { + trace = vi.fn(); + debug = vi.fn(); + info = vi.fn(); + warn = vi.fn(); + error = vi.fn(); + }, + MatrixClient: params.matrixClient ?? class {}, + LogService: { + setLogger: vi.fn(), + ...(params.includeVerboseLogService + ? { + warn: vi.fn(), + info: vi.fn(), + debug: vi.fn(), + } + : {}), + }, + SimpleFsStorageProvider: params.simpleFsStorageProvider ?? class {}, + RustSdkCryptoStorageProvider: params.rustSdkCryptoStorageProvider ?? class {}, + }; +} diff --git a/extensions/mattermost/package.json b/extensions/mattermost/package.json index d532764db87..17f8add1b1f 100644 --- a/extensions/mattermost/package.json +++ b/extensions/mattermost/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/mattermost", - "version": "2026.3.9", + "version": "2026.3.14", "description": "OpenClaw Mattermost channel plugin", "type": "module", "dependencies": { diff --git a/extensions/mattermost/src/channel.test.ts b/extensions/mattermost/src/channel.test.ts index 97314f5e13b..5ac333b2e6c 100644 --- a/extensions/mattermost/src/channel.test.ts +++ b/extensions/mattermost/src/channel.test.ts @@ -65,6 +65,38 @@ describe("mattermostPlugin", () => { }); }); + describe("threading", () => { + it("uses replyToMode for channel messages and keeps direct messages off", () => { + const resolveReplyToMode = mattermostPlugin.threading?.resolveReplyToMode; + if (!resolveReplyToMode) { + return; + } + + const cfg: OpenClawConfig = { + channels: { + mattermost: { + replyToMode: "all", + }, + }, + }; + + expect( + resolveReplyToMode({ + cfg, + accountId: "default", + chatType: "channel", + }), + ).toBe("all"); + expect( + resolveReplyToMode({ + cfg, + accountId: "default", + chatType: "direct", + }), + ).toBe("off"); + }); + }); + 
describe("messageActions", () => { beforeEach(() => { resetMattermostReactionBotUserCacheForTests(); @@ -214,6 +246,57 @@ describe("mattermostPlugin", () => { ]); expect(result?.details).toEqual({}); }); + + it("maps replyTo to replyToId for send actions", async () => { + const cfg = createMattermostTestConfig(); + + await mattermostPlugin.actions?.handleAction?.({ + channel: "mattermost", + action: "send", + params: { + to: "channel:CHAN1", + message: "hello", + replyTo: "post-root", + }, + cfg, + accountId: "default", + } as any); + + expect(sendMessageMattermostMock).toHaveBeenCalledWith( + "channel:CHAN1", + "hello", + expect.objectContaining({ + accountId: "default", + replyToId: "post-root", + }), + ); + }); + + it("falls back to trimmed replyTo when replyToId is blank", async () => { + const cfg = createMattermostTestConfig(); + + await mattermostPlugin.actions?.handleAction?.({ + channel: "mattermost", + action: "send", + params: { + to: "channel:CHAN1", + message: "hello", + replyToId: " ", + replyTo: " post-root ", + }, + cfg, + accountId: "default", + } as any); + + expect(sendMessageMattermostMock).toHaveBeenCalledWith( + "channel:CHAN1", + "hello", + expect.objectContaining({ + accountId: "default", + replyToId: "post-root", + }), + ); + }); }); describe("outbound", () => { @@ -272,6 +355,53 @@ describe("mattermostPlugin", () => { }), ); }); + + it("uses threadId as fallback when replyToId is absent (sendText)", async () => { + const sendText = mattermostPlugin.outbound?.sendText; + if (!sendText) { + return; + } + + await sendText({ + to: "channel:CHAN1", + text: "hello", + accountId: "default", + threadId: "post-root", + } as any); + + expect(sendMessageMattermostMock).toHaveBeenCalledWith( + "channel:CHAN1", + "hello", + expect.objectContaining({ + accountId: "default", + replyToId: "post-root", + }), + ); + }); + + it("uses threadId as fallback when replyToId is absent (sendMedia)", async () => { + const sendMedia = 
mattermostPlugin.outbound?.sendMedia; + if (!sendMedia) { + return; + } + + await sendMedia({ + to: "channel:CHAN1", + text: "caption", + mediaUrl: "https://example.com/image.png", + accountId: "default", + threadId: "post-root", + } as any); + + expect(sendMessageMattermostMock).toHaveBeenCalledWith( + "channel:CHAN1", + "caption", + expect.objectContaining({ + accountId: "default", + replyToId: "post-root", + }), + ); + }); }); describe("config", () => { diff --git a/extensions/mattermost/src/channel.ts b/extensions/mattermost/src/channel.ts index 8c0504c7a5c..45c4d863c7c 100644 --- a/extensions/mattermost/src/channel.ts +++ b/extensions/mattermost/src/channel.ts @@ -9,21 +9,26 @@ import { applySetupAccountConfigPatch, buildComputedAccountStatusSnapshot, buildChannelConfigSchema, + createAccountStatusSink, DEFAULT_ACCOUNT_ID, deleteAccountFromConfigSection, migrateBaseNameToDefaultAccount, normalizeAccountId, + resolveAllowlistProviderRuntimeGroupPolicy, + resolveDefaultGroupPolicy, setAccountEnabledInConfigSection, type ChannelMessageActionAdapter, type ChannelMessageActionName, type ChannelPlugin, } from "openclaw/plugin-sdk/mattermost"; +import { buildPassiveProbedChannelStatusSummary } from "../../shared/channel-status-summary.js"; import { MattermostConfigSchema } from "./config-schema.js"; import { resolveMattermostGroupRequireMention } from "./group-mentions.js"; import { listMattermostAccountIds, resolveDefaultMattermostAccountId, resolveMattermostAccount, + resolveMattermostReplyToMode, type ResolvedMattermostAccount, } from "./mattermost/accounts.js"; import { normalizeMattermostBaseUrl } from "./mattermost/client.js"; @@ -35,6 +40,7 @@ import { monitorMattermostProvider } from "./mattermost/monitor.js"; import { probeMattermost } from "./mattermost/probe.js"; import { addMattermostReaction, removeMattermostReaction } from "./mattermost/reactions.js"; import { sendMessageMattermost } from "./mattermost/send.js"; +import { resolveMattermostOpaqueTarget } 
from "./mattermost/target-resolution.js"; import { looksLikeMattermostTargetId, normalizeMattermostMessagingTarget } from "./normalize.js"; import { mattermostOnboardingAdapter } from "./onboarding.js"; import { getMattermostRuntime } from "./runtime.js"; @@ -157,7 +163,9 @@ const mattermostMessageActions: ChannelMessageActionAdapter = { } const message = typeof params.message === "string" ? params.message : ""; - const replyToId = typeof params.replyToId === "string" ? params.replyToId : undefined; + // Match the shared runner semantics: trim empty reply IDs away before + // falling back from replyToId to replyTo on direct plugin calls. + const replyToId = readMattermostReplyToId(params); const resolvedAccountId = accountId || undefined; const mediaUrl = @@ -201,6 +209,18 @@ const meta = { quickstartAllowFrom: true, } as const; +function readMattermostReplyToId(params: Record): string | undefined { + const readNormalizedValue = (value: unknown) => { + if (typeof value !== "string") { + return undefined; + } + const trimmed = value.trim(); + return trimmed || undefined; + }; + + return readNormalizedValue(params.replyToId) ?? readNormalizedValue(params.replyTo); +} + function normalizeAllowEntry(entry: string): string { return entry .trim() @@ -254,6 +274,16 @@ export const mattermostPlugin: ChannelPlugin = { streaming: { blockStreamingCoalesceDefaults: { minChars: 1500, idleMs: 1000 }, }, + threading: { + resolveReplyToMode: ({ cfg, accountId, chatType }) => { + const account = resolveMattermostAccount({ cfg, accountId: accountId ?? "default" }); + const kind = + chatType === "direct" || chatType === "group" || chatType === "channel" + ? 
chatType + : "channel"; + return resolveMattermostReplyToMode(account, kind); + }, + }, reload: { configPrefixes: ["channels.mattermost"] }, configSchema: buildChannelConfigSchema(MattermostConfigSchema), config: { @@ -326,6 +356,21 @@ export const mattermostPlugin: ChannelPlugin = { targetResolver: { looksLikeId: looksLikeMattermostTargetId, hint: "", + resolveTarget: async ({ cfg, accountId, input }) => { + const resolved = await resolveMattermostOpaqueTarget({ + input, + cfg, + accountId, + }); + if (!resolved) { + return null; + } + return { + to: resolved.to, + kind: resolved.kind, + source: "directory", + }; + }, }, }, outbound: { @@ -345,21 +390,30 @@ export const mattermostPlugin: ChannelPlugin = { } return { ok: true, to: trimmed }; }, - sendText: async ({ cfg, to, text, accountId, replyToId }) => { + sendText: async ({ cfg, to, text, accountId, replyToId, threadId }) => { const result = await sendMessageMattermost(to, text, { cfg, accountId: accountId ?? undefined, - replyToId: replyToId ?? undefined, + replyToId: replyToId ?? (threadId != null ? String(threadId) : undefined), }); return { channel: "mattermost", ...result }; }, - sendMedia: async ({ cfg, to, text, mediaUrl, mediaLocalRoots, accountId, replyToId }) => { + sendMedia: async ({ + cfg, + to, + text, + mediaUrl, + mediaLocalRoots, + accountId, + replyToId, + threadId, + }) => { const result = await sendMessageMattermost(to, text, { cfg, accountId: accountId ?? undefined, mediaUrl, mediaLocalRoots, - replyToId: replyToId ?? undefined, + replyToId: replyToId ?? (threadId != null ? String(threadId) : undefined), }); return { channel: "mattermost", ...result }; }, @@ -375,18 +429,12 @@ export const mattermostPlugin: ChannelPlugin = { lastStopAt: null, lastError: null, }, - buildChannelSummary: ({ snapshot }) => ({ - configured: snapshot.configured ?? false, - botTokenSource: snapshot.botTokenSource ?? "none", - running: snapshot.running ?? false, - connected: snapshot.connected ?? 
false, - lastStartAt: snapshot.lastStartAt ?? null, - lastStopAt: snapshot.lastStopAt ?? null, - lastError: snapshot.lastError ?? null, - baseUrl: snapshot.baseUrl ?? null, - probe: snapshot.probe, - lastProbeAt: snapshot.lastProbeAt ?? null, - }), + buildChannelSummary: ({ snapshot }) => + buildPassiveProbedChannelStatusSummary(snapshot, { + botTokenSource: snapshot.botTokenSource ?? "none", + connected: snapshot.connected ?? false, + baseUrl: snapshot.baseUrl ?? null, + }), probeAccount: async ({ account, timeoutMs }) => { const token = account.botToken?.trim(); const baseUrl = account.baseUrl?.trim(); @@ -470,8 +518,11 @@ export const mattermostPlugin: ChannelPlugin = { gateway: { startAccount: async (ctx) => { const account = ctx.account; - ctx.setStatus({ - accountId: account.accountId, + const statusSink = createAccountStatusSink({ + accountId: ctx.accountId, + setStatus: ctx.setStatus, + }); + statusSink({ baseUrl: account.baseUrl, botTokenSource: account.botTokenSource, }); @@ -483,7 +534,7 @@ export const mattermostPlugin: ChannelPlugin = { config: ctx.cfg, runtime: ctx.runtime, abortSignal: ctx.abortSignal, - statusSink: (patch) => ctx.setStatus({ accountId: ctx.accountId, ...patch }), + statusSink, }); }, }, diff --git a/extensions/mattermost/src/config-schema.test.ts b/extensions/mattermost/src/config-schema.test.ts index c744a6a5e0f..aa8db0f5d02 100644 --- a/extensions/mattermost/src/config-schema.test.ts +++ b/extensions/mattermost/src/config-schema.test.ts @@ -1,7 +1,7 @@ import { describe, expect, it } from "vitest"; import { MattermostConfigSchema } from "./config-schema.js"; -describe("MattermostConfigSchema SecretInput", () => { +describe("MattermostConfigSchema", () => { it("accepts SecretRef botToken at top-level", () => { const result = MattermostConfigSchema.safeParse({ botToken: { source: "env", provider: "default", id: "MATTERMOST_BOT_TOKEN" }, @@ -21,4 +21,29 @@ describe("MattermostConfigSchema SecretInput", () => { }); 
expect(result.success).toBe(true); }); + + it("accepts replyToMode", () => { + const result = MattermostConfigSchema.safeParse({ + replyToMode: "all", + }); + expect(result.success).toBe(true); + }); + + it("rejects unsupported direct-message reply threading config", () => { + const result = MattermostConfigSchema.safeParse({ + dm: { + replyToMode: "all", + }, + }); + expect(result.success).toBe(false); + }); + + it("rejects unsupported per-chat-type reply threading config", () => { + const result = MattermostConfigSchema.safeParse({ + replyToModeByChatType: { + direct: "all", + }, + }); + expect(result.success).toBe(false); + }); }); diff --git a/extensions/mattermost/src/config-schema.ts b/extensions/mattermost/src/config-schema.ts index 51d9bdbe33a..16ee615454c 100644 --- a/extensions/mattermost/src/config-schema.ts +++ b/extensions/mattermost/src/config-schema.ts @@ -6,6 +6,7 @@ import { requireOpenAllowFrom, } from "openclaw/plugin-sdk/mattermost"; import { z } from "zod"; +import { requireChannelOpenAllowFrom } from "../../shared/config-schema-helpers.js"; import { buildSecretInputSchema } from "./secret-input.js"; const MattermostSlashCommandsSchema = z @@ -43,6 +44,7 @@ const MattermostAccountSchemaBase = z chunkMode: z.enum(["length", "newline"]).optional(), blockStreaming: z.boolean().optional(), blockStreamingCoalesce: BlockStreamingCoalesceSchema.optional(), + replyToMode: z.enum(["off", "first", "all"]).optional(), responsePrefix: z.string().optional(), actions: z .object({ @@ -60,13 +62,12 @@ const MattermostAccountSchemaBase = z .strict(); const MattermostAccountSchema = MattermostAccountSchemaBase.superRefine((value, ctx) => { - requireOpenAllowFrom({ + requireChannelOpenAllowFrom({ + channel: "mattermost", policy: value.dmPolicy, allowFrom: value.allowFrom, ctx, - path: ["allowFrom"], - message: - 'channels.mattermost.dmPolicy="open" requires channels.mattermost.allowFrom to include "*"', + requireOpenAllowFrom, }); }); @@ -74,12 +75,11 @@ export 
const MattermostConfigSchema = MattermostAccountSchemaBase.extend({ accounts: z.record(z.string(), MattermostAccountSchema.optional()).optional(), defaultAccount: z.string().optional(), }).superRefine((value, ctx) => { - requireOpenAllowFrom({ + requireChannelOpenAllowFrom({ + channel: "mattermost", policy: value.dmPolicy, allowFrom: value.allowFrom, ctx, - path: ["allowFrom"], - message: - 'channels.mattermost.dmPolicy="open" requires channels.mattermost.allowFrom to include "*"', + requireOpenAllowFrom, }); }); diff --git a/extensions/mattermost/src/mattermost/accounts.test.ts b/extensions/mattermost/src/mattermost/accounts.test.ts index b3ad8d49e04..0e01d362520 100644 --- a/extensions/mattermost/src/mattermost/accounts.test.ts +++ b/extensions/mattermost/src/mattermost/accounts.test.ts @@ -1,6 +1,10 @@ import type { OpenClawConfig } from "openclaw/plugin-sdk/mattermost"; import { describe, expect, it } from "vitest"; -import { resolveDefaultMattermostAccountId } from "./accounts.js"; +import { + resolveDefaultMattermostAccountId, + resolveMattermostAccount, + resolveMattermostReplyToMode, +} from "./accounts.js"; describe("resolveDefaultMattermostAccountId", () => { it("prefers channels.mattermost.defaultAccount when it matches a configured account", () => { @@ -50,3 +54,37 @@ describe("resolveDefaultMattermostAccountId", () => { expect(resolveDefaultMattermostAccountId(cfg)).toBe("default"); }); }); + +describe("resolveMattermostReplyToMode", () => { + it("uses the configured mode for channel and group messages", () => { + const cfg: OpenClawConfig = { + channels: { + mattermost: { + replyToMode: "all", + }, + }, + }; + + const account = resolveMattermostAccount({ cfg, accountId: "default" }); + expect(resolveMattermostReplyToMode(account, "channel")).toBe("all"); + expect(resolveMattermostReplyToMode(account, "group")).toBe("all"); + }); + + it("keeps direct messages off even when replyToMode is enabled", () => { + const cfg: OpenClawConfig = { + channels: { + 
mattermost: { + replyToMode: "all", + }, + }, + }; + + const account = resolveMattermostAccount({ cfg, accountId: "default" }); + expect(resolveMattermostReplyToMode(account, "direct")).toBe("off"); + }); + + it("defaults to off when replyToMode is unset", () => { + const account = resolveMattermostAccount({ cfg: {}, accountId: "default" }); + expect(resolveMattermostReplyToMode(account, "channel")).toBe("off"); + }); +}); diff --git a/extensions/mattermost/src/mattermost/accounts.ts b/extensions/mattermost/src/mattermost/accounts.ts index 1de9a09bca8..ae154ba8923 100644 --- a/extensions/mattermost/src/mattermost/accounts.ts +++ b/extensions/mattermost/src/mattermost/accounts.ts @@ -1,7 +1,12 @@ import { DEFAULT_ACCOUNT_ID, normalizeAccountId } from "openclaw/plugin-sdk/account-id"; import { createAccountListHelpers, type OpenClawConfig } from "openclaw/plugin-sdk/mattermost"; import { normalizeResolvedSecretInputString, normalizeSecretInputString } from "../secret-input.js"; -import type { MattermostAccountConfig, MattermostChatMode } from "../types.js"; +import type { + MattermostAccountConfig, + MattermostChatMode, + MattermostChatTypeKey, + MattermostReplyToMode, +} from "../types.js"; import { normalizeMattermostBaseUrl } from "./client.js"; export type MattermostTokenSource = "env" | "config" | "none"; @@ -130,6 +135,20 @@ export function resolveMattermostAccount(params: { }; } +/** + * Resolve the effective replyToMode for a given chat type. + * Mattermost auto-threading only applies to channel and group messages. + */ +export function resolveMattermostReplyToMode( + account: ResolvedMattermostAccount, + kind: MattermostChatTypeKey, +): MattermostReplyToMode { + if (kind === "direct") { + return "off"; + } + return account.config.replyToMode ?? 
"off"; +} + export function listEnabledMattermostAccounts(cfg: OpenClawConfig): ResolvedMattermostAccount[] { return listMattermostAccountIds(cfg) .map((accountId) => resolveMattermostAccount({ cfg, accountId })) diff --git a/extensions/mattermost/src/mattermost/client.test.ts b/extensions/mattermost/src/mattermost/client.test.ts index 3d325dda527..7d49ad3c573 100644 --- a/extensions/mattermost/src/mattermost/client.test.ts +++ b/extensions/mattermost/src/mattermost/client.test.ts @@ -27,6 +27,28 @@ function createMockFetch(response?: { status?: number; body?: unknown; contentTy return { mockFetch: mockFetch as unknown as typeof fetch, calls }; } +function createTestClient(response?: { status?: number; body?: unknown; contentType?: string }) { + const { mockFetch, calls } = createMockFetch(response); + const client = createMattermostClient({ + baseUrl: "http://localhost:8065", + botToken: "tok", + fetchImpl: mockFetch, + }); + return { client, calls }; +} + +async function updatePostAndCapture( + update: Parameters[2], + response?: { status?: number; body?: unknown; contentType?: string }, +) { + const { client, calls } = createTestClient(response ?? 
{ body: { id: "post1" } }); + await updateMattermostPost(client, "post1", update); + return { + calls, + body: JSON.parse(calls[0].init?.body as string) as Record, + }; +} + // ── normalizeMattermostBaseUrl ──────────────────────────────────────── describe("normalizeMattermostBaseUrl", () => { @@ -229,68 +251,38 @@ describe("createMattermostPost", () => { describe("updateMattermostPost", () => { it("sends PUT to /posts/{id}", async () => { - const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } }); - const client = createMattermostClient({ - baseUrl: "http://localhost:8065", - botToken: "tok", - fetchImpl: mockFetch, - }); - - await updateMattermostPost(client, "post1", { message: "Updated" }); + const { calls } = await updatePostAndCapture({ message: "Updated" }); expect(calls[0].url).toContain("/posts/post1"); expect(calls[0].init?.method).toBe("PUT"); }); it("includes post id in the body", async () => { - const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } }); - const client = createMattermostClient({ - baseUrl: "http://localhost:8065", - botToken: "tok", - fetchImpl: mockFetch, - }); - - await updateMattermostPost(client, "post1", { message: "Updated" }); - - const body = JSON.parse(calls[0].init?.body as string); + const { body } = await updatePostAndCapture({ message: "Updated" }); expect(body.id).toBe("post1"); expect(body.message).toBe("Updated"); }); it("includes props for button completion updates", async () => { - const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } }); - const client = createMattermostClient({ - baseUrl: "http://localhost:8065", - botToken: "tok", - fetchImpl: mockFetch, - }); - - await updateMattermostPost(client, "post1", { + const { body } = await updatePostAndCapture({ message: "Original message", props: { attachments: [{ text: "✓ **do_now** selected by @tony" }], }, }); - - const body = JSON.parse(calls[0].init?.body as string); expect(body.message).toBe("Original message"); - 
expect(body.props.attachments[0].text).toContain("✓"); - expect(body.props.attachments[0].text).toContain("do_now"); + expect(body.props).toMatchObject({ + attachments: [{ text: expect.stringContaining("✓") }], + }); + expect(body.props).toMatchObject({ + attachments: [{ text: expect.stringContaining("do_now") }], + }); }); it("omits message when not provided", async () => { - const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } }); - const client = createMattermostClient({ - baseUrl: "http://localhost:8065", - botToken: "tok", - fetchImpl: mockFetch, - }); - - await updateMattermostPost(client, "post1", { + const { body } = await updatePostAndCapture({ props: { attachments: [] }, }); - - const body = JSON.parse(calls[0].init?.body as string); expect(body.id).toBe("post1"); expect(body.message).toBeUndefined(); expect(body.props).toEqual({ attachments: [] }); diff --git a/extensions/mattermost/src/mattermost/interactions.test.ts b/extensions/mattermost/src/mattermost/interactions.test.ts index a6379a52664..62c7bdb757f 100644 --- a/extensions/mattermost/src/mattermost/interactions.test.ts +++ b/extensions/mattermost/src/mattermost/interactions.test.ts @@ -2,7 +2,7 @@ import { type IncomingMessage, type ServerResponse } from "node:http"; import { describe, expect, it, beforeEach, afterEach, vi } from "vitest"; import { setMattermostRuntime } from "../runtime.js"; import { resolveMattermostAccount } from "./accounts.js"; -import type { MattermostClient } from "./client.js"; +import type { MattermostClient, MattermostPost } from "./client.js"; import { buildButtonAttachments, computeInteractionCallbackUrl, @@ -496,6 +496,104 @@ describe("createMattermostInteractionHandler", () => { return res as unknown as ServerResponse & { headers: Record; body: string }; } + function createActionContext(actionId = "approve", channelId = "chan-1") { + const context = { action_id: actionId, __openclaw_channel_id: channelId }; + return { context, token: 
generateInteractionToken(context, "acct") }; + } + + function createInteractionBody(params: { + context: Record; + token: string; + channelId?: string; + postId?: string; + userId?: string; + userName?: string; + }) { + return { + user_id: params.userId ?? "user-1", + ...(params.userName ? { user_name: params.userName } : {}), + channel_id: params.channelId ?? "chan-1", + post_id: params.postId ?? "post-1", + context: { ...params.context, _token: params.token }, + }; + } + + async function runHandler( + handler: ReturnType, + params: { + body: unknown; + remoteAddress?: string; + headers?: Record; + }, + ) { + const req = createReq({ + remoteAddress: params.remoteAddress, + headers: params.headers, + body: params.body, + }); + const res = createRes(); + await handler(req, res); + return res; + } + + function expectForbiddenResponse( + res: ServerResponse & { body: string }, + expectedMessage: string, + ) { + expect(res.statusCode).toBe(403); + expect(res.body).toContain(expectedMessage); + } + + function expectSuccessfulApprovalUpdate( + res: ServerResponse & { body: string }, + requestLog?: Array<{ path: string; method?: string }>, + ) { + expect(res.statusCode).toBe(200); + expect(res.body).toBe("{}"); + if (requestLog) { + expect(requestLog).toEqual([ + { path: "/posts/post-1", method: undefined }, + { path: "/posts/post-1", method: "PUT" }, + ]); + } + } + + function createActionPost(params?: { + actionId?: string; + actionName?: string; + channelId?: string; + rootId?: string; + }): MattermostPost { + return { + id: "post-1", + channel_id: params?.channelId ?? "chan-1", + ...(params?.rootId ? { root_id: params.rootId } : {}), + message: "Choose", + props: { + attachments: [ + { + actions: [ + { + id: params?.actionId ?? "approve", + name: params?.actionName ?? 
"Approve", + }, + ], + }, + ], + }, + }; + } + + function createUnusedInteractionHandler() { + return createMattermostInteractionHandler({ + client: { + request: async () => ({ message: "unused" }), + } as unknown as MattermostClient, + botUserId: "bot", + accountId: "acct", + }); + } + async function runApproveInteraction(params?: { actionName?: string; allowedSourceIps?: string[]; @@ -503,8 +601,7 @@ describe("createMattermostInteractionHandler", () => { remoteAddress?: string; headers?: Record; }) { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const requestLog: Array<{ path: string; method?: string }> = []; const handler = createMattermostInteractionHandler({ client: { @@ -513,15 +610,7 @@ describe("createMattermostInteractionHandler", () => { if (init?.method === "PUT") { return { id: "post-1" }; } - return { - channel_id: "chan-1", - message: "Choose", - props: { - attachments: [ - { actions: [{ id: "approve", name: params?.actionName ?? 
"Approve" }] }, - ], - }, - }; + return createActionPost({ actionName: params?.actionName }); }, } as unknown as MattermostClient, botUserId: "bot", @@ -530,50 +619,27 @@ describe("createMattermostInteractionHandler", () => { trustedProxies: params?.trustedProxies, }); - const req = createReq({ + const res = await runHandler(handler, { remoteAddress: params?.remoteAddress, headers: params?.headers, - body: { - user_id: "user-1", - user_name: "alice", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + body: createInteractionBody({ context, token, userName: "alice" }), }); - const res = createRes(); - await handler(req, res); return { res, requestLog }; } async function runInvalidActionRequest(actionId: string) { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const handler = createMattermostInteractionHandler({ client: { - request: async () => ({ - channel_id: "chan-1", - message: "Choose", - props: { - attachments: [{ actions: [{ id: actionId, name: actionId }] }], - }, - }), + request: async () => createActionPost({ actionId, actionName: actionId }), } as unknown as MattermostClient, botUserId: "bot", accountId: "acct", }); - const req = createReq({ - body: { - user_id: "user-1", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + return await runHandler(handler, { + body: createInteractionBody({ context, token }), }); - const res = createRes(); - await handler(req, res); - return res; } it("accepts callback requests from an allowlisted source IP", async () => { @@ -582,12 +648,7 @@ describe("createMattermostInteractionHandler", () => { remoteAddress: "198.51.100.8", }); - expect(res.statusCode).toBe(200); - expect(res.body).toBe("{}"); - expect(requestLog).toEqual([ - { path: "/posts/post-1", method: undefined }, - { path: "/posts/post-1", 
method: "PUT" }, - ]); + expectSuccessfulApprovalUpdate(res, requestLog); }); it("accepts forwarded Mattermost source IPs from a trusted proxy", async () => { @@ -603,8 +664,7 @@ describe("createMattermostInteractionHandler", () => { }); it("rejects callback requests from non-allowlisted source IPs", async () => { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const handler = createMattermostInteractionHandler({ client: { request: async () => { @@ -616,33 +676,17 @@ describe("createMattermostInteractionHandler", () => { allowedSourceIps: ["127.0.0.1"], }); - const req = createReq({ + const res = await runHandler(handler, { remoteAddress: "198.51.100.8", - body: { - user_id: "user-1", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + body: createInteractionBody({ context, token }), }); - const res = createRes(); - - await handler(req, res); - - expect(res.statusCode).toBe(403); - expect(res.body).toContain("Forbidden origin"); + expectForbiddenResponse(res, "Forbidden origin"); }); it("rejects requests with an invalid interaction token", async () => { - const handler = createMattermostInteractionHandler({ - client: { - request: async () => ({ message: "unused" }), - } as unknown as MattermostClient, - botUserId: "bot", - accountId: "acct", - }); + const handler = createUnusedInteractionHandler(); - const req = createReq({ + const res = await runHandler(handler, { body: { user_id: "user-1", channel_id: "chan-1", @@ -650,72 +694,33 @@ describe("createMattermostInteractionHandler", () => { context: { action_id: "approve", _token: "deadbeef" }, }, }); - const res = createRes(); - - await handler(req, res); - - expect(res.statusCode).toBe(403); - expect(res.body).toContain("Invalid token"); + expectForbiddenResponse(res, "Invalid token"); }); it("rejects requests when the signed channel 
does not match the callback payload", async () => { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); - const handler = createMattermostInteractionHandler({ - client: { - request: async () => ({ message: "unused" }), - } as unknown as MattermostClient, - botUserId: "bot", - accountId: "acct", + const { context, token } = createActionContext(); + const handler = createUnusedInteractionHandler(); + + const res = await runHandler(handler, { + body: createInteractionBody({ context, token, channelId: "chan-2" }), }); - - const req = createReq({ - body: { - user_id: "user-1", - channel_id: "chan-2", - post_id: "post-1", - context: { ...context, _token: token }, - }, - }); - const res = createRes(); - - await handler(req, res); - - expect(res.statusCode).toBe(403); - expect(res.body).toContain("Channel mismatch"); + expectForbiddenResponse(res, "Channel mismatch"); }); it("rejects requests when the fetched post does not belong to the callback channel", async () => { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const handler = createMattermostInteractionHandler({ client: { - request: async () => ({ - channel_id: "chan-9", - message: "Choose", - props: { - attachments: [{ actions: [{ id: "approve", name: "Approve" }] }], - }, - }), + request: async () => createActionPost({ channelId: "chan-9" }), } as unknown as MattermostClient, botUserId: "bot", accountId: "acct", }); - const req = createReq({ - body: { - user_id: "user-1", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + const res = await runHandler(handler, { + body: createInteractionBody({ context, token }), }); - const res = createRes(); - - await handler(req, res); - - expect(res.statusCode).toBe(403); - expect(res.body).toContain("Post/channel 
mismatch"); + expectForbiddenResponse(res, "Post/channel mismatch"); }); it("rejects requests when the action is not present on the fetched post", async () => { @@ -730,17 +735,56 @@ describe("createMattermostInteractionHandler", () => { actionName: "approve", }); + expectSuccessfulApprovalUpdate(res, requestLog); + }); + + it("forwards fetched post threading metadata to session and button callbacks", async () => { + const enqueueSystemEvent = vi.fn(); + setMattermostRuntime({ + system: { + enqueueSystemEvent, + }, + } as unknown as Parameters[0]); + const { context, token } = createActionContext(); + const resolveSessionKey = vi.fn().mockResolvedValue("session:thread:root-9"); + const dispatchButtonClick = vi.fn(); + const fetchedPost = createActionPost({ rootId: "root-9" }); + const handler = createMattermostInteractionHandler({ + client: { + request: async (_path: string, init?: { method?: string }) => + init?.method === "PUT" ? { id: "post-1" } : fetchedPost, + } as unknown as MattermostClient, + botUserId: "bot", + accountId: "acct", + resolveSessionKey, + dispatchButtonClick, + }); + + const res = await runHandler(handler, { + body: createInteractionBody({ context, token, userName: "alice" }), + }); expect(res.statusCode).toBe(200); - expect(res.body).toBe("{}"); - expect(requestLog).toEqual([ - { path: "/posts/post-1", method: undefined }, - { path: "/posts/post-1", method: "PUT" }, - ]); + expect(resolveSessionKey).toHaveBeenCalledWith({ + channelId: "chan-1", + userId: "user-1", + post: fetchedPost, + }); + expect(enqueueSystemEvent).toHaveBeenCalledWith( + expect.stringContaining('Mattermost button click: action="approve"'), + expect.objectContaining({ sessionKey: "session:thread:root-9" }), + ); + expect(dispatchButtonClick).toHaveBeenCalledWith( + expect.objectContaining({ + channelId: "chan-1", + userId: "user-1", + postId: "post-1", + post: fetchedPost, + }), + ); }); it("lets a custom interaction handler short-circuit generic completion updates", 
async () => { - const context = { action_id: "mdlprov", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext("mdlprov"); const requestLog: Array<{ path: string; method?: string }> = []; const handleInteraction = vi.fn().mockResolvedValue({ ephemeral_text: "Only the original requester can use this picker.", @@ -750,13 +794,10 @@ describe("createMattermostInteractionHandler", () => { client: { request: async (path: string, init?: { method?: string }) => { requestLog.push({ path, method: init?.method }); - return { - channel_id: "chan-1", - message: "Choose", - props: { - attachments: [{ actions: [{ id: "mdlprov", name: "Browse providers" }] }], - }, - }; + return createActionPost({ + actionId: "mdlprov", + actionName: "Browse providers", + }); }, } as unknown as MattermostClient, botUserId: "bot", @@ -765,18 +806,14 @@ describe("createMattermostInteractionHandler", () => { dispatchButtonClick, }); - const req = createReq({ - body: { - user_id: "user-2", - user_name: "alice", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + const res = await runHandler(handler, { + body: createInteractionBody({ + context, + token, + userId: "user-2", + userName: "alice", + }), }); - const res = createRes(); - - await handler(req, res); expect(res.statusCode).toBe(200); expect(res.body).toBe( @@ -790,6 +827,7 @@ describe("createMattermostInteractionHandler", () => { actionId: "mdlprov", actionName: "Browse providers", originalMessage: "Choose", + post: expect.objectContaining({ id: "post-1" }), userName: "alice", }), ); diff --git a/extensions/mattermost/src/mattermost/interactions.ts b/extensions/mattermost/src/mattermost/interactions.ts index 9e888d658cb..f99d0b5d3ac 100644 --- a/extensions/mattermost/src/mattermost/interactions.ts +++ b/extensions/mattermost/src/mattermost/interactions.ts @@ -6,7 +6,7 @@ import { type OpenClawConfig, } from 
"openclaw/plugin-sdk/mattermost"; import { getMattermostRuntime } from "../runtime.js"; -import { updateMattermostPost, type MattermostClient } from "./client.js"; +import { updateMattermostPost, type MattermostClient, type MattermostPost } from "./client.js"; const INTERACTION_MAX_BODY_BYTES = 64 * 1024; const INTERACTION_BODY_TIMEOUT_MS = 10_000; @@ -390,7 +390,11 @@ export function createMattermostInteractionHandler(params: { allowedSourceIps?: string[]; trustedProxies?: string[]; allowRealIpFallback?: boolean; - resolveSessionKey?: (channelId: string, userId: string) => Promise; + resolveSessionKey?: (params: { + channelId: string; + userId: string; + post: MattermostPost; + }) => Promise; handleInteraction?: (opts: { payload: MattermostInteractionPayload; userName: string; @@ -398,6 +402,7 @@ export function createMattermostInteractionHandler(params: { actionName: string; originalMessage: string; context: Record; + post: MattermostPost; }) => Promise; dispatchButtonClick?: (opts: { channelId: string; @@ -406,6 +411,7 @@ export function createMattermostInteractionHandler(params: { actionId: string; actionName: string; postId: string; + post: MattermostPost; }) => Promise; log?: (message: string) => void; }): (req: IncomingMessage, res: ServerResponse) => Promise { @@ -503,13 +509,10 @@ export function createMattermostInteractionHandler(params: { const userName = payload.user_name ?? 
payload.user_id; let originalMessage = ""; + let originalPost: MattermostPost | null = null; let clickedButtonName: string | null = null; try { - const originalPost = await client.request<{ - channel_id?: string | null; - message?: string; - props?: Record; - }>(`/posts/${payload.post_id}`); + originalPost = await client.request(`/posts/${payload.post_id}`); const postChannelId = originalPost.channel_id?.trim(); if (!postChannelId || postChannelId !== payload.channel_id) { log?.( @@ -550,6 +553,14 @@ export function createMattermostInteractionHandler(params: { return; } + if (!originalPost) { + log?.(`mattermost interaction: missing fetched post ${payload.post_id}`); + res.statusCode = 500; + res.setHeader("Content-Type", "application/json"); + res.end(JSON.stringify({ error: "Failed to load interaction post" })); + return; + } + log?.( `mattermost interaction: action=${actionId} user=${payload.user_name ?? payload.user_id} ` + `post=${payload.post_id} channel=${payload.channel_id}`, @@ -564,6 +575,7 @@ export function createMattermostInteractionHandler(params: { actionName: clickedButtonName, originalMessage, context: contextWithoutToken, + post: originalPost, }); if (response !== null) { res.statusCode = 200; @@ -590,7 +602,11 @@ export function createMattermostInteractionHandler(params: { `in channel ${payload.channel_id}`; const sessionKey = params.resolveSessionKey - ? await params.resolveSessionKey(payload.channel_id, payload.user_id) + ? 
await params.resolveSessionKey({ + channelId: payload.channel_id, + userId: payload.user_id, + post: originalPost, + }) : `agent:main:mattermost:${accountId}:${payload.channel_id}`; core.system.enqueueSystemEvent(eventLabel, { @@ -632,6 +648,7 @@ export function createMattermostInteractionHandler(params: { actionId, actionName: clickedButtonName, postId: payload.post_id, + post: originalPost, }); } catch (err) { log?.(`mattermost interaction: dispatchButtonClick failed: ${String(err)}`); diff --git a/extensions/mattermost/src/mattermost/model-picker.test.ts b/extensions/mattermost/src/mattermost/model-picker.test.ts index b448339523e..cebafc4a1bc 100644 --- a/extensions/mattermost/src/mattermost/model-picker.test.ts +++ b/extensions/mattermost/src/mattermost/model-picker.test.ts @@ -60,6 +60,15 @@ describe("Mattermost model picker", () => { expect(view.buttons[0]?.[0]?.text).toBe("Browse providers"); }); + it("trims accidental model spacing in Mattermost current-model text", () => { + const view = renderMattermostModelSummaryView({ + ownerUserId: "user-1", + currentModel: " OpenAI/ gpt-5 ", + }); + + expect(view.text).toContain("Current: openai/gpt-5"); + }); + it("renders providers and models with Telegram-style navigation", () => { const providersView = renderMattermostProviderPickerView({ ownerUserId: "user-1", diff --git a/extensions/mattermost/src/mattermost/model-picker.ts b/extensions/mattermost/src/mattermost/model-picker.ts index 42462180901..1547041a74a 100644 --- a/extensions/mattermost/src/mattermost/model-picker.ts +++ b/extensions/mattermost/src/mattermost/model-picker.ts @@ -36,15 +36,13 @@ export type MattermostModelPickerRenderedView = { function splitModelRef(modelRef?: string | null): { provider: string; model: string } | null { const trimmed = modelRef?.trim(); - if (!trimmed) { + const match = trimmed?.match(/^([^/]+)\/(.+)$/u); + if (!match) { return null; } - const slashIndex = trimmed.indexOf("/"); - if (slashIndex <= 0 || slashIndex >= 
trimmed.length - 1) { - return null; - } - const provider = normalizeProviderId(trimmed.slice(0, slashIndex)); - const model = trimmed.slice(slashIndex + 1).trim(); + const provider = normalizeProviderId(match[1]); + // Mattermost copy should normalize accidental whitespace around the model. + const model = match[2].trim(); if (!provider || !model) { return null; } diff --git a/extensions/mattermost/src/mattermost/monitor-helpers.test.ts b/extensions/mattermost/src/mattermost/monitor-helpers.test.ts new file mode 100644 index 00000000000..191d0a6c238 --- /dev/null +++ b/extensions/mattermost/src/mattermost/monitor-helpers.test.ts @@ -0,0 +1,82 @@ +import { describe, expect, it } from "vitest"; +import { normalizeMention } from "./monitor-helpers.js"; + +describe("normalizeMention", () => { + it("returns trimmed text when no mention provided", () => { + expect(normalizeMention(" hello world ", undefined)).toBe("hello world"); + }); + + it("strips bot mention from text", () => { + expect(normalizeMention("@echobot hello", "echobot")).toBe("hello"); + }); + + it("strips mention case-insensitively", () => { + expect(normalizeMention("@EchoBot hello", "echobot")).toBe("hello"); + }); + + it("preserves newlines in multi-line messages", () => { + const input = "@echobot\nline1\nline2\nline3"; + const result = normalizeMention(input, "echobot"); + expect(result).toBe("line1\nline2\nline3"); + }); + + it("preserves Markdown headings", () => { + const input = "@echobot\n# Heading\n\nSome text"; + const result = normalizeMention(input, "echobot"); + expect(result).toContain("# Heading"); + expect(result).toContain("\n"); + }); + + it("preserves Markdown blockquotes", () => { + const input = "@echobot\n> quoted line\n> second line"; + const result = normalizeMention(input, "echobot"); + expect(result).toContain("> quoted line"); + expect(result).toContain("> second line"); + }); + + it("preserves Markdown lists", () => { + const input = "@echobot\n- item A\n- item B\n - sub 
B1"; + const result = normalizeMention(input, "echobot"); + expect(result).toContain("- item A"); + expect(result).toContain("- item B"); + }); + + it("preserves task lists", () => { + const input = "@echobot\n- [ ] todo\n- [x] done"; + const result = normalizeMention(input, "echobot"); + expect(result).toContain("- [ ] todo"); + expect(result).toContain("- [x] done"); + }); + + it("handles mention in middle of text", () => { + const input = "hey @echobot check this\nout"; + const result = normalizeMention(input, "echobot"); + expect(result).toBe("hey check this\nout"); + }); + + it("preserves leading indentation for nested lists", () => { + const input = "@echobot\n- item\n - nested\n - deep"; + const result = normalizeMention(input, "echobot"); + expect(result).toContain(" - nested"); + expect(result).toContain(" - deep"); + }); + + it("preserves first-line indentation for nested list items", () => { + const input = "@echobot\n - nested\n - deep"; + const result = normalizeMention(input, "echobot"); + expect(result).toBe(" - nested\n - deep"); + }); + + it("preserves indented code blocks", () => { + const input = "@echobot\ntext\n code line 1\n code line 2"; + const result = normalizeMention(input, "echobot"); + expect(result).toContain(" code line 1"); + expect(result).toContain(" code line 2"); + }); + + it("preserves first-line indentation for indented code blocks", () => { + const input = "@echobot\n code line 1\n code line 2"; + const result = normalizeMention(input, "echobot"); + expect(result).toBe(" code line 1\n code line 2"); + }); +}); diff --git a/extensions/mattermost/src/mattermost/monitor-helpers.ts b/extensions/mattermost/src/mattermost/monitor-helpers.ts index 1724f577485..219c0562638 100644 --- a/extensions/mattermost/src/mattermost/monitor-helpers.ts +++ b/extensions/mattermost/src/mattermost/monitor-helpers.ts @@ -41,12 +41,12 @@ function normalizeAgentId(value: string | undefined | null): string { type AgentEntry = 
NonNullable["list"]>[number]; +function isAgentEntry(entry: unknown): entry is AgentEntry { + return Boolean(entry && typeof entry === "object"); +} + function listAgents(cfg: OpenClawConfig): AgentEntry[] { - const list = cfg.agents?.list; - if (!Array.isArray(list)) { - return []; - } - return list.filter((entry): entry is AgentEntry => Boolean(entry && typeof entry === "object")); + return Array.isArray(cfg.agents?.list) ? cfg.agents.list.filter(isAgentEntry) : []; } function resolveAgentEntry(cfg: OpenClawConfig, agentId: string): AgentEntry | undefined { @@ -70,3 +70,38 @@ export function resolveThreadSessionKeys(params: { normalizeThreadId: (threadId) => threadId, }); } + +/** + * Strip bot mention from message text while preserving newlines and + * block-level Markdown formatting (headings, lists, blockquotes). + */ +export function normalizeMention(text: string, mention: string | undefined): string { + if (!mention) { + return text.trim(); + } + const escaped = mention.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); + const hasMentionRe = new RegExp(`@${escaped}\\b`, "i"); + const leadingMentionRe = new RegExp(`^([\\t ]*)@${escaped}\\b[\\t ]*`, "i"); + const trailingMentionRe = new RegExp(`[\\t ]*@${escaped}\\b[\\t ]*$`, "i"); + const normalizedLines = text.split("\n").map((line) => { + const hadMention = hasMentionRe.test(line); + const normalizedLine = line + .replace(leadingMentionRe, "$1") + .replace(trailingMentionRe, "") + .replace(new RegExp(`@${escaped}\\b`, "gi"), "") + .replace(/(\S)[ \t]{2,}/g, "$1 "); + return { + text: normalizedLine, + mentionOnlyBlank: hadMention && normalizedLine.trim() === "", + }; + }); + + while (normalizedLines[0]?.mentionOnlyBlank) { + normalizedLines.shift(); + } + while (normalizedLines.at(-1)?.text.trim() === "") { + normalizedLines.pop(); + } + + return normalizedLines.map((line) => line.text).join("\n"); +} diff --git a/extensions/mattermost/src/mattermost/monitor.authz.test.ts 
b/extensions/mattermost/src/mattermost/monitor.authz.test.ts index 92fd0a3c3f4..68919da7908 100644 --- a/extensions/mattermost/src/mattermost/monitor.authz.test.ts +++ b/extensions/mattermost/src/mattermost/monitor.authz.test.ts @@ -16,6 +16,35 @@ const accountFixture: ResolvedMattermostAccount = { config: {}, }; +function authorizeGroupCommand(senderId: string) { + return authorizeMattermostCommandInvocation({ + account: { + ...accountFixture, + config: { + groupPolicy: "allowlist", + allowFrom: ["trusted-user"], + }, + }, + cfg: { + commands: { + useAccessGroups: true, + }, + }, + senderId, + senderName: senderId, + channelId: "chan-1", + channelInfo: { + id: "chan-1", + type: "O", + name: "general", + display_name: "General", + }, + storeAllowFrom: [], + allowTextCommands: true, + hasControlCommand: true, + }); +} + describe("mattermost monitor authz", () => { it("keeps DM allowlist merged with pairing-store entries", () => { const resolved = resolveMattermostEffectiveAllowFromLists({ @@ -72,32 +101,7 @@ describe("mattermost monitor authz", () => { }); it("denies group control commands when the sender is outside the allowlist", () => { - const decision = authorizeMattermostCommandInvocation({ - account: { - ...accountFixture, - config: { - groupPolicy: "allowlist", - allowFrom: ["trusted-user"], - }, - }, - cfg: { - commands: { - useAccessGroups: true, - }, - }, - senderId: "attacker", - senderName: "attacker", - channelId: "chan-1", - channelInfo: { - id: "chan-1", - type: "O", - name: "general", - display_name: "General", - }, - storeAllowFrom: [], - allowTextCommands: true, - hasControlCommand: true, - }); + const decision = authorizeGroupCommand("attacker"); expect(decision).toMatchObject({ ok: false, @@ -107,32 +111,7 @@ describe("mattermost monitor authz", () => { }); it("authorizes group control commands for allowlisted senders", () => { - const decision = authorizeMattermostCommandInvocation({ - account: { - ...accountFixture, - config: { - groupPolicy: 
"allowlist", - allowFrom: ["trusted-user"], - }, - }, - cfg: { - commands: { - useAccessGroups: true, - }, - }, - senderId: "trusted-user", - senderName: "trusted-user", - channelId: "chan-1", - channelInfo: { - id: "chan-1", - type: "O", - name: "general", - display_name: "General", - }, - storeAllowFrom: [], - allowTextCommands: true, - hasControlCommand: true, - }); + const decision = authorizeGroupCommand("trusted-user"); expect(decision).toMatchObject({ ok: true, diff --git a/extensions/mattermost/src/mattermost/monitor.test.ts b/extensions/mattermost/src/mattermost/monitor.test.ts index 1bd871714c4..ab993dbb2af 100644 --- a/extensions/mattermost/src/mattermost/monitor.test.ts +++ b/extensions/mattermost/src/mattermost/monitor.test.ts @@ -3,7 +3,9 @@ import { describe, expect, it, vi } from "vitest"; import { resolveMattermostAccount } from "./accounts.js"; import { evaluateMattermostMentionGate, + resolveMattermostEffectiveReplyToId, resolveMattermostReplyRootId, + resolveMattermostThreadSessionContext, type MattermostMentionGateInput, type MattermostRequireMentionResolverInput, } from "./monitor.js"; @@ -109,6 +111,29 @@ describe("mattermost mention gating", () => { }); }); +describe("resolveMattermostReplyRootId with block streaming payloads", () => { + it("uses threadRootId for block-streamed payloads with replyToId", () => { + // When block streaming sends a payload with replyToId from the threading + // mode, the deliver callback should still use the existing threadRootId. + expect( + resolveMattermostReplyRootId({ + threadRootId: "thread-root-1", + replyToId: "streamed-reply-id", + }), + ).toBe("thread-root-1"); + }); + + it("falls back to payload replyToId when no threadRootId in block streaming", () => { + // Top-level channel message: no threadRootId, payload carries the + // inbound post id as replyToId from the "all" threading mode. 
+ expect( + resolveMattermostReplyRootId({ + replyToId: "inbound-post-for-threading", + }), + ).toBe("inbound-post-for-threading"); + }); +}); + describe("resolveMattermostReplyRootId", () => { it("uses replyToId for top-level replies", () => { expect( @@ -131,3 +156,94 @@ describe("resolveMattermostReplyRootId", () => { expect(resolveMattermostReplyRootId({})).toBeUndefined(); }); }); + +describe("resolveMattermostEffectiveReplyToId", () => { + it("keeps an existing thread root", () => { + expect( + resolveMattermostEffectiveReplyToId({ + kind: "channel", + postId: "post-123", + replyToMode: "all", + threadRootId: "thread-root-456", + }), + ).toBe("thread-root-456"); + }); + + it("starts a thread for top-level channel messages when replyToMode is all", () => { + expect( + resolveMattermostEffectiveReplyToId({ + kind: "channel", + postId: "post-123", + replyToMode: "all", + }), + ).toBe("post-123"); + }); + + it("starts a thread for top-level group messages when replyToMode is first", () => { + expect( + resolveMattermostEffectiveReplyToId({ + kind: "group", + postId: "post-123", + replyToMode: "first", + }), + ).toBe("post-123"); + }); + + it("keeps direct messages non-threaded", () => { + expect( + resolveMattermostEffectiveReplyToId({ + kind: "direct", + postId: "post-123", + replyToMode: "all", + }), + ).toBeUndefined(); + }); +}); + +describe("resolveMattermostThreadSessionContext", () => { + it("forks channel sessions by top-level post when replyToMode is all", () => { + expect( + resolveMattermostThreadSessionContext({ + baseSessionKey: "agent:main:mattermost:default:chan-1", + kind: "channel", + postId: "post-123", + replyToMode: "all", + }), + ).toEqual({ + effectiveReplyToId: "post-123", + sessionKey: "agent:main:mattermost:default:chan-1:thread:post-123", + parentSessionKey: "agent:main:mattermost:default:chan-1", + }); + }); + + it("keeps existing thread roots for threaded follow-ups", () => { + expect( + resolveMattermostThreadSessionContext({ + 
baseSessionKey: "agent:main:mattermost:default:chan-1", + kind: "group", + postId: "post-123", + replyToMode: "first", + threadRootId: "root-456", + }), + ).toEqual({ + effectiveReplyToId: "root-456", + sessionKey: "agent:main:mattermost:default:chan-1:thread:root-456", + parentSessionKey: "agent:main:mattermost:default:chan-1", + }); + }); + + it("keeps direct-message sessions linear", () => { + expect( + resolveMattermostThreadSessionContext({ + baseSessionKey: "agent:main:mattermost:default:user-1", + kind: "direct", + postId: "post-123", + replyToMode: "all", + }), + ).toEqual({ + effectiveReplyToId: undefined, + sessionKey: "agent:main:mattermost:default:user-1", + parentSessionKey: undefined, + }); + }); +}); diff --git a/extensions/mattermost/src/mattermost/monitor.ts b/extensions/mattermost/src/mattermost/monitor.ts index 93d4ce1cfcb..16e3bd6434a 100644 --- a/extensions/mattermost/src/mattermost/monitor.ts +++ b/extensions/mattermost/src/mattermost/monitor.ts @@ -32,7 +32,7 @@ import { type HistoryEntry, } from "openclaw/plugin-sdk/mattermost"; import { getMattermostRuntime } from "../runtime.js"; -import { resolveMattermostAccount } from "./accounts.js"; +import { resolveMattermostAccount, resolveMattermostReplyToMode } from "./accounts.js"; import { createMattermostClient, fetchMattermostChannel, @@ -70,6 +70,7 @@ import { import { createDedupeCache, formatInboundFromLabel, + normalizeMention, resolveThreadSessionKeys, } from "./monitor-helpers.js"; import { resolveOncharPrefixes, stripOncharPrefix } from "./monitor-onchar.js"; @@ -79,6 +80,7 @@ import { type MattermostWebSocketFactory, } from "./monitor-websocket.js"; import { runWithReconnect } from "./reconnect.js"; +import { deliverMattermostReplyPayload } from "./reply-delivery.js"; import { sendMessageMattermost } from "./send.js"; import { DEFAULT_COMMAND_SPECS, @@ -143,15 +145,6 @@ function resolveRuntime(opts: MonitorMattermostOpts): RuntimeEnv { ); } -function normalizeMention(text: string, 
mention: string | undefined): string { - if (!mention) { - return text.trim(); - } - const escaped = mention.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); - const re = new RegExp(`@${escaped}\\b`, "gi"); - return text.replace(re, " ").replace(/\s+/g, " ").trim(); -} - function isSystemPost(post: MattermostPost): boolean { const type = post.type?.trim(); return Boolean(type); @@ -282,6 +275,51 @@ export function resolveMattermostReplyRootId(params: { } return params.replyToId?.trim() || undefined; } + +export function resolveMattermostEffectiveReplyToId(params: { + kind: ChatType; + postId?: string | null; + replyToMode: "off" | "first" | "all"; + threadRootId?: string | null; +}): string | undefined { + const threadRootId = params.threadRootId?.trim(); + if (threadRootId) { + return threadRootId; + } + if (params.kind === "direct") { + return undefined; + } + const postId = params.postId?.trim(); + if (!postId) { + return undefined; + } + return params.replyToMode === "all" || params.replyToMode === "first" ? postId : undefined; +} + +export function resolveMattermostThreadSessionContext(params: { + baseSessionKey: string; + kind: ChatType; + postId?: string | null; + replyToMode: "off" | "first" | "all"; + threadRootId?: string | null; +}): { effectiveReplyToId?: string; sessionKey: string; parentSessionKey?: string } { + const effectiveReplyToId = resolveMattermostEffectiveReplyToId({ + kind: params.kind, + postId: params.postId, + replyToMode: params.replyToMode, + threadRootId: params.threadRootId, + }); + const threadKeys = resolveThreadSessionKeys({ + baseSessionKey: params.baseSessionKey, + threadId: effectiveReplyToId, + parentSessionKey: effectiveReplyToId ? 
params.baseSessionKey : undefined, + }); + return { + effectiveReplyToId, + sessionKey: threadKeys.sessionKey, + parentSessionKey: threadKeys.parentSessionKey, + }; +} type MattermostMediaInfo = { path: string; contentType?: string; @@ -529,7 +567,7 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} trustedProxies: cfg.gateway?.trustedProxies, allowRealIpFallback: cfg.gateway?.allowRealIpFallback === true, handleInteraction: handleModelPickerInteraction, - resolveSessionKey: async (channelId: string, userId: string) => { + resolveSessionKey: async ({ channelId, userId, post }) => { const channelInfo = await resolveChannelInfo(channelId); const kind = mapMattermostChannelTypeToChatType(channelInfo?.type); const teamId = channelInfo?.team_id ?? undefined; @@ -543,7 +581,14 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} id: kind === "direct" ? userId : channelId, }, }); - return route.sessionKey; + const replyToMode = resolveMattermostReplyToMode(account, kind); + return resolveMattermostThreadSessionContext({ + baseSessionKey: route.sessionKey, + kind, + postId: post.id || undefined, + replyToMode, + threadRootId: post.root_id, + }).sessionKey; }, dispatchButtonClick: async (opts) => { const channelInfo = await resolveChannelInfo(opts.channelId); @@ -562,6 +607,14 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} id: kind === "direct" ? opts.userId : opts.channelId, }, }); + const replyToMode = resolveMattermostReplyToMode(account, kind); + const threadContext = resolveMattermostThreadSessionContext({ + baseSessionKey: route.sessionKey, + kind, + postId: opts.post.id || opts.postId, + replyToMode, + threadRootId: opts.post.root_id, + }); const to = kind === "direct" ? 
`user:${opts.userId}` : `channel:${opts.channelId}`; const bodyText = `[Button click: user @${opts.userName} selected "${opts.actionName}"]`; const ctxPayload = core.channel.reply.finalizeInboundContext({ @@ -576,7 +629,8 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} ? `mattermost:group:${opts.channelId}` : `mattermost:channel:${opts.channelId}`, To: to, - SessionKey: route.sessionKey, + SessionKey: threadContext.sessionKey, + ParentSessionKey: threadContext.parentSessionKey, AccountId: route.accountId, ChatType: chatType, ConversationLabel: `mattermost:${opts.userName}`, @@ -588,6 +642,8 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} Provider: "mattermost" as const, Surface: "mattermost" as const, MessageSid: `interaction:${opts.postId}:${opts.actionId}`, + ReplyToId: threadContext.effectiveReplyToId, + MessageThreadId: threadContext.effectiveReplyToId, WasMentioned: true, CommandAuthorized: false, OriginatingChannel: "mattermost" as const, @@ -612,7 +668,7 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} accountId: account.accountId, }); const typingCallbacks = createTypingCallbacks({ - start: () => sendTypingIndicator(opts.channelId), + start: () => sendTypingIndicator(opts.channelId, threadContext.effectiveReplyToId), onStartError: (err) => { logTypingFailure({ log: (message) => logger.debug?.(message), @@ -627,36 +683,21 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} ...prefixOptions, humanDelay: core.channel.reply.resolveHumanDelayConfig(cfg, route.agentId), deliver: async (payload: ReplyPayload) => { - const mediaUrls = payload.mediaUrls ?? (payload.mediaUrl ? [payload.mediaUrl] : []); - const text = core.channel.text.convertMarkdownTables(payload.text ?? 
"", tableMode); - if (mediaUrls.length === 0) { - const chunkMode = core.channel.text.resolveChunkMode( - cfg, - "mattermost", - account.accountId, - ); - const chunks = core.channel.text.chunkMarkdownTextWithMode( - text, - textLimit, - chunkMode, - ); - for (const chunk of chunks.length > 0 ? chunks : [text]) { - if (!chunk) continue; - await sendMessageMattermost(to, chunk, { - accountId: account.accountId, - }); - } - } else { - let first = true; - for (const mediaUrl of mediaUrls) { - const caption = first ? text : ""; - first = false; - await sendMessageMattermost(to, caption, { - accountId: account.accountId, - mediaUrl, - }); - } - } + await deliverMattermostReplyPayload({ + core, + cfg, + payload, + to, + accountId: account.accountId, + agentId: route.agentId, + replyToId: resolveMattermostReplyRootId({ + threadRootId: threadContext.effectiveReplyToId, + replyToId: payload.replyToId, + }), + textLimit, + tableMode, + sendMessage: sendMessageMattermost, + }); runtime.log?.(`delivered button-click reply to ${to}`); }, onError: (err, info) => { @@ -842,6 +883,8 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} commandText: string; commandAuthorized: boolean; route: ReturnType; + sessionKey: string; + parentSessionKey?: string; channelId: string; senderId: string; senderName: string; @@ -852,6 +895,7 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} roomLabel: string; teamId?: string; postId: string; + effectiveReplyToId?: string; deliverReplies?: boolean; }): Promise => { const to = params.kind === "direct" ? `user:${params.senderId}` : `channel:${params.channelId}`; @@ -871,7 +915,8 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} ? 
`mattermost:group:${params.channelId}` : `mattermost:channel:${params.channelId}`, To: to, - SessionKey: params.route.sessionKey, + SessionKey: params.sessionKey, + ParentSessionKey: params.parentSessionKey, AccountId: params.route.accountId, ChatType: params.chatType, ConversationLabel: fromLabel, @@ -884,6 +929,8 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} Provider: "mattermost" as const, Surface: "mattermost" as const, MessageSid: `interaction:${params.postId}:${Date.now()}`, + ReplyToId: params.effectiveReplyToId, + MessageThreadId: params.effectiveReplyToId, Timestamp: Date.now(), WasMentioned: true, CommandAuthorized: params.commandAuthorized, @@ -915,7 +962,7 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} const capturedTexts: string[] = []; const typingCallbacks = shouldDeliverReplies ? createTypingCallbacks({ - start: () => sendTypingIndicator(params.channelId), + start: () => sendTypingIndicator(params.channelId, params.effectiveReplyToId), onStartError: (err) => { logTypingFailure({ log: (message) => logger.debug?.(message), @@ -931,45 +978,34 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} ...prefixOptions, // Picker-triggered confirmations should stay immediate. deliver: async (payload: ReplyPayload) => { - const mediaUrls = payload.mediaUrls ?? (payload.mediaUrl ? [payload.mediaUrl] : []); - const text = core.channel.text - .convertMarkdownTables(payload.text ?? "", tableMode) - .trim(); + const trimmedPayload = { + ...payload, + text: core.channel.text.convertMarkdownTables(payload.text ?? 
"", tableMode).trim(), + }; if (!shouldDeliverReplies) { - if (text) { - capturedTexts.push(text); + if (trimmedPayload.text) { + capturedTexts.push(trimmedPayload.text); } return; } - if (mediaUrls.length === 0) { - const chunkMode = core.channel.text.resolveChunkMode( - cfg, - "mattermost", - account.accountId, - ); - const chunks = core.channel.text.chunkMarkdownTextWithMode(text, textLimit, chunkMode); - for (const chunk of chunks.length > 0 ? chunks : [text]) { - if (!chunk) { - continue; - } - await sendMessageMattermost(to, chunk, { - accountId: account.accountId, - }); - } - return; - } - - let first = true; - for (const mediaUrl of mediaUrls) { - const caption = first ? text : ""; - first = false; - await sendMessageMattermost(to, caption, { - accountId: account.accountId, - mediaUrl, - }); - } + await deliverMattermostReplyPayload({ + core, + cfg, + payload: trimmedPayload, + to, + accountId: account.accountId, + agentId: params.route.agentId, + replyToId: resolveMattermostReplyRootId({ + threadRootId: params.effectiveReplyToId, + replyToId: trimmedPayload.replyToId, + }), + textLimit, + // The picker path already converts and trims text before capture/delivery. + tableMode: "off", + sendMessage: sendMessageMattermost, + }); }, onError: (err, info) => { runtime.error?.(`mattermost model picker ${info.kind} reply failed: ${String(err)}`); @@ -1008,6 +1044,7 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} }; userName: string; context: Record; + post: MattermostPost; }): Promise { const pickerState = parseMattermostModelPickerContext(params.context); if (!pickerState) { @@ -1096,6 +1133,18 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} id: kind === "direct" ? 
params.payload.user_id : params.payload.channel_id, }, }); + const replyToMode = resolveMattermostReplyToMode(account, kind); + const threadContext = resolveMattermostThreadSessionContext({ + baseSessionKey: route.sessionKey, + kind, + postId: params.post.id || params.payload.post_id, + replyToMode, + threadRootId: params.post.root_id, + }); + const modelSessionRoute = { + agentId: route.agentId, + sessionKey: threadContext.sessionKey, + }; const data = await buildModelsProviderData(cfg, route.agentId); if (data.providers.length === 0) { @@ -1109,7 +1158,7 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} if (pickerState.action === "providers" || pickerState.action === "back") { const currentModel = resolveMattermostModelPickerCurrentModel({ cfg, - route, + route: modelSessionRoute, data, }); const view = renderMattermostProviderPickerView({ @@ -1128,7 +1177,7 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} if (pickerState.action === "list") { const currentModel = resolveMattermostModelPickerCurrentModel({ cfg, - route, + route: modelSessionRoute, data, }); const view = renderMattermostModelsPickerView({ @@ -1159,6 +1208,8 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} commandText: `/model ${targetModelRef}`, commandAuthorized: auth.commandAuthorized, route, + sessionKey: threadContext.sessionKey, + parentSessionKey: threadContext.parentSessionKey, channelId: params.payload.channel_id, senderId: params.payload.user_id, senderName: params.userName, @@ -1169,11 +1220,12 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} roomLabel, teamId, postId: params.payload.post_id, + effectiveReplyToId: threadContext.effectiveReplyToId, deliverReplies: true, }); const updatedModel = resolveMattermostModelPickerCurrentModel({ cfg, - route, + route: modelSessionRoute, data, skipCache: true, }); @@ -1393,12 +1445,15 @@ export async function 
monitorMattermostProvider(opts: MonitorMattermostOpts = {} const baseSessionKey = route.sessionKey; const threadRootId = post.root_id?.trim() || undefined; - const threadKeys = resolveThreadSessionKeys({ + const replyToMode = resolveMattermostReplyToMode(account, kind); + const threadContext = resolveMattermostThreadSessionContext({ baseSessionKey, - threadId: threadRootId, - parentSessionKey: threadRootId ? baseSessionKey : undefined, + kind, + postId: post.id, + replyToMode, + threadRootId, }); - const sessionKey = threadKeys.sessionKey; + const { effectiveReplyToId, sessionKey, parentSessionKey } = threadContext; const historyKey = kind === "direct" ? null : sessionKey; const mentionRegexes = core.channel.mentions.buildMentionRegexes(cfg, route.agentId); @@ -1562,7 +1617,7 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} : `mattermost:channel:${channelId}`, To: to, SessionKey: sessionKey, - ParentSessionKey: threadKeys.parentSessionKey, + ParentSessionKey: parentSessionKey, AccountId: route.accountId, ChatType: chatType, ConversationLabel: fromLabel, @@ -1578,8 +1633,8 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} MessageSidFirst: allMessageIds.length > 1 ? allMessageIds[0] : undefined, MessageSidLast: allMessageIds.length > 1 ? allMessageIds[allMessageIds.length - 1] : undefined, - ReplyToId: threadRootId, - MessageThreadId: threadRootId, + ReplyToId: effectiveReplyToId, + MessageThreadId: effectiveReplyToId, Timestamp: typeof post.create_at === "number" ? post.create_at : undefined, WasMentioned: kind !== "direct" ? 
mentionDecision.effectiveWasMentioned : undefined, CommandAuthorized: commandAuthorized, @@ -1631,7 +1686,7 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} }); const typingCallbacks = createTypingCallbacks({ - start: () => sendTypingIndicator(channelId, threadRootId), + start: () => sendTypingIndicator(channelId, effectiveReplyToId), onStartError: (err) => { logTypingFailure({ log: (message) => logger.debug?.(message), @@ -1647,42 +1702,21 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {} humanDelay: core.channel.reply.resolveHumanDelayConfig(cfg, route.agentId), typingCallbacks, deliver: async (payload: ReplyPayload) => { - const mediaUrls = payload.mediaUrls ?? (payload.mediaUrl ? [payload.mediaUrl] : []); - const text = core.channel.text.convertMarkdownTables(payload.text ?? "", tableMode); - if (mediaUrls.length === 0) { - const chunkMode = core.channel.text.resolveChunkMode( - cfg, - "mattermost", - account.accountId, - ); - const chunks = core.channel.text.chunkMarkdownTextWithMode(text, textLimit, chunkMode); - for (const chunk of chunks.length > 0 ? chunks : [text]) { - if (!chunk) { - continue; - } - await sendMessageMattermost(to, chunk, { - accountId: account.accountId, - replyToId: resolveMattermostReplyRootId({ - threadRootId, - replyToId: payload.replyToId, - }), - }); - } - } else { - let first = true; - for (const mediaUrl of mediaUrls) { - const caption = first ? 
text : ""; - first = false; - await sendMessageMattermost(to, caption, { - accountId: account.accountId, - mediaUrl, - replyToId: resolveMattermostReplyRootId({ - threadRootId, - replyToId: payload.replyToId, - }), - }); - } - } + await deliverMattermostReplyPayload({ + core, + cfg, + payload, + to, + accountId: account.accountId, + agentId: route.agentId, + replyToId: resolveMattermostReplyRootId({ + threadRootId: effectiveReplyToId, + replyToId: payload.replyToId, + }), + textLimit, + tableMode, + sendMessage: sendMessageMattermost, + }); runtime.log?.(`delivered reply to ${to}`); }, onError: (err, info) => { diff --git a/extensions/mattermost/src/mattermost/reactions.test.ts b/extensions/mattermost/src/mattermost/reactions.test.ts index 0b07c1b497b..2659f2e1a99 100644 --- a/extensions/mattermost/src/mattermost/reactions.test.ts +++ b/extensions/mattermost/src/mattermost/reactions.test.ts @@ -14,6 +14,28 @@ describe("mattermost reactions", () => { resetMattermostReactionBotUserCacheForTests(); }); + async function addReactionWithFetch( + fetchMock: ReturnType, + ) { + return addMattermostReaction({ + cfg: createMattermostTestConfig(), + postId: "POST1", + emojiName: "thumbsup", + fetchImpl: fetchMock as unknown as typeof fetch, + }); + } + + async function removeReactionWithFetch( + fetchMock: ReturnType, + ) { + return removeMattermostReaction({ + cfg: createMattermostTestConfig(), + postId: "POST1", + emojiName: "thumbsup", + fetchImpl: fetchMock as unknown as typeof fetch, + }); + } + it("adds reactions by calling /users/me then POST /reactions", async () => { const fetchMock = createMattermostReactionFetchMock({ mode: "add", @@ -21,12 +43,7 @@ describe("mattermost reactions", () => { emojiName: "thumbsup", }); - const result = await addMattermostReaction({ - cfg: createMattermostTestConfig(), - postId: "POST1", - emojiName: "thumbsup", - fetchImpl: fetchMock as unknown as typeof fetch, - }); + const result = await addReactionWithFetch(fetchMock); 
expect(result).toEqual({ ok: true }); expect(fetchMock).toHaveBeenCalled(); @@ -41,12 +58,7 @@ describe("mattermost reactions", () => { body: { id: "err", message: "boom" }, }); - const result = await addMattermostReaction({ - cfg: createMattermostTestConfig(), - postId: "POST1", - emojiName: "thumbsup", - fetchImpl: fetchMock as unknown as typeof fetch, - }); + const result = await addReactionWithFetch(fetchMock); expect(result.ok).toBe(false); if (!result.ok) { @@ -61,12 +73,7 @@ describe("mattermost reactions", () => { emojiName: "thumbsup", }); - const result = await removeMattermostReaction({ - cfg: createMattermostTestConfig(), - postId: "POST1", - emojiName: "thumbsup", - fetchImpl: fetchMock as unknown as typeof fetch, - }); + const result = await removeReactionWithFetch(fetchMock); expect(result).toEqual({ ok: true }); expect(fetchMock).toHaveBeenCalled(); diff --git a/extensions/mattermost/src/mattermost/reply-delivery.test.ts b/extensions/mattermost/src/mattermost/reply-delivery.test.ts new file mode 100644 index 00000000000..7d48e5fcfc0 --- /dev/null +++ b/extensions/mattermost/src/mattermost/reply-delivery.test.ts @@ -0,0 +1,95 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import type { OpenClawConfig } from "openclaw/plugin-sdk/mattermost"; +import { describe, expect, it, vi } from "vitest"; +import { deliverMattermostReplyPayload } from "./reply-delivery.js"; + +describe("deliverMattermostReplyPayload", () => { + it("passes agent-scoped mediaLocalRoots when sending media paths", async () => { + const previousStateDir = process.env.OPENCLAW_STATE_DIR; + const stateDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-mm-state-")); + process.env.OPENCLAW_STATE_DIR = stateDir; + + try { + const sendMessage = vi.fn(async () => undefined); + const core = { + channel: { + text: { + convertMarkdownTables: vi.fn((text: string) => text), + resolveChunkMode: vi.fn(() => "length"), + 
chunkMarkdownTextWithMode: vi.fn((text: string) => [text]), + }, + }, + } as any; + + const agentId = "agent-1"; + const mediaUrl = `file://${path.join(stateDir, `workspace-${agentId}`, "photo.png")}`; + const cfg = {} satisfies OpenClawConfig; + + await deliverMattermostReplyPayload({ + core, + cfg, + payload: { text: "caption", mediaUrl }, + to: "channel:town-square", + accountId: "default", + agentId, + replyToId: "root-post", + textLimit: 4000, + tableMode: "off", + sendMessage, + }); + + expect(sendMessage).toHaveBeenCalledTimes(1); + expect(sendMessage).toHaveBeenCalledWith( + "channel:town-square", + "caption", + expect.objectContaining({ + accountId: "default", + mediaUrl, + replyToId: "root-post", + mediaLocalRoots: expect.arrayContaining([path.join(stateDir, `workspace-${agentId}`)]), + }), + ); + } finally { + if (previousStateDir === undefined) { + delete process.env.OPENCLAW_STATE_DIR; + } else { + process.env.OPENCLAW_STATE_DIR = previousStateDir; + } + await fs.rm(stateDir, { recursive: true, force: true }); + } + }); + + it("forwards replyToId for text-only chunked replies", async () => { + const sendMessage = vi.fn(async () => undefined); + const core = { + channel: { + text: { + convertMarkdownTables: vi.fn((text: string) => text), + resolveChunkMode: vi.fn(() => "length"), + chunkMarkdownTextWithMode: vi.fn(() => ["hello"]), + }, + }, + } as any; + + await deliverMattermostReplyPayload({ + core, + cfg: {} satisfies OpenClawConfig, + payload: { text: "hello" }, + to: "channel:town-square", + accountId: "default", + agentId: "agent-1", + replyToId: "root-post", + textLimit: 4000, + tableMode: "off", + sendMessage, + }); + + expect(sendMessage).toHaveBeenCalledTimes(1); + expect(sendMessage).toHaveBeenCalledWith("channel:town-square", "hello", { + accountId: "default", + replyToId: "root-post", + }); + }); +}); diff --git a/extensions/mattermost/src/mattermost/reply-delivery.ts b/extensions/mattermost/src/mattermost/reply-delivery.ts new file mode 
100644 index 00000000000..5c94e51934b --- /dev/null +++ b/extensions/mattermost/src/mattermost/reply-delivery.ts @@ -0,0 +1,71 @@ +import type { OpenClawConfig, PluginRuntime, ReplyPayload } from "openclaw/plugin-sdk/mattermost"; +import { getAgentScopedMediaLocalRoots } from "openclaw/plugin-sdk/mattermost"; + +type MarkdownTableMode = Parameters[1]; + +type SendMattermostMessage = ( + to: string, + text: string, + opts: { + accountId?: string; + mediaUrl?: string; + mediaLocalRoots?: readonly string[]; + replyToId?: string; + }, +) => Promise; + +export async function deliverMattermostReplyPayload(params: { + core: PluginRuntime; + cfg: OpenClawConfig; + payload: ReplyPayload; + to: string; + accountId: string; + agentId?: string; + replyToId?: string; + textLimit: number; + tableMode: MarkdownTableMode; + sendMessage: SendMattermostMessage; +}): Promise { + const mediaUrls = + params.payload.mediaUrls ?? (params.payload.mediaUrl ? [params.payload.mediaUrl] : []); + const text = params.core.channel.text.convertMarkdownTables( + params.payload.text ?? "", + params.tableMode, + ); + + if (mediaUrls.length === 0) { + const chunkMode = params.core.channel.text.resolveChunkMode( + params.cfg, + "mattermost", + params.accountId, + ); + const chunks = params.core.channel.text.chunkMarkdownTextWithMode( + text, + params.textLimit, + chunkMode, + ); + for (const chunk of chunks.length > 0 ? chunks : [text]) { + if (!chunk) { + continue; + } + await params.sendMessage(params.to, chunk, { + accountId: params.accountId, + replyToId: params.replyToId, + }); + } + return; + } + + const mediaLocalRoots = getAgentScopedMediaLocalRoots(params.cfg, params.agentId); + let first = true; + for (const mediaUrl of mediaUrls) { + const caption = first ? 
text : ""; + first = false; + await params.sendMessage(params.to, caption, { + accountId: params.accountId, + mediaUrl, + mediaLocalRoots, + replyToId: params.replyToId, + }); + } +} diff --git a/extensions/mattermost/src/mattermost/send.test.ts b/extensions/mattermost/src/mattermost/send.test.ts index 41ce2dd283a..774f40f99fa 100644 --- a/extensions/mattermost/src/mattermost/send.test.ts +++ b/extensions/mattermost/src/mattermost/send.test.ts @@ -1,5 +1,10 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; +import { + expectProvidedCfgSkipsRuntimeLoad, + expectRuntimeCfgFallback, +} from "../../../test-utils/send-config.js"; import { parseMattermostTarget, sendMessageMattermost } from "./send.js"; +import { resetMattermostOpaqueTargetCacheForTests } from "./target-resolution.js"; const mockState = vi.hoisted(() => ({ loadConfig: vi.fn(() => ({})), @@ -14,6 +19,7 @@ const mockState = vi.hoisted(() => ({ createMattermostPost: vi.fn(), fetchMattermostChannelByName: vi.fn(), fetchMattermostMe: vi.fn(), + fetchMattermostUser: vi.fn(), fetchMattermostUserTeams: vi.fn(), fetchMattermostUserByUsername: vi.fn(), normalizeMattermostBaseUrl: vi.fn((input: string | undefined) => input?.trim() ?? 
""), @@ -34,6 +40,7 @@ vi.mock("./client.js", () => ({ createMattermostPost: mockState.createMattermostPost, fetchMattermostChannelByName: mockState.fetchMattermostChannelByName, fetchMattermostMe: mockState.fetchMattermostMe, + fetchMattermostUser: mockState.fetchMattermostUser, fetchMattermostUserTeams: mockState.fetchMattermostUserTeams, fetchMattermostUserByUsername: mockState.fetchMattermostUserByUsername, normalizeMattermostBaseUrl: mockState.normalizeMattermostBaseUrl, @@ -77,9 +84,11 @@ describe("sendMessageMattermost", () => { mockState.createMattermostPost.mockReset(); mockState.fetchMattermostChannelByName.mockReset(); mockState.fetchMattermostMe.mockReset(); + mockState.fetchMattermostUser.mockReset(); mockState.fetchMattermostUserTeams.mockReset(); mockState.fetchMattermostUserByUsername.mockReset(); mockState.uploadMattermostFile.mockReset(); + resetMattermostOpaqueTargetCacheForTests(); mockState.createMattermostClient.mockReturnValue({}); mockState.createMattermostPost.mockResolvedValue({ id: "post-1" }); mockState.fetchMattermostMe.mockResolvedValue({ id: "bot-user" }); @@ -102,8 +111,9 @@ describe("sendMessageMattermost", () => { accountId: "work", }); - expect(mockState.loadConfig).not.toHaveBeenCalled(); - expect(mockState.resolveMattermostAccount).toHaveBeenCalledWith({ + expectProvidedCfgSkipsRuntimeLoad({ + loadConfig: mockState.loadConfig, + resolveAccount: mockState.resolveMattermostAccount, cfg: providedCfg, accountId: "work", }); @@ -121,8 +131,9 @@ describe("sendMessageMattermost", () => { await sendMessageMattermost("channel:town-square", "hello"); - expect(mockState.loadConfig).toHaveBeenCalledTimes(1); - expect(mockState.resolveMattermostAccount).toHaveBeenCalledWith({ + expectRuntimeCfgFallback({ + loadConfig: mockState.loadConfig, + resolveAccount: mockState.resolveMattermostAccount, cfg: runtimeCfg, accountId: undefined, }); @@ -182,6 +193,61 @@ describe("sendMessageMattermost", () => { }), ); }); + + it("resolves a bare Mattermost 
user id as a DM target before upload", async () => { + const userId = "dthcxgoxhifn3pwh65cut3ud3w"; + mockState.fetchMattermostUser.mockResolvedValueOnce({ id: userId }); + mockState.createMattermostDirectChannel.mockResolvedValueOnce({ id: "dm-channel-1" }); + mockState.loadOutboundMediaFromUrl.mockResolvedValueOnce({ + buffer: Buffer.from("media-bytes"), + fileName: "photo.png", + contentType: "image/png", + kind: "image", + }); + + const result = await sendMessageMattermost(userId, "hello", { + mediaUrl: "file:///tmp/agent-workspace/photo.png", + mediaLocalRoots: ["/tmp/agent-workspace"], + }); + + expect(mockState.fetchMattermostUser).toHaveBeenCalledWith({}, userId); + expect(mockState.createMattermostDirectChannel).toHaveBeenCalledWith({}, ["bot-user", userId]); + expect(mockState.uploadMattermostFile).toHaveBeenCalledWith( + {}, + expect.objectContaining({ + channelId: "dm-channel-1", + }), + ); + expect(result.channelId).toBe("dm-channel-1"); + }); + + it("falls back to a channel target when bare Mattermost id is not a user", async () => { + const channelId = "aaaaaaaaaaaaaaaaaaaaaaaaaa"; + mockState.fetchMattermostUser.mockRejectedValueOnce( + new Error("Mattermost API 404 Not Found: user not found"), + ); + mockState.loadOutboundMediaFromUrl.mockResolvedValueOnce({ + buffer: Buffer.from("media-bytes"), + fileName: "photo.png", + contentType: "image/png", + kind: "image", + }); + + const result = await sendMessageMattermost(channelId, "hello", { + mediaUrl: "file:///tmp/agent-workspace/photo.png", + mediaLocalRoots: ["/tmp/agent-workspace"], + }); + + expect(mockState.fetchMattermostUser).toHaveBeenCalledWith({}, channelId); + expect(mockState.createMattermostDirectChannel).not.toHaveBeenCalled(); + expect(mockState.uploadMattermostFile).toHaveBeenCalledWith( + {}, + expect.objectContaining({ + channelId, + }), + ); + expect(result.channelId).toBe(channelId); + }); }); describe("parseMattermostTarget", () => { @@ -266,3 +332,110 @@ 
describe("parseMattermostTarget", () => { expect(parseMattermostTarget("Mattermost:QRS")).toEqual({ kind: "user", id: "QRS" }); }); }); + +// Each test uses a unique (token, id) pair to avoid module-level cache collisions. +// userIdResolutionCache and dmChannelCache are module singletons that survive across tests. +// Using unique cache keys per test ensures full isolation without needing a cache reset API. +describe("sendMessageMattermost user-first resolution", () => { + function makeAccount(token: string) { + return { + accountId: "default", + botToken: token, + baseUrl: "https://mattermost.example.com", + }; + } + + beforeEach(() => { + vi.clearAllMocks(); + mockState.createMattermostClient.mockReturnValue({}); + mockState.createMattermostPost.mockResolvedValue({ id: "post-id" }); + mockState.createMattermostDirectChannel.mockResolvedValue({ id: "dm-channel-id" }); + mockState.fetchMattermostMe.mockResolvedValue({ id: "bot-id" }); + }); + + it("resolves unprefixed 26-char id as user and sends via DM channel", async () => { + // Unique token + id to avoid cache pollution from other tests + const userId = "aaaaaa1111111111aaaaaa1111"; // 26 chars + mockState.resolveMattermostAccount.mockReturnValue(makeAccount("token-user-dm-t1")); + mockState.fetchMattermostUser.mockResolvedValueOnce({ id: userId }); + + const res = await sendMessageMattermost(userId, "hello"); + + expect(mockState.fetchMattermostUser).toHaveBeenCalledTimes(1); + expect(mockState.createMattermostDirectChannel).toHaveBeenCalledTimes(1); + const params = mockState.createMattermostPost.mock.calls[0]?.[1]; + expect(params.channelId).toBe("dm-channel-id"); + expect(res.channelId).toBe("dm-channel-id"); + expect(res.messageId).toBe("post-id"); + }); + + it("falls back to channel id when user lookup returns 404", async () => { + // Unique token + id for this test + const channelId = "bbbbbb2222222222bbbbbb2222"; // 26 chars + 
mockState.resolveMattermostAccount.mockReturnValue(makeAccount("token-404-t2")); + const err = new Error("Mattermost API 404: user not found"); + mockState.fetchMattermostUser.mockRejectedValueOnce(err); + + const res = await sendMessageMattermost(channelId, "hello"); + + expect(mockState.fetchMattermostUser).toHaveBeenCalledTimes(1); + expect(mockState.createMattermostDirectChannel).not.toHaveBeenCalled(); + const params = mockState.createMattermostPost.mock.calls[0]?.[1]; + expect(params.channelId).toBe(channelId); + expect(res.channelId).toBe(channelId); + }); + + it("falls back to channel id without caching negative result on transient error", async () => { + // Two unique tokens so each call has its own cache namespace + const userId = "cccccc3333333333cccccc3333"; // 26 chars + const tokenA = "token-transient-t3a"; + const tokenB = "token-transient-t3b"; + const transientErr = new Error("Mattermost API 503: service unavailable"); + + // First call: transient error → fall back to channel id, do NOT cache negative + mockState.resolveMattermostAccount.mockReturnValue(makeAccount(tokenA)); + mockState.fetchMattermostUser.mockRejectedValueOnce(transientErr); + + const res1 = await sendMessageMattermost(userId, "first"); + expect(res1.channelId).toBe(userId); + + // Second call with a different token (new cache key) → retries user lookup + vi.clearAllMocks(); + mockState.createMattermostClient.mockReturnValue({}); + mockState.createMattermostPost.mockResolvedValue({ id: "post-id-2" }); + mockState.createMattermostDirectChannel.mockResolvedValue({ id: "dm-channel-id" }); + mockState.fetchMattermostMe.mockResolvedValue({ id: "bot-id" }); + mockState.resolveMattermostAccount.mockReturnValue(makeAccount(tokenB)); + mockState.fetchMattermostUser.mockResolvedValueOnce({ id: userId }); + + const res2 = await sendMessageMattermost(userId, "second"); + expect(mockState.fetchMattermostUser).toHaveBeenCalledTimes(1); + expect(res2.channelId).toBe("dm-channel-id"); + }); + + 
it("does not apply user-first resolution for explicit user: prefix", async () => { + // Unique token + id — explicit user: prefix bypasses probe, goes straight to DM + const userId = "dddddd4444444444dddddd4444"; // 26 chars + mockState.resolveMattermostAccount.mockReturnValue(makeAccount("token-explicit-user-t4")); + + const res = await sendMessageMattermost(`user:${userId}`, "hello"); + + expect(mockState.fetchMattermostUser).not.toHaveBeenCalled(); + expect(mockState.createMattermostDirectChannel).toHaveBeenCalledTimes(1); + expect(res.channelId).toBe("dm-channel-id"); + }); + + it("does not apply user-first resolution for explicit channel: prefix", async () => { + // Unique token + id — explicit channel: prefix, no probe, no DM + const chanId = "eeeeee5555555555eeeeee5555"; // 26 chars + mockState.resolveMattermostAccount.mockReturnValue(makeAccount("token-explicit-chan-t5")); + + const res = await sendMessageMattermost(`channel:${chanId}`, "hello"); + + expect(mockState.fetchMattermostUser).not.toHaveBeenCalled(); + expect(mockState.createMattermostDirectChannel).not.toHaveBeenCalled(); + const params = mockState.createMattermostPost.mock.calls[0]?.[1]; + expect(params.channelId).toBe(chanId); + expect(res.channelId).toBe(chanId); + }); +}); diff --git a/extensions/mattermost/src/mattermost/send.ts b/extensions/mattermost/src/mattermost/send.ts index 7af69a65ada..4655dab2f7d 100644 --- a/extensions/mattermost/src/mattermost/send.ts +++ b/extensions/mattermost/src/mattermost/send.ts @@ -19,6 +19,7 @@ import { setInteractionSecret, type MattermostInteractiveButtonInput, } from "./interactions.js"; +import { isMattermostId, resolveMattermostOpaqueTarget } from "./target-resolution.js"; export type MattermostSendOpts = { cfg?: OpenClawConfig; @@ -50,6 +51,7 @@ type MattermostTarget = const botUserCache = new Map(); const userByNameCache = new Map(); const channelByNameCache = new Map(); +const dmChannelCache = new Map(); const getCore = () => 
getMattermostRuntime(); @@ -66,12 +68,6 @@ function normalizeMessage(text: string, mediaUrl?: string): string { function isHttpUrl(value: string): boolean { return /^https?:\/\//i.test(value); } - -/** Mattermost IDs are 26-character lowercase alphanumeric strings. */ -function isMattermostId(value: string): boolean { - return /^[a-z0-9]{26}$/.test(value); -} - export function parseMattermostTarget(raw: string): MattermostTarget { const trimmed = raw.trim(); if (!trimmed) { @@ -208,12 +204,18 @@ async function resolveTargetChannelId(params: { token: params.token, username: params.target.username ?? "", }); + const dmKey = `${cacheKey(params.baseUrl, params.token)}::dm::${userId}`; + const cachedDm = dmChannelCache.get(dmKey); + if (cachedDm) { + return cachedDm; + } const botUser = await resolveBotUser(params.baseUrl, params.token); const client = createMattermostClient({ baseUrl: params.baseUrl, botToken: params.token, }); const channel = await createMattermostDirectChannel(client, [botUser.id, userId]); + dmChannelCache.set(dmKey, channel.id); return channel.id; } @@ -248,7 +250,18 @@ async function resolveMattermostSendContext( ); } - const target = parseMattermostTarget(to); + const trimmedTo = to?.trim() ?? ""; + const opaqueTarget = await resolveMattermostOpaqueTarget({ + input: trimmedTo, + token, + baseUrl, + }); + const target = + opaqueTarget?.kind === "user" + ? { kind: "user" as const, id: opaqueTarget.id } + : opaqueTarget?.kind === "channel" + ? 
{ kind: "channel" as const, id: opaqueTarget.id } + : parseMattermostTarget(trimmedTo); const channelId = await resolveTargetChannelId({ target, baseUrl, diff --git a/extensions/mattermost/src/mattermost/slash-commands.test.ts b/extensions/mattermost/src/mattermost/slash-commands.test.ts index 4beaea98ca5..d53c8f99203 100644 --- a/extensions/mattermost/src/mattermost/slash-commands.test.ts +++ b/extensions/mattermost/src/mattermost/slash-commands.test.ts @@ -10,6 +10,25 @@ import { } from "./slash-commands.js"; describe("slash-commands", () => { + async function registerSingleStatusCommand( + request: (path: string, init?: { method?: string }) => Promise, + ) { + const client = { request } as unknown as MattermostClient; + return registerSlashCommands({ + client, + teamId: "team-1", + creatorUserId: "bot-user", + callbackUrl: "http://gateway/callback", + commands: [ + { + trigger: "oc_status", + description: "status", + autoComplete: true, + }, + ], + }); + } + it("parses application/x-www-form-urlencoded payloads", () => { const payload = parseSlashCommandPayload( "token=t1&team_id=team&channel_id=ch1&user_id=u1&command=%2Foc_status&text=now", @@ -101,21 +120,7 @@ describe("slash-commands", () => { } throw new Error(`unexpected request path: ${path}`); }); - const client = { request } as unknown as MattermostClient; - - const result = await registerSlashCommands({ - client, - teamId: "team-1", - creatorUserId: "bot-user", - callbackUrl: "http://gateway/callback", - commands: [ - { - trigger: "oc_status", - description: "status", - autoComplete: true, - }, - ], - }); + const result = await registerSingleStatusCommand(request); expect(result).toHaveLength(1); expect(result[0]?.managed).toBe(false); @@ -144,21 +149,7 @@ describe("slash-commands", () => { } throw new Error(`unexpected request path: ${path}`); }); - const client = { request } as unknown as MattermostClient; - - const result = await registerSlashCommands({ - client, - teamId: "team-1", - creatorUserId: 
"bot-user", - callbackUrl: "http://gateway/callback", - commands: [ - { - trigger: "oc_status", - description: "status", - autoComplete: true, - }, - ], - }); + const result = await registerSingleStatusCommand(request); expect(result).toHaveLength(0); expect(request).toHaveBeenCalledTimes(1); diff --git a/extensions/mattermost/src/mattermost/slash-http.test.ts b/extensions/mattermost/src/mattermost/slash-http.test.ts index 92a6babe35c..a89bfc4e33a 100644 --- a/extensions/mattermost/src/mattermost/slash-http.test.ts +++ b/extensions/mattermost/src/mattermost/slash-http.test.ts @@ -58,6 +58,23 @@ const accountFixture: ResolvedMattermostAccount = { config: {}, }; +async function runSlashRequest(params: { + commandTokens: Set; + body: string; + method?: string; +}) { + const handler = createSlashCommandHttpHandler({ + account: accountFixture, + cfg: {} as OpenClawConfig, + runtime: {} as RuntimeEnv, + commandTokens: params.commandTokens, + }); + const req = createRequest({ method: params.method, body: params.body }); + const response = createResponse(); + await handler(req, response.res); + return response; +} + describe("slash-http", () => { it("rejects non-POST methods", async () => { const handler = createSlashCommandHttpHandler({ @@ -93,36 +110,20 @@ describe("slash-http", () => { }); it("fails closed when no command tokens are registered", async () => { - const handler = createSlashCommandHttpHandler({ - account: accountFixture, - cfg: {} as OpenClawConfig, - runtime: {} as RuntimeEnv, + const response = await runSlashRequest({ commandTokens: new Set(), - }); - const req = createRequest({ body: "token=tok1&team_id=t1&channel_id=c1&user_id=u1&command=%2Foc_status&text=", }); - const response = createResponse(); - - await handler(req, response.res); expect(response.res.statusCode).toBe(401); expect(response.getBody()).toContain("Unauthorized: invalid command token."); }); it("rejects unknown command tokens", async () => { - const handler = 
createSlashCommandHttpHandler({ - account: accountFixture, - cfg: {} as OpenClawConfig, - runtime: {} as RuntimeEnv, + const response = await runSlashRequest({ commandTokens: new Set(["known-token"]), - }); - const req = createRequest({ body: "token=unknown&team_id=t1&channel_id=c1&user_id=u1&command=%2Foc_status&text=", }); - const response = createResponse(); - - await handler(req, response.res); expect(response.res.statusCode).toBe(401); expect(response.getBody()).toContain("Unauthorized: invalid command token."); diff --git a/extensions/mattermost/src/mattermost/slash-http.ts b/extensions/mattermost/src/mattermost/slash-http.ts index 3c64b083d3a..468f5c3584c 100644 --- a/extensions/mattermost/src/mattermost/slash-http.ts +++ b/extensions/mattermost/src/mattermost/slash-http.ts @@ -35,6 +35,7 @@ import { authorizeMattermostCommandInvocation, normalizeMattermostAllowList, } from "./monitor-auth.js"; +import { deliverMattermostReplyPayload } from "./reply-delivery.js"; import { sendMessageMattermost } from "./send.js"; import { parseSlashCommandPayload, @@ -474,6 +475,7 @@ async function handleSlashCommandAsync(params: { channel: "mattermost", accountId: account.accountId, }); + const humanDelay = core.channel.reply.resolveHumanDelayConfig(cfg, route.agentId); const typingCallbacks = createTypingCallbacks({ start: () => sendMattermostTyping(client, { channelId }), @@ -490,34 +492,19 @@ async function handleSlashCommandAsync(params: { const { dispatcher, replyOptions, markDispatchIdle } = core.channel.reply.createReplyDispatcherWithTyping({ ...prefixOptions, - humanDelay: core.channel.reply.resolveHumanDelayConfig(cfg, route.agentId), + humanDelay, deliver: async (payload: ReplyPayload) => { - const mediaUrls = payload.mediaUrls ?? (payload.mediaUrl ? [payload.mediaUrl] : []); - const text = core.channel.text.convertMarkdownTables(payload.text ?? 
"", tableMode); - if (mediaUrls.length === 0) { - const chunkMode = core.channel.text.resolveChunkMode( - cfg, - "mattermost", - account.accountId, - ); - const chunks = core.channel.text.chunkMarkdownTextWithMode(text, textLimit, chunkMode); - for (const chunk of chunks.length > 0 ? chunks : [text]) { - if (!chunk) continue; - await sendMessageMattermost(to, chunk, { - accountId: account.accountId, - }); - } - } else { - let first = true; - for (const mediaUrl of mediaUrls) { - const caption = first ? text : ""; - first = false; - await sendMessageMattermost(to, caption, { - accountId: account.accountId, - mediaUrl, - }); - } - } + await deliverMattermostReplyPayload({ + core, + cfg, + payload, + to, + accountId: account.accountId, + agentId: route.agentId, + textLimit, + tableMode, + sendMessage: sendMessageMattermost, + }); runtime.log?.(`delivered slash reply to ${to}`); }, onError: (err, info) => { diff --git a/extensions/mattermost/src/mattermost/target-resolution.ts b/extensions/mattermost/src/mattermost/target-resolution.ts new file mode 100644 index 00000000000..d3b59a3e696 --- /dev/null +++ b/extensions/mattermost/src/mattermost/target-resolution.ts @@ -0,0 +1,97 @@ +import type { OpenClawConfig } from "openclaw/plugin-sdk/mattermost"; +import { resolveMattermostAccount } from "./accounts.js"; +import { + createMattermostClient, + fetchMattermostUser, + normalizeMattermostBaseUrl, +} from "./client.js"; + +export type MattermostOpaqueTargetResolution = { + kind: "user" | "channel"; + id: string; + to: string; +}; + +const mattermostOpaqueTargetCache = new Map(); + +function cacheKey(baseUrl: string, token: string, id: string): string { + return `${baseUrl}::${token}::${id}`; +} + +/** Mattermost IDs are 26-character lowercase alphanumeric strings. 
*/ +export function isMattermostId(value: string): boolean { + return /^[a-z0-9]{26}$/.test(value); +} + +export function isExplicitMattermostTarget(raw: string): boolean { + const trimmed = raw.trim(); + if (!trimmed) { + return false; + } + return ( + /^(channel|user|mattermost):/i.test(trimmed) || + trimmed.startsWith("@") || + trimmed.startsWith("#") + ); +} + +export function parseMattermostApiStatus(err: unknown): number | undefined { + if (!err || typeof err !== "object") { + return undefined; + } + const msg = "message" in err ? String((err as { message?: unknown }).message ?? "") : ""; + const match = /Mattermost API (\d{3})\b/.exec(msg); + if (!match) { + return undefined; + } + const code = Number(match[1]); + return Number.isFinite(code) ? code : undefined; +} + +export async function resolveMattermostOpaqueTarget(params: { + input: string; + cfg?: OpenClawConfig; + accountId?: string | null; + token?: string; + baseUrl?: string; +}): Promise { + const input = params.input.trim(); + if (!input || isExplicitMattermostTarget(input) || !isMattermostId(input)) { + return null; + } + + const account = + params.cfg && (!params.token || !params.baseUrl) + ? resolveMattermostAccount({ cfg: params.cfg, accountId: params.accountId }) + : null; + const token = params.token?.trim() || account?.botToken?.trim(); + const baseUrl = normalizeMattermostBaseUrl(params.baseUrl ?? 
account?.baseUrl); + if (!token || !baseUrl) { + return null; + } + + const key = cacheKey(baseUrl, token, input); + const cached = mattermostOpaqueTargetCache.get(key); + if (cached === true) { + return { kind: "user", id: input, to: `user:${input}` }; + } + if (cached === false) { + return { kind: "channel", id: input, to: `channel:${input}` }; + } + + const client = createMattermostClient({ baseUrl, botToken: token }); + try { + await fetchMattermostUser(client, input); + mattermostOpaqueTargetCache.set(key, true); + return { kind: "user", id: input, to: `user:${input}` }; + } catch (err) { + if (parseMattermostApiStatus(err) === 404) { + mattermostOpaqueTargetCache.set(key, false); + } + return { kind: "channel", id: input, to: `channel:${input}` }; + } +} + +export function resetMattermostOpaqueTargetCacheForTests(): void { + mattermostOpaqueTargetCache.clear(); +} diff --git a/extensions/mattermost/src/types.ts b/extensions/mattermost/src/types.ts index ba664baa894..f4038ac6920 100644 --- a/extensions/mattermost/src/types.ts +++ b/extensions/mattermost/src/types.ts @@ -5,6 +5,9 @@ import type { SecretInput, } from "openclaw/plugin-sdk/mattermost"; +export type MattermostReplyToMode = "off" | "first" | "all"; +export type MattermostChatTypeKey = "direct" | "channel" | "group"; + export type MattermostChatMode = "oncall" | "onmessage" | "onchar"; export type MattermostAccountConfig = { @@ -54,6 +57,14 @@ export type MattermostAccountConfig = { blockStreamingCoalesce?: BlockStreamingCoalesceConfig; /** Outbound response prefix override for this channel/account. */ responsePrefix?: string; + /** + * Controls whether channel and group replies are sent as thread replies. 
+ * - "off" (default): only thread-reply when incoming message is already a thread reply + * - "first": reply in a thread under the triggering message + * - "all": always reply in a thread; uses existing thread root or starts a new thread under the message + * Direct messages always behave as "off". + */ + replyToMode?: MattermostReplyToMode; /** Action toggles for this account. */ actions?: { /** Enable message reaction actions. Default: true. */ diff --git a/extensions/memory-core/package.json b/extensions/memory-core/package.json index ca697290047..a6a8d1dbca8 100644 --- a/extensions/memory-core/package.json +++ b/extensions/memory-core/package.json @@ -1,11 +1,11 @@ { "name": "@openclaw/memory-core", - "version": "2026.3.9", + "version": "2026.3.14", "private": true, "description": "OpenClaw core memory search plugin", "type": "module", "peerDependencies": { - "openclaw": ">=2026.3.2" + "openclaw": ">=2026.3.11" }, "peerDependenciesMeta": { "openclaw": { diff --git a/extensions/memory-lancedb/index.test.ts b/extensions/memory-lancedb/index.test.ts index 2d9a6db1063..a733c3dffb8 100644 --- a/extensions/memory-lancedb/index.test.ts +++ b/extensions/memory-lancedb/index.test.ts @@ -18,12 +18,12 @@ const HAS_OPENAI_KEY = Boolean(process.env.OPENAI_API_KEY); const liveEnabled = HAS_OPENAI_KEY && process.env.OPENCLAW_LIVE_TEST === "1"; const describeLive = liveEnabled ? 
describe : describe.skip; -describe("memory plugin e2e", () => { - let tmpDir: string; - let dbPath: string; +function installTmpDirHarness(params: { prefix: string }) { + let tmpDir = ""; + let dbPath = ""; beforeEach(async () => { - tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-memory-test-")); + tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), params.prefix)); dbPath = path.join(tmpDir, "lancedb"); }); @@ -33,6 +33,27 @@ describe("memory plugin e2e", () => { } }); + return { + getTmpDir: () => tmpDir, + getDbPath: () => dbPath, + }; +} + +describe("memory plugin e2e", () => { + const { getDbPath } = installTmpDirHarness({ prefix: "openclaw-memory-test-" }); + + async function parseConfig(overrides: Record = {}) { + const { default: memoryPlugin } = await import("./index.js"); + return memoryPlugin.configSchema?.parse?.({ + embedding: { + apiKey: OPENAI_API_KEY, + model: "text-embedding-3-small", + }, + dbPath: getDbPath(), + ...overrides, + }); + } + test("memory plugin registers and initializes correctly", async () => { // Dynamic import to avoid loading LanceDB when not testing const { default: memoryPlugin } = await import("./index.js"); @@ -46,21 +67,14 @@ describe("memory plugin e2e", () => { }); test("config schema parses valid config", async () => { - const { default: memoryPlugin } = await import("./index.js"); - - const config = memoryPlugin.configSchema?.parse?.({ - embedding: { - apiKey: OPENAI_API_KEY, - model: "text-embedding-3-small", - }, - dbPath, + const config = await parseConfig({ autoCapture: true, autoRecall: true, }); expect(config).toBeDefined(); expect(config?.embedding?.apiKey).toBe(OPENAI_API_KEY); - expect(config?.dbPath).toBe(dbPath); + expect(config?.dbPath).toBe(getDbPath()); expect(config?.captureMaxChars).toBe(500); }); @@ -74,7 +88,7 @@ describe("memory plugin e2e", () => { embedding: { apiKey: "${TEST_MEMORY_API_KEY}", }, - dbPath, + dbPath: getDbPath(), }); expect(config?.embedding?.apiKey).toBe("test-key-123"); @@ 
-88,7 +102,7 @@ describe("memory plugin e2e", () => { expect(() => { memoryPlugin.configSchema?.parse?.({ embedding: {}, - dbPath, + dbPath: getDbPath(), }); }).toThrow("embedding.apiKey is required"); }); @@ -99,21 +113,14 @@ describe("memory plugin e2e", () => { expect(() => { memoryPlugin.configSchema?.parse?.({ embedding: { apiKey: OPENAI_API_KEY }, - dbPath, + dbPath: getDbPath(), captureMaxChars: 99, }); }).toThrow("captureMaxChars must be between 100 and 10000"); }); test("config schema accepts captureMaxChars override", async () => { - const { default: memoryPlugin } = await import("./index.js"); - - const config = memoryPlugin.configSchema?.parse?.({ - embedding: { - apiKey: OPENAI_API_KEY, - model: "text-embedding-3-small", - }, - dbPath, + const config = await parseConfig({ captureMaxChars: 1800, }); @@ -121,15 +128,7 @@ describe("memory plugin e2e", () => { }); test("config schema keeps autoCapture disabled by default", async () => { - const { default: memoryPlugin } = await import("./index.js"); - - const config = memoryPlugin.configSchema?.parse?.({ - embedding: { - apiKey: OPENAI_API_KEY, - model: "text-embedding-3-small", - }, - dbPath, - }); + const config = await parseConfig(); expect(config?.autoCapture).toBe(false); expect(config?.autoRecall).toBe(true); @@ -176,7 +175,7 @@ describe("memory plugin e2e", () => { model: "text-embedding-3-small", dimensions: 1024, }, - dbPath, + dbPath: getDbPath(), autoCapture: false, autoRecall: false, }, @@ -279,19 +278,7 @@ describe("memory plugin e2e", () => { // Live tests that require OpenAI API key and actually use LanceDB describeLive("memory plugin live tests", () => { - let tmpDir: string; - let dbPath: string; - - beforeEach(async () => { - tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-memory-live-")); - dbPath = path.join(tmpDir, "lancedb"); - }); - - afterEach(async () => { - if (tmpDir) { - await fs.rm(tmpDir, { recursive: true, force: true }); - } - }); + const { getDbPath } = 
installTmpDirHarness({ prefix: "openclaw-memory-live-" }); test("memory tools work end-to-end", async () => { const { default: memoryPlugin } = await import("./index.js"); @@ -318,7 +305,7 @@ describeLive("memory plugin live tests", () => { apiKey: liveApiKey, model: "text-embedding-3-small", }, - dbPath, + dbPath: getDbPath(), autoCapture: false, autoRecall: false, }, diff --git a/extensions/memory-lancedb/package.json b/extensions/memory-lancedb/package.json index abd920833ca..3f387bee4f4 100644 --- a/extensions/memory-lancedb/package.json +++ b/extensions/memory-lancedb/package.json @@ -1,13 +1,13 @@ { "name": "@openclaw/memory-lancedb", - "version": "2026.3.9", + "version": "2026.3.14", "private": true, "description": "OpenClaw LanceDB-backed long-term memory plugin with auto-recall/capture", "type": "module", "dependencies": { "@lancedb/lancedb": "^0.26.2", "@sinclair/typebox": "0.34.48", - "openai": "^6.27.0" + "openai": "^6.29.0" }, "openclaw": { "extensions": [ diff --git a/extensions/minimax-portal-auth/package.json b/extensions/minimax-portal-auth/package.json index 9443f37d524..093d42dad1d 100644 --- a/extensions/minimax-portal-auth/package.json +++ b/extensions/minimax-portal-auth/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/minimax-portal-auth", - "version": "2026.3.9", + "version": "2026.3.14", "private": true, "description": "OpenClaw MiniMax Portal OAuth provider plugin", "type": "module", diff --git a/extensions/msteams/CHANGELOG.md b/extensions/msteams/CHANGELOG.md index 38d5614305c..4fb831f9278 100644 --- a/extensions/msteams/CHANGELOG.md +++ b/extensions/msteams/CHANGELOG.md @@ -1,5 +1,35 @@ # Changelog +## 2026.3.14 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.13 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.12 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. + ## 2026.3.9 ### Changes diff --git a/extensions/msteams/package.json b/extensions/msteams/package.json index c4453f82f6e..4784334d1d5 100644 --- a/extensions/msteams/package.json +++ b/extensions/msteams/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/msteams", - "version": "2026.3.9", + "version": "2026.3.14", "description": "OpenClaw Microsoft Teams channel plugin", "type": "module", "dependencies": { diff --git a/extensions/msteams/src/attachments.test.ts b/extensions/msteams/src/attachments.test.ts index 6887fad7fcb..790dc8bd33f 100644 --- a/extensions/msteams/src/attachments.test.ts +++ b/extensions/msteams/src/attachments.test.ts @@ -88,14 +88,17 @@ function isUrlAllowedBySsrfPolicy(url: string, policy?: SsrFPolicy): boolean { ); } -const fetchRemoteMediaMock = vi.fn(async (params: RemoteMediaFetchParams) => { +async function fetchRemoteMediaWithRedirects( + params: RemoteMediaFetchParams, + requestInit?: RequestInit, +) { const fetchFn = params.fetchImpl ?? 
fetch; let currentUrl = params.url; for (let i = 0; i <= MAX_REDIRECT_HOPS; i += 1) { if (!isUrlAllowedBySsrfPolicy(currentUrl, params.ssrfPolicy)) { throw new Error(`Blocked hostname (not in allowlist): ${currentUrl}`); } - const res = await fetchFn(currentUrl, { redirect: "manual" }); + const res = await fetchFn(currentUrl, { redirect: "manual", ...requestInit }); if (REDIRECT_STATUS_CODES.includes(res.status)) { const location = res.headers.get("location"); if (!location) { @@ -107,6 +110,10 @@ const fetchRemoteMediaMock = vi.fn(async (params: RemoteMediaFetchParams) => { return readRemoteMediaResponse(res, params); } throw new Error("too many redirects"); +} + +const fetchRemoteMediaMock = vi.fn(async (params: RemoteMediaFetchParams) => { + return await fetchRemoteMediaWithRedirects(params); }); const runtimeStub: PluginRuntime = createPluginRuntimeMock({ @@ -720,24 +727,9 @@ describe("msteams attachments", () => { }); fetchRemoteMediaMock.mockImplementationOnce(async (params) => { - const fetchFn = params.fetchImpl ?? 
fetch; - let currentUrl = params.url; - for (let i = 0; i < MAX_REDIRECT_HOPS; i += 1) { - const res = await fetchFn(currentUrl, { - redirect: "manual", - dispatcher: {}, - } as RequestInit); - if (REDIRECT_STATUS_CODES.includes(res.status)) { - const location = res.headers.get("location"); - if (!location) { - throw new Error("redirect missing location"); - } - currentUrl = new URL(location, currentUrl).toString(); - continue; - } - return readRemoteMediaResponse(res, params); - } - throw new Error("too many redirects"); + return await fetchRemoteMediaWithRedirects(params, { + dispatcher: {}, + } as RequestInit); }); const media = await downloadAttachmentsWithFetch( diff --git a/extensions/msteams/src/attachments/shared.test.ts b/extensions/msteams/src/attachments/shared.test.ts index 186a70f71aa..3e29e65aac4 100644 --- a/extensions/msteams/src/attachments/shared.test.ts +++ b/extensions/msteams/src/attachments/shared.test.ts @@ -31,6 +31,23 @@ function mockFetchWithRedirect(redirectMap: Record, finalBody = }); } +async function expectSafeFetchStatus(params: { + fetchMock: ReturnType; + url: string; + allowHosts: string[]; + expectedStatus: number; + resolveFn?: typeof publicResolve; +}) { + const res = await safeFetch({ + url: params.url, + allowHosts: params.allowHosts, + fetchFn: params.fetchMock as unknown as typeof fetch, + resolveFn: params.resolveFn ?? 
publicResolve, + }); + expect(res.status).toBe(params.expectedStatus); + return res; +} + describe("msteams attachment allowlists", () => { it("normalizes wildcard host lists", () => { expect(resolveAllowedHosts(["*", "graph.microsoft.com"])).toEqual(["*"]); @@ -121,13 +138,12 @@ describe("safeFetch", () => { const fetchMock = vi.fn(async (_url: string, _init?: RequestInit) => { return new Response("ok", { status: 200 }); }); - const res = await safeFetch({ + await expectSafeFetchStatus({ + fetchMock, url: "https://teams.sharepoint.com/file.pdf", allowHosts: ["sharepoint.com"], - fetchFn: fetchMock as unknown as typeof fetch, - resolveFn: publicResolve, + expectedStatus: 200, }); - expect(res.status).toBe(200); expect(fetchMock).toHaveBeenCalledOnce(); // Should have used redirect: "manual" expect(fetchMock.mock.calls[0][1]).toHaveProperty("redirect", "manual"); @@ -137,13 +153,12 @@ describe("safeFetch", () => { const fetchMock = mockFetchWithRedirect({ "https://teams.sharepoint.com/file.pdf": "https://cdn.sharepoint.com/storage/file.pdf", }); - const res = await safeFetch({ + await expectSafeFetchStatus({ + fetchMock, url: "https://teams.sharepoint.com/file.pdf", allowHosts: ["sharepoint.com"], - fetchFn: fetchMock as unknown as typeof fetch, - resolveFn: publicResolve, + expectedStatus: 200, }); - expect(res.status).toBe(200); expect(fetchMock).toHaveBeenCalledTimes(2); }); diff --git a/extensions/msteams/src/channel.directory.test.ts b/extensions/msteams/src/channel.directory.test.ts index 0746f78aabb..be95e6103ea 100644 --- a/extensions/msteams/src/channel.directory.test.ts +++ b/extensions/msteams/src/channel.directory.test.ts @@ -1,15 +1,10 @@ import type { OpenClawConfig, RuntimeEnv } from "openclaw/plugin-sdk/msteams"; import { describe, expect, it } from "vitest"; +import { createDirectoryTestRuntime, expectDirectorySurface } from "../../test-utils/directory.js"; import { msteamsPlugin } from "./channel.js"; describe("msteams directory", () => { - const 
runtimeEnv: RuntimeEnv = { - log: () => {}, - error: () => {}, - exit: (code: number): never => { - throw new Error(`exit ${code}`); - }, - }; + const runtimeEnv = createDirectoryTestRuntime() as RuntimeEnv; it("lists peers and groups from config", async () => { const cfg = { @@ -29,12 +24,10 @@ describe("msteams directory", () => { }, } as unknown as OpenClawConfig; - expect(msteamsPlugin.directory).toBeTruthy(); - expect(msteamsPlugin.directory?.listPeers).toBeTruthy(); - expect(msteamsPlugin.directory?.listGroups).toBeTruthy(); + const directory = expectDirectorySurface(msteamsPlugin.directory); await expect( - msteamsPlugin.directory!.listPeers!({ + directory.listPeers({ cfg, query: undefined, limit: undefined, @@ -50,7 +43,7 @@ describe("msteams directory", () => { ); await expect( - msteamsPlugin.directory!.listGroups!({ + directory.listGroups({ cfg, query: undefined, limit: undefined, diff --git a/extensions/msteams/src/graph-upload.test.ts b/extensions/msteams/src/graph-upload.test.ts new file mode 100644 index 00000000000..484075984dd --- /dev/null +++ b/extensions/msteams/src/graph-upload.test.ts @@ -0,0 +1,101 @@ +import { describe, expect, it, vi } from "vitest"; +import { uploadToOneDrive, uploadToSharePoint } from "./graph-upload.js"; + +describe("graph upload helpers", () => { + const tokenProvider = { + getAccessToken: vi.fn(async () => "graph-token"), + }; + + it("uploads to OneDrive with the personal drive path", async () => { + const fetchFn = vi.fn( + async () => + new Response( + JSON.stringify({ id: "item-1", webUrl: "https://example.com/1", name: "a.txt" }), + { + status: 200, + headers: { "content-type": "application/json" }, + }, + ), + ); + + const result = await uploadToOneDrive({ + buffer: Buffer.from("hello"), + filename: "a.txt", + tokenProvider, + fetchFn: fetchFn as typeof fetch, + }); + + expect(fetchFn).toHaveBeenCalledWith( + "https://graph.microsoft.com/v1.0/me/drive/root:/OpenClawShared/a.txt:/content", + 
expect.objectContaining({ + method: "PUT", + headers: expect.objectContaining({ + Authorization: "Bearer graph-token", + "Content-Type": "application/octet-stream", + }), + }), + ); + expect(result).toEqual({ + id: "item-1", + webUrl: "https://example.com/1", + name: "a.txt", + }); + }); + + it("uploads to SharePoint with the site drive path", async () => { + const fetchFn = vi.fn( + async () => + new Response( + JSON.stringify({ id: "item-2", webUrl: "https://example.com/2", name: "b.txt" }), + { + status: 200, + headers: { "content-type": "application/json" }, + }, + ), + ); + + const result = await uploadToSharePoint({ + buffer: Buffer.from("world"), + filename: "b.txt", + siteId: "site-123", + tokenProvider, + fetchFn: fetchFn as typeof fetch, + }); + + expect(fetchFn).toHaveBeenCalledWith( + "https://graph.microsoft.com/v1.0/sites/site-123/drive/root:/OpenClawShared/b.txt:/content", + expect.objectContaining({ + method: "PUT", + headers: expect.objectContaining({ + Authorization: "Bearer graph-token", + "Content-Type": "application/octet-stream", + }), + }), + ); + expect(result).toEqual({ + id: "item-2", + webUrl: "https://example.com/2", + name: "b.txt", + }); + }); + + it("rejects upload responses missing required fields", async () => { + const fetchFn = vi.fn( + async () => + new Response(JSON.stringify({ id: "item-3" }), { + status: 200, + headers: { "content-type": "application/json" }, + }), + ); + + await expect( + uploadToSharePoint({ + buffer: Buffer.from("world"), + filename: "bad.txt", + siteId: "site-123", + tokenProvider, + fetchFn: fetchFn as typeof fetch, + }), + ).rejects.toThrow("SharePoint upload response missing required fields"); + }); +}); diff --git a/extensions/msteams/src/graph-upload.ts b/extensions/msteams/src/graph-upload.ts index 65e854ac439..9705b1a63a4 100644 --- a/extensions/msteams/src/graph-upload.ts +++ b/extensions/msteams/src/graph-upload.ts @@ -21,6 +21,53 @@ export interface OneDriveUploadResult { name: string; } 
+function parseUploadedDriveItem( + data: { id?: string; webUrl?: string; name?: string }, + label: "OneDrive" | "SharePoint", +): OneDriveUploadResult { + if (!data.id || !data.webUrl || !data.name) { + throw new Error(`${label} upload response missing required fields`); + } + + return { + id: data.id, + webUrl: data.webUrl, + name: data.name, + }; +} + +async function uploadDriveItem(params: { + buffer: Buffer; + filename: string; + contentType?: string; + tokenProvider: MSTeamsAccessTokenProvider; + fetchFn?: typeof fetch; + url: string; + label: "OneDrive" | "SharePoint"; +}): Promise { + const fetchFn = params.fetchFn ?? fetch; + const token = await params.tokenProvider.getAccessToken(GRAPH_SCOPE); + + const res = await fetchFn(params.url, { + method: "PUT", + headers: { + Authorization: `Bearer ${token}`, + "Content-Type": params.contentType ?? "application/octet-stream", + }, + body: new Uint8Array(params.buffer), + }); + + if (!res.ok) { + const body = await res.text().catch(() => ""); + throw new Error(`${params.label} upload failed: ${res.status} ${res.statusText} - ${body}`); + } + + return parseUploadedDriveItem( + (await res.json()) as { id?: string; webUrl?: string; name?: string }, + params.label, + ); +} + /** * Upload a file to the user's OneDrive root folder. * For larger files, this uses the simple upload endpoint (up to 4MB). @@ -32,41 +79,13 @@ export async function uploadToOneDrive(params: { tokenProvider: MSTeamsAccessTokenProvider; fetchFn?: typeof fetch; }): Promise { - const fetchFn = params.fetchFn ?? fetch; - const token = await params.tokenProvider.getAccessToken(GRAPH_SCOPE); - // Use "OpenClawShared" folder to organize bot-uploaded files const uploadPath = `/OpenClawShared/${encodeURIComponent(params.filename)}`; - - const res = await fetchFn(`${GRAPH_ROOT}/me/drive/root:${uploadPath}:/content`, { - method: "PUT", - headers: { - Authorization: `Bearer ${token}`, - "Content-Type": params.contentType ?? 
"application/octet-stream", - }, - body: new Uint8Array(params.buffer), + return await uploadDriveItem({ + ...params, + url: `${GRAPH_ROOT}/me/drive/root:${uploadPath}:/content`, + label: "OneDrive", }); - - if (!res.ok) { - const body = await res.text().catch(() => ""); - throw new Error(`OneDrive upload failed: ${res.status} ${res.statusText} - ${body}`); - } - - const data = (await res.json()) as { - id?: string; - webUrl?: string; - name?: string; - }; - - if (!data.id || !data.webUrl || !data.name) { - throw new Error("OneDrive upload response missing required fields"); - } - - return { - id: data.id, - webUrl: data.webUrl, - name: data.name, - }; } export interface OneDriveSharingLink { @@ -175,44 +194,13 @@ export async function uploadToSharePoint(params: { siteId: string; fetchFn?: typeof fetch; }): Promise { - const fetchFn = params.fetchFn ?? fetch; - const token = await params.tokenProvider.getAccessToken(GRAPH_SCOPE); - // Use "OpenClawShared" folder to organize bot-uploaded files const uploadPath = `/OpenClawShared/${encodeURIComponent(params.filename)}`; - - const res = await fetchFn( - `${GRAPH_ROOT}/sites/${params.siteId}/drive/root:${uploadPath}:/content`, - { - method: "PUT", - headers: { - Authorization: `Bearer ${token}`, - "Content-Type": params.contentType ?? 
"application/octet-stream", - }, - body: new Uint8Array(params.buffer), - }, - ); - - if (!res.ok) { - const body = await res.text().catch(() => ""); - throw new Error(`SharePoint upload failed: ${res.status} ${res.statusText} - ${body}`); - } - - const data = (await res.json()) as { - id?: string; - webUrl?: string; - name?: string; - }; - - if (!data.id || !data.webUrl || !data.name) { - throw new Error("SharePoint upload response missing required fields"); - } - - return { - id: data.id, - webUrl: data.webUrl, - name: data.name, - }; + return await uploadDriveItem({ + ...params, + url: `${GRAPH_ROOT}/sites/${params.siteId}/drive/root:${uploadPath}:/content`, + label: "SharePoint", + }); } export interface ChatMember { diff --git a/extensions/msteams/src/messenger.test.ts b/extensions/msteams/src/messenger.test.ts index aa0a92b5159..cc4cf2fb6f0 100644 --- a/extensions/msteams/src/messenger.test.ts +++ b/extensions/msteams/src/messenger.test.ts @@ -139,6 +139,22 @@ describe("msteams messenger", () => { }); describe("sendMSTeamsMessages", () => { + function createRevokedThreadContext(params?: { failAfterAttempt?: number; sent?: string[] }) { + let attempt = 0; + return { + sendActivity: async (activity: unknown) => { + const { text } = activity as { text?: string }; + const content = text ?? 
""; + attempt += 1; + if (params?.failAfterAttempt && attempt < params.failAfterAttempt) { + params.sent?.push(content); + return { id: `id:${content}` }; + } + throw new TypeError(REVOCATION_ERROR); + }, + }; + } + const baseRef: StoredConversationReference = { activityId: "activity123", user: { id: "user123", name: "User" }, @@ -305,13 +321,7 @@ describe("msteams messenger", () => { it("falls back to proactive messaging when thread context is revoked", async () => { const proactiveSent: string[] = []; - - const ctx = { - sendActivity: async () => { - throw new TypeError(REVOCATION_ERROR); - }, - }; - + const ctx = createRevokedThreadContext(); const adapter = createFallbackAdapter(proactiveSent); const ids = await sendMSTeamsMessages({ @@ -331,21 +341,7 @@ describe("msteams messenger", () => { it("falls back only for remaining thread messages after context revocation", async () => { const threadSent: string[] = []; const proactiveSent: string[] = []; - let attempt = 0; - - const ctx = { - sendActivity: async (activity: unknown) => { - const { text } = activity as { text?: string }; - const content = text ?? 
""; - attempt += 1; - if (attempt === 1) { - threadSent.push(content); - return { id: `id:${content}` }; - } - throw new TypeError(REVOCATION_ERROR); - }, - }; - + const ctx = createRevokedThreadContext({ failAfterAttempt: 2, sent: threadSent }); const adapter = createFallbackAdapter(proactiveSent); const ids = await sendMSTeamsMessages({ diff --git a/extensions/msteams/src/monitor-handler.file-consent.test.ts b/extensions/msteams/src/monitor-handler.file-consent.test.ts index 88a6a67a838..5e72f7a9dd1 100644 --- a/extensions/msteams/src/monitor-handler.file-consent.test.ts +++ b/extensions/msteams/src/monitor-handler.file-consent.test.ts @@ -123,6 +123,26 @@ function createInvokeContext(params: { }; } +function createConsentInvokeHarness(params: { + pendingConversationId?: string; + invokeConversationId: string; + action: "accept" | "decline"; +}) { + const uploadId = storePendingUpload({ + buffer: Buffer.from("TOP_SECRET_VICTIM_FILE\n"), + filename: "secret.txt", + contentType: "text/plain", + conversationId: params.pendingConversationId ?? 
"19:victim@thread.v2", + }); + const handler = registerMSTeamsHandlers(createActivityHandler(), createDeps()); + const { context, sendActivity } = createInvokeContext({ + conversationId: params.invokeConversationId, + uploadId, + action: params.action, + }); + return { uploadId, handler, context, sendActivity }; +} + describe("msteams file consent invoke authz", () => { beforeEach(() => { setMSTeamsRuntime(runtimeStub); @@ -132,17 +152,8 @@ describe("msteams file consent invoke authz", () => { }); it("uploads when invoke conversation matches pending upload conversation", async () => { - const uploadId = storePendingUpload({ - buffer: Buffer.from("TOP_SECRET_VICTIM_FILE\n"), - filename: "secret.txt", - contentType: "text/plain", - conversationId: "19:victim@thread.v2", - }); - const deps = createDeps(); - const handler = registerMSTeamsHandlers(createActivityHandler(), deps); - const { context, sendActivity } = createInvokeContext({ - conversationId: "19:victim@thread.v2;messageid=abc123", - uploadId, + const { uploadId, handler, context, sendActivity } = createConsentInvokeHarness({ + invokeConversationId: "19:victim@thread.v2;messageid=abc123", action: "accept", }); @@ -166,17 +177,8 @@ describe("msteams file consent invoke authz", () => { }); it("rejects cross-conversation accept invoke and keeps pending upload", async () => { - const uploadId = storePendingUpload({ - buffer: Buffer.from("TOP_SECRET_VICTIM_FILE\n"), - filename: "secret.txt", - contentType: "text/plain", - conversationId: "19:victim@thread.v2", - }); - const deps = createDeps(); - const handler = registerMSTeamsHandlers(createActivityHandler(), deps); - const { context, sendActivity } = createInvokeContext({ - conversationId: "19:attacker@thread.v2", - uploadId, + const { uploadId, handler, context, sendActivity } = createConsentInvokeHarness({ + invokeConversationId: "19:attacker@thread.v2", action: "accept", }); @@ -198,17 +200,8 @@ describe("msteams file consent invoke authz", () => { }); 
it("ignores cross-conversation decline invoke and keeps pending upload", async () => { - const uploadId = storePendingUpload({ - buffer: Buffer.from("TOP_SECRET_VICTIM_FILE\n"), - filename: "secret.txt", - contentType: "text/plain", - conversationId: "19:victim@thread.v2", - }); - const deps = createDeps(); - const handler = registerMSTeamsHandlers(createActivityHandler(), deps); - const { context, sendActivity } = createInvokeContext({ - conversationId: "19:attacker@thread.v2", - uploadId, + const { uploadId, handler, context, sendActivity } = createConsentInvokeHarness({ + invokeConversationId: "19:attacker@thread.v2", action: "decline", }); diff --git a/extensions/msteams/src/monitor-handler/message-handler.ts b/extensions/msteams/src/monitor-handler/message-handler.ts index 6fe227537d3..60a88c56664 100644 --- a/extensions/msteams/src/monitor-handler/message-handler.ts +++ b/extensions/msteams/src/monitor-handler/message-handler.ts @@ -9,7 +9,7 @@ import { evaluateSenderGroupAccessForPolicy, resolveSenderScopedGroupPolicy, recordPendingHistoryEntryIfEnabled, - resolveControlCommandGate, + resolveDualTextControlCommandGate, resolveDefaultGroupPolicy, isDangerousNameMatchingEnabled, readStoreAllowFromForDmPolicy, @@ -175,6 +175,7 @@ export function createMSTeamsMessageHandler(deps: MSTeamsMessageHandlerDeps) { teamName, conversationId, channelName, + allowNameMatching: isDangerousNameMatchingEnabled(msteamsCfg), }); const senderGroupPolicy = resolveSenderScopedGroupPolicy({ groupPolicy, @@ -296,18 +297,15 @@ export function createMSTeamsMessageHandler(deps: MSTeamsMessageHandlerDeps) { senderName, allowNameMatching: isDangerousNameMatchingEnabled(msteamsCfg), }); - const hasControlCommandInMessage = core.channel.text.hasControlCommand(text, cfg); - const commandGate = resolveControlCommandGate({ + const { commandAuthorized, shouldBlock } = resolveDualTextControlCommandGate({ useAccessGroups, - authorizers: [ - { configured: commandDmAllowFrom.length > 0, allowed: 
ownerAllowedForCommands }, - { configured: effectiveGroupAllowFrom.length > 0, allowed: groupAllowedForCommands }, - ], - allowTextCommands: true, - hasControlCommand: hasControlCommandInMessage, + primaryConfigured: commandDmAllowFrom.length > 0, + primaryAllowed: ownerAllowedForCommands, + secondaryConfigured: effectiveGroupAllowFrom.length > 0, + secondaryAllowed: groupAllowedForCommands, + hasControlCommand: core.channel.text.hasControlCommand(text, cfg), }); - const commandAuthorized = commandGate.commandAuthorized; - if (commandGate.shouldBlock) { + if (shouldBlock) { logInboundDrop({ log: logVerboseMessage, channel: "msteams", diff --git a/extensions/msteams/src/policy.test.ts b/extensions/msteams/src/policy.test.ts index 02d59a99723..ac324f3d785 100644 --- a/extensions/msteams/src/policy.test.ts +++ b/extensions/msteams/src/policy.test.ts @@ -6,6 +6,27 @@ import { resolveMSTeamsRouteConfig, } from "./policy.js"; +function resolveNamedTeamRouteConfig(allowNameMatching = false) { + const cfg: MSTeamsConfig = { + teams: { + "My Team": { + requireMention: true, + channels: { + "General Chat": { requireMention: false }, + }, + }, + }, + }; + + return resolveMSTeamsRouteConfig({ + cfg, + teamName: "My Team", + channelName: "General Chat", + conversationId: "ignored", + allowNameMatching, + }); +} + describe("msteams policy", () => { describe("resolveMSTeamsRouteConfig", () => { it("returns team and channel config when present", () => { @@ -50,24 +71,16 @@ describe("msteams policy", () => { expect(res.allowed).toBe(false); }); - it("matches team and channel by name", () => { - const cfg: MSTeamsConfig = { - teams: { - "My Team": { - requireMention: true, - channels: { - "General Chat": { requireMention: false }, - }, - }, - }, - }; + it("blocks team and channel name matches by default", () => { + const res = resolveNamedTeamRouteConfig(); - const res = resolveMSTeamsRouteConfig({ - cfg, - teamName: "My Team", - channelName: "General Chat", - conversationId: 
"ignored", - }); + expect(res.teamConfig).toBeUndefined(); + expect(res.channelConfig).toBeUndefined(); + expect(res.allowed).toBe(false); + }); + + it("matches team and channel by name when dangerous name matching is enabled", () => { + const res = resolveNamedTeamRouteConfig(true); expect(res.teamConfig?.requireMention).toBe(true); expect(res.channelConfig?.requireMention).toBe(false); diff --git a/extensions/msteams/src/policy.ts b/extensions/msteams/src/policy.ts index 3d405f94c9e..c6317184d89 100644 --- a/extensions/msteams/src/policy.ts +++ b/extensions/msteams/src/policy.ts @@ -16,6 +16,7 @@ import { resolveToolsBySender, resolveChannelEntryMatchWithFallback, resolveNestedAllowlistDecision, + isDangerousNameMatchingEnabled, } from "openclaw/plugin-sdk/msteams"; export type MSTeamsResolvedRouteConfig = { @@ -35,6 +36,7 @@ export function resolveMSTeamsRouteConfig(params: { teamName?: string | null | undefined; conversationId?: string | null | undefined; channelName?: string | null | undefined; + allowNameMatching?: boolean; }): MSTeamsResolvedRouteConfig { const teamId = params.teamId?.trim(); const teamName = params.teamName?.trim(); @@ -44,8 +46,8 @@ export function resolveMSTeamsRouteConfig(params: { const allowlistConfigured = Object.keys(teams).length > 0; const teamCandidates = buildChannelKeyCandidates( teamId, - teamName, - teamName ? normalizeChannelSlug(teamName) : undefined, + params.allowNameMatching ? teamName : undefined, + params.allowNameMatching && teamName ? normalizeChannelSlug(teamName) : undefined, ); const teamMatch = resolveChannelEntryMatchWithFallback({ entries: teams, @@ -58,8 +60,8 @@ export function resolveMSTeamsRouteConfig(params: { const channelAllowlistConfigured = Object.keys(channels).length > 0; const channelCandidates = buildChannelKeyCandidates( conversationId, - channelName, - channelName ? normalizeChannelSlug(channelName) : undefined, + params.allowNameMatching ? 
channelName : undefined, + params.allowNameMatching && channelName ? normalizeChannelSlug(channelName) : undefined, ); const channelMatch = resolveChannelEntryMatchWithFallback({ entries: channels, @@ -101,6 +103,7 @@ export function resolveMSTeamsGroupToolPolicy( const groupId = params.groupId?.trim(); const groupChannel = params.groupChannel?.trim(); const groupSpace = params.groupSpace?.trim(); + const allowNameMatching = isDangerousNameMatchingEnabled(cfg); const resolved = resolveMSTeamsRouteConfig({ cfg, @@ -108,6 +111,7 @@ export function resolveMSTeamsGroupToolPolicy( teamName: groupSpace, conversationId: groupId, channelName: groupChannel, + allowNameMatching, }); if (resolved.channelConfig) { @@ -158,8 +162,8 @@ export function resolveMSTeamsGroupToolPolicy( const channelCandidates = buildChannelKeyCandidates( groupId, - groupChannel, - groupChannel ? normalizeChannelSlug(groupChannel) : undefined, + allowNameMatching ? groupChannel : undefined, + allowNameMatching && groupChannel ? normalizeChannelSlug(groupChannel) : undefined, ); for (const teamConfig of Object.values(cfg.teams ?? {})) { const match = resolveChannelEntryMatchWithFallback({ diff --git a/extensions/msteams/src/resolve-allowlist.test.ts b/extensions/msteams/src/resolve-allowlist.test.ts index 03d97c15b01..1fdd706aaca 100644 --- a/extensions/msteams/src/resolve-allowlist.test.ts +++ b/extensions/msteams/src/resolve-allowlist.test.ts @@ -54,10 +54,12 @@ describe("resolveMSTeamsUserAllowlist", () => { describe("resolveMSTeamsChannelAllowlist", () => { it("resolves team/channel by team name + channel display name", async () => { - listTeamsByName.mockResolvedValueOnce([{ id: "team-1", displayName: "Product Team" }]); + // After the fix, listChannelsForTeam is called once and reused for both + // General channel resolution and channel matching. 
+ listTeamsByName.mockResolvedValueOnce([{ id: "team-guid-1", displayName: "Product Team" }]); listChannelsForTeam.mockResolvedValueOnce([ - { id: "channel-1", displayName: "General" }, - { id: "channel-2", displayName: "Roadmap" }, + { id: "19:general-conv-id@thread.tacv2", displayName: "General" }, + { id: "19:roadmap-conv-id@thread.tacv2", displayName: "Roadmap" }, ]); const [result] = await resolveMSTeamsChannelAllowlist({ @@ -65,14 +67,80 @@ describe("resolveMSTeamsChannelAllowlist", () => { entries: ["Product Team/Roadmap"], }); + // teamId is now the General channel's conversation ID — not the Graph GUID — + // because that's what Bot Framework sends as channelData.team.id at runtime. expect(result).toEqual({ input: "Product Team/Roadmap", resolved: true, - teamId: "team-1", + teamId: "19:general-conv-id@thread.tacv2", teamName: "Product Team", - channelId: "channel-2", + channelId: "19:roadmap-conv-id@thread.tacv2", channelName: "Roadmap", note: "multiple channels; chose first", }); }); + + it("uses General channel conversation ID as team key for team-only entry", async () => { + // When no channel is specified we still resolve the General channel so the + // stored key matches what Bot Framework sends as channelData.team.id. + listTeamsByName.mockResolvedValueOnce([{ id: "guid-engineering", displayName: "Engineering" }]); + listChannelsForTeam.mockResolvedValueOnce([ + { id: "19:eng-general@thread.tacv2", displayName: "General" }, + { id: "19:eng-standups@thread.tacv2", displayName: "Standups" }, + ]); + + const [result] = await resolveMSTeamsChannelAllowlist({ + cfg: {}, + entries: ["Engineering"], + }); + + expect(result).toEqual({ + input: "Engineering", + resolved: true, + teamId: "19:eng-general@thread.tacv2", + teamName: "Engineering", + }); + }); + + it("falls back to Graph GUID when listChannelsForTeam throws", async () => { + // Edge case: API call fails (rate limit, network error). 
We fall back to + // the Graph GUID as the team key — the pre-fix behavior — so resolution + // still succeeds instead of propagating the error. + listTeamsByName.mockResolvedValueOnce([{ id: "guid-flaky", displayName: "Flaky Team" }]); + listChannelsForTeam.mockRejectedValueOnce(new Error("429 Too Many Requests")); + + const [result] = await resolveMSTeamsChannelAllowlist({ + cfg: {}, + entries: ["Flaky Team"], + }); + + expect(result).toEqual({ + input: "Flaky Team", + resolved: true, + teamId: "guid-flaky", + teamName: "Flaky Team", + }); + }); + + it("falls back to Graph GUID when General channel is not found", async () => { + // Edge case: General channel was renamed or deleted. We fall back to the + // Graph GUID so resolution still succeeds rather than silently breaking. + listTeamsByName.mockResolvedValueOnce([{ id: "guid-ops", displayName: "Operations" }]); + listChannelsForTeam.mockResolvedValueOnce([ + { id: "19:ops-announce@thread.tacv2", displayName: "Announcements" }, + { id: "19:ops-random@thread.tacv2", displayName: "Random" }, + ]); + + const [result] = await resolveMSTeamsChannelAllowlist({ + cfg: {}, + entries: ["Operations"], + }); + + expect(result).toEqual({ + input: "Operations", + resolved: true, + teamId: "guid-ops", + teamName: "Operations", + }); + }); }); diff --git a/extensions/msteams/src/resolve-allowlist.ts b/extensions/msteams/src/resolve-allowlist.ts index fede9c7f98b..374cae2d965 100644 --- a/extensions/msteams/src/resolve-allowlist.ts +++ b/extensions/msteams/src/resolve-allowlist.ts @@ -120,11 +120,26 @@ export async function resolveMSTeamsChannelAllowlist(params: { return { input, resolved: false, note: "team not found" }; } const teamMatch = teams[0]; - const teamId = teamMatch.id?.trim(); + const graphTeamId = teamMatch.id?.trim(); const teamName = teamMatch.displayName?.trim() || team; - if (!teamId) { + if (!graphTeamId) { return { input, resolved: false, note: "team id missing" }; } + // Bot Framework sends the General 
channel's conversation ID as + // channelData.team.id at runtime, NOT the Graph API group GUID. + // Fetch channels upfront so we can resolve the correct key format for + // runtime matching and reuse the list for channel lookups. + let teamChannels: Awaited<ReturnType<typeof listChannelsForTeam>> = []; + try { + teamChannels = await listChannelsForTeam(token, graphTeamId); + } catch { + // API failure (rate limit, network error) — fall back to Graph GUID as team key + } + const generalChannel = teamChannels.find((ch) => ch.displayName?.toLowerCase() === "general"); + // Use the General channel's conversation ID as the team key — this + // matches what Bot Framework sends at runtime. Fall back to the Graph + // GUID if the General channel isn't found (renamed or deleted). + const teamId = generalChannel?.id?.trim() || graphTeamId; if (!channel) { return { input, @@ -134,11 +149,11 @@ note: teams.length > 1 ? "multiple teams; chose first" : undefined, }; } - const channels = await listChannelsForTeam(token, teamId); + // Reuse teamChannels — already fetched above const channelMatch = - channels.find((item) => item.id === channel) ?? - channels.find((item) => item.displayName?.toLowerCase() === channel.toLowerCase()) ?? - channels.find((item) => + teamChannels.find((item) => item.id === channel) ?? + teamChannels.find((item) => item.displayName?.toLowerCase() === channel.toLowerCase()) ?? + teamChannels.find((item) => item.displayName?.toLowerCase().includes(channel.toLowerCase() ?? ""), ); if (!channelMatch?.id) { @@ -151,7 +166,7 @@ teamName, channelId: channelMatch.id, channelName: channelMatch.displayName ?? channel, - note: channels.length > 1 ? 
"multiple channels; chose first" : undefined, }; }, }); diff --git a/extensions/nextcloud-talk/package.json b/extensions/nextcloud-talk/package.json index 96797d4b76e..c217d0f0ce7 100644 --- a/extensions/nextcloud-talk/package.json +++ b/extensions/nextcloud-talk/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/nextcloud-talk", - "version": "2026.3.9", + "version": "2026.3.14", "description": "OpenClaw Nextcloud Talk channel plugin", "type": "module", "dependencies": { diff --git a/extensions/nextcloud-talk/src/accounts.test.ts b/extensions/nextcloud-talk/src/accounts.test.ts new file mode 100644 index 00000000000..dbc43690a3b --- /dev/null +++ b/extensions/nextcloud-talk/src/accounts.test.ts @@ -0,0 +1,30 @@ +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { resolveNextcloudTalkAccount } from "./accounts.js"; +import type { CoreConfig } from "./types.js"; + +describe("resolveNextcloudTalkAccount", () => { + it.runIf(process.platform !== "win32")("rejects symlinked botSecretFile paths", () => { + const dir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-nextcloud-talk-")); + const secretFile = path.join(dir, "secret.txt"); + const secretLink = path.join(dir, "secret-link.txt"); + fs.writeFileSync(secretFile, "bot-secret\n", "utf8"); + fs.symlinkSync(secretFile, secretLink); + + const cfg = { + channels: { + "nextcloud-talk": { + baseUrl: "https://cloud.example.com", + botSecretFile: secretLink, + }, + }, + } as CoreConfig; + + const account = resolveNextcloudTalkAccount({ cfg }); + expect(account.secret).toBe(""); + expect(account.secretSource).toBe("none"); + fs.rmSync(dir, { recursive: true, force: true }); + }); +}); diff --git a/extensions/nextcloud-talk/src/accounts.ts b/extensions/nextcloud-talk/src/accounts.ts index 74bb45cfd8b..2cfba6fea44 100644 --- a/extensions/nextcloud-talk/src/accounts.ts +++ b/extensions/nextcloud-talk/src/accounts.ts @@ -1,4 +1,4 @@ -import 
{ readFileSync } from "node:fs"; +import { tryReadSecretFileSync } from "openclaw/plugin-sdk/core"; import { createAccountListHelpers, DEFAULT_ACCOUNT_ID, @@ -88,13 +88,13 @@ function resolveNextcloudTalkSecret( } if (merged.botSecretFile) { - try { - const fileSecret = readFileSync(merged.botSecretFile, "utf-8").trim(); - if (fileSecret) { - return { secret: fileSecret, source: "secretFile" }; - } - } catch { - // File not found or unreadable, fall through. + const fileSecret = tryReadSecretFileSync( + merged.botSecretFile, + "Nextcloud Talk bot secret file", + { rejectSymlink: true }, + ); + if (fileSecret) { + return { secret: fileSecret, source: "secretFile" }; } } diff --git a/extensions/nextcloud-talk/src/channel.startup.test.ts b/extensions/nextcloud-talk/src/channel.startup.test.ts index 79b3cd77cd5..5fd0607e753 100644 --- a/extensions/nextcloud-talk/src/channel.startup.test.ts +++ b/extensions/nextcloud-talk/src/channel.startup.test.ts @@ -1,5 +1,9 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import { createStartAccountContext } from "../../test-utils/start-account-context.js"; +import { + expectStopPendingUntilAbort, + startAccountAndTrackLifecycle, +} from "../../test-utils/start-account-lifecycle.js"; import type { ResolvedNextcloudTalkAccount } from "./accounts.js"; const hoisted = vi.hoisted(() => ({ @@ -40,28 +44,20 @@ describe("nextcloudTalkPlugin gateway.startAccount", () => { it("keeps startAccount pending until abort, then stops the monitor", async () => { const stop = vi.fn(); hoisted.monitorNextcloudTalkProvider.mockResolvedValue({ stop }); - const abort = new AbortController(); - - const task = nextcloudTalkPlugin.gateway!.startAccount!( - createStartAccountContext({ - account: buildAccount(), - abortSignal: abort.signal, - }), - ); - let settled = false; - void task.then(() => { - settled = true; + const { abort, task, isSettled } = startAccountAndTrackLifecycle({ + startAccount: nextcloudTalkPlugin.gateway!.startAccount!, 
+ account: buildAccount(), }); - await vi.waitFor(() => { - expect(hoisted.monitorNextcloudTalkProvider).toHaveBeenCalledOnce(); + await expectStopPendingUntilAbort({ + waitForStarted: () => + vi.waitFor(() => { + expect(hoisted.monitorNextcloudTalkProvider).toHaveBeenCalledOnce(); + }), + isSettled, + abort, + task, + stop, }); - expect(settled).toBe(false); - expect(stop).not.toHaveBeenCalled(); - - abort.abort(); - await task; - - expect(stop).toHaveBeenCalledOnce(); }); it("stops immediately when startAccount receives an already-aborted signal", async () => { diff --git a/extensions/nextcloud-talk/src/channel.ts b/extensions/nextcloud-talk/src/channel.ts index 6fdf36e9f8c..473299b74e0 100644 --- a/extensions/nextcloud-talk/src/channel.ts +++ b/extensions/nextcloud-talk/src/channel.ts @@ -2,6 +2,7 @@ import { buildAccountScopedDmSecurityPolicy, collectAllowlistProviderGroupPolicyWarnings, collectOpenGroupPolicyRouteAllowlistWarnings, + createAccountStatusSink, formatAllowFromLowercase, mapAllowFromEntries, } from "openclaw/plugin-sdk/compat"; @@ -15,11 +16,11 @@ import { deleteAccountFromConfigSection, normalizeAccountId, setAccountEnabledInConfigSection, - waitForAbortSignal, type ChannelPlugin, type OpenClawConfig, type ChannelSetupInput, } from "openclaw/plugin-sdk/nextcloud-talk"; +import { runStoppablePassiveMonitor } from "../../shared/passive-monitor.js"; import { listNextcloudTalkAccountIds, resolveDefaultNextcloudTalkAccountId, @@ -338,17 +339,22 @@ export const nextcloudTalkPlugin: ChannelPlugin = ctx.log?.info(`[${account.accountId}] starting Nextcloud Talk webhook server`); - const { stop } = await monitorNextcloudTalkProvider({ - accountId: account.accountId, - config: ctx.cfg as CoreConfig, - runtime: ctx.runtime, - abortSignal: ctx.abortSignal, - statusSink: (patch) => ctx.setStatus({ accountId: ctx.accountId, ...patch }), + const statusSink = createAccountStatusSink({ + accountId: ctx.accountId, + setStatus: ctx.setStatus, }); - // Keep webhook 
channels pending for the account lifecycle. - await waitForAbortSignal(ctx.abortSignal); - stop(); + await runStoppablePassiveMonitor({ + abortSignal: ctx.abortSignal, + start: async () => + await monitorNextcloudTalkProvider({ + accountId: account.accountId, + config: ctx.cfg as CoreConfig, + runtime: ctx.runtime, + abortSignal: ctx.abortSignal, + statusSink, + }), + }); }, logoutAccount: async ({ accountId, cfg }) => { const nextCfg = { ...cfg } as OpenClawConfig; diff --git a/extensions/nextcloud-talk/src/config-schema.ts b/extensions/nextcloud-talk/src/config-schema.ts index 5ab3e632d22..85cb14ff213 100644 --- a/extensions/nextcloud-talk/src/config-schema.ts +++ b/extensions/nextcloud-talk/src/config-schema.ts @@ -9,6 +9,7 @@ import { requireOpenAllowFrom, } from "openclaw/plugin-sdk/nextcloud-talk"; import { z } from "zod"; +import { requireChannelOpenAllowFrom } from "../../shared/config-schema-helpers.js"; import { buildSecretInputSchema } from "./secret-input.js"; export const NextcloudTalkRoomSchema = z @@ -48,13 +49,12 @@ export const NextcloudTalkAccountSchemaBase = z export const NextcloudTalkAccountSchema = NextcloudTalkAccountSchemaBase.superRefine( (value, ctx) => { - requireOpenAllowFrom({ + requireChannelOpenAllowFrom({ + channel: "nextcloud-talk", policy: value.dmPolicy, allowFrom: value.allowFrom, ctx, - path: ["allowFrom"], - message: - 'channels.nextcloud-talk.dmPolicy="open" requires channels.nextcloud-talk.allowFrom to include "*"', + requireOpenAllowFrom, }); }, ); @@ -63,12 +63,11 @@ export const NextcloudTalkConfigSchema = NextcloudTalkAccountSchemaBase.extend({ accounts: z.record(z.string(), NextcloudTalkAccountSchema.optional()).optional(), defaultAccount: z.string().optional(), }).superRefine((value, ctx) => { - requireOpenAllowFrom({ + requireChannelOpenAllowFrom({ + channel: "nextcloud-talk", policy: value.dmPolicy, allowFrom: value.allowFrom, ctx, - path: ["allowFrom"], - message: - 'channels.nextcloud-talk.dmPolicy="open" requires 
channels.nextcloud-talk.allowFrom to include "*"', + requireOpenAllowFrom, }); }); diff --git a/extensions/nextcloud-talk/src/monitor.ts b/extensions/nextcloud-talk/src/monitor.ts index f940195a28b..93c66ade4b5 100644 --- a/extensions/nextcloud-talk/src/monitor.ts +++ b/extensions/nextcloud-talk/src/monitor.ts @@ -1,12 +1,12 @@ import { createServer, type IncomingMessage, type Server, type ServerResponse } from "node:http"; import os from "node:os"; import { - createLoggerBackedRuntime, type RuntimeEnv, isRequestBodyLimitError, readRequestBodyWithLimit, requestBodyErrorToText, } from "openclaw/plugin-sdk/nextcloud-talk"; +import { resolveLoggerBackedRuntime } from "../../shared/runtime.js"; import { resolveNextcloudTalkAccount } from "./accounts.js"; import { handleNextcloudTalkInbound } from "./inbound.js"; import { createNextcloudTalkReplayGuard } from "./replay-guard.js"; @@ -318,12 +318,10 @@ export async function monitorNextcloudTalkProvider( cfg, accountId: opts.accountId, }); - const runtime: RuntimeEnv = - opts.runtime ?? 
- createLoggerBackedRuntime({ - logger: core.logging.getChildLogger(), - exitError: () => new Error("Runtime exit not available"), - }); + const runtime: RuntimeEnv = resolveLoggerBackedRuntime( + opts.runtime, + core.logging.getChildLogger(), + ); if (!account.secret) { throw new Error(`Nextcloud Talk bot secret not configured for account "${account.accountId}"`); diff --git a/extensions/nextcloud-talk/src/normalize.test.ts b/extensions/nextcloud-talk/src/normalize.test.ts new file mode 100644 index 00000000000..2419e063ff1 --- /dev/null +++ b/extensions/nextcloud-talk/src/normalize.test.ts @@ -0,0 +1,28 @@ +import { describe, expect, it } from "vitest"; +import { + looksLikeNextcloudTalkTargetId, + normalizeNextcloudTalkMessagingTarget, + stripNextcloudTalkTargetPrefix, +} from "./normalize.js"; + +describe("nextcloud-talk target normalization", () => { + it("strips supported prefixes to a room token", () => { + expect(stripNextcloudTalkTargetPrefix(" room:abc123 ")).toBe("abc123"); + expect(stripNextcloudTalkTargetPrefix("nextcloud-talk:room:AbC123")).toBe("AbC123"); + expect(stripNextcloudTalkTargetPrefix("nc-talk:room:ops")).toBe("ops"); + expect(stripNextcloudTalkTargetPrefix("nc:room:ops")).toBe("ops"); + expect(stripNextcloudTalkTargetPrefix("room: ")).toBeUndefined(); + }); + + it("normalizes messaging targets to lowercase channel ids", () => { + expect(normalizeNextcloudTalkMessagingTarget("room:AbC123")).toBe("nextcloud-talk:abc123"); + expect(normalizeNextcloudTalkMessagingTarget("nc-talk:room:Ops")).toBe("nextcloud-talk:ops"); + }); + + it("detects prefixed and bare room ids", () => { + expect(looksLikeNextcloudTalkTargetId("nextcloud-talk:room:abc12345")).toBe(true); + expect(looksLikeNextcloudTalkTargetId("nc:opsroom1")).toBe(true); + expect(looksLikeNextcloudTalkTargetId("abc12345")).toBe(true); + expect(looksLikeNextcloudTalkTargetId("")).toBe(false); + }); +}); diff --git a/extensions/nextcloud-talk/src/normalize.ts 
b/extensions/nextcloud-talk/src/normalize.ts index 6854d603fc0..295caadd8a4 100644 --- a/extensions/nextcloud-talk/src/normalize.ts +++ b/extensions/nextcloud-talk/src/normalize.ts @@ -1,4 +1,4 @@ -export function normalizeNextcloudTalkMessagingTarget(raw: string): string | undefined { +export function stripNextcloudTalkTargetPrefix(raw: string): string | undefined { const trimmed = raw.trim(); if (!trimmed) { return undefined; @@ -22,7 +22,12 @@ return undefined; } - return `nextcloud-talk:${normalized}`.toLowerCase(); + return normalized; +} + +export function normalizeNextcloudTalkMessagingTarget(raw: string): string | undefined { + const normalized = stripNextcloudTalkTargetPrefix(raw); + return normalized ? `nextcloud-talk:${normalized}`.toLowerCase() : undefined; } export function looksLikeNextcloudTalkTargetId(raw: string): boolean { diff --git a/extensions/nextcloud-talk/src/onboarding.ts b/extensions/nextcloud-talk/src/onboarding.ts index 3ccf2851c3b..7b1a8b11d28 100644 --- a/extensions/nextcloud-talk/src/onboarding.ts +++ b/extensions/nextcloud-talk/src/onboarding.ts @@ -1,15 +1,14 @@ import { - buildSingleChannelSecretPromptState, formatDocsLink, hasConfiguredSecretInput, mapAllowFromEntries, mergeAllowFromEntries, - promptSingleChannelSecretInput, + patchScopedAccountConfig, + runSingleChannelSecretStep, resolveAccountIdForConfigure, DEFAULT_ACCOUNT_ID, normalizeAccountId, setTopLevelChannelDmPolicyWithAllowFrom, - type SecretInput, type ChannelOnboardingAdapter, type ChannelOnboardingDmPolicy, type OpenClawConfig, @@ -39,38 +38,12 @@ function setNextcloudTalkAccountConfig( accountId: string, updates: Record<string, unknown>, ): CoreConfig { - if (accountId === DEFAULT_ACCOUNT_ID) { - return { - ...cfg, - channels: { - ...cfg.channels, - "nextcloud-talk": { - ...cfg.channels?.["nextcloud-talk"], - enabled: true, - ...updates, - }, - }, - }; - } - - return { - ...cfg, - channels: { - 
...cfg.channels, - "nextcloud-talk": { - ...cfg.channels?.["nextcloud-talk"], - enabled: true, - accounts: { - ...cfg.channels?.["nextcloud-talk"]?.accounts, - [accountId]: { - ...cfg.channels?.["nextcloud-talk"]?.accounts?.[accountId], - enabled: cfg.channels?.["nextcloud-talk"]?.accounts?.[accountId]?.enabled ?? true, - ...updates, - }, - }, - }, - }, - }; + return patchScopedAccountConfig({ + cfg, + channelKey: channel, + accountId, + patch: updates, + }) as CoreConfig; } async function noteNextcloudTalkSecretHelp(prompter: WizardPrompter): Promise { @@ -215,12 +188,6 @@ export const nextcloudTalkOnboardingAdapter: ChannelOnboardingAdapter = { hasConfiguredSecretInput(resolvedAccount.config.botSecret) || resolvedAccount.config.botSecretFile, ); - const secretPromptState = buildSingleChannelSecretPromptState({ - accountConfigured, - hasConfigToken: hasConfigSecret, - allowEnv, - envValue: process.env.NEXTCLOUD_TALK_BOT_SECRET, - }); let baseUrl = resolvedAccount.baseUrl; if (!baseUrl) { @@ -241,32 +208,35 @@ export const nextcloudTalkOnboardingAdapter: ChannelOnboardingAdapter = { ).trim(); } - let secret: SecretInput | null = null; - if (!accountConfigured) { - await noteNextcloudTalkSecretHelp(prompter); - } - - const secretResult = await promptSingleChannelSecretInput({ + const secretStep = await runSingleChannelSecretStep({ cfg: next, prompter, providerHint: "nextcloud-talk", credentialLabel: "bot secret", - accountConfigured: secretPromptState.accountConfigured, - canUseEnv: secretPromptState.canUseEnv, - hasConfigToken: secretPromptState.hasConfigToken, + accountConfigured, + hasConfigToken: hasConfigSecret, + allowEnv, + envValue: process.env.NEXTCLOUD_TALK_BOT_SECRET, envPrompt: "NEXTCLOUD_TALK_BOT_SECRET detected. Use env var?", keepPrompt: "Nextcloud Talk bot secret already configured. 
Keep it?", inputPrompt: "Enter Nextcloud Talk bot secret", preferredEnvVar: "NEXTCLOUD_TALK_BOT_SECRET", + onMissingConfigured: async () => await noteNextcloudTalkSecretHelp(prompter), + applyUseEnv: async (cfg) => + setNextcloudTalkAccountConfig(cfg as CoreConfig, accountId, { + baseUrl, + }), + applySet: async (cfg, value) => + setNextcloudTalkAccountConfig(cfg as CoreConfig, accountId, { + baseUrl, + botSecret: value, + }), }); - if (secretResult.action === "set") { - secret = secretResult.value; - } + next = secretStep.cfg as CoreConfig; - if (secretResult.action === "use-env" || secret || baseUrl !== resolvedAccount.baseUrl) { + if (secretStep.action === "keep" && baseUrl !== resolvedAccount.baseUrl) { next = setNextcloudTalkAccountConfig(next, accountId, { baseUrl, - ...(secret ? { botSecret: secret } : {}), }); } @@ -287,26 +257,28 @@ export const nextcloudTalkOnboardingAdapter: ChannelOnboardingAdapter = { validate: (value) => (String(value ?? "").trim() ? undefined : "Required"), }), ).trim(); - const apiPasswordResult = await promptSingleChannelSecretInput({ + const apiPasswordStep = await runSingleChannelSecretStep({ cfg: next, prompter, providerHint: "nextcloud-talk-api", credentialLabel: "API password", - ...buildSingleChannelSecretPromptState({ - accountConfigured: Boolean(existingApiUser && existingApiPasswordConfigured), - hasConfigToken: existingApiPasswordConfigured, - allowEnv: false, - }), + accountConfigured: Boolean(existingApiUser && existingApiPasswordConfigured), + hasConfigToken: existingApiPasswordConfigured, + allowEnv: false, envPrompt: "", keepPrompt: "Nextcloud Talk API password already configured. Keep it?", inputPrompt: "Enter Nextcloud Talk API password", preferredEnvVar: "NEXTCLOUD_TALK_API_PASSWORD", + applySet: async (cfg, value) => + setNextcloudTalkAccountConfig(cfg as CoreConfig, accountId, { + apiUser, + apiPassword: value, + }), }); - const apiPassword = apiPasswordResult.action === "set" ? 
apiPasswordResult.value : undefined; - next = setNextcloudTalkAccountConfig(next, accountId, { - apiUser, - ...(apiPassword ? { apiPassword } : {}), - }); + next = + apiPasswordStep.action === "keep" + ? setNextcloudTalkAccountConfig(next, accountId, { apiUser }) + : (apiPasswordStep.cfg as CoreConfig); } if (forceAllowFrom) { diff --git a/extensions/nextcloud-talk/src/send.test.ts b/extensions/nextcloud-talk/src/send.test.ts index 88133f9cbed..3ee178b815d 100644 --- a/extensions/nextcloud-talk/src/send.test.ts +++ b/extensions/nextcloud-talk/src/send.test.ts @@ -1,4 +1,9 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { + createSendCfgThreadingRuntime, + expectProvidedCfgSkipsRuntimeLoad, + expectRuntimeCfgFallback, +} from "../../test-utils/send-config.js"; const hoisted = vi.hoisted(() => ({ loadConfig: vi.fn(), @@ -17,20 +22,7 @@ const hoisted = vi.hoisted(() => ({ })); vi.mock("./runtime.js", () => ({ - getNextcloudTalkRuntime: () => ({ - config: { - loadConfig: hoisted.loadConfig, - }, - channel: { - text: { - resolveMarkdownTableMode: hoisted.resolveMarkdownTableMode, - convertMarkdownTables: hoisted.convertMarkdownTables, - }, - activity: { - record: hoisted.record, - }, - }, - }), + getNextcloudTalkRuntime: () => createSendCfgThreadingRuntime(hoisted), })); vi.mock("./accounts.js", () => ({ @@ -72,8 +64,9 @@ describe("nextcloud-talk send cfg threading", () => { accountId: "work", }); - expect(hoisted.loadConfig).not.toHaveBeenCalled(); - expect(hoisted.resolveNextcloudTalkAccount).toHaveBeenCalledWith({ + expectProvidedCfgSkipsRuntimeLoad({ + loadConfig: hoisted.loadConfig, + resolveAccount: hoisted.resolveNextcloudTalkAccount, cfg, accountId: "work", }); @@ -95,8 +88,9 @@ describe("nextcloud-talk send cfg threading", () => { }); expect(result).toEqual({ ok: true }); - expect(hoisted.loadConfig).toHaveBeenCalledTimes(1); - expect(hoisted.resolveNextcloudTalkAccount).toHaveBeenCalledWith({ + expectRuntimeCfgFallback({ + 
loadConfig: hoisted.loadConfig, + resolveAccount: hoisted.resolveNextcloudTalkAccount, cfg: runtimeCfg, accountId: "default", }); diff --git a/extensions/nextcloud-talk/src/send.ts b/extensions/nextcloud-talk/src/send.ts index 7cc8f05658c..2b6284a6fc2 100644 --- a/extensions/nextcloud-talk/src/send.ts +++ b/extensions/nextcloud-talk/src/send.ts @@ -1,4 +1,5 @@ import { resolveNextcloudTalkAccount } from "./accounts.js"; +import { stripNextcloudTalkTargetPrefix } from "./normalize.js"; import { getNextcloudTalkRuntime } from "./runtime.js"; import { generateNextcloudTalkSignature } from "./signature.js"; import type { CoreConfig, NextcloudTalkSendResult } from "./types.js"; @@ -34,33 +35,19 @@ function resolveCredentials( } function normalizeRoomToken(to: string): string { - const trimmed = to.trim(); - if (!trimmed) { - throw new Error("Room token is required for Nextcloud Talk sends"); - } - - let normalized = trimmed; - if (normalized.startsWith("nextcloud-talk:")) { - normalized = normalized.slice("nextcloud-talk:".length).trim(); - } else if (normalized.startsWith("nc:")) { - normalized = normalized.slice("nc:".length).trim(); - } - - if (normalized.startsWith("room:")) { - normalized = normalized.slice("room:".length).trim(); - } - + const normalized = stripNextcloudTalkTargetPrefix(to); if (!normalized) { throw new Error("Room token is required for Nextcloud Talk sends"); } return normalized; } -export async function sendMessageNextcloudTalk( - to: string, - text: string, - opts: NextcloudTalkSendOpts = {}, -): Promise { +function resolveNextcloudTalkSendContext(opts: NextcloudTalkSendOpts): { + cfg: CoreConfig; + account: ReturnType; + baseUrl: string; + secret: string; +} { const cfg = (opts.cfg ?? 
getNextcloudTalkRuntime().config.loadConfig()) as CoreConfig; const account = resolveNextcloudTalkAccount({ cfg, @@ -70,6 +57,15 @@ export async function sendMessageNextcloudTalk( { baseUrl: opts.baseUrl, secret: opts.secret }, account, ); + return { cfg, account, baseUrl, secret }; +} + +export async function sendMessageNextcloudTalk( + to: string, + text: string, + opts: NextcloudTalkSendOpts = {}, +): Promise { + const { cfg, account, baseUrl, secret } = resolveNextcloudTalkSendContext(opts); const roomToken = normalizeRoomToken(to); if (!text?.trim()) { @@ -176,15 +172,7 @@ export async function sendReactionNextcloudTalk( reaction: string, opts: Omit = {}, ): Promise<{ ok: true }> { - const cfg = (opts.cfg ?? getNextcloudTalkRuntime().config.loadConfig()) as CoreConfig; - const account = resolveNextcloudTalkAccount({ - cfg, - accountId: opts.accountId, - }); - const { baseUrl, secret } = resolveCredentials( - { baseUrl: opts.baseUrl, secret: opts.secret }, - account, - ); + const { account, baseUrl, secret } = resolveNextcloudTalkSendContext(opts); const normalizedToken = normalizeRoomToken(roomToken); const body = JSON.stringify({ reaction }); diff --git a/extensions/nostr/CHANGELOG.md b/extensions/nostr/CHANGELOG.md index 3088efcc2bb..c8cdc11422e 100644 --- a/extensions/nostr/CHANGELOG.md +++ b/extensions/nostr/CHANGELOG.md @@ -1,5 +1,35 @@ # Changelog +## 2026.3.14 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.13 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.12 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.3.9 ### Changes diff --git a/extensions/nostr/package.json b/extensions/nostr/package.json index dbee4bc09d7..19ef7cc03e7 100644 --- a/extensions/nostr/package.json +++ b/extensions/nostr/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/nostr", - "version": "2026.3.9", + "version": "2026.3.14", "description": "OpenClaw Nostr channel plugin for NIP-04 encrypted DMs", "type": "module", "dependencies": { diff --git a/extensions/nostr/src/channel.ts b/extensions/nostr/src/channel.ts index 20de320a3d1..937c698bd47 100644 --- a/extensions/nostr/src/channel.ts +++ b/extensions/nostr/src/channel.ts @@ -7,6 +7,10 @@ import { mapAllowFromEntries, type ChannelPlugin, } from "openclaw/plugin-sdk/nostr"; +import { + buildPassiveChannelStatusSummary, + buildTrafficStatusSummary, +} from "../../shared/channel-status-summary.js"; import type { NostrProfile } from "./config-schema.js"; import { NostrConfigSchema } from "./config-schema.js"; import type { MetricEvent, MetricsSnapshot } from "./metrics.js"; @@ -160,14 +164,10 @@ export const nostrPlugin: ChannelPlugin = { status: { defaultRuntime: createDefaultChannelRuntimeState(DEFAULT_ACCOUNT_ID), collectStatusIssues: (accounts) => collectStatusIssuesFromLastError("nostr", accounts), - buildChannelSummary: ({ snapshot }) => ({ - configured: snapshot.configured ?? false, - publicKey: snapshot.publicKey ?? null, - running: snapshot.running ?? false, - lastStartAt: snapshot.lastStartAt ?? null, - lastStopAt: snapshot.lastStopAt ?? null, - lastError: snapshot.lastError ?? null, - }), + buildChannelSummary: ({ snapshot }) => + buildPassiveChannelStatusSummary(snapshot, { + publicKey: snapshot.publicKey ?? null, + }), buildAccountSnapshot: ({ account, runtime }) => ({ accountId: account.accountId, name: account.name, @@ -179,8 +179,7 @@ export const nostrPlugin: ChannelPlugin = { lastStartAt: runtime?.lastStartAt ?? null, lastStopAt: runtime?.lastStopAt ?? null, lastError: runtime?.lastError ?? 
null, - lastInboundAt: runtime?.lastInboundAt ?? null, - lastOutboundAt: runtime?.lastOutboundAt ?? null, + ...buildTrafficStatusSummary(runtime), }), }, diff --git a/extensions/nostr/src/config-schema.ts b/extensions/nostr/src/config-schema.ts index a25868da356..25d928b4837 100644 --- a/extensions/nostr/src/config-schema.ts +++ b/extensions/nostr/src/config-schema.ts @@ -1,8 +1,7 @@ +import { AllowFromListSchema, DmPolicySchema } from "openclaw/plugin-sdk/compat"; import { MarkdownConfigSchema, buildChannelConfigSchema } from "openclaw/plugin-sdk/nostr"; import { z } from "zod"; -const allowFromEntry = z.union([z.string(), z.number()]); - /** * Validates https:// URLs only (no javascript:, data:, file:, etc.) */ @@ -76,10 +75,10 @@ relays: z.array(z.string()).optional(), /** DM access policy: pairing, allowlist, open, or disabled */ - dmPolicy: z.enum(["pairing", "allowlist", "open", "disabled"]).optional(), + dmPolicy: DmPolicySchema.optional(), /** Allowed sender pubkeys (npub or hex format) */ - allowFrom: z.array(allowFromEntry).optional(), + allowFrom: AllowFromListSchema, /** Profile metadata (NIP-01 kind:0 content) */ profile: NostrProfileSchema.optional(), diff --git a/extensions/nostr/src/nostr-profile-http.test.ts b/extensions/nostr/src/nostr-profile-http.test.ts index 8fb17c443f4..3caa739c6c1 100644 --- a/extensions/nostr/src/nostr-profile-http.test.ts +++ b/extensions/nostr/src/nostr-profile-http.test.ts @@ -115,6 +115,13 @@ function createMockContext(overrides?: Partial): NostrP }; } +function expectOkResponse(res: ReturnType<typeof createMockResponse>) { + expect(res._getStatusCode()).toBe(200); + const data = JSON.parse(res._getData()); + expect(data.ok).toBe(true); + return data; +} + function mockSuccessfulProfileImport() { vi.mocked(importProfileFromRelays).mockResolvedValue({ ok: true, @@ -208,6 +215,22 @@ }); describe("PUT /api/channels/nostr/:accountId/profile", () => { + function
mockPublishSuccess() { + vi.mocked(publishNostrProfile).mockResolvedValue({ + eventId: "event123", + createdAt: 1234567890, + successes: ["wss://relay.damus.io"], + failures: [], + }); + } + + function expectBadRequestResponse(res: ReturnType) { + expect(res._getStatusCode()).toBe(400); + const data = JSON.parse(res._getData()); + expect(data.ok).toBe(false); + return data; + } + async function expectPrivatePictureRejected(pictureUrl: string) { const ctx = createMockContext(); const handler = createNostrProfileHttpHandler(ctx); @@ -219,9 +242,7 @@ describe("nostr-profile-http", () => { await handler(req, res); - expect(res._getStatusCode()).toBe(400); - const data = JSON.parse(res._getData()); - expect(data.ok).toBe(false); + const data = expectBadRequestResponse(res); expect(data.error).toContain("private"); } @@ -235,18 +256,11 @@ describe("nostr-profile-http", () => { }); const res = createMockResponse(); - vi.mocked(publishNostrProfile).mockResolvedValue({ - eventId: "event123", - createdAt: 1234567890, - successes: ["wss://relay.damus.io"], - failures: [], - }); + mockPublishSuccess(); await handler(req, res); - expect(res._getStatusCode()).toBe(200); - const data = JSON.parse(res._getData()); - expect(data.ok).toBe(true); + const data = expectOkResponse(res); expect(data.eventId).toBe("event123"); expect(data.successes).toContain("wss://relay.damus.io"); expect(data.persisted).toBe(true); @@ -332,9 +346,7 @@ describe("nostr-profile-http", () => { await handler(req, res); - expect(res._getStatusCode()).toBe(400); - const data = JSON.parse(res._getData()); - expect(data.ok).toBe(false); + const data = expectBadRequestResponse(res); // The schema validation catches non-https URLs before SSRF check expect(data.error).toBe("Validation failed"); expect(data.details).toBeDefined(); @@ -368,12 +380,7 @@ describe("nostr-profile-http", () => { const ctx = createMockContext(); const handler = createNostrProfileHttpHandler(ctx); - 
vi.mocked(publishNostrProfile).mockResolvedValue({ - eventId: "event123", - createdAt: 1234567890, - successes: ["wss://relay.damus.io"], - failures: [], - }); + mockPublishSuccess(); // Make 6 requests (limit is 5/min) for (let i = 0; i < 6; i++) { @@ -384,7 +391,7 @@ describe("nostr-profile-http", () => { await handler(req, res); if (i < 5) { - expect(res._getStatusCode()).toBe(200); + expectOkResponse(res); } else { expect(res._getStatusCode()).toBe(429); const data = JSON.parse(res._getData()); @@ -414,6 +421,12 @@ describe("nostr-profile-http", () => { }); describe("POST /api/channels/nostr/:accountId/profile/import", () => { + function expectImportSuccessResponse(res: ReturnType) { + const data = expectOkResponse(res); + expect(data.imported.name).toBe("imported"); + return data; + } + it("imports profile from relays", async () => { const ctx = createMockContext(); const handler = createNostrProfileHttpHandler(ctx); @@ -424,10 +437,7 @@ describe("nostr-profile-http", () => { await handler(req, res); - expect(res._getStatusCode()).toBe(200); - const data = JSON.parse(res._getData()); - expect(data.ok).toBe(true); - expect(data.imported.name).toBe("imported"); + const data = expectImportSuccessResponse(res); expect(data.saved).toBe(false); // autoMerge not requested }); @@ -490,8 +500,7 @@ describe("nostr-profile-http", () => { await handler(req, res); - expect(res._getStatusCode()).toBe(200); - const data = JSON.parse(res._getData()); + const data = expectImportSuccessResponse(res); expect(data.saved).toBe(true); expect(ctx.updateConfigProfile).toHaveBeenCalled(); }); diff --git a/extensions/ollama/README.md b/extensions/ollama/README.md new file mode 100644 index 00000000000..3a331c08e4b --- /dev/null +++ b/extensions/ollama/README.md @@ -0,0 +1,3 @@ +# Ollama Provider + +Bundled provider plugin for Ollama discovery and setup. 
diff --git a/extensions/ollama/index.ts b/extensions/ollama/index.ts new file mode 100644 index 00000000000..6ba28a3af7c --- /dev/null +++ b/extensions/ollama/index.ts @@ -0,0 +1,123 @@ +import { + buildOllamaProvider, + emptyPluginConfigSchema, + ensureOllamaModelPulled, + OLLAMA_DEFAULT_BASE_URL, + promptAndConfigureOllama, + configureOllamaNonInteractive, + type OpenClawPluginApi, + type ProviderAuthContext, + type ProviderAuthMethodNonInteractiveContext, + type ProviderAuthResult, + type ProviderDiscoveryContext, +} from "openclaw/plugin-sdk/core"; + +const PROVIDER_ID = "ollama"; +const DEFAULT_API_KEY = "ollama-local"; + +const ollamaPlugin = { + id: "ollama", + name: "Ollama Provider", + description: "Bundled Ollama provider plugin", + configSchema: emptyPluginConfigSchema(), + register(api: OpenClawPluginApi) { + api.registerProvider({ + id: PROVIDER_ID, + label: "Ollama", + docsPath: "/providers/ollama", + envVars: ["OLLAMA_API_KEY"], + auth: [ + { + id: "local", + label: "Ollama", + hint: "Cloud and local open models", + kind: "custom", + run: async (ctx: ProviderAuthContext): Promise => { + const result = await promptAndConfigureOllama({ + cfg: ctx.config, + prompter: ctx.prompter, + }); + return { + profiles: [ + { + profileId: "ollama:default", + credential: { + type: "api_key", + provider: PROVIDER_ID, + key: DEFAULT_API_KEY, + }, + }, + ], + configPatch: result.config, + defaultModel: `ollama/${result.defaultModelId}`, + }; + }, + runNonInteractive: async (ctx: ProviderAuthMethodNonInteractiveContext) => + configureOllamaNonInteractive({ + nextConfig: ctx.config, + opts: ctx.opts, + runtime: ctx.runtime, + }), + }, + ], + discovery: { + order: "late", + run: async (ctx: ProviderDiscoveryContext) => { + const explicit = ctx.config.models?.providers?.ollama; + const hasExplicitModels = Array.isArray(explicit?.models) && explicit.models.length > 0; + const ollamaKey = ctx.resolveProviderApiKey(PROVIDER_ID).apiKey; + if (hasExplicitModels && explicit) { 
+ return { + provider: { + ...explicit, + baseUrl: + typeof explicit.baseUrl === "string" && explicit.baseUrl.trim() + ? explicit.baseUrl.trim().replace(/\/+$/, "") + : OLLAMA_DEFAULT_BASE_URL, + api: explicit.api ?? "ollama", + apiKey: ollamaKey ?? explicit.apiKey ?? DEFAULT_API_KEY, + }, + }; + } + + const provider = await buildOllamaProvider(explicit?.baseUrl, { + quiet: !ollamaKey && !explicit, + }); + if (provider.models.length === 0 && !ollamaKey && !explicit?.apiKey) { + return null; + } + return { + provider: { + ...provider, + apiKey: ollamaKey ?? explicit?.apiKey ?? DEFAULT_API_KEY, + }, + }; + }, + }, + wizard: { + onboarding: { + choiceId: "ollama", + choiceLabel: "Ollama", + choiceHint: "Cloud and local open models", + groupId: "ollama", + groupLabel: "Ollama", + groupHint: "Cloud and local open models", + methodId: "local", + }, + modelPicker: { + label: "Ollama (custom)", + hint: "Detect models from a local or remote Ollama instance", + methodId: "local", + }, + }, + onModelSelected: async ({ config, model, prompter }) => { + if (!model.startsWith("ollama/")) { + return; + } + await ensureOllamaModelPulled({ config, prompter }); + }, + }); + }, +}; + +export default ollamaPlugin; diff --git a/extensions/ollama/openclaw.plugin.json b/extensions/ollama/openclaw.plugin.json new file mode 100644 index 00000000000..3df1002d1ac --- /dev/null +++ b/extensions/ollama/openclaw.plugin.json @@ -0,0 +1,9 @@ +{ + "id": "ollama", + "providers": ["ollama"], + "configSchema": { + "type": "object", + "additionalProperties": false, + "properties": {} + } +} diff --git a/extensions/ollama/package.json b/extensions/ollama/package.json new file mode 100644 index 00000000000..61a8227c3ed --- /dev/null +++ b/extensions/ollama/package.json @@ -0,0 +1,12 @@ +{ + "name": "@openclaw/ollama-provider", + "version": "2026.3.14", + "private": true, + "description": "OpenClaw Ollama provider plugin", + "type": "module", + "openclaw": { + "extensions": [ + "./index.ts" + ] + } +} 
diff --git a/extensions/open-prose/package.json b/extensions/open-prose/package.json index 240a2bbcb41..69272781198 100644 --- a/extensions/open-prose/package.json +++ b/extensions/open-prose/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/open-prose", - "version": "2026.3.9", + "version": "2026.3.14", "private": true, "description": "OpenProse VM skill pack plugin (slash command + telemetry).", "type": "module", diff --git a/extensions/open-prose/skills/prose/alts/arabian-nights.md b/extensions/open-prose/skills/prose/alts/arabian-nights.md index cc0d146664e..c637c883bb6 100644 --- a/extensions/open-prose/skills/prose/alts/arabian-nights.md +++ b/extensions/open-prose/skills/prose/alts/arabian-nights.md @@ -78,146 +78,17 @@ An alternative register for OpenProse that draws from One Thousand and One Night | `prompt` | `command` | What is commanded of the djinn | | `model` | `spirit` | Which spirit answers | -### Unchanged +### Shared appendix -These keywords already work or are too functional to replace sensibly: +Use [shared-appendix.md](./shared-appendix.md) for unchanged keywords and the common comparison pattern. 
-- `**...**` discretion markers — already work -- `until`, `while` — already work -- `map`, `filter`, `reduce`, `pmap` — pipeline operators -- `max` — constraint modifier -- `as` — aliasing -- Model names: `sonnet`, `opus`, `haiku` — already poetic +Recommended Arabian Nights rewrite targets: ---- - -## Side-by-Side Comparison - -### Simple Program - -```prose -# Functional -use "@alice/research" as research -input topic: "What to investigate" - -agent helper: - model: sonnet - -let findings = session: helper - prompt: "Research {topic}" - -output summary = session "Summarize" - context: findings -``` - -```prose -# Nights -conjure "@alice/research" as research -wish topic: "What to investigate" - -djinn helper: - spirit: sonnet - -name findings = tale: helper - command: "Research {topic}" - -gift summary = tale "Summarize" - scroll: findings -``` - -### Parallel Execution - -```prose -# Functional -parallel: - security = session "Check security" - perf = session "Check performance" - style = session "Check style" - -session "Synthesize review" - context: { security, perf, style } -``` - -```prose -# Nights -bazaar: - security = tale "Check security" - perf = tale "Check performance" - style = tale "Check style" - -tale "Synthesize review" - scroll: { security, perf, style } -``` - -### Loop with Condition - -```prose -# Functional -loop until **the code is bug-free** (max: 5): - session "Find and fix bugs" -``` - -```prose -# Nights -telling until **the code is bug-free** (max: 5): - tale "Find and fix bugs" -``` - -### Error Handling - -```prose -# Functional -try: - session "Risky operation" -catch as err: - session "Handle error" - context: err -finally: - session "Cleanup" -``` - -```prose -# Nights -venture: - tale "Risky operation" -should misfortune strike as err: - tale "Handle error" - scroll: err -and so it was: - tale "Cleanup" -``` - -### Choice Block - -```prose -# Functional -choice **the severity level**: - option "Critical": - session "Escalate 
immediately" - option "Minor": - session "Log for later" -``` - -```prose -# Nights -crossroads **the severity level**: - path "Critical": - tale "Escalate immediately" - path "Minor": - tale "Log for later" -``` - -### Conditionals - -```prose -# Functional -if **has security issues**: - session "Fix security" -elif **has performance issues**: - session "Optimize" -else: - session "Approve" -``` +- `session` sample -> `tale` +- `parallel` sample -> `bazaar` +- `loop` sample -> `telling` +- `try/catch/finally` sample -> `venture` / `should misfortune strike` / `and so it was` +- `choice` sample -> `crossroads` / `path` ```prose # Nights diff --git a/extensions/open-prose/skills/prose/alts/homer.md b/extensions/open-prose/skills/prose/alts/homer.md index bc27905cf78..716f2052e34 100644 --- a/extensions/open-prose/skills/prose/alts/homer.md +++ b/extensions/open-prose/skills/prose/alts/homer.md @@ -78,146 +78,17 @@ An alternative register for OpenProse that draws from Greek epic poetry—the Il | `prompt` | `charge` | The quest given | | `model` | `muse` | Which muse inspires | -### Unchanged +### Shared appendix -These keywords already work or are too functional to replace sensibly: +Use [shared-appendix.md](./shared-appendix.md) for unchanged keywords and the common comparison pattern. 
-- `**...**` discretion markers — already work -- `until`, `while` — already work -- `map`, `filter`, `reduce`, `pmap` — pipeline operators -- `max` — constraint modifier -- `as` — aliasing -- Model names: `sonnet`, `opus`, `haiku` — already poetic +Recommended Homeric rewrite targets: ---- - -## Side-by-Side Comparison - -### Simple Program - -```prose -# Functional -use "@alice/research" as research -input topic: "What to investigate" - -agent helper: - model: sonnet - -let findings = session: helper - prompt: "Research {topic}" - -output summary = session "Summarize" - context: findings -``` - -```prose -# Homeric -invoke "@alice/research" as research -omen topic: "What to investigate" - -hero helper: - muse: sonnet - -decree findings = trial: helper - charge: "Research {topic}" - -glory summary = trial "Summarize" - tidings: findings -``` - -### Parallel Execution - -```prose -# Functional -parallel: - security = session "Check security" - perf = session "Check performance" - style = session "Check style" - -session "Synthesize review" - context: { security, perf, style } -``` - -```prose -# Homeric -host: - security = trial "Check security" - perf = trial "Check performance" - style = trial "Check style" - -trial "Synthesize review" - tidings: { security, perf, style } -``` - -### Loop with Condition - -```prose -# Functional -loop until **the code is bug-free** (max: 5): - session "Find and fix bugs" -``` - -```prose -# Homeric -ordeal until **the code is bug-free** (max: 5): - trial "Find and fix bugs" -``` - -### Error Handling - -```prose -# Functional -try: - session "Risky operation" -catch as err: - session "Handle error" - context: err -finally: - session "Cleanup" -``` - -```prose -# Homeric -venture: - trial "Risky operation" -should ruin come as err: - trial "Handle error" - tidings: err -in the end: - trial "Cleanup" -``` - -### Choice Block - -```prose -# Functional -choice **the severity level**: - option "Critical": - session "Escalate 
immediately" - option "Minor": - session "Log for later" -``` - -```prose -# Homeric -crossroads **the severity level**: - path "Critical": - trial "Escalate immediately" - path "Minor": - trial "Log for later" -``` - -### Conditionals - -```prose -# Functional -if **has security issues**: - session "Fix security" -elif **has performance issues**: - session "Optimize" -else: - session "Approve" -``` +- `session` sample -> `trial` +- `parallel` sample -> `host` +- `loop` sample -> `ordeal` +- `try/catch/finally` sample -> `venture` / `should ruin come` / `in the end` +- `choice` sample -> `crossroads` / `path` ```prose # Homeric diff --git a/extensions/open-prose/skills/prose/alts/shared-appendix.md b/extensions/open-prose/skills/prose/alts/shared-appendix.md new file mode 100644 index 00000000000..32a4fcbcd17 --- /dev/null +++ b/extensions/open-prose/skills/prose/alts/shared-appendix.md @@ -0,0 +1,35 @@ +--- +role: reference +summary: Shared appendix for experimental OpenProse alternate registers. +status: draft +requires: prose.md +--- + +# OpenProse Alternate Register Appendix + +Use this appendix with experimental register files such as `arabian-nights.md` and `homer.md`. + +## Unchanged keywords + +These keywords already work or are too functional to replace sensibly: + +- `**...**` discretion markers +- `until`, `while` +- `map`, `filter`, `reduce`, `pmap` +- `max` +- `as` +- model names such as `sonnet`, `opus`, and `haiku` + +## Comparison pattern + +Use the translation map in each register file to rewrite the same functional sample programs: + +- simple program +- parallel execution +- loop with condition +- error handling +- choice block +- conditionals + +The goal is consistency, not one canonical wording. +Keep the functional version intact and rewrite only the register-specific aliases. 
diff --git a/extensions/open-prose/skills/prose/state/sqlite.md b/extensions/open-prose/skills/prose/state/sqlite.md index cfec757567c..352a8705cd5 100644 --- a/extensions/open-prose/skills/prose/state/sqlite.md +++ b/extensions/open-prose/skills/prose/state/sqlite.md @@ -87,71 +87,28 @@ The `agents` and `agent_segments` tables for project-scoped agents live in `.pro ## Responsibility Separation -This section defines **who does what**. This is the contract between the VM and subagents. +The VM/subagent contract matches [postgres.md](./postgres.md#responsibility-separation). -### VM Responsibilities +SQLite-specific differences: -The VM (the orchestrating agent running the .prose program) is responsible for: +- the VM creates `state.db` instead of an `openprose` schema +- subagent confirmation messages point at a local database path, for example `.prose/runs//state.db` +- cleanup is typically `VACUUM` or file deletion rather than dropping schema objects -| Responsibility | Description | -| ------------------------- | -------------------------------------------------------------------------------------------------------- | -| **Database creation** | Create `state.db` and initialize core tables at run start | -| **Program registration** | Store the program source and metadata | -| **Execution tracking** | Update position, status, and timing as statements execute | -| **Subagent spawning** | Spawn sessions via Task tool with database path and instructions | -| **Parallel coordination** | Track branch status, implement join strategies | -| **Loop management** | Track iteration counts, evaluate conditions | -| **Error aggregation** | Record failures, manage retry state | -| **Context preservation** | Maintain sufficient narration in the main conversation thread so execution can be understood and resumed | -| **Completion detection** | Mark the run as complete when finished | +Example return values: -**Critical:** The VM must preserve enough context in its own 
conversation to understand execution state without re-reading the entire database. The database is for coordination and persistence, not a replacement for working memory. - -### Subagent Responsibilities - -Subagents (sessions spawned by the VM) are responsible for: - -| Responsibility | Description | -| ----------------------- | ----------------------------------------------------------------- | -| **Writing own outputs** | Insert/update their binding in the `bindings` table | -| **Memory management** | For persistent agents: read and update their memory record | -| **Segment recording** | For persistent agents: append segment history | -| **Attachment handling** | Write large outputs to `attachments/` directory, store path in DB | -| **Atomic writes** | Use transactions when updating multiple related records | - -**Critical:** Subagents write ONLY to `bindings`, `agents`, and `agent_segments` tables. The VM owns the `execution` table entirely. Completion signaling happens through the substrate (Task tool return), not database updates. - -**Critical:** Subagents must write their outputs directly to the database. The VM does not write subagent outputs—it only reads them after the subagent completes. - -**What subagents return to the VM:** A confirmation message with the binding location—not the full content: - -**Root scope:** - -``` +```text Binding written: research Location: .prose/runs/20260116-143052-a7b3c9/state.db (bindings table, name='research', execution_id=NULL) -Summary: AI safety research covering alignment, robustness, and interpretability with 15 citations. ``` -**Inside block invocation:** - -``` +```text Binding written: result Location: .prose/runs/20260116-143052-a7b3c9/state.db (bindings table, name='result', execution_id=43) Execution ID: 43 -Summary: Processed chunk into 3 sub-parts for recursive processing. ``` -The VM tracks locations, not values. This keeps the VM's context lean and enables arbitrarily large intermediate values. 
- -### Shared Concerns - -| Concern | Who Handles | -| ---------------- | ------------------------------------------------------------------ | -| Schema evolution | Either (use `CREATE TABLE IF NOT EXISTS`, `ALTER TABLE` as needed) | -| Custom tables | Either (prefix with `x_` for extensions) | -| Indexing | Either (add indexes for frequently-queried columns) | -| Cleanup | VM (at run end, optionally vacuum) | +The VM still tracks locations, not full values. --- diff --git a/extensions/phone-control/index.test.ts b/extensions/phone-control/index.test.ts index 9259092b153..2c3462c82a9 100644 --- a/extensions/phone-control/index.test.ts +++ b/extensions/phone-control/index.test.ts @@ -7,6 +7,7 @@ import type { PluginCommandContext, } from "openclaw/plugin-sdk/phone-control"; import { describe, expect, it, vi } from "vitest"; +import { createTestPluginApi } from "../test-utils/plugin-api.js"; import registerPhoneControl from "./index.js"; function createApi(params: { @@ -15,7 +16,7 @@ function createApi(params: { writeConfig: (next: Record) => Promise; registerCommand: (command: OpenClawPluginCommandDefinition) => void; }): OpenClawPluginApi { - return { + return createTestPluginApi({ id: "phone-control", name: "phone-control", source: "test", @@ -30,22 +31,8 @@ function createApi(params: { writeConfigFile: (next: Record) => params.writeConfig(next), }, } as OpenClawPluginApi["runtime"], - logger: { info() {}, warn() {}, error() {} }, - registerTool() {}, - registerHook() {}, - registerHttpRoute() {}, - registerChannel() {}, - registerGatewayMethod() {}, - registerCli() {}, - registerService() {}, - registerProvider() {}, - registerContextEngine() {}, registerCommand: params.registerCommand, - resolvePath(input: string) { - return input; - }, - on() {}, - }; + }) as OpenClawPluginApi; } function createCommandContext(args: string): PluginCommandContext { diff --git a/extensions/sglang/README.md b/extensions/sglang/README.md new file mode 100644 index 
00000000000..4a16a882c2e --- /dev/null +++ b/extensions/sglang/README.md @@ -0,0 +1,3 @@ +# SGLang Provider + +Bundled provider plugin for SGLang discovery and setup. diff --git a/extensions/sglang/index.ts b/extensions/sglang/index.ts new file mode 100644 index 00000000000..64143026592 --- /dev/null +++ b/extensions/sglang/index.ts @@ -0,0 +1,81 @@ +import { + buildSglangProvider, + configureOpenAICompatibleSelfHostedProviderNonInteractive, + discoverOpenAICompatibleSelfHostedProvider, + emptyPluginConfigSchema, + promptAndConfigureOpenAICompatibleSelfHostedProviderAuth, + type OpenClawPluginApi, + type ProviderAuthMethodNonInteractiveContext, +} from "openclaw/plugin-sdk/core"; + +const PROVIDER_ID = "sglang"; +const DEFAULT_BASE_URL = "http://127.0.0.1:30000/v1"; + +const sglangPlugin = { + id: "sglang", + name: "SGLang Provider", + description: "Bundled SGLang provider plugin", + configSchema: emptyPluginConfigSchema(), + register(api: OpenClawPluginApi) { + api.registerProvider({ + id: PROVIDER_ID, + label: "SGLang", + docsPath: "/providers/sglang", + envVars: ["SGLANG_API_KEY"], + auth: [ + { + id: "custom", + label: "SGLang", + hint: "Fast self-hosted OpenAI-compatible server", + kind: "custom", + run: async (ctx) => + promptAndConfigureOpenAICompatibleSelfHostedProviderAuth({ + cfg: ctx.config, + prompter: ctx.prompter, + providerId: PROVIDER_ID, + providerLabel: "SGLang", + defaultBaseUrl: DEFAULT_BASE_URL, + defaultApiKeyEnvVar: "SGLANG_API_KEY", + modelPlaceholder: "Qwen/Qwen3-8B", + }), + runNonInteractive: async (ctx: ProviderAuthMethodNonInteractiveContext) => + configureOpenAICompatibleSelfHostedProviderNonInteractive({ + ctx, + providerId: PROVIDER_ID, + providerLabel: "SGLang", + defaultBaseUrl: DEFAULT_BASE_URL, + defaultApiKeyEnvVar: "SGLANG_API_KEY", + modelPlaceholder: "Qwen/Qwen3-8B", + }), + }, + ], + discovery: { + order: "late", + run: async (ctx) => + discoverOpenAICompatibleSelfHostedProvider({ + ctx, + providerId: PROVIDER_ID, + 
buildProvider: buildSglangProvider, + }), + }, + wizard: { + onboarding: { + choiceId: "sglang", + choiceLabel: "SGLang", + choiceHint: "Fast self-hosted OpenAI-compatible server", + groupId: "sglang", + groupLabel: "SGLang", + groupHint: "Fast self-hosted server", + methodId: "custom", + }, + modelPicker: { + label: "SGLang (custom)", + hint: "Enter SGLang URL + API key + model", + methodId: "custom", + }, + }, + }); + }, +}; + +export default sglangPlugin; diff --git a/extensions/sglang/openclaw.plugin.json b/extensions/sglang/openclaw.plugin.json new file mode 100644 index 00000000000..161ea4c635a --- /dev/null +++ b/extensions/sglang/openclaw.plugin.json @@ -0,0 +1,9 @@ +{ + "id": "sglang", + "providers": ["sglang"], + "configSchema": { + "type": "object", + "additionalProperties": false, + "properties": {} + } +} diff --git a/extensions/sglang/package.json b/extensions/sglang/package.json new file mode 100644 index 00000000000..d64495bd110 --- /dev/null +++ b/extensions/sglang/package.json @@ -0,0 +1,12 @@ +{ + "name": "@openclaw/sglang-provider", + "version": "2026.3.14", + "private": true, + "description": "OpenClaw SGLang provider plugin", + "type": "module", + "openclaw": { + "extensions": [ + "./index.ts" + ] + } +} diff --git a/extensions/shared/channel-status-summary.ts b/extensions/shared/channel-status-summary.ts new file mode 100644 index 00000000000..5ebdb067596 --- /dev/null +++ b/extensions/shared/channel-status-summary.ts @@ -0,0 +1,48 @@ +type PassiveChannelStatusSnapshot = { + configured?: boolean; + running?: boolean; + lastStartAt?: number | null; + lastStopAt?: number | null; + lastError?: string | null; + probe?: unknown; + lastProbeAt?: number | null; +}; + +type TrafficStatusSnapshot = { + lastInboundAt?: number | null; + lastOutboundAt?: number | null; +}; + +export function buildPassiveChannelStatusSummary( + snapshot: PassiveChannelStatusSnapshot, + extra?: TExtra, +) { + return { + configured: snapshot.configured ?? 
false, + ...(extra ?? ({} as TExtra)), + running: snapshot.running ?? false, + lastStartAt: snapshot.lastStartAt ?? null, + lastStopAt: snapshot.lastStopAt ?? null, + lastError: snapshot.lastError ?? null, + }; +} + +export function buildPassiveProbedChannelStatusSummary( + snapshot: PassiveChannelStatusSnapshot, + extra?: TExtra, +) { + return { + ...buildPassiveChannelStatusSummary(snapshot, extra), + probe: snapshot.probe, + lastProbeAt: snapshot.lastProbeAt ?? null, + }; +} + +export function buildTrafficStatusSummary( + snapshot?: TSnapshot | null, +) { + return { + lastInboundAt: snapshot?.lastInboundAt ?? null, + lastOutboundAt: snapshot?.lastOutboundAt ?? null, + }; +} diff --git a/extensions/shared/config-schema-helpers.ts b/extensions/shared/config-schema-helpers.ts new file mode 100644 index 00000000000..495793b54b6 --- /dev/null +++ b/extensions/shared/config-schema-helpers.ts @@ -0,0 +1,25 @@ +import type { z } from "zod"; + +type RequireOpenAllowFromFn = (params: { + policy?: string; + allowFrom?: Array; + ctx: z.RefinementCtx; + path: Array; + message: string; +}) => void; + +export function requireChannelOpenAllowFrom(params: { + channel: string; + policy?: string; + allowFrom?: Array; + ctx: z.RefinementCtx; + requireOpenAllowFrom: RequireOpenAllowFromFn; +}) { + params.requireOpenAllowFrom({ + policy: params.policy, + allowFrom: params.allowFrom, + ctx: params.ctx, + path: ["allowFrom"], + message: `channels.${params.channel}.dmPolicy="open" requires channels.${params.channel}.allowFrom to include "*"`, + }); +} diff --git a/extensions/shared/deferred.ts b/extensions/shared/deferred.ts new file mode 100644 index 00000000000..1a874100916 --- /dev/null +++ b/extensions/shared/deferred.ts @@ -0,0 +1,9 @@ +export function createDeferred() { + let resolve!: (value: T | PromiseLike) => void; + let reject!: (reason?: unknown) => void; + const promise = new Promise((res, rej) => { + resolve = res; + reject = rej; + }); + return { promise, resolve, reject 
}; +} diff --git a/extensions/shared/passive-monitor.ts b/extensions/shared/passive-monitor.ts new file mode 100644 index 00000000000..e5ffb3f03ff --- /dev/null +++ b/extensions/shared/passive-monitor.ts @@ -0,0 +1,18 @@ +import { runPassiveAccountLifecycle } from "openclaw/plugin-sdk"; + +type StoppableMonitor = { + stop: () => void; +}; + +export async function runStoppablePassiveMonitor(params: { + abortSignal: AbortSignal; + start: () => Promise; +}): Promise { + await runPassiveAccountLifecycle({ + abortSignal: params.abortSignal, + start: params.start, + stop: async (monitor) => { + monitor.stop(); + }, + }); +} diff --git a/extensions/shared/runtime.ts b/extensions/shared/runtime.ts new file mode 100644 index 00000000000..a1950ba6be0 --- /dev/null +++ b/extensions/shared/runtime.ts @@ -0,0 +1,14 @@ +import { createLoggerBackedRuntime } from "openclaw/plugin-sdk"; + +export function resolveLoggerBackedRuntime( + runtime: TRuntime | undefined, + logger: Parameters[0]["logger"], +): TRuntime { + return ( + runtime ?? + (createLoggerBackedRuntime({ + logger, + exitError: () => new Error("Runtime exit not available"), + }) as TRuntime) + ); +} diff --git a/extensions/shared/status-issues.ts b/extensions/shared/status-issues.ts new file mode 100644 index 00000000000..1eb39e2b686 --- /dev/null +++ b/extensions/shared/status-issues.ts @@ -0,0 +1,18 @@ +export function readStatusIssueFields( + value: unknown, + fields: readonly TField[], +): Record | null { + if (!value || typeof value !== "object") { + return null; + } + const record = value as Record; + const result = {} as Record; + for (const field of fields) { + result[field] = record[field]; + } + return result; +} + +export function coerceStatusIssueAccountId(value: unknown): string | undefined { + return typeof value === "string" ? value : typeof value === "number" ? 
String(value) : undefined; +} diff --git a/extensions/signal/package.json b/extensions/signal/package.json index 743c8212d31..67d6eae6506 100644 --- a/extensions/signal/package.json +++ b/extensions/signal/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/signal", - "version": "2026.3.9", + "version": "2026.3.14", "private": true, "description": "OpenClaw Signal channel plugin", "type": "module", diff --git a/extensions/slack/package.json b/extensions/slack/package.json index 539541bdc6d..183cdce7ad4 100644 --- a/extensions/slack/package.json +++ b/extensions/slack/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/slack", - "version": "2026.3.9", + "version": "2026.3.14", "private": true, "description": "OpenClaw Slack channel plugin", "type": "module", diff --git a/extensions/slack/src/channel.test.ts b/extensions/slack/src/channel.test.ts index ad6860d6f8d..98fbddca77d 100644 --- a/extensions/slack/src/channel.test.ts +++ b/extensions/slack/src/channel.test.ts @@ -15,6 +15,18 @@ vi.mock("./runtime.js", () => ({ import { slackPlugin } from "./channel.js"; +async function getSlackConfiguredState(cfg: OpenClawConfig) { + const account = slackPlugin.config.resolveAccount(cfg, "default"); + return { + configured: slackPlugin.config.isConfigured?.(account, cfg), + snapshot: await slackPlugin.status?.buildAccountSnapshot?.({ + account, + cfg, + runtime: undefined, + }), + }; +} + describe("slackPlugin actions", () => { it("prefers session lookup for announce target routing", () => { expect(slackPlugin.meta.preferSessionLookupForAnnounceTarget).toBe(true); @@ -137,6 +149,46 @@ describe("slackPlugin outbound", () => { }); }); +describe("slackPlugin agentPrompt", () => { + it("tells agents interactive replies are disabled by default", () => { + const hints = slackPlugin.agentPrompt?.messageToolHints?.({ + cfg: { + channels: { + slack: { + botToken: "xoxb-test", + appToken: "xapp-test", + }, + }, + }, + }); + + expect(hints).toEqual([ + "- Slack interactive replies are 
disabled. If needed, ask to set `channels.slack.capabilities.interactiveReplies=true` (or the same under `channels.slack.accounts..capabilities`).", + ]); + }); + + it("shows Slack interactive reply directives when enabled", () => { + const hints = slackPlugin.agentPrompt?.messageToolHints?.({ + cfg: { + channels: { + slack: { + botToken: "xoxb-test", + appToken: "xapp-test", + capabilities: { interactiveReplies: true }, + }, + }, + }, + }); + + expect(hints).toContain( + "- Slack interactive replies: use `[[slack_buttons: Label:value, Other:other]]` to add action buttons that route clicks back as Slack interaction system events.", + ); + expect(hints).toContain( + "- Slack selects: use `[[slack_select: Placeholder | Label:value, Other:other]]` to add a static select menu that routes the chosen value back as a Slack interaction system event.", + ); + }); +}); + describe("slackPlugin config", () => { it("treats HTTP mode accounts with bot token + signing secret as configured", async () => { const cfg: OpenClawConfig = { @@ -149,13 +201,7 @@ describe("slackPlugin config", () => { }, }; - const account = slackPlugin.config.resolveAccount(cfg, "default"); - const configured = slackPlugin.config.isConfigured?.(account, cfg); - const snapshot = await slackPlugin.status?.buildAccountSnapshot?.({ - account, - cfg, - runtime: undefined, - }); + const { configured, snapshot } = await getSlackConfiguredState(cfg); expect(configured).toBe(true); expect(snapshot?.configured).toBe(true); @@ -171,13 +217,7 @@ describe("slackPlugin config", () => { }, }; - const account = slackPlugin.config.resolveAccount(cfg, "default"); - const configured = slackPlugin.config.isConfigured?.(account, cfg); - const snapshot = await slackPlugin.status?.buildAccountSnapshot?.({ - account, - cfg, - runtime: undefined, - }); + const { configured, snapshot } = await getSlackConfiguredState(cfg); expect(configured).toBe(false); expect(snapshot?.configured).toBe(false); diff --git 
a/extensions/slack/src/channel.ts b/extensions/slack/src/channel.ts index 570ef20ffa1..17209b6e4d1 100644 --- a/extensions/slack/src/channel.ts +++ b/extensions/slack/src/channel.ts @@ -29,6 +29,7 @@ import { resolveDefaultSlackAccountId, resolveSlackAccount, resolveSlackReplyToMode, + isSlackInteractiveRepliesEnabled, resolveSlackGroupRequireMention, resolveSlackGroupToolPolicy, buildSlackThreadingToolContext, @@ -37,6 +38,7 @@ import { type ChannelPlugin, type ResolvedSlackAccount, } from "openclaw/plugin-sdk/slack"; +import { buildPassiveProbedChannelStatusSummary } from "../../shared/channel-status-summary.js"; import { getSlackRuntime } from "./runtime.js"; const meta = getChatChannelMeta("slack"); @@ -146,6 +148,17 @@ export const slackPlugin: ChannelPlugin = { media: true, nativeCommands: true, }, + agentPrompt: { + messageToolHints: ({ cfg, accountId }) => + isSlackInteractiveRepliesEnabled({ cfg, accountId }) + ? [ + "- Slack interactive replies: use `[[slack_buttons: Label:value, Other:other]]` to add action buttons that route clicks back as Slack interaction system events.", + "- Slack selects: use `[[slack_select: Placeholder | Label:value, Other:other]]` to add a static select menu that routes the chosen value back as a Slack interaction system event.", + ] + : [ + "- Slack interactive replies are disabled. 
If needed, ask to set `channels.slack.capabilities.interactiveReplies=true` (or the same under `channels.slack.accounts..capabilities`).", + ], + }, streaming: { blockStreamingCoalesceDefaults: { minChars: 1500, idleMs: 1000 }, }, @@ -232,6 +245,18 @@ export const slackPlugin: ChannelPlugin = { }, resolver: { resolveTargets: async ({ cfg, accountId, inputs, kind }) => { + const toResolvedTarget = < + T extends { input: string; resolved: boolean; id?: string; name?: string }, + >( + entry: T, + note?: string, + ) => ({ + input: entry.input, + resolved: entry.resolved, + id: entry.id, + name: entry.name, + note, + }); const account = resolveSlackAccount({ cfg, accountId }); const token = account.config.userToken?.trim() || account.botToken?.trim(); if (!token) { @@ -246,25 +271,15 @@ export const slackPlugin: ChannelPlugin = { token, entries: inputs, }); - return resolved.map((entry) => ({ - input: entry.input, - resolved: entry.resolved, - id: entry.id, - name: entry.name, - note: entry.archived ? "archived" : undefined, - })); + return resolved.map((entry) => + toResolvedTarget(entry, entry.archived ? "archived" : undefined), + ); } const resolved = await getSlackRuntime().channel.slack.resolveUserAllowlist({ token, entries: inputs, }); - return resolved.map((entry) => ({ - input: entry.input, - resolved: entry.resolved, - id: entry.id, - name: entry.name, - note: entry.note, - })); + return resolved.map((entry) => toResolvedTarget(entry, entry.note)); }, }, actions: { @@ -407,17 +422,11 @@ export const slackPlugin: ChannelPlugin = { lastStopAt: null, lastError: null, }, - buildChannelSummary: ({ snapshot }) => ({ - configured: snapshot.configured ?? false, - botTokenSource: snapshot.botTokenSource ?? "none", - appTokenSource: snapshot.appTokenSource ?? "none", - running: snapshot.running ?? false, - lastStartAt: snapshot.lastStartAt ?? null, - lastStopAt: snapshot.lastStopAt ?? null, - lastError: snapshot.lastError ?? 
null, - probe: snapshot.probe, - lastProbeAt: snapshot.lastProbeAt ?? null, - }), + buildChannelSummary: ({ snapshot }) => + buildPassiveProbedChannelStatusSummary(snapshot, { + botTokenSource: snapshot.botTokenSource ?? "none", + appTokenSource: snapshot.appTokenSource ?? "none", + }), probeAccount: async ({ account, timeoutMs }) => { const token = account.botToken?.trim(); if (!token) { diff --git a/extensions/synology-chat/package.json b/extensions/synology-chat/package.json index 00503898817..c6148c856a3 100644 --- a/extensions/synology-chat/package.json +++ b/extensions/synology-chat/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/synology-chat", - "version": "2026.3.9", + "version": "2026.3.14", "description": "Synology Chat channel plugin for OpenClaw", "type": "module", "dependencies": { diff --git a/extensions/synology-chat/src/channel.integration.test.ts b/extensions/synology-chat/src/channel.integration.test.ts index b9cb5484621..e5d1e7f24c9 100644 --- a/extensions/synology-chat/src/channel.integration.test.ts +++ b/extensions/synology-chat/src/channel.integration.test.ts @@ -1,5 +1,9 @@ import type { IncomingMessage, ServerResponse } from "node:http"; import { beforeEach, describe, expect, it, vi } from "vitest"; +import { + dispatchReplyWithBufferedBlockDispatcher, + registerPluginHttpRouteMock, +} from "./channel.test-mocks.js"; import { makeFormBody, makeReq, makeRes } from "./test-http-utils.js"; type RegisteredRoute = { @@ -8,41 +12,6 @@ type RegisteredRoute = { handler: (req: IncomingMessage, res: ServerResponse) => Promise; }; -const registerPluginHttpRouteMock = vi.fn<(params: RegisteredRoute) => () => void>(() => vi.fn()); -const dispatchReplyWithBufferedBlockDispatcher = vi.fn().mockResolvedValue({ counts: {} }); - -vi.mock("openclaw/plugin-sdk/synology-chat", async (importOriginal) => { - const actual = await importOriginal(); - return { - ...actual, - DEFAULT_ACCOUNT_ID: "default", - setAccountEnabledInConfigSection: vi.fn((_opts: any) => 
({})), - registerPluginHttpRoute: registerPluginHttpRouteMock, - buildChannelConfigSchema: vi.fn((schema: any) => ({ schema })), - createFixedWindowRateLimiter: vi.fn(() => ({ - isRateLimited: vi.fn(() => false), - size: vi.fn(() => 0), - clear: vi.fn(), - })), - }; -}); - -vi.mock("./runtime.js", () => ({ - getSynologyRuntime: vi.fn(() => ({ - config: { loadConfig: vi.fn().mockResolvedValue({}) }, - channel: { - reply: { - dispatchReplyWithBufferedBlockDispatcher, - }, - }, - })), -})); - -vi.mock("./client.js", () => ({ - sendMessage: vi.fn().mockResolvedValue(true), - sendFileUrl: vi.fn().mockResolvedValue(true), -})); - const { createSynologyChatPlugin } = await import("./channel.js"); describe("Synology channel wiring integration", () => { beforeEach(() => { diff --git a/extensions/synology-chat/src/channel.test-mocks.ts b/extensions/synology-chat/src/channel.test-mocks.ts new file mode 100644 index 00000000000..10ccca5f9d0 --- /dev/null +++ b/extensions/synology-chat/src/channel.test-mocks.ts @@ -0,0 +1,76 @@ +import type { IncomingMessage, ServerResponse } from "node:http"; +import type { Mock } from "vitest"; +import { vi } from "vitest"; + +export type RegisteredRoute = { + path: string; + accountId: string; + handler: (req: IncomingMessage, res: ServerResponse) => Promise; +}; + +export const registerPluginHttpRouteMock: Mock<(params: RegisteredRoute) => () => void> = vi.fn( + () => vi.fn(), +); + +export const dispatchReplyWithBufferedBlockDispatcher: Mock< + () => Promise<{ counts: Record }> +> = vi.fn().mockResolvedValue({ counts: {} }); + +async function readRequestBodyWithLimitForTest(req: IncomingMessage): Promise { + return await new Promise((resolve, reject) => { + const chunks: Buffer[] = []; + req.on("data", (chunk) => { + chunks.push(Buffer.isBuffer(chunk) ? 
chunk : Buffer.from(chunk)); + }); + req.on("end", () => resolve(Buffer.concat(chunks).toString("utf8"))); + req.on("error", reject); + }); +} + +vi.mock("openclaw/plugin-sdk/synology-chat", () => ({ + DEFAULT_ACCOUNT_ID: "default", + setAccountEnabledInConfigSection: vi.fn((_opts: unknown) => ({})), + registerPluginHttpRoute: registerPluginHttpRouteMock, + buildChannelConfigSchema: vi.fn((schema: unknown) => ({ schema })), + readRequestBodyWithLimit: vi.fn(readRequestBodyWithLimitForTest), + isRequestBodyLimitError: vi.fn(() => false), + requestBodyErrorToText: vi.fn(() => "Request body too large"), + createFixedWindowRateLimiter: vi.fn(() => ({ + isRateLimited: vi.fn(() => false), + size: vi.fn(() => 0), + clear: vi.fn(), + })), +})); + +vi.mock("./client.js", () => ({ + sendMessage: vi.fn().mockResolvedValue(true), + sendFileUrl: vi.fn().mockResolvedValue(true), +})); + +vi.mock("./runtime.js", () => ({ + getSynologyRuntime: vi.fn(() => ({ + config: { loadConfig: vi.fn().mockResolvedValue({}) }, + channel: { + reply: { + dispatchReplyWithBufferedBlockDispatcher, + }, + }, + })), +})); + +export function makeSecurityAccount(overrides: Record = {}) { + return { + accountId: "default", + enabled: true, + token: "t", + incomingUrl: "https://nas/incoming", + nasHost: "h", + webhookPath: "/w", + dmPolicy: "allowlist" as const, + allowedUserIds: [], + rateLimitPerMinute: 30, + botName: "Bot", + allowInsecureSsl: false, + ...overrides, + }; +} diff --git a/extensions/synology-chat/src/channel.test.ts b/extensions/synology-chat/src/channel.test.ts index 4e3be192f39..bdce5f37d79 100644 --- a/extensions/synology-chat/src/channel.test.ts +++ b/extensions/synology-chat/src/channel.test.ts @@ -1,40 +1,10 @@ -import { describe, it, expect, vi, beforeEach } from "vitest"; - -// Mock external dependencies -vi.mock("openclaw/plugin-sdk/synology-chat", () => ({ - DEFAULT_ACCOUNT_ID: "default", - setAccountEnabledInConfigSection: vi.fn((_opts: any) => ({})), - 
registerPluginHttpRoute: vi.fn(() => vi.fn()), - buildChannelConfigSchema: vi.fn((schema: any) => ({ schema })), - createFixedWindowRateLimiter: vi.fn(() => ({ - isRateLimited: vi.fn(() => false), - size: vi.fn(() => 0), - clear: vi.fn(), - })), -})); - -vi.mock("./client.js", () => ({ - sendMessage: vi.fn().mockResolvedValue(true), - sendFileUrl: vi.fn().mockResolvedValue(true), -})); +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { makeSecurityAccount, registerPluginHttpRouteMock } from "./channel.test-mocks.js"; vi.mock("./webhook-handler.js", () => ({ createWebhookHandler: vi.fn(() => vi.fn()), })); -vi.mock("./runtime.js", () => ({ - getSynologyRuntime: vi.fn(() => ({ - config: { loadConfig: vi.fn().mockResolvedValue({}) }, - channel: { - reply: { - dispatchReplyWithBufferedBlockDispatcher: vi.fn().mockResolvedValue({ - counts: {}, - }), - }, - }, - })), -})); - vi.mock("zod", () => ({ z: { object: vi.fn(() => ({ @@ -44,7 +14,6 @@ vi.mock("zod", () => ({ })); const { createSynologyChatPlugin } = await import("./channel.js"); -const { registerPluginHttpRoute } = await import("openclaw/plugin-sdk/synology-chat"); describe("createSynologyChatPlugin", () => { it("returns a plugin object with all required sections", () => { @@ -133,95 +102,35 @@ describe("createSynologyChatPlugin", () => { describe("security.collectWarnings", () => { it("warns when token is missing", () => { const plugin = createSynologyChatPlugin(); - const account = { - accountId: "default", - enabled: true, - token: "", - incomingUrl: "https://nas/incoming", - nasHost: "h", - webhookPath: "/w", - dmPolicy: "allowlist" as const, - allowedUserIds: [], - rateLimitPerMinute: 30, - botName: "Bot", - allowInsecureSsl: false, - }; + const account = makeSecurityAccount({ token: "" }); const warnings = plugin.security.collectWarnings({ account }); expect(warnings.some((w: string) => w.includes("token"))).toBe(true); }); it("warns when allowInsecureSsl is true", () => { const 
plugin = createSynologyChatPlugin(); - const account = { - accountId: "default", - enabled: true, - token: "t", - incomingUrl: "https://nas/incoming", - nasHost: "h", - webhookPath: "/w", - dmPolicy: "allowlist" as const, - allowedUserIds: [], - rateLimitPerMinute: 30, - botName: "Bot", - allowInsecureSsl: true, - }; + const account = makeSecurityAccount({ allowInsecureSsl: true }); const warnings = plugin.security.collectWarnings({ account }); expect(warnings.some((w: string) => w.includes("SSL"))).toBe(true); }); it("warns when dmPolicy is open", () => { const plugin = createSynologyChatPlugin(); - const account = { - accountId: "default", - enabled: true, - token: "t", - incomingUrl: "https://nas/incoming", - nasHost: "h", - webhookPath: "/w", - dmPolicy: "open" as const, - allowedUserIds: [], - rateLimitPerMinute: 30, - botName: "Bot", - allowInsecureSsl: false, - }; + const account = makeSecurityAccount({ dmPolicy: "open" }); const warnings = plugin.security.collectWarnings({ account }); expect(warnings.some((w: string) => w.includes("open"))).toBe(true); }); it("warns when dmPolicy is allowlist and allowedUserIds is empty", () => { const plugin = createSynologyChatPlugin(); - const account = { - accountId: "default", - enabled: true, - token: "t", - incomingUrl: "https://nas/incoming", - nasHost: "h", - webhookPath: "/w", - dmPolicy: "allowlist" as const, - allowedUserIds: [], - rateLimitPerMinute: 30, - botName: "Bot", - allowInsecureSsl: false, - }; + const account = makeSecurityAccount(); const warnings = plugin.security.collectWarnings({ account }); expect(warnings.some((w: string) => w.includes("empty allowedUserIds"))).toBe(true); }); it("returns no warnings for fully configured account", () => { const plugin = createSynologyChatPlugin(); - const account = { - accountId: "default", - enabled: true, - token: "t", - incomingUrl: "https://nas/incoming", - nasHost: "h", - webhookPath: "/w", - dmPolicy: "allowlist" as const, - allowedUserIds: ["user1"], - 
rateLimitPerMinute: 30, - botName: "Bot", - allowInsecureSsl: false, - }; + const account = makeSecurityAccount({ allowedUserIds: ["user1"] }); const warnings = plugin.security.collectWarnings({ account }); expect(warnings).toHaveLength(0); }); @@ -317,6 +226,23 @@ describe("createSynologyChatPlugin", () => { }); describe("gateway", () => { + function makeStartAccountCtx( + accountConfig: Record, + abortController = new AbortController(), + ) { + return { + abortController, + ctx: { + cfg: { + channels: { "synology-chat": accountConfig }, + }, + accountId: "default", + log: { info: vi.fn(), warn: vi.fn(), error: vi.fn() }, + abortSignal: abortController.signal, + }, + }; + } + async function expectPendingStartAccountPromise( result: Promise, abortController: AbortController, @@ -333,15 +259,7 @@ describe("createSynologyChatPlugin", () => { async function expectPendingStartAccount(accountConfig: Record) { const plugin = createSynologyChatPlugin(); - const abortController = new AbortController(); - const ctx = { - cfg: { - channels: { "synology-chat": accountConfig }, - }, - accountId: "default", - log: { info: vi.fn(), warn: vi.fn(), error: vi.fn() }, - abortSignal: abortController.signal, - }; + const { ctx, abortController } = makeStartAccountCtx(accountConfig); const result = plugin.gateway.startAccount(ctx); await expectPendingStartAccountPromise(result, abortController); } @@ -355,27 +273,16 @@ describe("createSynologyChatPlugin", () => { }); it("startAccount refuses allowlist accounts with empty allowedUserIds", async () => { - const registerMock = vi.mocked(registerPluginHttpRoute); + const registerMock = registerPluginHttpRouteMock; registerMock.mockClear(); - const abortController = new AbortController(); - const plugin = createSynologyChatPlugin(); - const ctx = { - cfg: { - channels: { - "synology-chat": { - enabled: true, - token: "t", - incomingUrl: "https://nas/incoming", - dmPolicy: "allowlist", - allowedUserIds: [], - }, - }, - }, - accountId: 
"default", - log: { info: vi.fn(), warn: vi.fn(), error: vi.fn() }, - abortSignal: abortController.signal, - }; + const { ctx, abortController } = makeStartAccountCtx({ + enabled: true, + token: "t", + incomingUrl: "https://nas/incoming", + dmPolicy: "allowlist", + allowedUserIds: [], + }); const result = plugin.gateway.startAccount(ctx); await expectPendingStartAccountPromise(result, abortController); @@ -386,7 +293,7 @@ describe("createSynologyChatPlugin", () => { it("deregisters stale route before re-registering same account/path", async () => { const unregisterFirst = vi.fn(); const unregisterSecond = vi.fn(); - const registerMock = vi.mocked(registerPluginHttpRoute); + const registerMock = registerPluginHttpRouteMock; registerMock.mockReturnValueOnce(unregisterFirst).mockReturnValueOnce(unregisterSecond); const plugin = createSynologyChatPlugin(); diff --git a/extensions/synology-chat/src/client.test.ts b/extensions/synology-chat/src/client.test.ts index 416412f0408..2ae24f42904 100644 --- a/extensions/synology-chat/src/client.test.ts +++ b/extensions/synology-chat/src/client.test.ts @@ -51,7 +51,7 @@ function mockFailureResponse(statusCode = 500) { mockResponse(statusCode, "error"); } -describe("sendMessage", () => { +function installFakeTimerHarness() { beforeEach(() => { vi.clearAllMocks(); vi.useFakeTimers(); @@ -62,6 +62,10 @@ describe("sendMessage", () => { afterEach(() => { vi.useRealTimers(); }); +} + +describe("sendMessage", () => { + installFakeTimerHarness(); it("returns true on successful send", async () => { mockSuccessResponse(); @@ -86,16 +90,7 @@ describe("sendMessage", () => { }); describe("sendFileUrl", () => { - beforeEach(() => { - vi.clearAllMocks(); - vi.useFakeTimers(); - fakeNowMs += 10_000; - vi.setSystemTime(fakeNowMs); - }); - - afterEach(() => { - vi.useRealTimers(); - }); + installFakeTimerHarness(); it("returns true on success", async () => { mockSuccessResponse(); diff --git a/extensions/synology-chat/src/client.ts 
b/extensions/synology-chat/src/client.ts index 95240e556f5..d66f1b720f4 100644 --- a/extensions/synology-chat/src/client.ts +++ b/extensions/synology-chat/src/client.ts @@ -27,6 +27,12 @@ type ChatUserCacheEntry = { cachedAt: number; }; +type ChatWebhookPayload = { + text?: string; + file_url?: string; + user_ids?: number[]; +}; + // Cache user lists per bot endpoint to avoid cross-account bleed. const chatUserCache = new Map(); const CACHE_TTL_MS = 5 * 60 * 1000; // 5 minutes @@ -47,16 +53,7 @@ export async function sendMessage( ): Promise { // Synology Chat API requires user_ids (numeric) to specify the recipient // The @mention is optional but user_ids is mandatory - const payloadObj: Record = { text }; - if (userId) { - // userId can be numeric ID or username - if numeric, add to user_ids - const numericId = typeof userId === "number" ? userId : parseInt(userId, 10); - if (!isNaN(numericId)) { - payloadObj.user_ids = [numericId]; - } - } - const payload = JSON.stringify(payloadObj); - const body = `payload=${encodeURIComponent(payload)}`; + const body = buildWebhookBody({ text }, userId); // Internal rate limit: min 500ms between sends const now = Date.now(); @@ -95,15 +92,7 @@ export async function sendFileUrl( userId?: string | number, allowInsecureSsl = true, ): Promise { - const payloadObj: Record = { file_url: fileUrl }; - if (userId) { - const numericId = typeof userId === "number" ? 
userId : parseInt(userId, 10); - if (!isNaN(numericId)) { - payloadObj.user_ids = [numericId]; - } - } - const payload = JSON.stringify(payloadObj); - const body = `payload=${encodeURIComponent(payload)}`; + const body = buildWebhookBody({ file_url: fileUrl }, userId); try { const ok = await doPost(incomingUrl, body, allowInsecureSsl); @@ -215,6 +204,22 @@ export async function resolveChatUserId( return undefined; } +function buildWebhookBody(payload: ChatWebhookPayload, userId?: string | number): string { + const numericId = parseNumericUserId(userId); + if (numericId !== undefined) { + payload.user_ids = [numericId]; + } + return `payload=${encodeURIComponent(JSON.stringify(payload))}`; +} + +function parseNumericUserId(userId?: string | number): number | undefined { + if (userId === undefined) { + return undefined; + } + const numericId = typeof userId === "number" ? userId : parseInt(userId, 10); + return Number.isNaN(numericId) ? undefined : numericId; +} + function doPost(url: string, body: string, allowInsecureSsl = true): Promise { return new Promise((resolve, reject) => { let parsedUrl: URL; diff --git a/extensions/synology-chat/src/types.ts b/extensions/synology-chat/src/types.ts index 7ba222531c6..842c2ee97bb 100644 --- a/extensions/synology-chat/src/types.ts +++ b/extensions/synology-chat/src/types.ts @@ -2,8 +2,7 @@ * Type definitions for the Synology Chat channel plugin. 
*/ -/** Raw channel config from openclaw.json channels.synology-chat */ -export interface SynologyChatChannelConfig { +type SynologyChatConfigFields = { enabled?: boolean; token?: string; incomingUrl?: string; @@ -14,22 +13,15 @@ export interface SynologyChatChannelConfig { rateLimitPerMinute?: number; botName?: string; allowInsecureSsl?: boolean; +}; + +/** Raw channel config from openclaw.json channels.synology-chat */ +export interface SynologyChatChannelConfig extends SynologyChatConfigFields { accounts?: Record; } /** Raw per-account config (overrides base config) */ -export interface SynologyChatAccountRaw { - enabled?: boolean; - token?: string; - incomingUrl?: string; - nasHost?: string; - webhookPath?: string; - dmPolicy?: "open" | "allowlist" | "disabled"; - allowedUserIds?: string | string[]; - rateLimitPerMinute?: number; - botName?: string; - allowInsecureSsl?: boolean; -} +export interface SynologyChatAccountRaw extends SynologyChatConfigFields {} /** Fully resolved account config with defaults applied */ export interface ResolvedSynologyChatAccount { diff --git a/extensions/synology-chat/src/webhook-handler.test.ts b/extensions/synology-chat/src/webhook-handler.test.ts index 37ee566e6a6..ae5bd061b85 100644 --- a/extensions/synology-chat/src/webhook-handler.test.ts +++ b/extensions/synology-chat/src/webhook-handler.test.ts @@ -2,6 +2,7 @@ import { EventEmitter } from "node:events"; import type { IncomingMessage, ServerResponse } from "node:http"; import { describe, it, expect, vi, beforeEach } from "vitest"; import type { ResolvedSynologyChatAccount } from "./types.js"; +import type { WebhookHandlerDeps } from "./webhook-handler.js"; import { clearSynologyWebhookRateLimiterStateForTest, createWebhookHandler, @@ -37,21 +38,7 @@ function makeReq( body: string, opts: { headers?: Record; url?: string } = {}, ): IncomingMessage { - const req = new EventEmitter() as IncomingMessage & { - destroyed: boolean; - }; - req.method = method; - req.headers = 
opts.headers ?? {}; - req.url = opts.url ?? "/webhook/synology"; - req.socket = { remoteAddress: "127.0.0.1" } as any; - req.destroyed = false; - req.destroy = ((_: Error | undefined) => { - if (req.destroyed) { - return req; - } - req.destroyed = true; - return req; - }) as IncomingMessage["destroy"]; + const req = makeBaseReq(method, opts); // Simulate body delivery process.nextTick(() => { @@ -65,11 +52,19 @@ function makeReq( return req; } function makeStalledReq(method: string): IncomingMessage { + return makeBaseReq(method); +} + +function makeBaseReq( + method: string, + opts: { headers?: Record; url?: string } = {}, +): IncomingMessage & { destroyed: boolean } { const req = new EventEmitter() as IncomingMessage & { destroyed: boolean; }; req.method = method; - req.headers = {}; + req.headers = opts.headers ?? {}; + req.url = opts.url ?? "/webhook/synology"; req.socket = { remoteAddress: "127.0.0.1" } as any; req.destroyed = false; req.destroy = ((_: Error | undefined) => { @@ -124,10 +119,12 @@ describe("createWebhookHandler", () => { async function expectForbiddenByPolicy(params: { account: Partial; bodyContains: string; + deliver?: WebhookHandlerDeps["deliver"]; }) { + const deliver = params.deliver ?? 
vi.fn(); const handler = createWebhookHandler({ account: makeAccount(params.account), - deliver: vi.fn(), + deliver, log, }); @@ -137,6 +134,7 @@ describe("createWebhookHandler", () => { expect(res._status).toBe(403); expect(res._body).toContain(params.bodyContains); + expect(deliver).not.toHaveBeenCalled(); } it("rejects non-POST methods with 405", async () => { @@ -302,22 +300,14 @@ describe("createWebhookHandler", () => { it("returns 403 when allowlist policy is set with empty allowedUserIds", async () => { const deliver = vi.fn(); - const handler = createWebhookHandler({ - account: makeAccount({ + await expectForbiddenByPolicy({ + account: { dmPolicy: "allowlist", allowedUserIds: [], - }), + }, + bodyContains: "Allowlist is empty", deliver, - log, }); - - const req = makeReq("POST", validBody); - const res = makeRes(); - await handler(req, res); - - expect(res._status).toBe(403); - expect(res._body).toContain("Allowlist is empty"); - expect(deliver).not.toHaveBeenCalled(); }); it("returns 403 when DMs are disabled", async () => { diff --git a/extensions/telegram/package.json b/extensions/telegram/package.json index 6602b46f2c8..92054ca01a3 100644 --- a/extensions/telegram/package.json +++ b/extensions/telegram/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/telegram", - "version": "2026.3.9", + "version": "2026.3.14", "private": true, "description": "OpenClaw Telegram channel plugin", "type": "module", diff --git a/extensions/telegram/src/channel.test.ts b/extensions/telegram/src/channel.test.ts index 1f40a5f1cce..a957a3e5b1c 100644 --- a/extensions/telegram/src/channel.test.ts +++ b/extensions/telegram/src/channel.test.ts @@ -57,18 +57,62 @@ function installGatewayRuntime(params?: { probeOk?: boolean; botUsername?: strin const probeTelegram = vi.fn(async () => params?.probeOk ? { ok: true, bot: { username: params.botUsername ?? 
"bot" } } : { ok: false }, ); + const collectUnmentionedGroupIds = vi.fn(() => ({ + groupIds: [] as string[], + unresolvedGroups: 0, + hasWildcardUnmentionedGroups: false, + })); + const auditGroupMembership = vi.fn(async () => ({ + ok: true, + checkedGroups: 0, + unresolvedGroups: 0, + hasWildcardUnmentionedGroups: false, + groups: [], + elapsedMs: 0, + })); setTelegramRuntime({ channel: { telegram: { monitorTelegramProvider, probeTelegram, + collectUnmentionedGroupIds, + auditGroupMembership, }, }, logging: { shouldLogVerbose: () => false, }, } as unknown as PluginRuntime); - return { monitorTelegramProvider, probeTelegram }; + return { + monitorTelegramProvider, + probeTelegram, + collectUnmentionedGroupIds, + auditGroupMembership, + }; +} + +function configureOpsProxyNetwork(cfg: OpenClawConfig) { + cfg.channels!.telegram!.accounts!.ops = { + ...cfg.channels!.telegram!.accounts!.ops, + proxy: "http://127.0.0.1:8888", + network: { + autoSelectFamily: false, + dnsResultOrder: "ipv4first", + }, + }; +} + +function installSendMessageRuntime( + sendMessageTelegram: ReturnType, +): ReturnType { + setTelegramRuntime({ + channel: { + telegram: { + sendMessageTelegram, + }, + }, + } as unknown as PluginRuntime); + return sendMessageTelegram; } describe("telegramPlugin duplicate token guard", () => { @@ -149,15 +193,78 @@ describe("telegramPlugin duplicate token guard", () => { ); }); - it("forwards mediaLocalRoots to sendMessageTelegram for outbound media sends", async () => { - const sendMessageTelegram = vi.fn(async () => ({ messageId: "tg-1" })); - setTelegramRuntime({ - channel: { - telegram: { - sendMessageTelegram, - }, + it("passes account proxy and network settings into Telegram probes", async () => { + const { probeTelegram } = installGatewayRuntime({ + probeOk: true, + botUsername: "opsbot", + }); + + const cfg = createCfg(); + configureOpsProxyNetwork(cfg); + const account = telegramPlugin.config.resolveAccount(cfg, "ops"); + + await 
telegramPlugin.status!.probeAccount!({ + account, + timeoutMs: 5000, + cfg, + }); + + expect(probeTelegram).toHaveBeenCalledWith("token-ops", 5000, { + accountId: "ops", + proxyUrl: "http://127.0.0.1:8888", + network: { + autoSelectFamily: false, + dnsResultOrder: "ipv4first", }, - } as unknown as PluginRuntime); + }); + }); + + it("passes account proxy and network settings into Telegram membership audits", async () => { + const { collectUnmentionedGroupIds, auditGroupMembership } = installGatewayRuntime({ + probeOk: true, + botUsername: "opsbot", + }); + + collectUnmentionedGroupIds.mockReturnValue({ + groupIds: ["-100123"], + unresolvedGroups: 0, + hasWildcardUnmentionedGroups: false, + }); + + const cfg = createCfg(); + configureOpsProxyNetwork(cfg); + cfg.channels!.telegram!.accounts!.ops = { + ...cfg.channels!.telegram!.accounts!.ops, + groups: { + "-100123": { requireMention: false }, + }, + }; + const account = telegramPlugin.config.resolveAccount(cfg, "ops"); + + await telegramPlugin.status!.auditAccount!({ + account, + timeoutMs: 5000, + probe: { ok: true, bot: { id: 123 }, elapsedMs: 1 }, + cfg, + }); + + expect(auditGroupMembership).toHaveBeenCalledWith({ + token: "token-ops", + botId: 123, + groupIds: ["-100123"], + proxyUrl: "http://127.0.0.1:8888", + network: { + autoSelectFamily: false, + dnsResultOrder: "ipv4first", + }, + timeoutMs: 5000, + }); + }); + + it("forwards mediaLocalRoots to sendMessageTelegram for outbound media sends", async () => { + const sendMessageTelegram = installSendMessageRuntime( + vi.fn(async () => ({ messageId: "tg-1" })), + ); const result = await telegramPlugin.outbound!.sendMedia!({ cfg: createCfg(), @@ -179,6 +286,93 @@ describe("telegramPlugin duplicate token guard", () => { expect(result).toMatchObject({ channel: "telegram", messageId: "tg-1" }); }); + it("preserves buttons for outbound text payload sends", async () => { + const sendMessageTelegram = installSendMessageRuntime( + vi.fn(async () => ({ messageId: "tg-2" 
})), + ); + + const result = await telegramPlugin.outbound!.sendPayload!({ + cfg: createCfg(), + to: "12345", + text: "", + payload: { + text: "Approval required", + channelData: { + telegram: { + buttons: [[{ text: "Allow Once", callback_data: "/approve abc allow-once" }]], + }, + }, + }, + accountId: "ops", + }); + + expect(sendMessageTelegram).toHaveBeenCalledWith( + "12345", + "Approval required", + expect.objectContaining({ + buttons: [[{ text: "Allow Once", callback_data: "/approve abc allow-once" }]], + }), + ); + expect(result).toMatchObject({ channel: "telegram", messageId: "tg-2" }); + }); + + it("sends outbound payload media lists and keeps buttons on the first message only", async () => { + const sendMessageTelegram = installSendMessageRuntime( + vi + .fn() + .mockResolvedValueOnce({ messageId: "tg-3", chatId: "12345" }) + .mockResolvedValueOnce({ messageId: "tg-4", chatId: "12345" }), + ); + + const result = await telegramPlugin.outbound!.sendPayload!({ + cfg: createCfg(), + to: "12345", + text: "", + payload: { + text: "Approval required", + mediaUrls: ["https://example.com/1.jpg", "https://example.com/2.jpg"], + channelData: { + telegram: { + quoteText: "quoted", + buttons: [[{ text: "Allow Once", callback_data: "/approve abc allow-once" }]], + }, + }, + }, + mediaLocalRoots: ["/tmp/media"], + accountId: "ops", + silent: true, + }); + + expect(sendMessageTelegram).toHaveBeenCalledTimes(2); + expect(sendMessageTelegram).toHaveBeenNthCalledWith( + 1, + "12345", + "Approval required", + expect.objectContaining({ + mediaUrl: "https://example.com/1.jpg", + mediaLocalRoots: ["/tmp/media"], + quoteText: "quoted", + silent: true, + buttons: [[{ text: "Allow Once", callback_data: "/approve abc allow-once" }]], + }), + ); + expect(sendMessageTelegram).toHaveBeenNthCalledWith( + 2, + "12345", + "", + expect.objectContaining({ + mediaUrl: "https://example.com/2.jpg", + mediaLocalRoots: ["/tmp/media"], + quoteText: "quoted", + silent: true, + }), + ); + expect( + 
(sendMessageTelegram.mock.calls[1]?.[2] as Record)?.buttons, + ).toBeUndefined(); + expect(result).toMatchObject({ channel: "telegram", messageId: "tg-4" }); + }); + it("ignores accounts with missing tokens during duplicate-token checks", async () => { const cfg = createCfg(); cfg.channels!.telegram!.accounts!.ops = {} as never; diff --git a/extensions/telegram/src/channel.ts b/extensions/telegram/src/channel.ts index 0f4721a4d62..20d012c9dda 100644 --- a/extensions/telegram/src/channel.ts +++ b/extensions/telegram/src/channel.ts @@ -1,9 +1,9 @@ import { createScopedChannelConfigBase } from "openclaw/plugin-sdk/compat"; import { collectAllowlistProviderGroupPolicyWarnings, - buildAccountScopedDmSecurityPolicy, collectOpenGroupPolicyRouteAllowlistWarnings, createScopedAccountConfigAccessors, + createScopedDmSecurityResolver, formatAllowFromLowercase, } from "openclaw/plugin-sdk/compat"; import { @@ -31,6 +31,7 @@ import { resolveTelegramAccount, resolveTelegramGroupRequireMention, resolveTelegramGroupToolPolicy, + sendTelegramPayloadMessages, telegramOnboardingAdapter, TelegramConfigSchema, type ChannelMessageActionAdapter, @@ -77,6 +78,61 @@ function formatDuplicateTelegramTokenReason(params: { ); } +type TelegramSendFn = ReturnType< + typeof getTelegramRuntime +>["channel"]["telegram"]["sendMessageTelegram"]; +type TelegramSendOptions = NonNullable[2]>; + +function buildTelegramSendOptions(params: { + cfg: OpenClawConfig; + mediaUrl?: string | null; + mediaLocalRoots?: readonly string[] | null; + accountId?: string | null; + replyToId?: string | null; + threadId?: string | number | null; + silent?: boolean | null; +}): TelegramSendOptions { + return { + verbose: false, + cfg: params.cfg, + ...(params.mediaUrl ? { mediaUrl: params.mediaUrl } : {}), + ...(params.mediaLocalRoots?.length ? 
{ mediaLocalRoots: params.mediaLocalRoots } : {}), + messageThreadId: parseTelegramThreadId(params.threadId), + replyToMessageId: parseTelegramReplyToMessageId(params.replyToId), + accountId: params.accountId ?? undefined, + silent: params.silent ?? undefined, + }; +} + +async function sendTelegramOutbound(params: { + cfg: OpenClawConfig; + to: string; + text: string; + mediaUrl?: string | null; + mediaLocalRoots?: readonly string[] | null; + accountId?: string | null; + deps?: { sendTelegram?: TelegramSendFn }; + replyToId?: string | null; + threadId?: string | number | null; + silent?: boolean | null; +}) { + const send = + params.deps?.sendTelegram ?? getTelegramRuntime().channel.telegram.sendMessageTelegram; + return await send( + params.to, + params.text, + buildTelegramSendOptions({ + cfg: params.cfg, + mediaUrl: params.mediaUrl, + mediaLocalRoots: params.mediaLocalRoots, + accountId: params.accountId, + replyToId: params.replyToId, + threadId: params.threadId, + silent: params.silent, + }), + ); +} + const telegramMessageActions: ChannelMessageActionAdapter = { listActions: (ctx) => getTelegramRuntime().channel.telegram.messageActions?.listActions?.(ctx) ?? [], @@ -108,6 +164,14 @@ const telegramConfigBase = createScopedChannelConfigBase({ + channelKey: "telegram", + resolvePolicy: (account) => account.config.dmPolicy, + resolveAllowFrom: (account) => account.config.allowFrom, + policyPathSuffix: "dmPolicy", + normalizeEntry: (raw) => raw.replace(/^(telegram|tg):/i, ""), +}); + export const telegramPlugin: ChannelPlugin = { id: "telegram", meta: { @@ -176,18 +240,7 @@ export const telegramPlugin: ChannelPlugin { - return buildAccountScopedDmSecurityPolicy({ - cfg, - channelKey: "telegram", - accountId, - fallbackAccountId: account.accountId ?? DEFAULT_ACCOUNT_ID, - policy: account.config.dmPolicy, - allowFrom: account.config.allowFrom ?? 
[], - policyPathSuffix: "dmPolicy", - normalizeEntry: (raw) => raw.replace(/^(telegram|tg):/i, ""), - }); - }, + resolveDmPolicy: resolveTelegramDmPolicy, collectWarnings: ({ account, cfg }) => { const groupAllowlistConfigured = account.config.groups && Object.keys(account.config.groups).length > 0; @@ -317,17 +370,43 @@ export const telegramPlugin: ChannelPlugin { + sendPayload: async ({ + cfg, + to, + payload, + mediaLocalRoots, + accountId, + deps, + replyToId, + threadId, + silent, + }) => { const send = deps?.sendTelegram ?? getTelegramRuntime().channel.telegram.sendMessageTelegram; - const replyToMessageId = parseTelegramReplyToMessageId(replyToId); - const messageThreadId = parseTelegramThreadId(threadId); - const result = await send(to, text, { - verbose: false, + const result = await sendTelegramPayloadMessages({ + send, + to, + payload, + baseOpts: buildTelegramSendOptions({ + cfg, + mediaLocalRoots, + accountId, + replyToId, + threadId, + silent, + }), + }); + return { channel: "telegram", ...result }; + }, + sendText: async ({ cfg, to, text, accountId, deps, replyToId, threadId, silent }) => { + const result = await sendTelegramOutbound({ cfg, - messageThreadId, - replyToMessageId, - accountId: accountId ?? undefined, - silent: silent ?? undefined, + to, + text, + accountId, + deps, + replyToId, + threadId, + silent, }); return { channel: "telegram", ...result }; }, @@ -343,18 +422,17 @@ export const telegramPlugin: ChannelPlugin { - const send = deps?.sendTelegram ?? getTelegramRuntime().channel.telegram.sendMessageTelegram; - const replyToMessageId = parseTelegramReplyToMessageId(replyToId); - const messageThreadId = parseTelegramThreadId(threadId); - const result = await send(to, text, { - verbose: false, + const result = await sendTelegramOutbound({ cfg, + to, + text, mediaUrl, mediaLocalRoots, - messageThreadId, - replyToMessageId, - accountId: accountId ?? undefined, - silent: silent ?? 
undefined, + accountId, + deps, + replyToId, + threadId, + silent, }); return { channel: "telegram", ...result }; }, @@ -378,11 +456,11 @@ export const telegramPlugin: ChannelPlugin buildTokenChannelStatusSummary(snapshot), probeAccount: async ({ account, timeoutMs }) => - getTelegramRuntime().channel.telegram.probeTelegram( - account.token, - timeoutMs, - account.config.proxy, - ), + getTelegramRuntime().channel.telegram.probeTelegram(account.token, timeoutMs, { + accountId: account.accountId, + proxyUrl: account.config.proxy, + network: account.config.network, + }), auditAccount: async ({ account, timeoutMs, probe, cfg }) => { const groups = cfg.channels?.telegram?.accounts?.[account.accountId]?.groups ?? @@ -408,6 +486,7 @@ export const telegramPlugin: ChannelPlugin {}, + error: () => {}, + exit: (code: number): never => { + throw new Error(`exit ${code}`); + }, + }; +} + +export function expectDirectorySurface(directory: ChannelDirectoryAdapter | null | undefined) { + if (!directory) { + throw new Error("expected directory"); + } + if (!directory.listPeers) { + throw new Error("expected listPeers"); + } + if (!directory.listGroups) { + throw new Error("expected listGroups"); + } + return directory as { + listPeers: NonNullable; + listGroups: NonNullable; + }; +} diff --git a/extensions/test-utils/plugin-api.ts b/extensions/test-utils/plugin-api.ts new file mode 100644 index 00000000000..5c9693c1a80 --- /dev/null +++ b/extensions/test-utils/plugin-api.ts @@ -0,0 +1,25 @@ +import type { OpenClawPluginApi } from "../../src/plugins/types.js"; + +type TestPluginApiInput = Partial & + Pick; + +export function createTestPluginApi(api: TestPluginApiInput): OpenClawPluginApi { + return { + logger: { info() {}, warn() {}, error() {}, debug() {} }, + registerTool() {}, + registerHook() {}, + registerHttpRoute() {}, + registerChannel() {}, + registerGatewayMethod() {}, + registerCli() {}, + registerService() {}, + registerProvider() {}, + registerCommand() {}, + 
registerContextEngine() {}, + resolvePath(input: string) { + return input; + }, + on() {}, + ...api, + }; +} diff --git a/extensions/test-utils/plugin-runtime-mock.ts b/extensions/test-utils/plugin-runtime-mock.ts index 8c599599a31..81e3fdedeec 100644 --- a/extensions/test-utils/plugin-runtime-mock.ts +++ b/extensions/test-utils/plugin-runtime-mock.ts @@ -253,6 +253,11 @@ export function createPluginRuntimeMock(overrides: DeepPartial = state: { resolveStateDir: vi.fn(() => "/tmp/openclaw"), }, + modelAuth: { + getApiKeyForModel: vi.fn() as unknown as PluginRuntime["modelAuth"]["getApiKeyForModel"], + resolveApiKeyForProvider: + vi.fn() as unknown as PluginRuntime["modelAuth"]["resolveApiKeyForProvider"], + }, subagent: { run: vi.fn(), waitForRun: vi.fn(), diff --git a/extensions/test-utils/send-config.ts b/extensions/test-utils/send-config.ts new file mode 100644 index 00000000000..61c7e126b12 --- /dev/null +++ b/extensions/test-utils/send-config.ts @@ -0,0 +1,65 @@ +import { expect } from "vitest"; + +type MockFn = (...args: never[]) => unknown; + +type CfgThreadingAssertion = { + loadConfig: MockFn; + resolveAccount: MockFn; + cfg: TCfg; + accountId?: string; +}; + +type SendRuntimeState = { + loadConfig: MockFn; + resolveMarkdownTableMode: MockFn; + convertMarkdownTables: MockFn; + record: MockFn; +}; + +export function expectProvidedCfgSkipsRuntimeLoad({ + loadConfig, + resolveAccount, + cfg, + accountId, +}: CfgThreadingAssertion): void { + expect(loadConfig).not.toHaveBeenCalled(); + expect(resolveAccount).toHaveBeenCalledWith({ + cfg, + accountId, + }); +} + +export function expectRuntimeCfgFallback({ + loadConfig, + resolveAccount, + cfg, + accountId, +}: CfgThreadingAssertion): void { + expect(loadConfig).toHaveBeenCalledTimes(1); + expect(resolveAccount).toHaveBeenCalledWith({ + cfg, + accountId, + }); +} + +export function createSendCfgThreadingRuntime({ + loadConfig, + resolveMarkdownTableMode, + convertMarkdownTables, + record, +}: SendRuntimeState) { 
+ return { + config: { + loadConfig, + }, + channel: { + text: { + resolveMarkdownTableMode, + convertMarkdownTables, + }, + activity: { + record, + }, + }, + }; +} diff --git a/extensions/test-utils/start-account-lifecycle.ts b/extensions/test-utils/start-account-lifecycle.ts new file mode 100644 index 00000000000..6ce1c734736 --- /dev/null +++ b/extensions/test-utils/start-account-lifecycle.ts @@ -0,0 +1,72 @@ +import type { ChannelAccountSnapshot, ChannelGatewayContext } from "openclaw/plugin-sdk/test-utils"; +import { expect, vi } from "vitest"; +import { createStartAccountContext } from "./start-account-context.js"; + +export function startAccountAndTrackLifecycle(params: { + startAccount: (ctx: ChannelGatewayContext) => Promise; + account: TAccount; +}) { + const patches: ChannelAccountSnapshot[] = []; + const abort = new AbortController(); + const task = params.startAccount( + createStartAccountContext({ + account: params.account, + abortSignal: abort.signal, + statusPatchSink: (next) => patches.push({ ...next }), + }), + ); + let settled = false; + void task.then(() => { + settled = true; + }); + return { + abort, + patches, + task, + isSettled: () => settled, + }; +} + +export async function abortStartedAccount(params: { + abort: AbortController; + task: Promise; +}) { + params.abort.abort(); + await params.task; +} + +export async function expectPendingUntilAbort(params: { + waitForStarted: () => Promise; + isSettled: () => boolean; + abort: AbortController; + task: Promise; + assertBeforeAbort?: () => void; + assertAfterAbort?: () => void; +}) { + await params.waitForStarted(); + expect(params.isSettled()).toBe(false); + params.assertBeforeAbort?.(); + await abortStartedAccount({ abort: params.abort, task: params.task }); + params.assertAfterAbort?.(); +} + +export async function expectStopPendingUntilAbort(params: { + waitForStarted: () => Promise; + isSettled: () => boolean; + abort: AbortController; + task: Promise; + stop: ReturnType; +}) { + await 
expectPendingUntilAbort({ + waitForStarted: params.waitForStarted, + isSettled: params.isSettled, + abort: params.abort, + task: params.task, + assertBeforeAbort: () => { + expect(params.stop).not.toHaveBeenCalled(); + }, + assertAfterAbort: () => { + expect(params.stop).toHaveBeenCalledOnce(); + }, + }); +} diff --git a/extensions/test-utils/status-issues.ts b/extensions/test-utils/status-issues.ts new file mode 100644 index 00000000000..7de3c6bcd55 --- /dev/null +++ b/extensions/test-utils/status-issues.ts @@ -0,0 +1,10 @@ +import { expect } from "vitest"; + +export function expectOpenDmPolicyConfigIssue(params: { + collectIssues: (accounts: TAccount[]) => Array<{ kind?: string }>; + account: TAccount; +}) { + const issues = params.collectIssues([params.account]); + expect(issues).toHaveLength(1); + expect(issues[0]?.kind).toBe("config"); +} diff --git a/extensions/thread-ownership/index.test.ts b/extensions/thread-ownership/index.test.ts index 825b4ca5bb5..3d98d8f9735 100644 --- a/extensions/thread-ownership/index.test.ts +++ b/extensions/thread-ownership/index.test.ts @@ -51,6 +51,13 @@ describe("thread-ownership plugin", () => { register(api as any); }); + async function sendSlackThreadMessage() { + return await hooks.message_sending( + { content: "hello", metadata: { threadTs: "1234.5678", channelId: "C123" }, to: "C123" }, + { channelId: "slack", conversationId: "C123" }, + ); + } + it("allows non-slack channels", async () => { const result = await hooks.message_sending( { content: "hello", metadata: { threadTs: "1234.5678", channelId: "C123" }, to: "C123" }, @@ -76,10 +83,7 @@ describe("thread-ownership plugin", () => { new Response(JSON.stringify({ owner: "test-agent" }), { status: 200 }), ); - const result = await hooks.message_sending( - { content: "hello", metadata: { threadTs: "1234.5678", channelId: "C123" }, to: "C123" }, - { channelId: "slack", conversationId: "C123" }, - ); + const result = await sendSlackThreadMessage(); 
expect(result).toBeUndefined(); expect(globalThis.fetch).toHaveBeenCalledWith( @@ -96,10 +100,7 @@ describe("thread-ownership plugin", () => { new Response(JSON.stringify({ owner: "other-agent" }), { status: 409 }), ); - const result = await hooks.message_sending( - { content: "hello", metadata: { threadTs: "1234.5678", channelId: "C123" }, to: "C123" }, - { channelId: "slack", conversationId: "C123" }, - ); + const result = await sendSlackThreadMessage(); expect(result).toEqual({ cancel: true }); expect(api.logger.info).toHaveBeenCalledWith(expect.stringContaining("cancelled send")); @@ -108,10 +109,7 @@ describe("thread-ownership plugin", () => { it("fails open on network error", async () => { vi.mocked(globalThis.fetch).mockRejectedValue(new Error("ECONNREFUSED")); - const result = await hooks.message_sending( - { content: "hello", metadata: { threadTs: "1234.5678", channelId: "C123" }, to: "C123" }, - { channelId: "slack", conversationId: "C123" }, - ); + const result = await sendSlackThreadMessage(); expect(result).toBeUndefined(); expect(api.logger.warn).toHaveBeenCalledWith( diff --git a/extensions/tlon/package.json b/extensions/tlon/package.json index 0cb79328d89..40ec9aeedde 100644 --- a/extensions/tlon/package.json +++ b/extensions/tlon/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/tlon", - "version": "2026.3.9", + "version": "2026.3.14", "description": "OpenClaw Tlon/Urbit channel plugin", "type": "module", "dependencies": { diff --git a/extensions/tlon/src/channel.ts b/extensions/tlon/src/channel.ts index 3c5bedbf841..eb37c8d7f74 100644 --- a/extensions/tlon/src/channel.ts +++ b/extensions/tlon/src/channel.ts @@ -153,6 +153,57 @@ function applyTlonSetupConfig(params: { }; } +type ResolvedTlonAccount = ReturnType; +type ConfiguredTlonAccount = ResolvedTlonAccount & { + ship: string; + url: string; + code: string; +}; + +function resolveOutboundContext(params: { + cfg: OpenClawConfig; + accountId?: string | null; + to: string; +}) { + const account = 
resolveTlonAccount(params.cfg, params.accountId ?? undefined); + if (!account.configured || !account.ship || !account.url || !account.code) { + throw new Error("Tlon account not configured"); + } + + const parsed = parseTlonTarget(params.to); + if (!parsed) { + throw new Error(`Invalid Tlon target. Use ${formatTargetHint()}`); + } + + return { account: account as ConfiguredTlonAccount, parsed }; +} + +function resolveReplyId(replyToId?: string | null, threadId?: string | number | null) { + return (replyToId ?? threadId) ? String(replyToId ?? threadId) : undefined; +} + +async function withHttpPokeAccountApi( + account: ConfiguredTlonAccount, + run: (api: Awaited>) => Promise, +) { + const api = await createHttpPokeApi({ + url: account.url, + ship: account.ship, + code: account.code, + allowPrivateNetwork: account.allowPrivateNetwork ?? undefined, + }); + + try { + return await run(api); + } finally { + try { + await api.delete(); + } catch { + // ignore cleanup errors + } + } +} + const tlonOutbound: ChannelOutboundAdapter = { deliveryMode: "direct", textChunkLimit: 10000, @@ -170,25 +221,8 @@ const tlonOutbound: ChannelOutboundAdapter = { return { ok: true, to: parsed.nest }; }, sendText: async ({ cfg, to, text, accountId, replyToId, threadId }) => { - const account = resolveTlonAccount(cfg, accountId ?? undefined); - if (!account.configured || !account.ship || !account.url || !account.code) { - throw new Error("Tlon account not configured"); - } - - const parsed = parseTlonTarget(to); - if (!parsed) { - throw new Error(`Invalid Tlon target. Use ${formatTargetHint()}`); - } - - // Use HTTP-only poke (no EventSource) to avoid conflicts with monitor's SSE connection - const api = await createHttpPokeApi({ - url: account.url, - ship: account.ship, - code: account.code, - allowPrivateNetwork: account.allowPrivateNetwork ?? 
undefined, - }); - - try { + const { account, parsed } = resolveOutboundContext({ cfg, accountId, to }); + return withHttpPokeAccountApi(account, async (api) => { const fromShip = normalizeShip(account.ship); if (parsed.kind === "dm") { return await sendDm({ @@ -198,52 +232,29 @@ const tlonOutbound: ChannelOutboundAdapter = { text, }); } - const replyId = (replyToId ?? threadId) ? String(replyToId ?? threadId) : undefined; return await sendGroupMessage({ api, fromShip, hostShip: parsed.hostShip, channelName: parsed.channelName, text, - replyToId: replyId, + replyToId: resolveReplyId(replyToId, threadId), }); - } finally { - try { - await api.delete(); - } catch { - // ignore cleanup errors - } - } + }); }, sendMedia: async ({ cfg, to, text, mediaUrl, accountId, replyToId, threadId }) => { - const account = resolveTlonAccount(cfg, accountId ?? undefined); - if (!account.configured || !account.ship || !account.url || !account.code) { - throw new Error("Tlon account not configured"); - } - - const parsed = parseTlonTarget(to); - if (!parsed) { - throw new Error(`Invalid Tlon target. Use ${formatTargetHint()}`); - } + const { account, parsed } = resolveOutboundContext({ cfg, accountId, to }); // Configure the API client for uploads configureClient({ shipUrl: account.url, shipName: account.ship.replace(/^~/, ""), verbose: false, - getCode: async () => account.code!, + getCode: async () => account.code, }); const uploadedUrl = mediaUrl ? await uploadImageFromUrl(mediaUrl) : undefined; - - const api = await createHttpPokeApi({ - url: account.url, - ship: account.ship, - code: account.code, - allowPrivateNetwork: account.allowPrivateNetwork ?? undefined, - }); - - try { + return withHttpPokeAccountApi(account, async (api) => { const fromShip = normalizeShip(account.ship); const story = buildMediaStory(text, uploadedUrl); @@ -255,22 +266,15 @@ const tlonOutbound: ChannelOutboundAdapter = { story, }); } - const replyId = (replyToId ?? threadId) ? String(replyToId ?? 
threadId) : undefined; return await sendGroupMessageWithStory({ api, fromShip, hostShip: parsed.hostShip, channelName: parsed.channelName, story, - replyToId: replyId, + replyToId: resolveReplyId(replyToId, threadId), }); - } finally { - try { - await api.delete(); - } catch { - // ignore cleanup errors - } - } + }); }, }; diff --git a/extensions/tlon/src/monitor/utils.ts b/extensions/tlon/src/monitor/utils.ts index c0649dfbe85..3eccbf6cbc9 100644 --- a/extensions/tlon/src/monitor/utils.ts +++ b/extensions/tlon/src/monitor/utils.ts @@ -162,41 +162,55 @@ export function isGroupInviteAllowed( } // Helper to recursively extract text from inline content +function renderInlineItem( + item: any, + options?: { + linkMode?: "content-or-href" | "href"; + allowBreak?: boolean; + allowBlockquote?: boolean; + }, +): string { + if (typeof item === "string") { + return item; + } + if (!item || typeof item !== "object") { + return ""; + } + if (item.ship) { + return item.ship; + } + if ("sect" in item) { + return `@${item.sect || "all"}`; + } + if (options?.allowBreak && item.break !== undefined) { + return "\n"; + } + if (item["inline-code"]) { + return `\`${item["inline-code"]}\``; + } + if (item.code) { + return `\`${item.code}\``; + } + if (item.link && item.link.href) { + return options?.linkMode === "href" ? 
item.link.href : item.link.content || item.link.href; + } + if (item.bold && Array.isArray(item.bold)) { + return `**${extractInlineText(item.bold)}**`; + } + if (item.italics && Array.isArray(item.italics)) { + return `*${extractInlineText(item.italics)}*`; + } + if (item.strike && Array.isArray(item.strike)) { + return `~~${extractInlineText(item.strike)}~~`; + } + if (options?.allowBlockquote && item.blockquote && Array.isArray(item.blockquote)) { + return `> ${extractInlineText(item.blockquote)}`; + } + return ""; +} + function extractInlineText(items: any[]): string { - return items - .map((item: any) => { - if (typeof item === "string") { - return item; - } - if (item && typeof item === "object") { - if (item.ship) { - return item.ship; - } - if ("sect" in item) { - return `@${item.sect || "all"}`; - } - if (item["inline-code"]) { - return `\`${item["inline-code"]}\``; - } - if (item.code) { - return `\`${item.code}\``; - } - if (item.link && item.link.href) { - return item.link.content || item.link.href; - } - if (item.bold && Array.isArray(item.bold)) { - return `**${extractInlineText(item.bold)}**`; - } - if (item.italics && Array.isArray(item.italics)) { - return `*${extractInlineText(item.italics)}*`; - } - if (item.strike && Array.isArray(item.strike)) { - return `~~${extractInlineText(item.strike)}~~`; - } - } - return ""; - }) - .join(""); + return items.map((item: any) => renderInlineItem(item)).join(""); } export function extractMessageText(content: unknown): string { @@ -209,48 +223,13 @@ export function extractMessageText(content: unknown): string { // Handle inline content (text, ships, links, etc.) 
if (verse.inline && Array.isArray(verse.inline)) { return verse.inline - .map((item: any) => { - if (typeof item === "string") { - return item; - } - if (item && typeof item === "object") { - if (item.ship) { - return item.ship; - } - // Handle sect (role mentions like @all) - if ("sect" in item) { - return `@${item.sect || "all"}`; - } - if (item.break !== undefined) { - return "\n"; - } - if (item.link && item.link.href) { - return item.link.href; - } - // Handle inline code (Tlon uses "inline-code" key) - if (item["inline-code"]) { - return `\`${item["inline-code"]}\``; - } - if (item.code) { - return `\`${item.code}\``; - } - // Handle bold/italic/strike - recursively extract text - if (item.bold && Array.isArray(item.bold)) { - return `**${extractInlineText(item.bold)}**`; - } - if (item.italics && Array.isArray(item.italics)) { - return `*${extractInlineText(item.italics)}*`; - } - if (item.strike && Array.isArray(item.strike)) { - return `~~${extractInlineText(item.strike)}~~`; - } - // Handle blockquote inline - if (item.blockquote && Array.isArray(item.blockquote)) { - return `> ${extractInlineText(item.blockquote)}`; - } - } - return ""; - }) + .map((item: any) => + renderInlineItem(item, { + linkMode: "href", + allowBreak: true, + allowBlockquote: true, + }), + ) .join(""); } diff --git a/extensions/tlon/src/onboarding.ts b/extensions/tlon/src/onboarding.ts index 6558dab0257..8207b190628 100644 --- a/extensions/tlon/src/onboarding.ts +++ b/extensions/tlon/src/onboarding.ts @@ -1,6 +1,7 @@ import type { OpenClawConfig } from "openclaw/plugin-sdk/tlon"; import { formatDocsLink, + patchScopedAccountConfig, resolveAccountIdForConfigure, DEFAULT_ACCOUNT_ID, type ChannelOnboardingAdapter, @@ -32,46 +33,30 @@ function applyAccountConfig(params: { }; }): OpenClawConfig { const { cfg, accountId, input } = params; - const useDefault = accountId === DEFAULT_ACCOUNT_ID; - const base = cfg.channels?.tlon ?? {}; const nextValues = { enabled: true, ...(input.name ? 
{ name: input.name } : {}), ...buildTlonAccountFields(input), }; - - if (useDefault) { - return { - ...cfg, - channels: { - ...cfg.channels, - tlon: { - ...base, - ...nextValues, - }, - }, - }; + if (accountId === DEFAULT_ACCOUNT_ID) { + return patchScopedAccountConfig({ + cfg, + channelKey: channel, + accountId, + patch: nextValues, + ensureChannelEnabled: false, + ensureAccountEnabled: false, + }); } - - return { - ...cfg, - channels: { - ...cfg.channels, - tlon: { - ...base, - enabled: base.enabled ?? true, - accounts: { - ...(base as { accounts?: Record }).accounts, - [accountId]: { - ...(base as { accounts?: Record> }).accounts?.[ - accountId - ], - ...nextValues, - }, - }, - }, - }, - }; + return patchScopedAccountConfig({ + cfg, + channelKey: channel, + accountId, + patch: { enabled: cfg.channels?.tlon?.enabled ?? true }, + accountPatch: nextValues, + ensureChannelEnabled: false, + ensureAccountEnabled: false, + }); } async function noteTlonHelp(prompter: WizardPrompter): Promise { diff --git a/extensions/tlon/src/urbit/channel-ops.ts b/extensions/tlon/src/urbit/channel-ops.ts index f5401d3bb73..ef65e4ca9fe 100644 --- a/extensions/tlon/src/urbit/channel-ops.ts +++ b/extensions/tlon/src/urbit/channel-ops.ts @@ -12,6 +12,29 @@ export type UrbitChannelDeps = { fetchImpl?: (input: RequestInfo | URL, init?: RequestInit) => Promise; }; +async function putUrbitChannel( + deps: UrbitChannelDeps, + params: { body: unknown; auditContext: string }, +) { + return await urbitFetch({ + baseUrl: deps.baseUrl, + path: `/~/channel/${deps.channelId}`, + init: { + method: "PUT", + headers: { + "Content-Type": "application/json", + Cookie: deps.cookie, + }, + body: JSON.stringify(params.body), + }, + ssrfPolicy: deps.ssrfPolicy, + lookupFn: deps.lookupFn, + fetchImpl: deps.fetchImpl, + timeoutMs: 30_000, + auditContext: params.auditContext, + }); +} + export async function pokeUrbitChannel( deps: UrbitChannelDeps, params: { app: string; mark: string; json: unknown; 
auditContext: string }, @@ -26,21 +49,8 @@ export async function pokeUrbitChannel( json: params.json, }; - const { response, release } = await urbitFetch({ - baseUrl: deps.baseUrl, - path: `/~/channel/${deps.channelId}`, - init: { - method: "PUT", - headers: { - "Content-Type": "application/json", - Cookie: deps.cookie, - }, - body: JSON.stringify([pokeData]), - }, - ssrfPolicy: deps.ssrfPolicy, - lookupFn: deps.lookupFn, - fetchImpl: deps.fetchImpl, - timeoutMs: 30_000, + const { response, release } = await putUrbitChannel(deps, { + body: [pokeData], auditContext: params.auditContext, }); @@ -88,23 +98,7 @@ export async function createUrbitChannel( deps: UrbitChannelDeps, params: { body: unknown; auditContext: string }, ): Promise { - const { response, release } = await urbitFetch({ - baseUrl: deps.baseUrl, - path: `/~/channel/${deps.channelId}`, - init: { - method: "PUT", - headers: { - "Content-Type": "application/json", - Cookie: deps.cookie, - }, - body: JSON.stringify(params.body), - }, - ssrfPolicy: deps.ssrfPolicy, - lookupFn: deps.lookupFn, - fetchImpl: deps.fetchImpl, - timeoutMs: 30_000, - auditContext: params.auditContext, - }); + const { response, release } = await putUrbitChannel(deps, params); try { if (!response.ok && response.status !== 204) { @@ -116,30 +110,17 @@ export async function createUrbitChannel( } export async function wakeUrbitChannel(deps: UrbitChannelDeps): Promise { - const { response, release } = await urbitFetch({ - baseUrl: deps.baseUrl, - path: `/~/channel/${deps.channelId}`, - init: { - method: "PUT", - headers: { - "Content-Type": "application/json", - Cookie: deps.cookie, + const { response, release } = await putUrbitChannel(deps, { + body: [ + { + id: Date.now(), + action: "poke", + ship: deps.ship, + app: "hood", + mark: "helm-hi", + json: "Opening API channel", }, - body: JSON.stringify([ - { - id: Date.now(), - action: "poke", - ship: deps.ship, - app: "hood", - mark: "helm-hi", - json: "Opening API channel", - }, - ]), - 
}, - ssrfPolicy: deps.ssrfPolicy, - lookupFn: deps.lookupFn, - fetchImpl: deps.fetchImpl, - timeoutMs: 30_000, + ], auditContext: "tlon-urbit-channel-wake", }); diff --git a/extensions/tlon/src/urbit/sse-client.ts b/extensions/tlon/src/urbit/sse-client.ts index ab12977d0e8..afa87502320 100644 --- a/extensions/tlon/src/urbit/sse-client.ts +++ b/extensions/tlon/src/urbit/sse-client.ts @@ -115,20 +115,7 @@ export class UrbitSSEClient { app: string; path: string; }) { - const { response, release } = await urbitFetch({ - baseUrl: this.url, - path: `/~/channel/${this.channelId}`, - init: { - method: "PUT", - headers: { - "Content-Type": "application/json", - Cookie: this.cookie, - }, - body: JSON.stringify([subscription]), - }, - ssrfPolicy: this.ssrfPolicy, - lookupFn: this.lookupFn, - fetchImpl: this.fetchImpl, + const { response, release } = await this.putChannelPayload([subscription], { timeoutMs: 30_000, auditContext: "tlon-urbit-subscribe", }); @@ -359,20 +346,7 @@ export class UrbitSSEClient { "event-id": eventId, }; - const { response, release } = await urbitFetch({ - baseUrl: this.url, - path: `/~/channel/${this.channelId}`, - init: { - method: "PUT", - headers: { - "Content-Type": "application/json", - Cookie: this.cookie, - }, - body: JSON.stringify([ackData]), - }, - ssrfPolicy: this.ssrfPolicy, - lookupFn: this.lookupFn, - fetchImpl: this.fetchImpl, + const { response, release } = await this.putChannelPayload([ackData], { timeoutMs: 10_000, auditContext: "tlon-urbit-ack", }); @@ -445,20 +419,7 @@ export class UrbitSSEClient { })); { - const { response, release } = await urbitFetch({ - baseUrl: this.url, - path: `/~/channel/${this.channelId}`, - init: { - method: "PUT", - headers: { - "Content-Type": "application/json", - Cookie: this.cookie, - }, - body: JSON.stringify(unsubscribes), - }, - ssrfPolicy: this.ssrfPolicy, - lookupFn: this.lookupFn, - fetchImpl: this.fetchImpl, + const { response, release } = await this.putChannelPayload(unsubscribes, { 
timeoutMs: 30_000, auditContext: "tlon-urbit-unsubscribe", }); @@ -501,4 +462,27 @@ export class UrbitSSEClient { await release(); } } + + private async putChannelPayload( + payload: unknown, + params: { timeoutMs: number; auditContext: string }, + ) { + return await urbitFetch({ + baseUrl: this.url, + path: `/~/channel/${this.channelId}`, + init: { + method: "PUT", + headers: { + "Content-Type": "application/json", + Cookie: this.cookie, + }, + body: JSON.stringify(payload), + }, + ssrfPolicy: this.ssrfPolicy, + lookupFn: this.lookupFn, + fetchImpl: this.fetchImpl, + timeoutMs: params.timeoutMs, + auditContext: params.auditContext, + }); + } } diff --git a/extensions/tlon/src/urbit/upload.test.ts b/extensions/tlon/src/urbit/upload.test.ts index ca95a0412d4..34dd6186d20 100644 --- a/extensions/tlon/src/urbit/upload.test.ts +++ b/extensions/tlon/src/urbit/upload.test.ts @@ -15,6 +15,57 @@ vi.mock("@tloncorp/api", () => ({ })); describe("uploadImageFromUrl", () => { + async function loadUploadMocks() { + const { fetchWithSsrFGuard } = await import("openclaw/plugin-sdk/tlon"); + const { uploadFile } = await import("@tloncorp/api"); + const { uploadImageFromUrl } = await import("./upload.js"); + return { + mockFetch: vi.mocked(fetchWithSsrFGuard), + mockUploadFile: vi.mocked(uploadFile), + uploadImageFromUrl, + }; + } + + type UploadMocks = Awaited>; + + function mockSuccessfulFetch(params: { + mockFetch: UploadMocks["mockFetch"]; + blob: Blob; + finalUrl: string; + contentType: string; + }) { + params.mockFetch.mockResolvedValue({ + response: { + ok: true, + headers: new Headers({ "content-type": params.contentType }), + blob: () => Promise.resolve(params.blob), + } as unknown as Response, + finalUrl: params.finalUrl, + release: vi.fn().mockResolvedValue(undefined), + }); + } + + async function setupSuccessfulUpload(params?: { + sourceUrl?: string; + contentType?: string; + uploadedUrl?: string; + }) { + const { mockFetch, mockUploadFile, uploadImageFromUrl } = await 
loadUploadMocks(); + const sourceUrl = params?.sourceUrl ?? "https://example.com/image.png"; + const contentType = params?.contentType ?? "image/png"; + const mockBlob = new Blob(["fake-image"], { type: contentType }); + mockSuccessfulFetch({ + mockFetch, + blob: mockBlob, + finalUrl: sourceUrl, + contentType, + }); + if (params?.uploadedUrl) { + mockUploadFile.mockResolvedValue({ url: params.uploadedUrl }); + } + return { mockBlob, mockUploadFile, uploadImageFromUrl }; + } + beforeEach(() => { vi.clearAllMocks(); }); @@ -24,28 +75,10 @@ describe("uploadImageFromUrl", () => { }); it("fetches image and calls uploadFile, returns uploaded URL", async () => { - const { fetchWithSsrFGuard } = await import("openclaw/plugin-sdk/tlon"); - const mockFetch = vi.mocked(fetchWithSsrFGuard); - - const { uploadFile } = await import("@tloncorp/api"); - const mockUploadFile = vi.mocked(uploadFile); - - // Mock fetchWithSsrFGuard to return a successful response with a blob - const mockBlob = new Blob(["fake-image"], { type: "image/png" }); - mockFetch.mockResolvedValue({ - response: { - ok: true, - headers: new Headers({ "content-type": "image/png" }), - blob: () => Promise.resolve(mockBlob), - } as unknown as Response, - finalUrl: "https://example.com/image.png", - release: vi.fn().mockResolvedValue(undefined), + const { mockBlob, mockUploadFile, uploadImageFromUrl } = await setupSuccessfulUpload({ + uploadedUrl: "https://memex.tlon.network/uploaded.png", }); - // Mock uploadFile to return a successful upload - mockUploadFile.mockResolvedValue({ url: "https://memex.tlon.network/uploaded.png" }); - - const { uploadImageFromUrl } = await import("./upload.js"); const result = await uploadImageFromUrl("https://example.com/image.png"); expect(result).toBe("https://memex.tlon.network/uploaded.png"); @@ -59,10 +92,8 @@ describe("uploadImageFromUrl", () => { }); it("returns original URL if fetch fails", async () => { - const { fetchWithSsrFGuard } = await 
import("openclaw/plugin-sdk/tlon"); - const mockFetch = vi.mocked(fetchWithSsrFGuard); + const { mockFetch, uploadImageFromUrl } = await loadUploadMocks(); - // Mock fetchWithSsrFGuard to return a failed response mockFetch.mockResolvedValue({ response: { ok: false, @@ -72,35 +103,15 @@ describe("uploadImageFromUrl", () => { release: vi.fn().mockResolvedValue(undefined), }); - const { uploadImageFromUrl } = await import("./upload.js"); const result = await uploadImageFromUrl("https://example.com/image.png"); expect(result).toBe("https://example.com/image.png"); }); it("returns original URL if upload fails", async () => { - const { fetchWithSsrFGuard } = await import("openclaw/plugin-sdk/tlon"); - const mockFetch = vi.mocked(fetchWithSsrFGuard); - - const { uploadFile } = await import("@tloncorp/api"); - const mockUploadFile = vi.mocked(uploadFile); - - // Mock fetchWithSsrFGuard to return a successful response - const mockBlob = new Blob(["fake-image"], { type: "image/png" }); - mockFetch.mockResolvedValue({ - response: { - ok: true, - headers: new Headers({ "content-type": "image/png" }), - blob: () => Promise.resolve(mockBlob), - } as unknown as Response, - finalUrl: "https://example.com/image.png", - release: vi.fn().mockResolvedValue(undefined), - }); - - // Mock uploadFile to throw an error + const { mockUploadFile, uploadImageFromUrl } = await setupSuccessfulUpload(); mockUploadFile.mockRejectedValue(new Error("Upload failed")); - const { uploadImageFromUrl } = await import("./upload.js"); const result = await uploadImageFromUrl("https://example.com/image.png"); expect(result).toBe("https://example.com/image.png"); @@ -127,26 +138,18 @@ describe("uploadImageFromUrl", () => { }); it("extracts filename from URL path", async () => { - const { fetchWithSsrFGuard } = await import("openclaw/plugin-sdk/tlon"); - const mockFetch = vi.mocked(fetchWithSsrFGuard); - - const { uploadFile } = await import("@tloncorp/api"); - const mockUploadFile = vi.mocked(uploadFile); + 
const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks(); const mockBlob = new Blob(["fake-image"], { type: "image/jpeg" }); - mockFetch.mockResolvedValue({ - response: { - ok: true, - headers: new Headers({ "content-type": "image/jpeg" }), - blob: () => Promise.resolve(mockBlob), - } as unknown as Response, + mockSuccessfulFetch({ + mockFetch, + blob: mockBlob, finalUrl: "https://example.com/path/to/my-image.jpg", - release: vi.fn().mockResolvedValue(undefined), + contentType: "image/jpeg", }); mockUploadFile.mockResolvedValue({ url: "https://memex.tlon.network/uploaded.jpg" }); - const { uploadImageFromUrl } = await import("./upload.js"); await uploadImageFromUrl("https://example.com/path/to/my-image.jpg"); expect(mockUploadFile).toHaveBeenCalledWith( @@ -157,26 +160,18 @@ describe("uploadImageFromUrl", () => { }); it("uses default filename when URL has no path", async () => { - const { fetchWithSsrFGuard } = await import("openclaw/plugin-sdk/tlon"); - const mockFetch = vi.mocked(fetchWithSsrFGuard); - - const { uploadFile } = await import("@tloncorp/api"); - const mockUploadFile = vi.mocked(uploadFile); + const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks(); const mockBlob = new Blob(["fake-image"], { type: "image/png" }); - mockFetch.mockResolvedValue({ - response: { - ok: true, - headers: new Headers({ "content-type": "image/png" }), - blob: () => Promise.resolve(mockBlob), - } as unknown as Response, + mockSuccessfulFetch({ + mockFetch, + blob: mockBlob, finalUrl: "https://example.com/", - release: vi.fn().mockResolvedValue(undefined), + contentType: "image/png", }); mockUploadFile.mockResolvedValue({ url: "https://memex.tlon.network/uploaded.png" }); - const { uploadImageFromUrl } = await import("./upload.js"); await uploadImageFromUrl("https://example.com/"); expect(mockUploadFile).toHaveBeenCalledWith( diff --git a/extensions/twitch/CHANGELOG.md b/extensions/twitch/CHANGELOG.md index 
48160f427e8..cc887a99055 100644 --- a/extensions/twitch/CHANGELOG.md +++ b/extensions/twitch/CHANGELOG.md @@ -1,5 +1,35 @@ # Changelog +## 2026.3.14 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.13 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.12 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. + ## 2026.3.9 ### Changes diff --git a/extensions/twitch/package.json b/extensions/twitch/package.json index 5fbf49cc971..bc730150b5e 100644 --- a/extensions/twitch/package.json +++ b/extensions/twitch/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/twitch", - "version": "2026.3.9", + "version": "2026.3.14", "description": "OpenClaw Twitch channel plugin", "type": "module", "dependencies": { diff --git a/extensions/twitch/src/access-control.test.ts b/extensions/twitch/src/access-control.test.ts index 874326c9697..3d522246700 100644 --- a/extensions/twitch/src/access-control.test.ts +++ b/extensions/twitch/src/access-control.test.ts @@ -49,6 +49,41 @@ describe("checkTwitchAccessControl", () => { return result; } + function expectAllowedAccessCheck(params: { + account?: Partial; + message?: Partial; + }) { + const result = runAccessCheck({ + account: params.account, + message: { + message: "@testbot hello", + ...params.message, + }, + }); + expect(result.allowed).toBe(true); + return result; + } + + function expectAllowFromBlocked(params: { + allowFrom: string[]; + allowedRoles?: NonNullable; + message?: Partial; + reason: string; + }) { + const result = runAccessCheck({ + account: { + allowFrom: params.allowFrom, + allowedRoles: params.allowedRoles, + }, + message: { + message: "@testbot hello", + ...params.message, + }, + }); + expect(result.allowed).toBe(false); + 
expect(result.reason).toContain(params.reason); + } + describe("when no restrictions are configured", () => { it("allows messages that mention the bot (default requireMention)", () => { const result = runAccessCheck({ @@ -109,62 +144,28 @@ describe("checkTwitchAccessControl", () => { describe("allowFrom allowlist", () => { it("allows users in the allowlist", () => { - const account: TwitchAccountConfig = { - ...mockAccount, - allowFrom: ["123456", "789012"], - }; - const message: TwitchChatMessage = { - ...mockMessage, - message: "@testbot hello", - }; - - const result = checkTwitchAccessControl({ - message, - account, - botUsername: "testbot", + const result = expectAllowedAccessCheck({ + account: { + allowFrom: ["123456", "789012"], + }, }); - expect(result.allowed).toBe(true); expect(result.matchKey).toBe("123456"); expect(result.matchSource).toBe("allowlist"); }); it("blocks users not in allowlist when allowFrom is set", () => { - const account: TwitchAccountConfig = { - ...mockAccount, + expectAllowFromBlocked({ allowFrom: ["789012"], - }; - const message: TwitchChatMessage = { - ...mockMessage, - message: "@testbot hello", - }; - - const result = checkTwitchAccessControl({ - message, - account, - botUsername: "testbot", + reason: "allowFrom", }); - expect(result.allowed).toBe(false); - expect(result.reason).toContain("allowFrom"); }); it("blocks messages without userId", () => { - const account: TwitchAccountConfig = { - ...mockAccount, + expectAllowFromBlocked({ allowFrom: ["123456"], - }; - const message: TwitchChatMessage = { - ...mockMessage, - message: "@testbot hello", - userId: undefined, - }; - - const result = checkTwitchAccessControl({ - message, - account, - botUsername: "testbot", + message: { userId: undefined }, + reason: "user ID not available", }); - expect(result.allowed).toBe(false); - expect(result.reason).toContain("user ID not available"); }); it("bypasses role checks when user is in allowlist", () => { @@ -188,47 +189,21 @@ 
describe("checkTwitchAccessControl", () => { }); it("blocks user with role when not in allowlist", () => { - const account: TwitchAccountConfig = { - ...mockAccount, + expectAllowFromBlocked({ allowFrom: ["789012"], allowedRoles: ["moderator"], - }; - const message: TwitchChatMessage = { - ...mockMessage, - message: "@testbot hello", - userId: "123456", - isMod: true, - }; - - const result = checkTwitchAccessControl({ - message, - account, - botUsername: "testbot", + message: { userId: "123456", isMod: true }, + reason: "allowFrom", }); - expect(result.allowed).toBe(false); - expect(result.reason).toContain("allowFrom"); }); it("blocks user not in allowlist even when roles configured", () => { - const account: TwitchAccountConfig = { - ...mockAccount, + expectAllowFromBlocked({ allowFrom: ["789012"], allowedRoles: ["moderator"], - }; - const message: TwitchChatMessage = { - ...mockMessage, - message: "@testbot hello", - userId: "123456", - isMod: false, - }; - - const result = checkTwitchAccessControl({ - message, - account, - botUsername: "testbot", + message: { userId: "123456", isMod: false }, + reason: "allowFrom", }); - expect(result.allowed).toBe(false); - expect(result.reason).toContain("allowFrom"); }); }); @@ -283,21 +258,11 @@ describe("checkTwitchAccessControl", () => { }); it("allows all users when role is 'all'", () => { - const account: TwitchAccountConfig = { - ...mockAccount, - allowedRoles: ["all"], - }; - const message: TwitchChatMessage = { - ...mockMessage, - message: "@testbot hello", - }; - - const result = checkTwitchAccessControl({ - message, - account, - botUsername: "testbot", + const result = expectAllowedAccessCheck({ + account: { + allowedRoles: ["all"], + }, }); - expect(result.allowed).toBe(true); expect(result.matchKey).toBe("all"); }); diff --git a/extensions/twitch/src/outbound.test.ts b/extensions/twitch/src/outbound.test.ts index 7b480df32dd..f58e2d1ad48 100644 --- a/extensions/twitch/src/outbound.test.ts +++ 
b/extensions/twitch/src/outbound.test.ts @@ -46,6 +46,20 @@ function assertResolvedTarget( return result.to; } +function expectTargetError( + resolveTarget: NonNullable, + params: Parameters>[0], + expectedMessage: string, +) { + const result = resolveTarget(params); + + expect(result.ok).toBe(false); + if (result.ok) { + throw new Error("expected resolveTarget to fail"); + } + expect(result.error.message).toContain(expectedMessage); +} + describe("outbound", () => { const mockAccount = { ...BASE_TWITCH_TEST_ACCOUNT, @@ -106,17 +120,15 @@ describe("outbound", () => { }); it("should error when target not in allowlist (implicit mode)", () => { - const result = resolveTarget({ - to: "#notallowed", - mode: "implicit", - allowFrom: ["#primary", "#secondary"], - }); - - expect(result.ok).toBe(false); - if (result.ok) { - throw new Error("expected resolveTarget to fail"); - } - expect(result.error.message).toContain("Twitch"); + expectTargetError( + resolveTarget, + { + to: "#notallowed", + mode: "implicit", + allowFrom: ["#primary", "#secondary"], + }, + "Twitch", + ); }); it("should accept any target when allowlist is empty", () => { @@ -131,59 +143,51 @@ describe("outbound", () => { }); it("should error when no target provided with allowlist", () => { - const result = resolveTarget({ - to: undefined, - mode: "implicit", - allowFrom: ["#fallback", "#other"], - }); - - expect(result.ok).toBe(false); - if (result.ok) { - throw new Error("expected resolveTarget to fail"); - } - expect(result.error.message).toContain("Twitch"); + expectTargetError( + resolveTarget, + { + to: undefined, + mode: "implicit", + allowFrom: ["#fallback", "#other"], + }, + "Twitch", + ); }); it("should return error when no target and no allowlist", () => { - const result = resolveTarget({ - to: undefined, - mode: "explicit", - allowFrom: [], - }); - - expect(result.ok).toBe(false); - if (result.ok) { - throw new Error("expected resolveTarget to fail"); - } - 
expect(result.error.message).toContain("Missing target"); + expectTargetError( + resolveTarget, + { + to: undefined, + mode: "explicit", + allowFrom: [], + }, + "Missing target", + ); }); it("should handle whitespace-only target", () => { - const result = resolveTarget({ - to: " ", - mode: "explicit", - allowFrom: [], - }); - - expect(result.ok).toBe(false); - if (result.ok) { - throw new Error("expected resolveTarget to fail"); - } - expect(result.error.message).toContain("Missing target"); + expectTargetError( + resolveTarget, + { + to: " ", + mode: "explicit", + allowFrom: [], + }, + "Missing target", + ); }); it("should error when target normalizes to empty string", () => { - const result = resolveTarget({ - to: "#", - mode: "explicit", - allowFrom: [], - }); - - expect(result.ok).toBe(false); - if (result.ok) { - throw new Error("expected resolveTarget to fail"); - } - expect(result.error.message).toContain("Twitch"); + expectTargetError( + resolveTarget, + { + to: "#", + mode: "explicit", + allowFrom: [], + }, + "Twitch", + ); }); it("should filter wildcard from allowlist when checking membership", () => { diff --git a/extensions/twitch/src/plugin.ts b/extensions/twitch/src/plugin.ts index f6cf576b6a0..11cf90b8893 100644 --- a/extensions/twitch/src/plugin.ts +++ b/extensions/twitch/src/plugin.ts @@ -7,6 +7,7 @@ import type { OpenClawConfig } from "openclaw/plugin-sdk/twitch"; import { buildChannelConfigSchema } from "openclaw/plugin-sdk/twitch"; +import { buildPassiveProbedChannelStatusSummary } from "../../shared/channel-status-summary.js"; import { twitchMessageActions } from "./actions.js"; import { removeClientManager } from "./client-manager-registry.js"; import { TwitchConfigSchema } from "./config-schema.js"; @@ -169,15 +170,8 @@ export const twitchPlugin: ChannelPlugin = { }, /** Build channel summary from snapshot */ - buildChannelSummary: ({ snapshot }: { snapshot: ChannelAccountSnapshot }) => ({ - configured: snapshot.configured ?? 
false, - running: snapshot.running ?? false, - lastStartAt: snapshot.lastStartAt ?? null, - lastStopAt: snapshot.lastStopAt ?? null, - lastError: snapshot.lastError ?? null, - probe: snapshot.probe, - lastProbeAt: snapshot.lastProbeAt ?? null, - }), + buildChannelSummary: ({ snapshot }: { snapshot: ChannelAccountSnapshot }) => + buildPassiveProbedChannelStatusSummary(snapshot), /** Probe account connection */ probeAccount: async ({ diff --git a/extensions/twitch/src/send.test.ts b/extensions/twitch/src/send.test.ts index e7185b3f5fb..b45321229a4 100644 --- a/extensions/twitch/src/send.test.ts +++ b/extensions/twitch/src/send.test.ts @@ -55,7 +55,10 @@ describe("send", () => { installTwitchTestHooks(); describe("sendMessageTwitchInternal", () => { - it("should send a message successfully", async () => { + async function mockSuccessfulSend(params: { + messageId: string; + stripMarkdown?: (text: string) => string; + }) { const { getAccountConfig } = await import("./config.js"); const { getClientManager } = await import("./client-manager-registry.js"); const { stripMarkdownForTwitch } = await import("./utils/markdown.js"); @@ -64,10 +67,18 @@ describe("send", () => { vi.mocked(getClientManager).mockReturnValue({ sendMessage: vi.fn().mockResolvedValue({ ok: true, - messageId: "twitch-msg-123", + messageId: params.messageId, }), } as unknown as ReturnType); - vi.mocked(stripMarkdownForTwitch).mockImplementation((text) => text); + vi.mocked(stripMarkdownForTwitch).mockImplementation( + params.stripMarkdown ?? 
((text) => text), + ); + + return { stripMarkdownForTwitch }; + } + + it("should send a message successfully", async () => { + await mockSuccessfulSend({ messageId: "twitch-msg-123" }); const result = await sendMessageTwitchInternal( "#testchannel", @@ -83,18 +94,10 @@ describe("send", () => { }); it("should strip markdown when enabled", async () => { - const { getAccountConfig } = await import("./config.js"); - const { getClientManager } = await import("./client-manager-registry.js"); - const { stripMarkdownForTwitch } = await import("./utils/markdown.js"); - - vi.mocked(getAccountConfig).mockReturnValue(mockAccount); - vi.mocked(getClientManager).mockReturnValue({ - sendMessage: vi.fn().mockResolvedValue({ - ok: true, - messageId: "twitch-msg-456", - }), - } as unknown as ReturnType); - vi.mocked(stripMarkdownForTwitch).mockImplementation((text) => text.replace(/\*\*/g, "")); + const { stripMarkdownForTwitch } = await mockSuccessfulSend({ + messageId: "twitch-msg-456", + stripMarkdown: (text) => text.replace(/\*\*/g, ""), + }); await sendMessageTwitchInternal( "#testchannel", diff --git a/extensions/vllm/README.md b/extensions/vllm/README.md new file mode 100644 index 00000000000..ce0990a8698 --- /dev/null +++ b/extensions/vllm/README.md @@ -0,0 +1,3 @@ +# vLLM Provider + +Bundled provider plugin for vLLM discovery and setup. 
diff --git a/extensions/vllm/index.ts b/extensions/vllm/index.ts new file mode 100644 index 00000000000..cb865de4dfd --- /dev/null +++ b/extensions/vllm/index.ts @@ -0,0 +1,81 @@ +import { + buildVllmProvider, + configureOpenAICompatibleSelfHostedProviderNonInteractive, + discoverOpenAICompatibleSelfHostedProvider, + emptyPluginConfigSchema, + promptAndConfigureOpenAICompatibleSelfHostedProviderAuth, + type OpenClawPluginApi, + type ProviderAuthMethodNonInteractiveContext, +} from "openclaw/plugin-sdk/core"; + +const PROVIDER_ID = "vllm"; +const DEFAULT_BASE_URL = "http://127.0.0.1:8000/v1"; + +const vllmPlugin = { + id: "vllm", + name: "vLLM Provider", + description: "Bundled vLLM provider plugin", + configSchema: emptyPluginConfigSchema(), + register(api: OpenClawPluginApi) { + api.registerProvider({ + id: PROVIDER_ID, + label: "vLLM", + docsPath: "/providers/vllm", + envVars: ["VLLM_API_KEY"], + auth: [ + { + id: "custom", + label: "vLLM", + hint: "Local/self-hosted OpenAI-compatible server", + kind: "custom", + run: async (ctx) => + promptAndConfigureOpenAICompatibleSelfHostedProviderAuth({ + cfg: ctx.config, + prompter: ctx.prompter, + providerId: PROVIDER_ID, + providerLabel: "vLLM", + defaultBaseUrl: DEFAULT_BASE_URL, + defaultApiKeyEnvVar: "VLLM_API_KEY", + modelPlaceholder: "meta-llama/Meta-Llama-3-8B-Instruct", + }), + runNonInteractive: async (ctx: ProviderAuthMethodNonInteractiveContext) => + configureOpenAICompatibleSelfHostedProviderNonInteractive({ + ctx, + providerId: PROVIDER_ID, + providerLabel: "vLLM", + defaultBaseUrl: DEFAULT_BASE_URL, + defaultApiKeyEnvVar: "VLLM_API_KEY", + modelPlaceholder: "meta-llama/Meta-Llama-3-8B-Instruct", + }), + }, + ], + discovery: { + order: "late", + run: async (ctx) => + discoverOpenAICompatibleSelfHostedProvider({ + ctx, + providerId: PROVIDER_ID, + buildProvider: buildVllmProvider, + }), + }, + wizard: { + onboarding: { + choiceId: "vllm", + choiceLabel: "vLLM", + choiceHint: "Local/self-hosted 
OpenAI-compatible server", + groupId: "vllm", + groupLabel: "vLLM", + groupHint: "Local/self-hosted OpenAI-compatible", + methodId: "custom", + }, + modelPicker: { + label: "vLLM (custom)", + hint: "Enter vLLM URL + API key + model", + methodId: "custom", + }, + }, + }); + }, +}; + +export default vllmPlugin; diff --git a/extensions/vllm/openclaw.plugin.json b/extensions/vllm/openclaw.plugin.json new file mode 100644 index 00000000000..5a9f9a778ee --- /dev/null +++ b/extensions/vllm/openclaw.plugin.json @@ -0,0 +1,9 @@ +{ + "id": "vllm", + "providers": ["vllm"], + "configSchema": { + "type": "object", + "additionalProperties": false, + "properties": {} + } +} diff --git a/extensions/vllm/package.json b/extensions/vllm/package.json new file mode 100644 index 00000000000..bb293610355 --- /dev/null +++ b/extensions/vllm/package.json @@ -0,0 +1,12 @@ +{ + "name": "@openclaw/vllm-provider", + "version": "2026.3.14", + "private": true, + "description": "OpenClaw vLLM provider plugin", + "type": "module", + "openclaw": { + "extensions": [ + "./index.ts" + ] + } +} diff --git a/extensions/voice-call/CHANGELOG.md b/extensions/voice-call/CHANGELOG.md index a8a4586116c..d9d27a97e87 100644 --- a/extensions/voice-call/CHANGELOG.md +++ b/extensions/voice-call/CHANGELOG.md @@ -1,5 +1,35 @@ # Changelog +## 2026.3.14 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.13 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.12 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.3.9 ### Changes diff --git a/extensions/voice-call/README.md b/extensions/voice-call/README.md index 9acc9aec987..fe228537ee8 100644 --- a/extensions/voice-call/README.md +++ b/extensions/voice-call/README.md @@ -89,56 +89,18 @@ Notes: - Twilio/Telnyx/Plivo require a **publicly reachable** webhook URL. - `mock` is a local dev provider (no network calls). - Telnyx requires `telnyx.publicKey` (or `TELNYX_PUBLIC_KEY`) unless `skipSignatureVerification` is true. -- `tunnel.allowNgrokFreeTierLoopbackBypass: true` allows Twilio webhooks with invalid signatures **only** when `tunnel.provider="ngrok"` and `serve.bind` is loopback (ngrok local agent). Use for local dev only. - -Streaming security defaults: - -- `streaming.preStartTimeoutMs` closes sockets that never send a valid `start` frame. -- `streaming.maxPendingConnections` caps total unauthenticated pre-start sockets. -- `streaming.maxPendingConnectionsPerIp` caps unauthenticated pre-start sockets per source IP. -- `streaming.maxConnections` caps total open media stream sockets (pending + active). +- advanced webhook, streaming, and tunnel notes: `https://docs.openclaw.ai/plugins/voice-call` ## Stale call reaper -Use `staleCallReaperSeconds` to end calls that never receive a terminal webhook -(for example, notify-mode calls that never complete). The default is `0` -(disabled). - -Recommended ranges: - -- **Production:** `120`–`300` seconds for notify-style flows. -- Keep this value **higher than `maxDurationSeconds`** so normal calls can - finish. A good starting point is `maxDurationSeconds + 30–60` seconds. - -Example: - -```json5 -{ - staleCallReaperSeconds: 360, -} -``` +See the plugin docs for recommended ranges and production examples: +`https://docs.openclaw.ai/plugins/voice-call#stale-call-reaper` ## TTS for calls Voice Call uses the core `messages.tts` configuration (OpenAI or ElevenLabs) for -streaming speech on calls. 
You can override it under the plugin config with the -same shape — overrides deep-merge with `messages.tts`. - -```json5 -{ - tts: { - provider: "openai", - openai: { - voice: "alloy", - }, - }, -} -``` - -Notes: - -- Edge TTS is ignored for voice calls (telephony audio needs PCM; Edge output is unreliable). -- Core TTS is used when Twilio media streaming is enabled; otherwise calls fall back to provider native voices. +streaming speech on calls. Override examples and provider caveats live here: +`https://docs.openclaw.ai/plugins/voice-call#tts-for-calls` ## CLI diff --git a/extensions/voice-call/index.ts b/extensions/voice-call/index.ts index 8e2fba9898f..7393fb03c9b 100644 --- a/extensions/voice-call/index.ts +++ b/extensions/voice-call/index.ts @@ -227,6 +227,37 @@ const voiceCallPlugin = { params.respond(true, { callId: result.callId, initiated: true }); }; + const respondToCallMessageAction = async (params: { + requestParams: GatewayRequestHandlerOptions["params"]; + respond: GatewayRequestHandlerOptions["respond"]; + action: ( + request: Exclude>, { error: string }>, + ) => Promise<{ + success: boolean; + error?: string; + transcript?: string; + }>; + failure: string; + includeTranscript?: boolean; + }) => { + const request = await resolveCallMessageRequest(params.requestParams); + if ("error" in request) { + params.respond(false, { error: request.error }); + return; + } + const result = await params.action(request); + if (!result.success) { + params.respond(false, { error: result.error || params.failure }); + return; + } + params.respond( + true, + params.includeTranscript + ? 
{ success: true, transcript: result.transcript } + : { success: true }, + ); + }; + api.registerGatewayMethod( "voicecall.initiate", async ({ params, respond }: GatewayRequestHandlerOptions) => { @@ -264,17 +295,13 @@ const voiceCallPlugin = { "voicecall.continue", async ({ params, respond }: GatewayRequestHandlerOptions) => { try { - const request = await resolveCallMessageRequest(params); - if ("error" in request) { - respond(false, { error: request.error }); - return; - } - const result = await request.rt.manager.continueCall(request.callId, request.message); - if (!result.success) { - respond(false, { error: result.error || "continue failed" }); - return; - } - respond(true, { success: true, transcript: result.transcript }); + await respondToCallMessageAction({ + requestParams: params, + respond, + action: (request) => request.rt.manager.continueCall(request.callId, request.message), + failure: "continue failed", + includeTranscript: true, + }); } catch (err) { sendError(respond, err); } @@ -285,17 +312,12 @@ const voiceCallPlugin = { "voicecall.speak", async ({ params, respond }: GatewayRequestHandlerOptions) => { try { - const request = await resolveCallMessageRequest(params); - if ("error" in request) { - respond(false, { error: request.error }); - return; - } - const result = await request.rt.manager.speak(request.callId, request.message); - if (!result.success) { - respond(false, { error: result.error || "speak failed" }); - return; - } - respond(true, { success: true }); + await respondToCallMessageAction({ + requestParams: params, + respond, + action: (request) => request.rt.manager.speak(request.callId, request.message), + failure: "speak failed", + }); } catch (err) { sendError(respond, err); } diff --git a/extensions/voice-call/openclaw.plugin.json b/extensions/voice-call/openclaw.plugin.json index d9a904c73eb..fef3ccc6ad9 100644 --- a/extensions/voice-call/openclaw.plugin.json +++ b/extensions/voice-call/openclaw.plugin.json @@ -522,11 +522,22 @@ 
"apiKey": { "type": "string" }, + "baseUrl": { + "type": "string" + }, "model": { "type": "string" }, "voice": { "type": "string" + }, + "speed": { + "type": "number", + "minimum": 0.25, + "maximum": 4.0 + }, + "instructions": { + "type": "string" } } }, diff --git a/extensions/voice-call/package.json b/extensions/voice-call/package.json index 420f8b41560..3c65532f9c9 100644 --- a/extensions/voice-call/package.json +++ b/extensions/voice-call/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/voice-call", - "version": "2026.3.9", + "version": "2026.3.14", "description": "OpenClaw voice-call plugin", "type": "module", "dependencies": { diff --git a/extensions/voice-call/src/manager.restore.test.ts b/extensions/voice-call/src/manager.restore.test.ts index f7f142a16ff..8f76169546f 100644 --- a/extensions/voice-call/src/manager.restore.test.ts +++ b/extensions/voice-call/src/manager.restore.test.ts @@ -9,121 +9,87 @@ import { } from "./manager.test-harness.js"; describe("CallManager verification on restore", () => { - it("skips stale calls reported terminal by provider", async () => { + async function initializeManager(params?: { + callOverrides?: Parameters[0]; + providerResult?: FakeProvider["getCallStatusResult"]; + configureProvider?: (provider: FakeProvider) => void; + configOverrides?: Partial<{ maxDurationSeconds: number }>; + }) { const storePath = createTestStorePath(); - const call = makePersistedCall(); + const call = makePersistedCall(params?.callOverrides); writeCallsToStore(storePath, [call]); const provider = new FakeProvider(); - provider.getCallStatusResult = { status: "completed", isTerminal: true }; + if (params?.providerResult) { + provider.getCallStatusResult = params.providerResult; + } + params?.configureProvider?.(provider); const config = VoiceCallConfigSchema.parse({ enabled: true, provider: "plivo", fromNumber: "+15550000000", + ...params?.configOverrides, }); const manager = new CallManager(config, storePath); await 
manager.initialize(provider, "https://example.com/voice/webhook"); + return { call, manager }; + } + + it("skips stale calls reported terminal by provider", async () => { + const { manager } = await initializeManager({ + providerResult: { status: "completed", isTerminal: true }, + }); + expect(manager.getActiveCalls()).toHaveLength(0); }); it("keeps calls reported active by provider", async () => { - const storePath = createTestStorePath(); - const call = makePersistedCall(); - writeCallsToStore(storePath, [call]); - - const provider = new FakeProvider(); - provider.getCallStatusResult = { status: "in-progress", isTerminal: false }; - - const config = VoiceCallConfigSchema.parse({ - enabled: true, - provider: "plivo", - fromNumber: "+15550000000", + const { call, manager } = await initializeManager({ + providerResult: { status: "in-progress", isTerminal: false }, }); - const manager = new CallManager(config, storePath); - await manager.initialize(provider, "https://example.com/voice/webhook"); expect(manager.getActiveCalls()).toHaveLength(1); expect(manager.getActiveCalls()[0]?.callId).toBe(call.callId); }); it("keeps calls when provider returns unknown (transient error)", async () => { - const storePath = createTestStorePath(); - const call = makePersistedCall(); - writeCallsToStore(storePath, [call]); - - const provider = new FakeProvider(); - provider.getCallStatusResult = { status: "error", isTerminal: false, isUnknown: true }; - - const config = VoiceCallConfigSchema.parse({ - enabled: true, - provider: "plivo", - fromNumber: "+15550000000", + const { manager } = await initializeManager({ + providerResult: { status: "error", isTerminal: false, isUnknown: true }, }); - const manager = new CallManager(config, storePath); - await manager.initialize(provider, "https://example.com/voice/webhook"); expect(manager.getActiveCalls()).toHaveLength(1); }); it("skips calls older than maxDurationSeconds", async () => { - const storePath = createTestStorePath(); - const 
call = makePersistedCall({ - startedAt: Date.now() - 600_000, - answeredAt: Date.now() - 590_000, + const { manager } = await initializeManager({ + callOverrides: { + startedAt: Date.now() - 600_000, + answeredAt: Date.now() - 590_000, + }, + configOverrides: { maxDurationSeconds: 300 }, }); - writeCallsToStore(storePath, [call]); - - const provider = new FakeProvider(); - - const config = VoiceCallConfigSchema.parse({ - enabled: true, - provider: "plivo", - fromNumber: "+15550000000", - maxDurationSeconds: 300, - }); - const manager = new CallManager(config, storePath); - await manager.initialize(provider, "https://example.com/voice/webhook"); expect(manager.getActiveCalls()).toHaveLength(0); }); it("skips calls without providerCallId", async () => { - const storePath = createTestStorePath(); - const call = makePersistedCall({ providerCallId: undefined, state: "initiated" }); - writeCallsToStore(storePath, [call]); - - const provider = new FakeProvider(); - - const config = VoiceCallConfigSchema.parse({ - enabled: true, - provider: "plivo", - fromNumber: "+15550000000", + const { manager } = await initializeManager({ + callOverrides: { providerCallId: undefined, state: "initiated" }, }); - const manager = new CallManager(config, storePath); - await manager.initialize(provider, "https://example.com/voice/webhook"); expect(manager.getActiveCalls()).toHaveLength(0); }); it("keeps call when getCallStatus throws (verification failure)", async () => { - const storePath = createTestStorePath(); - const call = makePersistedCall(); - writeCallsToStore(storePath, [call]); - - const provider = new FakeProvider(); - provider.getCallStatus = async () => { - throw new Error("network failure"); - }; - - const config = VoiceCallConfigSchema.parse({ - enabled: true, - provider: "plivo", - fromNumber: "+15550000000", + const { manager } = await initializeManager({ + configureProvider: (provider) => { + provider.getCallStatus = async () => { + throw new Error("network failure"); + 
}; + }, }); - const manager = new CallManager(config, storePath); - await manager.initialize(provider, "https://example.com/voice/webhook"); expect(manager.getActiveCalls()).toHaveLength(1); }); diff --git a/extensions/voice-call/src/providers/telnyx.test.ts b/extensions/voice-call/src/providers/telnyx.test.ts index c083070229f..15a4cc8f17f 100644 --- a/extensions/voice-call/src/providers/telnyx.test.ts +++ b/extensions/voice-call/src/providers/telnyx.test.ts @@ -22,6 +22,34 @@ function decodeBase64Url(input: string): Buffer { return Buffer.from(padded, "base64"); } +function createSignedTelnyxCtx(params: { + privateKey: crypto.KeyObject; + rawBody: string; +}): WebhookContext { + const timestamp = String(Math.floor(Date.now() / 1000)); + const signedPayload = `${timestamp}|${params.rawBody}`; + const signature = crypto + .sign(null, Buffer.from(signedPayload), params.privateKey) + .toString("base64"); + + return createCtx({ + rawBody: params.rawBody, + headers: { + "telnyx-signature-ed25519": signature, + "telnyx-timestamp": timestamp, + }, + }); +} + +function expectReplayVerification( + results: Array<{ ok: boolean; isReplay?: boolean; verifiedRequestKey?: string }>, +) { + expect(results.map((result) => result.ok)).toEqual([true, true]); + expect(results.map((result) => Boolean(result.isReplay))).toEqual([false, true]); + expect(results[0]?.verifiedRequestKey).toEqual(expect.any(String)); + expect(results[1]?.verifiedRequestKey).toBe(results[0]?.verifiedRequestKey); +} + function expectWebhookVerificationSucceeds(params: { publicKey: string; privateKey: crypto.KeyObject; @@ -35,20 +63,8 @@ function expectWebhookVerificationSucceeds(params: { event_type: "call.initiated", payload: { call_control_id: "x" }, }); - const timestamp = String(Math.floor(Date.now() / 1000)); - const signedPayload = `${timestamp}|${rawBody}`; - const signature = crypto - .sign(null, Buffer.from(signedPayload), params.privateKey) - .toString("base64"); - const result = 
provider.verifyWebhook( - createCtx({ - rawBody, - headers: { - "telnyx-signature-ed25519": signature, - "telnyx-timestamp": timestamp, - }, - }), + createSignedTelnyxCtx({ privateKey: params.privateKey, rawBody }), ); expect(result.ok).toBe(true); } @@ -117,26 +133,12 @@ describe("TelnyxProvider.verifyWebhook", () => { payload: { call_control_id: "call-replay-test" }, nonce: crypto.randomUUID(), }); - const timestamp = String(Math.floor(Date.now() / 1000)); - const signedPayload = `${timestamp}|${rawBody}`; - const signature = crypto.sign(null, Buffer.from(signedPayload), privateKey).toString("base64"); - const ctx = createCtx({ - rawBody, - headers: { - "telnyx-signature-ed25519": signature, - "telnyx-timestamp": timestamp, - }, - }); + const ctx = createSignedTelnyxCtx({ privateKey, rawBody }); const first = provider.verifyWebhook(ctx); const second = provider.verifyWebhook(ctx); - expect(first.ok).toBe(true); - expect(first.isReplay).toBeFalsy(); - expect(first.verifiedRequestKey).toBeTruthy(); - expect(second.ok).toBe(true); - expect(second.isReplay).toBe(true); - expect(second.verifiedRequestKey).toBe(first.verifiedRequestKey); + expectReplayVerification([first, second]); }); }); diff --git a/extensions/voice-call/src/providers/tts-openai.ts b/extensions/voice-call/src/providers/tts-openai.ts index a27030b4578..0a7c74d90ac 100644 --- a/extensions/voice-call/src/providers/tts-openai.ts +++ b/extensions/voice-call/src/providers/tts-openai.ts @@ -1,3 +1,4 @@ +import { resolveOpenAITtsInstructions } from "openclaw/plugin-sdk/voice-call"; import { pcmToMulaw } from "../telephony-audio.js"; /** @@ -110,9 +111,11 @@ export class OpenAITTSProvider { speed: this.speed, }; - // Add instructions if using gpt-4o-mini-tts model - const effectiveInstructions = trimToUndefined(instructions) ?? 
this.instructions; - if (effectiveInstructions && this.model.includes("gpt-4o-mini-tts")) { + const effectiveInstructions = resolveOpenAITtsInstructions( + this.model, + trimToUndefined(instructions) ?? this.instructions, + ); + if (effectiveInstructions) { body.instructions = effectiveInstructions; } diff --git a/extensions/voice-call/src/providers/twilio.test.ts b/extensions/voice-call/src/providers/twilio.test.ts index 0a88bdeae07..4e23783b93a 100644 --- a/extensions/voice-call/src/providers/twilio.test.ts +++ b/extensions/voice-call/src/providers/twilio.test.ts @@ -21,6 +21,12 @@ function createContext(rawBody: string, query?: WebhookContext["query"]): Webhoo }; } +function expectStreamingTwiml(body: string) { + expect(body).toContain(STREAM_URL); + expect(body).toContain('"); +} + describe("TwilioProvider", () => { it("returns streaming TwiML for outbound conversation calls before in-progress", () => { const provider = createProvider(); @@ -30,9 +36,8 @@ describe("TwilioProvider", () => { const result = provider.parseWebhookEvent(ctx); - expect(result.providerResponseBody).toContain(STREAM_URL); - expect(result.providerResponseBody).toContain('"); + expect(result.providerResponseBody).toBeDefined(); + expectStreamingTwiml(result.providerResponseBody ?? ""); }); it("returns empty TwiML for status callbacks", () => { @@ -55,9 +60,8 @@ describe("TwilioProvider", () => { const result = provider.parseWebhookEvent(ctx); - expect(result.providerResponseBody).toContain(STREAM_URL); - expect(result.providerResponseBody).toContain('"); + expect(result.providerResponseBody).toBeDefined(); + expectStreamingTwiml(result.providerResponseBody ?? 
""); }); it("returns queue TwiML for second inbound call when first call is active", () => { diff --git a/extensions/voice-call/src/webhook-security.test.ts b/extensions/voice-call/src/webhook-security.test.ts index 3134f18b729..3fe3cd473a1 100644 --- a/extensions/voice-call/src/webhook-security.test.ts +++ b/extensions/voice-call/src/webhook-security.test.ts @@ -98,6 +98,51 @@ function expectReplayResultPair( expect(second.verifiedRequestKey).toBe(first.verifiedRequestKey); } +function expectAcceptedWebhookVersion( + result: { ok: boolean; version?: string }, + version: "v2" | "v3", +) { + expect(result).toMatchObject({ ok: true, version }); +} + +function verifyTwilioNgrokLoopback(signature: string) { + return verifyTwilioWebhook( + { + headers: { + host: "127.0.0.1:3334", + "x-forwarded-proto": "https", + "x-forwarded-host": "local.ngrok-free.app", + "x-twilio-signature": signature, + }, + rawBody: "CallSid=CS123&CallStatus=completed&From=%2B15550000000", + url: "http://127.0.0.1:3334/voice/webhook", + method: "POST", + remoteAddress: "127.0.0.1", + }, + "test-auth-token", + { allowNgrokFreeTierLoopbackBypass: true }, + ); +} + +function verifyTwilioSignedRequest(params: { + headers: Record; + rawBody: string; + authToken: string; + publicUrl: string; +}) { + return verifyTwilioWebhook( + { + headers: params.headers, + rawBody: params.rawBody, + url: "http://local/voice/webhook?callId=abc", + method: "POST", + query: { callId: "abc" }, + }, + params.authToken, + { publicUrl: params.publicUrl }, + ); +} + describe("verifyPlivoWebhook", () => { it("accepts valid V2 signature", () => { const authToken = "test-auth-token"; @@ -127,8 +172,7 @@ describe("verifyPlivoWebhook", () => { authToken, ); - expect(result.ok).toBe(true); - expect(result.version).toBe("v2"); + expectAcceptedWebhookVersion(result, "v2"); }); it("accepts valid V3 signature (including multi-signature header)", () => { @@ -161,8 +205,7 @@ describe("verifyPlivoWebhook", () => { authToken, ); - 
expect(result.ok).toBe(true); - expect(result.version).toBe("v3"); + expectAcceptedWebhookVersion(result, "v3"); }); it("rejects missing signatures", () => { @@ -317,35 +360,10 @@ describe("verifyTwilioWebhook", () => { "i-twilio-idempotency-token": "idem-replay-1", }; - const first = verifyTwilioWebhook( - { - headers, - rawBody: postBody, - url: "http://local/voice/webhook?callId=abc", - method: "POST", - query: { callId: "abc" }, - }, - authToken, - { publicUrl }, - ); - const second = verifyTwilioWebhook( - { - headers, - rawBody: postBody, - url: "http://local/voice/webhook?callId=abc", - method: "POST", - query: { callId: "abc" }, - }, - authToken, - { publicUrl }, - ); + const first = verifyTwilioSignedRequest({ headers, rawBody: postBody, authToken, publicUrl }); + const second = verifyTwilioSignedRequest({ headers, rawBody: postBody, authToken, publicUrl }); - expect(first.ok).toBe(true); - expect(first.isReplay).toBeFalsy(); - expect(first.verifiedRequestKey).toBeTruthy(); - expect(second.ok).toBe(true); - expect(second.isReplay).toBe(true); - expect(second.verifiedRequestKey).toBe(first.verifiedRequestKey); + expectReplayResultPair(first, second); }); it("treats changed idempotency header as replay for identical signed requests", () => { @@ -355,45 +373,30 @@ describe("verifyTwilioWebhook", () => { const postBody = "CallSid=CS778&CallStatus=completed&From=%2B15550000000"; const signature = twilioSignature({ authToken, url: urlWithQuery, postBody }); - const first = verifyTwilioWebhook( - { - headers: { - host: "example.com", - "x-forwarded-proto": "https", - "x-twilio-signature": signature, - "i-twilio-idempotency-token": "idem-replay-a", - }, - rawBody: postBody, - url: "http://local/voice/webhook?callId=abc", - method: "POST", - query: { callId: "abc" }, + const first = verifyTwilioSignedRequest({ + headers: { + host: "example.com", + "x-forwarded-proto": "https", + "x-twilio-signature": signature, + "i-twilio-idempotency-token": "idem-replay-a", }, + 
rawBody: postBody, authToken, - { publicUrl }, - ); - const second = verifyTwilioWebhook( - { - headers: { - host: "example.com", - "x-forwarded-proto": "https", - "x-twilio-signature": signature, - "i-twilio-idempotency-token": "idem-replay-b", - }, - rawBody: postBody, - url: "http://local/voice/webhook?callId=abc", - method: "POST", - query: { callId: "abc" }, + publicUrl, + }); + const second = verifyTwilioSignedRequest({ + headers: { + host: "example.com", + "x-forwarded-proto": "https", + "x-twilio-signature": signature, + "i-twilio-idempotency-token": "idem-replay-b", }, + rawBody: postBody, authToken, - { publicUrl }, - ); + publicUrl, + }); - expect(first.ok).toBe(true); - expect(first.isReplay).toBe(false); - expect(first.verifiedRequestKey).toBeTruthy(); - expect(second.ok).toBe(true); - expect(second.isReplay).toBe(true); - expect(second.verifiedRequestKey).toBe(first.verifiedRequestKey); + expectReplayResultPair(first, second); }); it("rejects invalid signatures even when attacker injects forwarded host", () => { @@ -422,57 +425,22 @@ describe("verifyTwilioWebhook", () => { }); it("accepts valid signatures for ngrok free tier on loopback when compatibility mode is enabled", () => { - const authToken = "test-auth-token"; - const postBody = "CallSid=CS123&CallStatus=completed&From=%2B15550000000"; const webhookUrl = "https://local.ngrok-free.app/voice/webhook"; const signature = twilioSignature({ - authToken, + authToken: "test-auth-token", url: webhookUrl, - postBody, + postBody: "CallSid=CS123&CallStatus=completed&From=%2B15550000000", }); - const result = verifyTwilioWebhook( - { - headers: { - host: "127.0.0.1:3334", - "x-forwarded-proto": "https", - "x-forwarded-host": "local.ngrok-free.app", - "x-twilio-signature": signature, - }, - rawBody: postBody, - url: "http://127.0.0.1:3334/voice/webhook", - method: "POST", - remoteAddress: "127.0.0.1", - }, - authToken, - { allowNgrokFreeTierLoopbackBypass: true }, - ); + const result = 
verifyTwilioNgrokLoopback(signature); expect(result.ok).toBe(true); expect(result.verificationUrl).toBe(webhookUrl); }); it("does not allow invalid signatures for ngrok free tier on loopback", () => { - const authToken = "test-auth-token"; - const postBody = "CallSid=CS123&CallStatus=completed&From=%2B15550000000"; - - const result = verifyTwilioWebhook( - { - headers: { - host: "127.0.0.1:3334", - "x-forwarded-proto": "https", - "x-forwarded-host": "local.ngrok-free.app", - "x-twilio-signature": "invalid", - }, - rawBody: postBody, - url: "http://127.0.0.1:3334/voice/webhook", - method: "POST", - remoteAddress: "127.0.0.1", - }, - authToken, - { allowNgrokFreeTierLoopbackBypass: true }, - ); + const result = verifyTwilioNgrokLoopback("invalid"); expect(result.ok).toBe(false); expect(result.reason).toMatch(/Invalid signature/); diff --git a/extensions/voice-call/src/webhook.test.ts b/extensions/voice-call/src/webhook.test.ts index f5a827a3ef3..6297a69f14a 100644 --- a/extensions/voice-call/src/webhook.test.ts +++ b/extensions/voice-call/src/webhook.test.ts @@ -56,6 +56,28 @@ const createManager = (calls: CallRecord[]) => { return { manager, endCall, processEvent }; }; +async function runStaleCallReaperCase(params: { + callAgeMs: number; + staleCallReaperSeconds: number; + advanceMs: number; +}) { + const now = new Date("2026-02-16T00:00:00Z"); + vi.setSystemTime(now); + + const call = createCall(now.getTime() - params.callAgeMs); + const { manager, endCall } = createManager([call]); + const config = createConfig({ staleCallReaperSeconds: params.staleCallReaperSeconds }); + const server = new VoiceCallWebhookServer(config, manager, provider); + + try { + await server.start(); + await vi.advanceTimersByTimeAsync(params.advanceMs); + return { call, endCall }; + } finally { + await server.stop(); + } +} + async function postWebhookForm(server: VoiceCallWebhookServer, baseUrl: string, body: string) { const address = ( server as unknown as { server?: { address?: () => 
unknown } } @@ -81,39 +103,21 @@ describe("VoiceCallWebhookServer stale call reaper", () => { }); it("ends calls older than staleCallReaperSeconds", async () => { - const now = new Date("2026-02-16T00:00:00Z"); - vi.setSystemTime(now); - - const call = createCall(now.getTime() - 120_000); - const { manager, endCall } = createManager([call]); - const config = createConfig({ staleCallReaperSeconds: 60 }); - const server = new VoiceCallWebhookServer(config, manager, provider); - - try { - await server.start(); - await vi.advanceTimersByTimeAsync(30_000); - expect(endCall).toHaveBeenCalledWith(call.callId); - } finally { - await server.stop(); - } + const { call, endCall } = await runStaleCallReaperCase({ + callAgeMs: 120_000, + staleCallReaperSeconds: 60, + advanceMs: 30_000, + }); + expect(endCall).toHaveBeenCalledWith(call.callId); }); it("skips calls that are younger than the threshold", async () => { - const now = new Date("2026-02-16T00:00:00Z"); - vi.setSystemTime(now); - - const call = createCall(now.getTime() - 10_000); - const { manager, endCall } = createManager([call]); - const config = createConfig({ staleCallReaperSeconds: 60 }); - const server = new VoiceCallWebhookServer(config, manager, provider); - - try { - await server.start(); - await vi.advanceTimersByTimeAsync(30_000); - expect(endCall).not.toHaveBeenCalled(); - } finally { - await server.stop(); - } + const { endCall } = await runStaleCallReaperCase({ + callAgeMs: 10_000, + staleCallReaperSeconds: 60, + advanceMs: 30_000, + }); + expect(endCall).not.toHaveBeenCalled(); }); it("does not run when staleCallReaperSeconds is disabled", async () => { diff --git a/extensions/whatsapp/package.json b/extensions/whatsapp/package.json index c87a5f26c2b..ec73a1b0613 100644 --- a/extensions/whatsapp/package.json +++ b/extensions/whatsapp/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/whatsapp", - "version": "2026.3.9", + "version": "2026.3.14", "private": true, "description": "OpenClaw WhatsApp channel 
plugin", "type": "module", diff --git a/extensions/whatsapp/src/channel.outbound.test.ts b/extensions/whatsapp/src/channel.outbound.test.ts index 758274619e0..70220dcac3b 100644 --- a/extensions/whatsapp/src/channel.outbound.test.ts +++ b/extensions/whatsapp/src/channel.outbound.test.ts @@ -1,5 +1,8 @@ -import type { OpenClawConfig } from "openclaw/plugin-sdk/whatsapp"; import { describe, expect, it, vi } from "vitest"; +import { + createWhatsAppPollFixture, + expectWhatsAppPollSent, +} from "../../../src/test-helpers/whatsapp-outbound.js"; const hoisted = vi.hoisted(() => ({ sendPollWhatsApp: vi.fn(async () => ({ messageId: "wa-poll-1", toJid: "1555@s.whatsapp.net" })), @@ -22,25 +25,16 @@ import { whatsappPlugin } from "./channel.js"; describe("whatsappPlugin outbound sendPoll", () => { it("threads cfg into runtime sendPollWhatsApp call", async () => { - const cfg = { marker: "resolved-cfg" } as OpenClawConfig; - const poll = { - question: "Lunch?", - options: ["Pizza", "Sushi"], - maxSelections: 1, - }; + const { cfg, poll, to, accountId } = createWhatsAppPollFixture(); const result = await whatsappPlugin.outbound!.sendPoll!({ cfg, - to: "+1555", + to, poll, - accountId: "work", + accountId, }); - expect(hoisted.sendPollWhatsApp).toHaveBeenCalledWith("+1555", poll, { - verbose: false, - accountId: "work", - cfg, - }); + expectWhatsAppPollSent(hoisted.sendPollWhatsApp, { cfg, poll, to, accountId }); expect(result).toEqual({ messageId: "wa-poll-1", toJid: "1555@s.whatsapp.net" }); }); }); diff --git a/extensions/whatsapp/src/channel.ts b/extensions/whatsapp/src/channel.ts index 274b5e07883..5be1ba412b0 100644 --- a/extensions/whatsapp/src/channel.ts +++ b/extensions/whatsapp/src/channel.ts @@ -8,6 +8,7 @@ import { buildChannelConfigSchema, collectWhatsAppStatusIssues, createActionGate, + createWhatsAppOutboundBase, DEFAULT_ACCOUNT_ID, getChatChannelMeta, listWhatsAppAccountIds, @@ -283,52 +284,16 @@ export const whatsappPlugin: ChannelPlugin = { ); }, }, - 
outbound: { - deliveryMode: "gateway", + outbound: createWhatsAppOutboundBase({ chunker: (text, limit) => getWhatsAppRuntime().channel.text.chunkText(text, limit), - chunkerMode: "text", - textChunkLimit: 4000, - pollMaxOptions: 12, + sendMessageWhatsApp: async (...args) => + await getWhatsAppRuntime().channel.whatsapp.sendMessageWhatsApp(...args), + sendPollWhatsApp: async (...args) => + await getWhatsAppRuntime().channel.whatsapp.sendPollWhatsApp(...args), + shouldLogVerbose: () => getWhatsAppRuntime().logging.shouldLogVerbose(), resolveTarget: ({ to, allowFrom, mode }) => resolveWhatsAppOutboundTarget({ to, allowFrom, mode }), - sendText: async ({ cfg, to, text, accountId, deps, gifPlayback }) => { - const send = deps?.sendWhatsApp ?? getWhatsAppRuntime().channel.whatsapp.sendMessageWhatsApp; - const result = await send(to, text, { - verbose: false, - cfg, - accountId: accountId ?? undefined, - gifPlayback, - }); - return { channel: "whatsapp", ...result }; - }, - sendMedia: async ({ - cfg, - to, - text, - mediaUrl, - mediaLocalRoots, - accountId, - deps, - gifPlayback, - }) => { - const send = deps?.sendWhatsApp ?? getWhatsAppRuntime().channel.whatsapp.sendMessageWhatsApp; - const result = await send(to, text, { - verbose: false, - cfg, - mediaUrl, - mediaLocalRoots, - accountId: accountId ?? undefined, - gifPlayback, - }); - return { channel: "whatsapp", ...result }; - }, - sendPoll: async ({ cfg, to, poll, accountId }) => - await getWhatsAppRuntime().channel.whatsapp.sendPollWhatsApp(to, poll, { - verbose: getWhatsAppRuntime().logging.shouldLogVerbose(), - accountId: accountId ?? 
undefined, - cfg, - }), - }, + }), auth: { login: async ({ cfg, accountId, runtime, verbose }) => { const resolvedAccountId = accountId?.trim() || resolveDefaultWhatsAppAccountId(cfg); diff --git a/extensions/zalo/CHANGELOG.md b/extensions/zalo/CHANGELOG.md index 5ae5323034f..6c3b72b8fbb 100644 --- a/extensions/zalo/CHANGELOG.md +++ b/extensions/zalo/CHANGELOG.md @@ -1,5 +1,35 @@ # Changelog +## 2026.3.14 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.13 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.12 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. + ## 2026.3.9 ### Changes diff --git a/extensions/zalo/package.json b/extensions/zalo/package.json index 6de5909736f..a72aabbb29e 100644 --- a/extensions/zalo/package.json +++ b/extensions/zalo/package.json @@ -1,10 +1,10 @@ { "name": "@openclaw/zalo", - "version": "2026.3.9", + "version": "2026.3.14", "description": "OpenClaw Zalo channel plugin", "type": "module", "dependencies": { - "undici": "7.22.0", + "undici": "7.24.1", "zod": "^4.3.6" }, "openclaw": { diff --git a/extensions/zalo/src/api.test.ts b/extensions/zalo/src/api.test.ts index 00198f5072e..ffdeab84ae4 100644 --- a/extensions/zalo/src/api.test.ts +++ b/extensions/zalo/src/api.test.ts @@ -1,31 +1,26 @@ import { describe, expect, it, vi } from "vitest"; import { deleteWebhook, getWebhookInfo, sendChatAction, type ZaloFetch } from "./api.js"; +function createOkFetcher() { + return vi.fn(async () => new Response(JSON.stringify({ ok: true, result: {} }))); +} + +async function expectPostJsonRequest(run: (token: string, fetcher: ZaloFetch) => Promise) { + const fetcher = createOkFetcher(); + await run("test-token", fetcher); + expect(fetcher).toHaveBeenCalledTimes(1); + const [, 
init] = fetcher.mock.calls[0] ?? []; + expect(init?.method).toBe("POST"); + expect(init?.headers).toEqual({ "Content-Type": "application/json" }); +} + describe("Zalo API request methods", () => { it("uses POST for getWebhookInfo", async () => { - const fetcher = vi.fn( - async () => new Response(JSON.stringify({ ok: true, result: {} })), - ); - - await getWebhookInfo("test-token", fetcher); - - expect(fetcher).toHaveBeenCalledTimes(1); - const [, init] = fetcher.mock.calls[0] ?? []; - expect(init?.method).toBe("POST"); - expect(init?.headers).toEqual({ "Content-Type": "application/json" }); + await expectPostJsonRequest(getWebhookInfo); }); it("keeps POST for deleteWebhook", async () => { - const fetcher = vi.fn( - async () => new Response(JSON.stringify({ ok: true, result: {} })), - ); - - await deleteWebhook("test-token", fetcher); - - expect(fetcher).toHaveBeenCalledTimes(1); - const [, init] = fetcher.mock.calls[0] ?? []; - expect(init?.method).toBe("POST"); - expect(init?.headers).toEqual({ "Content-Type": "application/json" }); + await expectPostJsonRequest(deleteWebhook); }); it("aborts sendChatAction when the typing timeout elapses", async () => { diff --git a/extensions/zalo/src/channel.directory.test.ts b/extensions/zalo/src/channel.directory.test.ts index 99821c85017..8a303e72a97 100644 --- a/extensions/zalo/src/channel.directory.test.ts +++ b/extensions/zalo/src/channel.directory.test.ts @@ -1,15 +1,10 @@ import type { OpenClawConfig, RuntimeEnv } from "openclaw/plugin-sdk/zalo"; import { describe, expect, it } from "vitest"; +import { createDirectoryTestRuntime, expectDirectorySurface } from "../../test-utils/directory.js"; import { zaloPlugin } from "./channel.js"; describe("zalo directory", () => { - const runtimeEnv: RuntimeEnv = { - log: () => {}, - error: () => {}, - exit: (code: number): never => { - throw new Error(`exit ${code}`); - }, - }; + const runtimeEnv = createDirectoryTestRuntime() as RuntimeEnv; it("lists peers from allowFrom", async 
() => { const cfg = { @@ -20,12 +15,10 @@ describe("zalo directory", () => { }, } as unknown as OpenClawConfig; - expect(zaloPlugin.directory).toBeTruthy(); - expect(zaloPlugin.directory?.listPeers).toBeTruthy(); - expect(zaloPlugin.directory?.listGroups).toBeTruthy(); + const directory = expectDirectorySurface(zaloPlugin.directory); await expect( - zaloPlugin.directory!.listPeers!({ + directory.listPeers({ cfg, accountId: undefined, query: undefined, @@ -41,7 +34,7 @@ describe("zalo directory", () => { ); await expect( - zaloPlugin.directory!.listGroups!({ + directory.listGroups({ cfg, accountId: undefined, query: undefined, diff --git a/extensions/zalo/src/channel.sendpayload.test.ts b/extensions/zalo/src/channel.sendpayload.test.ts index 6cc072ac6dd..27acb737f9f 100644 --- a/extensions/zalo/src/channel.sendpayload.test.ts +++ b/extensions/zalo/src/channel.sendpayload.test.ts @@ -1,5 +1,9 @@ import type { ReplyPayload } from "openclaw/plugin-sdk/zalo"; import { beforeEach, describe, expect, it, vi } from "vitest"; +import { + installSendPayloadContractSuite, + primeSendMock, +} from "../../../src/test-utils/send-payload-contract.js"; import { zaloPlugin } from "./channel.js"; vi.mock("./send.js", () => ({ @@ -25,78 +29,16 @@ describe("zaloPlugin outbound sendPayload", () => { mockedSend.mockResolvedValue({ ok: true, messageId: "zl-1" }); }); - it("text-only delegates to sendText", async () => { - mockedSend.mockResolvedValue({ ok: true, messageId: "zl-t1" }); - - const result = await zaloPlugin.outbound!.sendPayload!(baseCtx({ text: "hello" })); - - expect(mockedSend).toHaveBeenCalledWith("123456789", "hello", expect.any(Object)); - expect(result).toMatchObject({ channel: "zalo", messageId: "zl-t1" }); - }); - - it("single media delegates to sendMedia", async () => { - mockedSend.mockResolvedValue({ ok: true, messageId: "zl-m1" }); - - const result = await zaloPlugin.outbound!.sendPayload!( - baseCtx({ text: "cap", mediaUrl: "https://example.com/a.jpg" }), - ); - 
- expect(mockedSend).toHaveBeenCalledWith( - "123456789", - "cap", - expect.objectContaining({ mediaUrl: "https://example.com/a.jpg" }), - ); - expect(result).toMatchObject({ channel: "zalo" }); - }); - - it("multi-media iterates URLs with caption on first", async () => { - mockedSend - .mockResolvedValueOnce({ ok: true, messageId: "zl-1" }) - .mockResolvedValueOnce({ ok: true, messageId: "zl-2" }); - - const result = await zaloPlugin.outbound!.sendPayload!( - baseCtx({ - text: "caption", - mediaUrls: ["https://example.com/1.jpg", "https://example.com/2.jpg"], - }), - ); - - expect(mockedSend).toHaveBeenCalledTimes(2); - expect(mockedSend).toHaveBeenNthCalledWith( - 1, - "123456789", - "caption", - expect.objectContaining({ mediaUrl: "https://example.com/1.jpg" }), - ); - expect(mockedSend).toHaveBeenNthCalledWith( - 2, - "123456789", - "", - expect.objectContaining({ mediaUrl: "https://example.com/2.jpg" }), - ); - expect(result).toMatchObject({ channel: "zalo", messageId: "zl-2" }); - }); - - it("empty payload returns no-op", async () => { - const result = await zaloPlugin.outbound!.sendPayload!(baseCtx({})); - - expect(mockedSend).not.toHaveBeenCalled(); - expect(result).toEqual({ channel: "zalo", messageId: "" }); - }); - - it("chunking splits long text", async () => { - mockedSend - .mockResolvedValueOnce({ ok: true, messageId: "zl-c1" }) - .mockResolvedValueOnce({ ok: true, messageId: "zl-c2" }); - - const longText = "a".repeat(3000); - const result = await zaloPlugin.outbound!.sendPayload!(baseCtx({ text: longText })); - - // textChunkLimit is 2000 with chunkTextForOutbound, so it should split - expect(mockedSend.mock.calls.length).toBeGreaterThanOrEqual(2); - for (const call of mockedSend.mock.calls) { - expect((call[1] as string).length).toBeLessThanOrEqual(2000); - } - expect(result).toMatchObject({ channel: "zalo" }); + installSendPayloadContractSuite({ + channel: "zalo", + chunking: { mode: "split", longTextLength: 3000, maxChunkLength: 2000 }, + 
createHarness: ({ payload, sendResults }) => { + primeSendMock(mockedSend, { ok: true, messageId: "zl-1" }, sendResults); + return { + run: async () => await zaloPlugin.outbound!.sendPayload!(baseCtx(payload)), + sendMock: mockedSend, + to: "123456789", + }; + }, }); }); diff --git a/extensions/zalo/src/channel.startup.test.ts b/extensions/zalo/src/channel.startup.test.ts index 65e413f0f4f..ea0718d29a2 100644 --- a/extensions/zalo/src/channel.startup.test.ts +++ b/extensions/zalo/src/channel.startup.test.ts @@ -1,6 +1,9 @@ import type { ChannelAccountSnapshot } from "openclaw/plugin-sdk/zalo"; import { afterEach, describe, expect, it, vi } from "vitest"; -import { createStartAccountContext } from "../../test-utils/start-account-context.js"; +import { + expectPendingUntilAbort, + startAccountAndTrackLifecycle, +} from "../../test-utils/start-account-lifecycle.js"; import type { ResolvedZaloAccount } from "./accounts.js"; const hoisted = vi.hoisted(() => ({ @@ -57,37 +60,28 @@ describe("zaloPlugin gateway.startAccount", () => { }), ); - const patches: ChannelAccountSnapshot[] = []; - const abort = new AbortController(); - const task = zaloPlugin.gateway!.startAccount!( - createStartAccountContext({ - account: buildAccount(), - abortSignal: abort.signal, - statusPatchSink: (next) => patches.push({ ...next }), - }), - ); - - let settled = false; - void task.then(() => { - settled = true; + const { abort, patches, task, isSettled } = startAccountAndTrackLifecycle({ + startAccount: zaloPlugin.gateway!.startAccount!, + account: buildAccount(), }); - await vi.waitFor(() => { - expect(hoisted.probeZalo).toHaveBeenCalledOnce(); - expect(hoisted.monitorZaloProvider).toHaveBeenCalledOnce(); + await expectPendingUntilAbort({ + waitForStarted: () => + vi.waitFor(() => { + expect(hoisted.probeZalo).toHaveBeenCalledOnce(); + expect(hoisted.monitorZaloProvider).toHaveBeenCalledOnce(); + }), + isSettled, + abort, + task, }); - expect(settled).toBe(false); 
expect(patches).toContainEqual( expect.objectContaining({ accountId: "default", }), ); - - abort.abort(); - await task; - - expect(settled).toBe(true); + expect(isSettled()).toBe(true); expect(hoisted.monitorZaloProvider).toHaveBeenCalledWith( expect.objectContaining({ token: "test-token", diff --git a/extensions/zalo/src/channel.ts b/extensions/zalo/src/channel.ts index e4671bb90c1..b374ecfbd63 100644 --- a/extensions/zalo/src/channel.ts +++ b/extensions/zalo/src/channel.ts @@ -1,8 +1,9 @@ import { buildAccountScopedDmSecurityPolicy, - collectOpenProviderGroupPolicyWarnings, buildOpenGroupPolicyRestrictSendersWarning, buildOpenGroupPolicyWarning, + collectOpenProviderGroupPolicyWarnings, + createAccountStatusSink, mapAllowFromEntries, } from "openclaw/plugin-sdk/compat"; import type { @@ -357,6 +358,10 @@ export const zaloPlugin: ChannelPlugin = { `[${account.accountId}] Zalo probe threw before provider start: ${err instanceof Error ? (err.stack ?? err.message) : String(err)}`, ); } + const statusSink = createAccountStatusSink({ + accountId: ctx.accountId, + setStatus: ctx.setStatus, + }); ctx.log?.info(`[${account.accountId}] starting provider${zaloBotLabel} mode=${mode}`); const { monitorZaloProvider } = await import("./monitor.js"); return monitorZaloProvider({ @@ -370,7 +375,7 @@ export const zaloPlugin: ChannelPlugin = { webhookSecret: normalizeSecretInputString(account.config.webhookSecret), webhookPath: account.config.webhookPath, fetcher, - statusSink: (patch) => ctx.setStatus({ accountId: ctx.accountId, ...patch }), + statusSink, }); }, }, diff --git a/extensions/zalo/src/config-schema.ts b/extensions/zalo/src/config-schema.ts index 5f4886cdaf9..253830eb858 100644 --- a/extensions/zalo/src/config-schema.ts +++ b/extensions/zalo/src/config-schema.ts @@ -1,6 +1,8 @@ import { - AllowFromEntrySchema, + AllowFromListSchema, buildCatchallMultiAccountChannelSchema, + DmPolicySchema, + GroupPolicySchema, } from "openclaw/plugin-sdk/compat"; import { 
MarkdownConfigSchema } from "openclaw/plugin-sdk/zalo"; import { z } from "zod"; @@ -15,10 +17,10 @@ const zaloAccountSchema = z.object({ webhookUrl: z.string().optional(), webhookSecret: buildSecretInputSchema().optional(), webhookPath: z.string().optional(), - dmPolicy: z.enum(["pairing", "allowlist", "open", "disabled"]).optional(), - allowFrom: z.array(AllowFromEntrySchema).optional(), - groupPolicy: z.enum(["disabled", "allowlist", "open"]).optional(), - groupAllowFrom: z.array(AllowFromEntrySchema).optional(), + dmPolicy: DmPolicySchema.optional(), + allowFrom: AllowFromListSchema, + groupPolicy: GroupPolicySchema.optional(), + groupAllowFrom: AllowFromListSchema, mediaMaxMb: z.number().optional(), proxy: z.string().optional(), responsePrefix: z.string().optional(), diff --git a/extensions/zalo/src/monitor.lifecycle.test.ts b/extensions/zalo/src/monitor.lifecycle.test.ts index 6cce789da56..e5fa65e1063 100644 --- a/extensions/zalo/src/monitor.lifecycle.test.ts +++ b/extensions/zalo/src/monitor.lifecycle.test.ts @@ -32,6 +32,41 @@ async function waitForPollingLoopStart(): Promise { await vi.waitFor(() => expect(getUpdatesMock).toHaveBeenCalledTimes(1)); } +const TEST_ACCOUNT = { + accountId: "default", + config: {}, +} as unknown as ResolvedZaloAccount; + +const TEST_CONFIG = {} as OpenClawConfig; + +function createLifecycleRuntime() { + return { + log: vi.fn<(message: string) => void>(), + error: vi.fn<(message: string) => void>(), + }; +} + +async function startLifecycleMonitor( + options: { + useWebhook?: boolean; + webhookSecret?: string; + webhookUrl?: string; + } = {}, +) { + const { monitorZaloProvider } = await import("./monitor.js"); + const abort = new AbortController(); + const runtime = createLifecycleRuntime(); + const run = monitorZaloProvider({ + token: "test-token", + account: TEST_ACCOUNT, + config: TEST_CONFIG, + runtime, + abortSignal: abort.signal, + ...options, + }); + return { abort, runtime, run }; +} + describe("monitorZaloProvider 
lifecycle", () => { afterEach(() => { vi.clearAllMocks(); @@ -39,26 +74,9 @@ describe("monitorZaloProvider lifecycle", () => { }); it("stays alive in polling mode until abort", async () => { - const { monitorZaloProvider } = await import("./monitor.js"); - const abort = new AbortController(); - const runtime = { - log: vi.fn<(message: string) => void>(), - error: vi.fn<(message: string) => void>(), - }; - const account = { - accountId: "default", - config: {}, - } as unknown as ResolvedZaloAccount; - const config = {} as OpenClawConfig; - let settled = false; - const run = monitorZaloProvider({ - token: "test-token", - account, - config, - runtime, - abortSignal: abort.signal, - }).then(() => { + const { abort, runtime, run } = await startLifecycleMonitor(); + const monitoredRun = run.then(() => { settled = true; }); @@ -70,7 +88,7 @@ describe("monitorZaloProvider lifecycle", () => { expect(settled).toBe(false); abort.abort(); - await run; + await monitoredRun; expect(settled).toBe(true); expect(runtime.log).toHaveBeenCalledWith( @@ -84,25 +102,7 @@ describe("monitorZaloProvider lifecycle", () => { result: { url: "https://example.com/hooks/zalo" }, }); - const { monitorZaloProvider } = await import("./monitor.js"); - const abort = new AbortController(); - const runtime = { - log: vi.fn<(message: string) => void>(), - error: vi.fn<(message: string) => void>(), - }; - const account = { - accountId: "default", - config: {}, - } as unknown as ResolvedZaloAccount; - const config = {} as OpenClawConfig; - - const run = monitorZaloProvider({ - token: "test-token", - account, - config, - runtime, - abortSignal: abort.signal, - }); + const { abort, runtime, run } = await startLifecycleMonitor(); await waitForPollingLoopStart(); @@ -120,25 +120,7 @@ describe("monitorZaloProvider lifecycle", () => { const { ZaloApiError } = await import("./api.js"); getWebhookInfoMock.mockRejectedValueOnce(new ZaloApiError("Not Found", 404, "Not Found")); - const { monitorZaloProvider } = 
await import("./monitor.js"); - const abort = new AbortController(); - const runtime = { - log: vi.fn<(message: string) => void>(), - error: vi.fn<(message: string) => void>(), - }; - const account = { - accountId: "default", - config: {}, - } as unknown as ResolvedZaloAccount; - const config = {} as OpenClawConfig; - - const run = monitorZaloProvider({ - token: "test-token", - account, - config, - runtime, - abortSignal: abort.signal, - }); + const { abort, runtime, run } = await startLifecycleMonitor(); await waitForPollingLoopStart(); @@ -165,29 +147,13 @@ describe("monitorZaloProvider lifecycle", () => { }), ); - const { monitorZaloProvider } = await import("./monitor.js"); - const abort = new AbortController(); - const runtime = { - log: vi.fn<(message: string) => void>(), - error: vi.fn<(message: string) => void>(), - }; - const account = { - accountId: "default", - config: {}, - } as unknown as ResolvedZaloAccount; - const config = {} as OpenClawConfig; - let settled = false; - const run = monitorZaloProvider({ - token: "test-token", - account, - config, - runtime, - abortSignal: abort.signal, + const { abort, runtime, run } = await startLifecycleMonitor({ useWebhook: true, webhookUrl: "https://example.com/hooks/zalo", webhookSecret: "supersecret", // pragma: allowlist secret - }).then(() => { + }); + const monitoredRun = run.then(() => { settled = true; }); @@ -202,7 +168,7 @@ describe("monitorZaloProvider lifecycle", () => { expect(registry.httpRoutes).toHaveLength(1); resolveDeleteWebhook?.(); - await run; + await monitoredRun; expect(settled).toBe(true); expect(registry.httpRoutes).toHaveLength(0); diff --git a/extensions/zalo/src/monitor.ts b/extensions/zalo/src/monitor.ts index bd1351bd147..d82c0d96ba4 100644 --- a/extensions/zalo/src/monitor.ts +++ b/extensions/zalo/src/monitor.ts @@ -75,6 +75,35 @@ const WEBHOOK_CLEANUP_TIMEOUT_MS = 5_000; const ZALO_TYPING_TIMEOUT_MS = 5_000; type ZaloCoreRuntime = ReturnType; +type ZaloStatusSink = (patch: { 
lastInboundAt?: number; lastOutboundAt?: number }) => void; +type ZaloProcessingContext = { + token: string; + account: ResolvedZaloAccount; + config: OpenClawConfig; + runtime: ZaloRuntimeEnv; + core: ZaloCoreRuntime; + statusSink?: ZaloStatusSink; + fetcher?: ZaloFetch; +}; +type ZaloPollingLoopParams = ZaloProcessingContext & { + abortSignal: AbortSignal; + isStopped: () => boolean; + mediaMaxMb: number; +}; +type ZaloUpdateProcessingParams = ZaloProcessingContext & { + update: ZaloUpdate; + mediaMaxMb: number; +}; +type ZaloMessagePipelineParams = ZaloProcessingContext & { + message: ZaloMessage; + text?: string; + mediaPath?: string; + mediaType?: string; +}; +type ZaloImageMessageParams = ZaloProcessingContext & { + message: ZaloMessage; + mediaMaxMb: number; +}; function formatZaloError(error: unknown): string { if (error instanceof Error) { @@ -135,32 +164,21 @@ export async function handleZaloWebhookRequest( res: ServerResponse, ): Promise { return handleZaloWebhookRequestInternal(req, res, async ({ update, target }) => { - await processUpdate( + await processUpdate({ update, - target.token, - target.account, - target.config, - target.runtime, - target.core as ZaloCoreRuntime, - target.mediaMaxMb, - target.statusSink, - target.fetcher, - ); + token: target.token, + account: target.account, + config: target.config, + runtime: target.runtime, + core: target.core as ZaloCoreRuntime, + mediaMaxMb: target.mediaMaxMb, + statusSink: target.statusSink, + fetcher: target.fetcher, + }); }); } -function startPollingLoop(params: { - token: string; - account: ResolvedZaloAccount; - config: OpenClawConfig; - runtime: ZaloRuntimeEnv; - core: ZaloCoreRuntime; - abortSignal: AbortSignal; - isStopped: () => boolean; - mediaMaxMb: number; - statusSink?: (patch: { lastInboundAt?: number; lastOutboundAt?: number }) => void; - fetcher?: ZaloFetch; -}) { +function startPollingLoop(params: ZaloPollingLoopParams) { const { token, account, @@ -174,6 +192,16 @@ function 
startPollingLoop(params: { fetcher, } = params; const pollTimeout = 30; + const processingContext = { + token, + account, + config, + runtime, + core, + mediaMaxMb, + statusSink, + fetcher, + }; runtime.log?.(`[${account.accountId}] Zalo polling loop started timeout=${String(pollTimeout)}s`); @@ -186,17 +214,10 @@ function startPollingLoop(params: { const response = await getUpdates(token, { timeout: pollTimeout }, fetcher); if (response.ok && response.result) { statusSink?.({ lastInboundAt: Date.now() }); - await processUpdate( - response.result, - token, - account, - config, - runtime, - core, - mediaMaxMb, - statusSink, - fetcher, - ); + await processUpdate({ + update: response.result, + ...processingContext, + }); } } catch (err) { if (err instanceof ZaloApiError && err.isPollingTimeout) { @@ -215,38 +236,27 @@ function startPollingLoop(params: { void poll(); } -async function processUpdate( - update: ZaloUpdate, - token: string, - account: ResolvedZaloAccount, - config: OpenClawConfig, - runtime: ZaloRuntimeEnv, - core: ZaloCoreRuntime, - mediaMaxMb: number, - statusSink?: (patch: { lastInboundAt?: number; lastOutboundAt?: number }) => void, - fetcher?: ZaloFetch, -): Promise { +async function processUpdate(params: ZaloUpdateProcessingParams): Promise { + const { update, token, account, config, runtime, core, mediaMaxMb, statusSink, fetcher } = params; const { event_name, message } = update; + const sharedContext = { token, account, config, runtime, core, statusSink, fetcher }; if (!message) { return; } switch (event_name) { case "message.text.received": - await handleTextMessage(message, token, account, config, runtime, core, statusSink, fetcher); + await handleTextMessage({ + message, + ...sharedContext, + }); break; case "message.image.received": - await handleImageMessage( + await handleImageMessage({ message, - token, - account, - config, - runtime, - core, + ...sharedContext, mediaMaxMb, - statusSink, - fetcher, - ); + }); break; case 
"message.sticker.received": logVerbose(core, runtime, `[${account.accountId}] Received sticker from ${message.from.id}`); @@ -262,46 +272,24 @@ async function processUpdate( } async function handleTextMessage( - message: ZaloMessage, - token: string, - account: ResolvedZaloAccount, - config: OpenClawConfig, - runtime: ZaloRuntimeEnv, - core: ZaloCoreRuntime, - statusSink?: (patch: { lastInboundAt?: number; lastOutboundAt?: number }) => void, - fetcher?: ZaloFetch, + params: ZaloProcessingContext & { message: ZaloMessage }, ): Promise { + const { message } = params; const { text } = message; if (!text?.trim()) { return; } await processMessageWithPipeline({ - message, - token, - account, - config, - runtime, - core, + ...params, text, mediaPath: undefined, mediaType: undefined, - statusSink, - fetcher, }); } -async function handleImageMessage( - message: ZaloMessage, - token: string, - account: ResolvedZaloAccount, - config: OpenClawConfig, - runtime: ZaloRuntimeEnv, - core: ZaloCoreRuntime, - mediaMaxMb: number, - statusSink?: (patch: { lastInboundAt?: number; lastOutboundAt?: number }) => void, - fetcher?: ZaloFetch, -): Promise { +async function handleImageMessage(params: ZaloImageMessageParams): Promise { + const { message, mediaMaxMb, account, core, runtime } = params; const { photo, caption } = message; let mediaPath: string | undefined; @@ -325,33 +313,14 @@ async function handleImageMessage( } await processMessageWithPipeline({ - message, - token, - account, - config, - runtime, - core, + ...params, text: caption, mediaPath, mediaType, - statusSink, - fetcher, }); } -async function processMessageWithPipeline(params: { - message: ZaloMessage; - token: string; - account: ResolvedZaloAccount; - config: OpenClawConfig; - runtime: ZaloRuntimeEnv; - core: ZaloCoreRuntime; - text?: string; - mediaPath?: string; - mediaType?: string; - statusSink?: (patch: { lastInboundAt?: number; lastOutboundAt?: number }) => void; - fetcher?: ZaloFetch; -}): Promise { +async 
function processMessageWithPipeline(params: ZaloMessagePipelineParams): Promise { const { message, token, @@ -609,7 +578,7 @@ async function deliverZaloReply(params: { core: ZaloCoreRuntime; config: OpenClawConfig; accountId?: string; - statusSink?: (patch: { lastInboundAt?: number; lastOutboundAt?: number }) => void; + statusSink?: ZaloStatusSink; fetcher?: ZaloFetch; tableMode?: MarkdownTableMode; }): Promise { diff --git a/extensions/zalo/src/monitor.webhook.test.ts b/extensions/zalo/src/monitor.webhook.test.ts index 297d8249d3a..57b5f43202e 100644 --- a/extensions/zalo/src/monitor.webhook.test.ts +++ b/extensions/zalo/src/monitor.webhook.test.ts @@ -283,6 +283,7 @@ describe("handleZaloWebhookRequest", () => { try { await withServer(webhookRequestHandler, async (baseUrl) => { + let saw429 = false; for (let i = 0; i < 200; i += 1) { const response = await fetch(`${baseUrl}/hook-query-status?nonce=${i}`, { method: "POST", @@ -292,10 +293,15 @@ describe("handleZaloWebhookRequest", () => { }, body: "{}", }); - expect(response.status).toBe(401); + expect([401, 429]).toContain(response.status); + if (response.status === 429) { + saw429 = true; + break; + } } - expect(getZaloWebhookStatusCounterSizeForTest()).toBe(1); + expect(saw429).toBe(true); + expect(getZaloWebhookStatusCounterSizeForTest()).toBe(2); }); } finally { unregister(); @@ -322,6 +328,91 @@ describe("handleZaloWebhookRequest", () => { } }); + it("rate limits unauthorized secret guesses before authentication succeeds", async () => { + const unregister = registerTarget({ path: "/hook-preauth-rate" }); + + try { + await withServer(webhookRequestHandler, async (baseUrl) => { + const saw429 = await postUntilRateLimited({ + baseUrl, + path: "/hook-preauth-rate", + secret: "invalid-token", // pragma: allowlist secret + withNonceQuery: true, + }); + + expect(saw429).toBe(true); + expect(getZaloWebhookRateLimitStateSizeForTest()).toBe(1); + }); + } finally { + unregister(); + } + }); + + it("does not let 
unauthorized floods rate-limit authenticated traffic from a different trusted forwarded client IP", async () => { + const unregister = registerTarget({ + path: "/hook-preauth-split", + config: { + gateway: { + trustedProxies: ["127.0.0.1"], + }, + } as OpenClawConfig, + }); + + try { + await withServer(webhookRequestHandler, async (baseUrl) => { + for (let i = 0; i < 130; i += 1) { + const response = await fetch(`${baseUrl}/hook-preauth-split?nonce=${i}`, { + method: "POST", + headers: { + "x-bot-api-secret-token": "invalid-token", // pragma: allowlist secret + "content-type": "application/json", + "x-forwarded-for": "203.0.113.10", + }, + body: "{}", + }); + if (response.status === 429) { + break; + } + } + + const validResponse = await fetch(`${baseUrl}/hook-preauth-split`, { + method: "POST", + headers: { + "x-bot-api-secret-token": "secret", + "content-type": "application/json", + "x-forwarded-for": "198.51.100.20", + }, + body: JSON.stringify({ event_name: "message.unsupported.received" }), + }); + + expect(validResponse.status).toBe(200); + }); + } finally { + unregister(); + } + }); + + it("still returns 401 before 415 when both secret and content-type are invalid", async () => { + const unregister = registerTarget({ path: "/hook-auth-before-type" }); + + try { + await withServer(webhookRequestHandler, async (baseUrl) => { + const response = await fetch(`${baseUrl}/hook-auth-before-type`, { + method: "POST", + headers: { + "x-bot-api-secret-token": "invalid-token", // pragma: allowlist secret + "content-type": "text/plain", + }, + body: "not-json", + }); + + expect(response.status).toBe(401); + }); + } finally { + unregister(); + } + }); + it("scopes DM pairing store reads and writes to accountId", async () => { const { core, readAllowFromStore, upsertPairingRequest } = createPairingAuthCore({ pairingCreated: false, diff --git a/extensions/zalo/src/monitor.webhook.ts b/extensions/zalo/src/monitor.webhook.ts index 8fad827fddc..ef10d3a9a0e 100644 --- 
a/extensions/zalo/src/monitor.webhook.ts +++ b/extensions/zalo/src/monitor.webhook.ts @@ -16,6 +16,7 @@ import { WEBHOOK_ANOMALY_COUNTER_DEFAULTS, WEBHOOK_RATE_LIMIT_DEFAULTS, } from "openclaw/plugin-sdk/zalo"; +import { resolveClientIp } from "../../../src/gateway/net.js"; import type { ResolvedZaloAccount } from "./accounts.js"; import type { ZaloFetch, ZaloUpdate } from "./api.js"; import type { ZaloRuntimeEnv } from "./monitor.js"; @@ -109,6 +110,10 @@ function recordWebhookStatus( }); } +function headerValue(value: string | string[] | undefined): string | undefined { + return Array.isArray(value) ? value[0] : value; +} + export function registerZaloWebhookTarget( target: ZaloWebhookTarget, opts?: { @@ -140,6 +145,33 @@ export async function handleZaloWebhookRequest( targetsByPath: webhookTargets, allowMethods: ["POST"], handle: async ({ targets, path }) => { + const trustedProxies = targets[0]?.config.gateway?.trustedProxies; + const allowRealIpFallback = targets[0]?.config.gateway?.allowRealIpFallback === true; + const clientIp = + resolveClientIp({ + remoteAddr: req.socket.remoteAddress, + forwardedFor: headerValue(req.headers["x-forwarded-for"]), + realIp: headerValue(req.headers["x-real-ip"]), + trustedProxies, + allowRealIpFallback, + }) ?? + req.socket.remoteAddress ?? + "unknown"; + const rateLimitKey = `${path}:${clientIp}`; + const nowMs = Date.now(); + if ( + !applyBasicWebhookRequestGuards({ + req, + res, + rateLimiter: webhookRateLimiter, + rateLimitKey, + nowMs, + }) + ) { + recordWebhookStatus(targets[0]?.runtime, path, res.statusCode); + return true; + } + const headerToken = String(req.headers["x-bot-api-secret-token"] ?? ""); const target = resolveWebhookTargetWithAuthOrRejectSync({ targets, @@ -150,16 +182,12 @@ export async function handleZaloWebhookRequest( recordWebhookStatus(targets[0]?.runtime, path, res.statusCode); return true; } - const rateLimitKey = `${path}:${req.socket.remoteAddress ?? 
"unknown"}`; - const nowMs = Date.now(); - + // Preserve the historical 401-before-415 ordering for invalid secrets while still + // consuming rate-limit budget on unauthenticated guesses. if ( !applyBasicWebhookRequestGuards({ req, res, - rateLimiter: webhookRateLimiter, - rateLimitKey, - nowMs, requireJsonContentType: true, }) ) { diff --git a/extensions/zalo/src/onboarding.ts b/extensions/zalo/src/onboarding.ts index e23765f4f7d..4c6f7cbe4de 100644 --- a/extensions/zalo/src/onboarding.ts +++ b/extensions/zalo/src/onboarding.ts @@ -12,6 +12,7 @@ import { mergeAllowFromEntries, normalizeAccountId, promptSingleChannelSecretInput, + runSingleChannelSecretStep, resolveAccountIdForConfigure, setTopLevelChannelDmPolicyWithAllowFrom, } from "openclaw/plugin-sdk/zalo"; @@ -255,80 +256,66 @@ export const zaloOnboardingAdapter: ChannelOnboardingAdapter = { const hasConfigToken = Boolean( hasConfiguredSecretInput(resolvedAccount.config.botToken) || resolvedAccount.config.tokenFile, ); - const tokenPromptState = buildSingleChannelSecretPromptState({ - accountConfigured, - hasConfigToken, - allowEnv, - envValue: process.env.ZALO_BOT_TOKEN, - }); - - let token: SecretInput | null = null; - if (!accountConfigured) { - await noteZaloTokenHelp(prompter); - } - const tokenResult = await promptSingleChannelSecretInput({ + const tokenStep = await runSingleChannelSecretStep({ cfg: next, prompter, providerHint: "zalo", credentialLabel: "bot token", - accountConfigured: tokenPromptState.accountConfigured, - canUseEnv: tokenPromptState.canUseEnv, - hasConfigToken: tokenPromptState.hasConfigToken, + accountConfigured, + hasConfigToken, + allowEnv, + envValue: process.env.ZALO_BOT_TOKEN, envPrompt: "ZALO_BOT_TOKEN detected. Use env var?", keepPrompt: "Zalo token already configured. 
Keep it?", inputPrompt: "Enter Zalo bot token", preferredEnvVar: "ZALO_BOT_TOKEN", - }); - if (tokenResult.action === "set") { - token = tokenResult.value; - } - if (tokenResult.action === "use-env" && zaloAccountId === DEFAULT_ACCOUNT_ID) { - next = { - ...next, - channels: { - ...next.channels, - zalo: { - ...next.channels?.zalo, - enabled: true, - }, - }, - } as OpenClawConfig; - } - - if (token) { - if (zaloAccountId === DEFAULT_ACCOUNT_ID) { - next = { - ...next, - channels: { - ...next.channels, - zalo: { - ...next.channels?.zalo, - enabled: true, - botToken: token, - }, - }, - } as OpenClawConfig; - } else { - next = { - ...next, - channels: { - ...next.channels, - zalo: { - ...next.channels?.zalo, - enabled: true, - accounts: { - ...next.channels?.zalo?.accounts, - [zaloAccountId]: { - ...next.channels?.zalo?.accounts?.[zaloAccountId], + onMissingConfigured: async () => await noteZaloTokenHelp(prompter), + applyUseEnv: async (cfg) => + zaloAccountId === DEFAULT_ACCOUNT_ID + ? ({ + ...cfg, + channels: { + ...cfg.channels, + zalo: { + ...cfg.channels?.zalo, enabled: true, - botToken: token, }, }, - }, - }, - } as OpenClawConfig; - } - } + } as OpenClawConfig) + : cfg, + applySet: async (cfg, value) => + zaloAccountId === DEFAULT_ACCOUNT_ID + ? 
({ + ...cfg, + channels: { + ...cfg.channels, + zalo: { + ...cfg.channels?.zalo, + enabled: true, + botToken: value, + }, + }, + } as OpenClawConfig) + : ({ + ...cfg, + channels: { + ...cfg.channels, + zalo: { + ...cfg.channels?.zalo, + enabled: true, + accounts: { + ...cfg.channels?.zalo?.accounts, + [zaloAccountId]: { + ...cfg.channels?.zalo?.accounts?.[zaloAccountId], + enabled: true, + botToken: value, + }, + }, + }, + }, + } as OpenClawConfig), + }); + next = tokenStep.cfg; const wantsWebhook = await prompter.confirm({ message: "Use webhook mode for Zalo?", diff --git a/extensions/zalo/src/send.ts b/extensions/zalo/src/send.ts index 44f1549067a..e38427fcb14 100644 --- a/extensions/zalo/src/send.ts +++ b/extensions/zalo/src/send.ts @@ -21,6 +21,28 @@ export type ZaloSendResult = { error?: string; }; +function toZaloSendResult(response: { + ok?: boolean; + result?: { message_id?: string }; +}): ZaloSendResult { + if (response.ok && response.result) { + return { ok: true, messageId: response.result.message_id }; + } + return { ok: false, error: "Failed to send message" }; +} + +async function runZaloSend( + failureMessage: string, + send: () => Promise<{ ok?: boolean; result?: { message_id?: string } }>, +): Promise { + try { + const result = toZaloSendResult(await send()); + return result.ok ? result : { ok: false, error: failureMessage }; + } catch (err) { + return { ok: false, error: err instanceof Error ? err.message : String(err) }; + } +} + function resolveSendContext(options: ZaloSendOptions): { token: string; fetcher?: ZaloFetch; @@ -55,15 +77,30 @@ function resolveValidatedSendContext( return { ok: true, chatId: trimmedChatId, token, fetcher }; } +function resolveSendContextOrFailure( + chatId: string, + options: ZaloSendOptions, +): + | { context: { chatId: string; token: string; fetcher?: ZaloFetch } } + | { failure: ZaloSendResult } { + const context = resolveValidatedSendContext(chatId, options); + return context.ok + ? 
{ context } + : { + failure: { ok: false, error: context.error }, + }; +} + export async function sendMessageZalo( chatId: string, text: string, options: ZaloSendOptions = {}, ): Promise { - const context = resolveValidatedSendContext(chatId, options); - if (!context.ok) { - return { ok: false, error: context.error }; + const resolved = resolveSendContextOrFailure(chatId, options); + if ("failure" in resolved) { + return resolved.failure; } + const { context } = resolved; if (options.mediaUrl) { return sendPhotoZalo(context.chatId, options.mediaUrl, { @@ -73,24 +110,16 @@ export async function sendMessageZalo( }); } - try { - const response = await sendMessage( + return await runZaloSend("Failed to send message", () => + sendMessage( context.token, { chat_id: context.chatId, text: text.slice(0, 2000), }, context.fetcher, - ); - - if (response.ok && response.result) { - return { ok: true, messageId: response.result.message_id }; - } - - return { ok: false, error: "Failed to send message" }; - } catch (err) { - return { ok: false, error: err instanceof Error ? 
err.message : String(err) }; - } + ), + ); } export async function sendPhotoZalo( @@ -98,17 +127,18 @@ export async function sendPhotoZalo( photoUrl: string, options: ZaloSendOptions = {}, ): Promise { - const context = resolveValidatedSendContext(chatId, options); - if (!context.ok) { - return { ok: false, error: context.error }; + const resolved = resolveSendContextOrFailure(chatId, options); + if ("failure" in resolved) { + return resolved.failure; } + const { context } = resolved; if (!photoUrl?.trim()) { return { ok: false, error: "No photo URL provided" }; } - try { - const response = await sendPhoto( + return await runZaloSend("Failed to send photo", () => + sendPhoto( context.token, { chat_id: context.chatId, @@ -116,14 +146,6 @@ export async function sendPhotoZalo( caption: options.caption?.slice(0, 2000), }, context.fetcher, - ); - - if (response.ok && response.result) { - return { ok: true, messageId: response.result.message_id }; - } - - return { ok: false, error: "Failed to send photo" }; - } catch (err) { - return { ok: false, error: err instanceof Error ? 
err.message : String(err) }; - } + ), + ); } diff --git a/extensions/zalo/src/status-issues.test.ts b/extensions/zalo/src/status-issues.test.ts new file mode 100644 index 00000000000..581a0dfe916 --- /dev/null +++ b/extensions/zalo/src/status-issues.test.ts @@ -0,0 +1,29 @@ +import { describe, expect, it } from "vitest"; +import { expectOpenDmPolicyConfigIssue } from "../../test-utils/status-issues.js"; +import { collectZaloStatusIssues } from "./status-issues.js"; + +describe("collectZaloStatusIssues", () => { + it("warns when dmPolicy is open", () => { + expectOpenDmPolicyConfigIssue({ + collectIssues: collectZaloStatusIssues, + account: { + accountId: "default", + enabled: true, + configured: true, + dmPolicy: "open", + }, + }); + }); + + it("skips unconfigured accounts", () => { + const issues = collectZaloStatusIssues([ + { + accountId: "default", + enabled: true, + configured: false, + dmPolicy: "open", + }, + ]); + expect(issues).toHaveLength(0); + }); +}); diff --git a/extensions/zalo/src/status-issues.ts b/extensions/zalo/src/status-issues.ts index cf6b3a3a384..c19992a64ee 100644 --- a/extensions/zalo/src/status-issues.ts +++ b/extensions/zalo/src/status-issues.ts @@ -1,38 +1,16 @@ import type { ChannelAccountSnapshot, ChannelStatusIssue } from "openclaw/plugin-sdk/zalo"; +import { coerceStatusIssueAccountId, readStatusIssueFields } from "../../shared/status-issues.js"; -type ZaloAccountStatus = { - accountId?: unknown; - enabled?: unknown; - configured?: unknown; - dmPolicy?: unknown; -}; - -const isRecord = (value: unknown): value is Record => - Boolean(value && typeof value === "object"); - -const asString = (value: unknown): string | undefined => - typeof value === "string" ? value : typeof value === "number" ? 
String(value) : undefined; - -function readZaloAccountStatus(value: ChannelAccountSnapshot): ZaloAccountStatus | null { - if (!isRecord(value)) { - return null; - } - return { - accountId: value.accountId, - enabled: value.enabled, - configured: value.configured, - dmPolicy: value.dmPolicy, - }; -} +const ZALO_STATUS_FIELDS = ["accountId", "enabled", "configured", "dmPolicy"] as const; export function collectZaloStatusIssues(accounts: ChannelAccountSnapshot[]): ChannelStatusIssue[] { const issues: ChannelStatusIssue[] = []; for (const entry of accounts) { - const account = readZaloAccountStatus(entry); + const account = readStatusIssueFields(entry, ZALO_STATUS_FIELDS); if (!account) { continue; } - const accountId = asString(account.accountId) ?? "default"; + const accountId = coerceStatusIssueAccountId(account.accountId) ?? "default"; const enabled = account.enabled !== false; const configured = account.configured === true; if (!enabled || !configured) { diff --git a/extensions/zalo/src/token.test.ts b/extensions/zalo/src/token.test.ts index d6b02f30483..ff3e84ce293 100644 --- a/extensions/zalo/src/token.test.ts +++ b/extensions/zalo/src/token.test.ts @@ -1,3 +1,6 @@ +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; import { describe, expect, it } from "vitest"; import { resolveZaloToken } from "./token.js"; import type { ZaloConfig } from "./types.js"; @@ -55,4 +58,20 @@ describe("resolveZaloToken", () => { expect(res.token).toBe("work-token"); expect(res.source).toBe("config"); }); + + it.runIf(process.platform !== "win32")("rejects symlinked token files", () => { + const dir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-zalo-token-")); + const tokenFile = path.join(dir, "token.txt"); + const tokenLink = path.join(dir, "token-link.txt"); + fs.writeFileSync(tokenFile, "file-token\n", "utf8"); + fs.symlinkSync(tokenFile, tokenLink); + + const cfg = { + tokenFile: tokenLink, + } as ZaloConfig; + const res = 
resolveZaloToken(cfg); + expect(res.token).toBe(""); + expect(res.source).toBe("none"); + fs.rmSync(dir, { recursive: true, force: true }); + }); }); diff --git a/extensions/zalo/src/token.ts b/extensions/zalo/src/token.ts index 00ed1d720f7..10a4aca6cd1 100644 --- a/extensions/zalo/src/token.ts +++ b/extensions/zalo/src/token.ts @@ -1,5 +1,5 @@ -import { readFileSync } from "node:fs"; import { DEFAULT_ACCOUNT_ID, normalizeAccountId } from "openclaw/plugin-sdk/account-id"; +import { tryReadSecretFileSync } from "openclaw/plugin-sdk/core"; import type { BaseTokenResolution } from "openclaw/plugin-sdk/zalo"; import { normalizeResolvedSecretInputString, normalizeSecretInputString } from "./secret-input.js"; import type { ZaloConfig } from "./types.js"; @@ -9,16 +9,7 @@ export type ZaloTokenResolution = BaseTokenResolution & { }; function readTokenFromFile(tokenFile: string | undefined): string { - const trimmedPath = tokenFile?.trim(); - if (!trimmedPath) { - return ""; - } - try { - return readFileSync(trimmedPath, "utf8").trim(); - } catch { - // ignore read failures - return ""; - } + return tryReadSecretFileSync(tokenFile, "Zalo token file", { rejectSymlink: true }) ?? ""; } export function resolveZaloToken( diff --git a/extensions/zalouser/CHANGELOG.md b/extensions/zalouser/CHANGELOG.md index 10c22ce4029..9731672126c 100644 --- a/extensions/zalouser/CHANGELOG.md +++ b/extensions/zalouser/CHANGELOG.md @@ -1,5 +1,35 @@ # Changelog +## 2026.3.14 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.13 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.12 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.11 + +### Changes + +- Version alignment with core OpenClaw release numbers. + +## 2026.3.10 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.3.9 ### Changes diff --git a/extensions/zalouser/package.json b/extensions/zalouser/package.json index 79bf5723d48..e7c12c9b4b2 100644 --- a/extensions/zalouser/package.json +++ b/extensions/zalouser/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/zalouser", - "version": "2026.3.9", + "version": "2026.3.14", "description": "OpenClaw Zalo Personal Account plugin via native zca-js integration", "type": "module", "dependencies": { diff --git a/extensions/zalouser/src/accounts.test-mocks.ts b/extensions/zalouser/src/accounts.test-mocks.ts new file mode 100644 index 00000000000..0206095d0fc --- /dev/null +++ b/extensions/zalouser/src/accounts.test-mocks.ts @@ -0,0 +1,10 @@ +import { vi } from "vitest"; +import { createDefaultResolvedZalouserAccount } from "./test-helpers.js"; + +vi.mock("./accounts.js", async (importOriginal) => { + const actual = (await importOriginal()) as Record; + return { + ...actual, + resolveZalouserAccountSync: () => createDefaultResolvedZalouserAccount(), + }; +}); diff --git a/extensions/zalouser/src/accounts.ts b/extensions/zalouser/src/accounts.ts index 5ebec2d2c93..26a02ed47a0 100644 --- a/extensions/zalouser/src/accounts.ts +++ b/extensions/zalouser/src/accounts.ts @@ -43,17 +43,24 @@ function resolveProfile(config: ZalouserAccountConfig, accountId: string): strin return "default"; } -export async function resolveZalouserAccount(params: { - cfg: OpenClawConfig; - accountId?: string | null; -}): Promise { +function resolveZalouserAccountBase(params: { cfg: OpenClawConfig; accountId?: string | null }) { const accountId = normalizeAccountId(params.accountId); const baseEnabled = (params.cfg.channels?.zalouser as ZalouserConfig | undefined)?.enabled !== false; const merged = mergeZalouserAccountConfig(params.cfg, accountId); - const accountEnabled = merged.enabled !== false; - const enabled = baseEnabled && accountEnabled; - const profile = resolveProfile(merged, accountId); + return { + accountId, + enabled: baseEnabled && 
merged.enabled !== false, + merged, + profile: resolveProfile(merged, accountId), + }; +} + +export async function resolveZalouserAccount(params: { + cfg: OpenClawConfig; + accountId?: string | null; +}): Promise { + const { accountId, enabled, merged, profile } = resolveZalouserAccountBase(params); const authenticated = await checkZaloAuthenticated(profile); return { @@ -70,13 +77,7 @@ export function resolveZalouserAccountSync(params: { cfg: OpenClawConfig; accountId?: string | null; }): ResolvedZalouserAccount { - const accountId = normalizeAccountId(params.accountId); - const baseEnabled = - (params.cfg.channels?.zalouser as ZalouserConfig | undefined)?.enabled !== false; - const merged = mergeZalouserAccountConfig(params.cfg, accountId); - const accountEnabled = merged.enabled !== false; - const enabled = baseEnabled && accountEnabled; - const profile = resolveProfile(merged, accountId); + const { accountId, enabled, merged, profile } = resolveZalouserAccountBase(params); return { accountId, diff --git a/extensions/zalouser/src/channel.directory.test.ts b/extensions/zalouser/src/channel.directory.test.ts index f8c13b208e4..1736118bc0e 100644 --- a/extensions/zalouser/src/channel.directory.test.ts +++ b/extensions/zalouser/src/channel.directory.test.ts @@ -1,5 +1,6 @@ -import type { RuntimeEnv } from "openclaw/plugin-sdk/zalouser"; import { describe, expect, it, vi } from "vitest"; +import "./accounts.test-mocks.js"; +import { createZalouserRuntimeEnv } from "./test-helpers.js"; const listZaloGroupMembersMock = vi.hoisted(() => vi.fn(async () => [])); @@ -11,30 +12,9 @@ vi.mock("./zalo-js.js", async (importOriginal) => { }; }); -vi.mock("./accounts.js", async (importOriginal) => { - const actual = (await importOriginal()) as Record; - return { - ...actual, - resolveZalouserAccountSync: () => ({ - accountId: "default", - profile: "default", - name: "test", - enabled: true, - authenticated: true, - config: {}, - }), - }; -}); - import { zalouserPlugin } from 
"./channel.js"; -const runtimeStub: RuntimeEnv = { - log: vi.fn(), - error: vi.fn(), - exit: ((code: number): never => { - throw new Error(`exit ${code}`); - }) as RuntimeEnv["exit"], -}; +const runtimeStub = createZalouserRuntimeEnv(); describe("zalouser directory group members", () => { it("accepts prefixed group ids from directory groups list output", async () => { diff --git a/extensions/zalouser/src/channel.sendpayload.test.ts b/extensions/zalouser/src/channel.sendpayload.test.ts index 534f9c39b95..27a8adf2c0d 100644 --- a/extensions/zalouser/src/channel.sendpayload.test.ts +++ b/extensions/zalouser/src/channel.sendpayload.test.ts @@ -1,26 +1,18 @@ import type { ReplyPayload } from "openclaw/plugin-sdk/zalouser"; import { beforeEach, describe, expect, it, vi } from "vitest"; +import "./accounts.test-mocks.js"; +import { + installSendPayloadContractSuite, + primeSendMock, +} from "../../../src/test-utils/send-payload-contract.js"; import { zalouserPlugin } from "./channel.js"; +import { setZalouserRuntime } from "./runtime.js"; vi.mock("./send.js", () => ({ sendMessageZalouser: vi.fn().mockResolvedValue({ ok: true, messageId: "zlu-1" }), sendReactionZalouser: vi.fn().mockResolvedValue({ ok: true }), })); -vi.mock("./accounts.js", async (importOriginal) => { - const actual = (await importOriginal()) as Record; - return { - ...actual, - resolveZalouserAccountSync: () => ({ - accountId: "default", - profile: "default", - name: "test", - enabled: true, - config: {}, - }), - }; -}); - function baseCtx(payload: ReplyPayload) { return { cfg: {}, @@ -34,21 +26,20 @@ describe("zalouserPlugin outbound sendPayload", () => { let mockedSend: ReturnType>; beforeEach(async () => { + setZalouserRuntime({ + channel: { + text: { + resolveChunkMode: vi.fn(() => "length"), + resolveTextChunkLimit: vi.fn(() => 1200), + }, + }, + } as never); const mod = await import("./send.js"); mockedSend = vi.mocked(mod.sendMessageZalouser); mockedSend.mockClear(); mockedSend.mockResolvedValue({ 
ok: true, messageId: "zlu-1" }); }); - it("text-only delegates to sendText", async () => { - mockedSend.mockResolvedValue({ ok: true, messageId: "zlu-t1" }); - - const result = await zalouserPlugin.outbound!.sendPayload!(baseCtx({ text: "hello" })); - - expect(mockedSend).toHaveBeenCalledWith("987654321", "hello", expect.any(Object)); - expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-t1" }); - }); - it("group target delegates with isGroup=true and stripped threadId", async () => { mockedSend.mockResolvedValue({ ok: true, messageId: "zlu-g1" }); @@ -60,26 +51,11 @@ describe("zalouserPlugin outbound sendPayload", () => { expect(mockedSend).toHaveBeenCalledWith( "1471383327500481391", "hello group", - expect.objectContaining({ isGroup: true }), + expect.objectContaining({ isGroup: true, textMode: "markdown" }), ); expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-g1" }); }); - it("single media delegates to sendMedia", async () => { - mockedSend.mockResolvedValue({ ok: true, messageId: "zlu-m1" }); - - const result = await zalouserPlugin.outbound!.sendPayload!( - baseCtx({ text: "cap", mediaUrl: "https://example.com/a.jpg" }), - ); - - expect(mockedSend).toHaveBeenCalledWith( - "987654321", - "cap", - expect.objectContaining({ mediaUrl: "https://example.com/a.jpg" }), - ); - expect(result).toMatchObject({ channel: "zalouser" }); - }); - it("treats bare numeric targets as direct chats for backward compatibility", async () => { mockedSend.mockResolvedValue({ ok: true, messageId: "zlu-d1" }); @@ -91,7 +67,7 @@ describe("zalouserPlugin outbound sendPayload", () => { expect(mockedSend).toHaveBeenCalledWith( "987654321", "hello", - expect.objectContaining({ isGroup: false }), + expect.objectContaining({ isGroup: false, textMode: "markdown" }), ); expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-d1" }); }); @@ -107,60 +83,45 @@ describe("zalouserPlugin outbound sendPayload", () => { 
expect(mockedSend).toHaveBeenCalledWith( "g-1471383327500481391", "hello native group", - expect.objectContaining({ isGroup: true }), + expect.objectContaining({ isGroup: true, textMode: "markdown" }), ); expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-g-native" }); }); - it("multi-media iterates URLs with caption on first", async () => { - mockedSend - .mockResolvedValueOnce({ ok: true, messageId: "zlu-1" }) - .mockResolvedValueOnce({ ok: true, messageId: "zlu-2" }); + it("passes long markdown through once so formatting happens before chunking", async () => { + const text = `**${"a".repeat(2501)}**`; + mockedSend.mockResolvedValue({ ok: true, messageId: "zlu-code" }); - const result = await zalouserPlugin.outbound!.sendPayload!( - baseCtx({ - text: "caption", - mediaUrls: ["https://example.com/1.jpg", "https://example.com/2.jpg"], + const result = await zalouserPlugin.outbound!.sendPayload!({ + ...baseCtx({ text }), + to: "987654321", + }); + + expect(mockedSend).toHaveBeenCalledTimes(1); + expect(mockedSend).toHaveBeenCalledWith( + "987654321", + text, + expect.objectContaining({ + isGroup: false, + textMode: "markdown", + textChunkMode: "length", + textChunkLimit: 1200, }), ); - - expect(mockedSend).toHaveBeenCalledTimes(2); - expect(mockedSend).toHaveBeenNthCalledWith( - 1, - "987654321", - "caption", - expect.objectContaining({ mediaUrl: "https://example.com/1.jpg" }), - ); - expect(mockedSend).toHaveBeenNthCalledWith( - 2, - "987654321", - "", - expect.objectContaining({ mediaUrl: "https://example.com/2.jpg" }), - ); - expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-2" }); + expect(result).toMatchObject({ channel: "zalouser", messageId: "zlu-code" }); }); - it("empty payload returns no-op", async () => { - const result = await zalouserPlugin.outbound!.sendPayload!(baseCtx({})); - - expect(mockedSend).not.toHaveBeenCalled(); - expect(result).toEqual({ channel: "zalouser", messageId: "" }); - }); - - it("chunking splits 
long text", async () => { - mockedSend - .mockResolvedValueOnce({ ok: true, messageId: "zlu-c1" }) - .mockResolvedValueOnce({ ok: true, messageId: "zlu-c2" }); - - const longText = "a".repeat(3000); - const result = await zalouserPlugin.outbound!.sendPayload!(baseCtx({ text: longText })); - - // textChunkLimit is 2000 with chunkTextForOutbound, so it should split - expect(mockedSend.mock.calls.length).toBeGreaterThanOrEqual(2); - for (const call of mockedSend.mock.calls) { - expect((call[1] as string).length).toBeLessThanOrEqual(2000); - } - expect(result).toMatchObject({ channel: "zalouser" }); + installSendPayloadContractSuite({ + channel: "zalouser", + chunking: { mode: "passthrough", longTextLength: 3000 }, + createHarness: ({ payload, sendResults }) => { + primeSendMock(mockedSend, { ok: true, messageId: "zlu-1" }, sendResults); + return { + run: async () => await zalouserPlugin.outbound!.sendPayload!(baseCtx(payload)), + sendMock: mockedSend, + to: "987654321", + }; + }, }); }); diff --git a/extensions/zalouser/src/channel.test.ts b/extensions/zalouser/src/channel.test.ts index 231bcc8b2d3..321df502b38 100644 --- a/extensions/zalouser/src/channel.test.ts +++ b/extensions/zalouser/src/channel.test.ts @@ -1,30 +1,92 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; import { zalouserPlugin } from "./channel.js"; -import { sendReactionZalouser } from "./send.js"; +import { setZalouserRuntime } from "./runtime.js"; +import { sendMessageZalouser, sendReactionZalouser } from "./send.js"; vi.mock("./send.js", async (importOriginal) => { const actual = (await importOriginal()) as Record; return { ...actual, + sendMessageZalouser: vi.fn(async () => ({ ok: true, messageId: "mid-1" })), sendReactionZalouser: vi.fn(async () => ({ ok: true })), }; }); +const mockSendMessage = vi.mocked(sendMessageZalouser); const mockSendReaction = vi.mocked(sendReactionZalouser); -describe("zalouser outbound chunker", () => { - it("chunks without empty strings and respects 
limit", () => { - const chunker = zalouserPlugin.outbound?.chunker; - expect(chunker).toBeTypeOf("function"); - if (!chunker) { +function getResolveToolPolicy() { + const resolveToolPolicy = zalouserPlugin.groups?.resolveToolPolicy; + expect(resolveToolPolicy).toBeTypeOf("function"); + if (!resolveToolPolicy) { + throw new Error("resolveToolPolicy unavailable"); + } + return resolveToolPolicy; +} + +function resolveGroupToolPolicy( + groups: Record, + groupId: string, +) { + return getResolveToolPolicy()({ + cfg: { + channels: { + zalouser: { + groups, + }, + }, + }, + accountId: "default", + groupId, + groupChannel: groupId, + }); +} + +describe("zalouser outbound", () => { + beforeEach(() => { + mockSendMessage.mockClear(); + setZalouserRuntime({ + channel: { + text: { + resolveChunkMode: vi.fn(() => "newline"), + resolveTextChunkLimit: vi.fn(() => 10), + }, + }, + } as never); + }); + + it("passes markdown chunk settings through sendText", async () => { + const sendText = zalouserPlugin.outbound?.sendText; + expect(sendText).toBeTypeOf("function"); + if (!sendText) { return; } - const limit = 10; - const chunks = chunker("hello world\nthis is a test", limit); - expect(chunks.length).toBeGreaterThan(1); - expect(chunks.every((c) => c.length > 0)).toBe(true); - expect(chunks.every((c) => c.length <= limit)).toBe(true); + const result = await sendText({ + cfg: { channels: { zalouser: { enabled: true } } } as never, + to: "group:123456", + text: "hello world\nthis is a test", + accountId: "default", + } as never); + + expect(mockSendMessage).toHaveBeenCalledWith( + "123456", + "hello world\nthis is a test", + expect.objectContaining({ + profile: "default", + isGroup: true, + textMode: "markdown", + textChunkMode: "newline", + textChunkLimit: 10, + }), + ); + expect(result).toEqual( + expect.objectContaining({ + channel: "zalouser", + messageId: "mid-1", + ok: true, + }), + ); }); }); @@ -58,48 +120,12 @@ describe("zalouser channel policies", () => { }); it("resolves 
group tool policy by explicit group id", () => { - const resolveToolPolicy = zalouserPlugin.groups?.resolveToolPolicy; - expect(resolveToolPolicy).toBeTypeOf("function"); - if (!resolveToolPolicy) { - return; - } - const policy = resolveToolPolicy({ - cfg: { - channels: { - zalouser: { - groups: { - "123": { tools: { allow: ["search"] } }, - }, - }, - }, - }, - accountId: "default", - groupId: "123", - groupChannel: "123", - }); + const policy = resolveGroupToolPolicy({ "123": { tools: { allow: ["search"] } } }, "123"); expect(policy).toEqual({ allow: ["search"] }); }); it("falls back to wildcard group policy", () => { - const resolveToolPolicy = zalouserPlugin.groups?.resolveToolPolicy; - expect(resolveToolPolicy).toBeTypeOf("function"); - if (!resolveToolPolicy) { - return; - } - const policy = resolveToolPolicy({ - cfg: { - channels: { - zalouser: { - groups: { - "*": { tools: { deny: ["system.run"] } }, - }, - }, - }, - }, - accountId: "default", - groupId: "missing", - groupChannel: "missing", - }); + const policy = resolveGroupToolPolicy({ "*": { tools: { deny: ["system.run"] } } }, "missing"); expect(policy).toEqual({ deny: ["system.run"] }); }); diff --git a/extensions/zalouser/src/channel.ts b/extensions/zalouser/src/channel.ts index e01775d0dbb..81fce5e3ab9 100644 --- a/extensions/zalouser/src/channel.ts +++ b/extensions/zalouser/src/channel.ts @@ -1,5 +1,6 @@ import { buildAccountScopedDmSecurityPolicy, + createAccountStatusSink, mapAllowFromEntries, } from "openclaw/plugin-sdk/compat"; import type { @@ -19,15 +20,16 @@ import { buildBaseAccountStatusSnapshot, buildChannelConfigSchema, DEFAULT_ACCOUNT_ID, - chunkTextForOutbound, deleteAccountFromConfigSection, formatAllowFromLowercase, + isDangerousNameMatchingEnabled, isNumericTargetId, migrateBaseNameToDefaultAccount, normalizeAccountId, sendPayloadWithChunkedTextAndMedia, setAccountEnabledInConfigSection, } from "openclaw/plugin-sdk/zalouser"; +import { buildPassiveProbedChannelStatusSummary } from 
"../../shared/channel-status-summary.js"; import { listZalouserAccountIds, resolveDefaultZalouserAccountId, @@ -42,6 +44,7 @@ import { resolveZalouserReactionMessageIds } from "./message-sid.js"; import { zalouserOnboardingAdapter } from "./onboarding.js"; import { probeZalouser } from "./probe.js"; import { writeQrDataUrlToTempFile } from "./qr-temp-file.js"; +import { getZalouserRuntime } from "./runtime.js"; import { sendMessageZalouser, sendReactionZalouser } from "./send.js"; import { collectZalouserStatusIssues } from "./status-issues.js"; import { @@ -165,6 +168,16 @@ function resolveZalouserQrProfile(accountId?: string | null): string { return normalized; } +function resolveZalouserOutboundChunkMode(cfg: OpenClawConfig, accountId?: string) { + return getZalouserRuntime().channel.text.resolveChunkMode(cfg, "zalouser", accountId); +} + +function resolveZalouserOutboundTextChunkLimit(cfg: OpenClawConfig, accountId?: string) { + return getZalouserRuntime().channel.text.resolveTextChunkLimit(cfg, "zalouser", accountId, { + fallbackLimit: zalouserDock.outbound?.textChunkLimit ?? 
2000, + }); +} + function mapUser(params: { id: string; name?: string | null; @@ -205,6 +218,7 @@ function resolveZalouserGroupPolicyEntry(params: ChannelGroupContext) { groupId: params.groupId, groupChannel: params.groupChannel, includeWildcard: true, + allowNameMatching: isDangerousNameMatchingEnabled(account.config), }), ); } @@ -594,14 +608,11 @@ export const zalouserPlugin: ChannelPlugin = { }, outbound: { deliveryMode: "direct", - chunker: chunkTextForOutbound, - chunkerMode: "text", - textChunkLimit: 2000, + chunker: (text, limit) => getZalouserRuntime().channel.text.chunkMarkdownText(text, limit), + chunkerMode: "markdown", sendPayload: async (ctx) => await sendPayloadWithChunkedTextAndMedia({ ctx, - textChunkLimit: zalouserPlugin.outbound!.textChunkLimit, - chunker: zalouserPlugin.outbound!.chunker, sendText: (nextCtx) => zalouserPlugin.outbound!.sendText!(nextCtx), sendMedia: (nextCtx) => zalouserPlugin.outbound!.sendMedia!(nextCtx), emptyResult: { channel: "zalouser", messageId: "" }, @@ -612,6 +623,9 @@ export const zalouserPlugin: ChannelPlugin = { const result = await sendMessageZalouser(target.threadId, text, { profile: account.profile, isGroup: target.isGroup, + textMode: "markdown", + textChunkMode: resolveZalouserOutboundChunkMode(cfg, account.accountId), + textChunkLimit: resolveZalouserOutboundTextChunkLimit(cfg, account.accountId), }); return buildChannelSendResult("zalouser", result); }, @@ -623,6 +637,9 @@ export const zalouserPlugin: ChannelPlugin = { isGroup: target.isGroup, mediaUrl, mediaLocalRoots, + textMode: "markdown", + textChunkMode: resolveZalouserOutboundChunkMode(cfg, account.accountId), + textChunkLimit: resolveZalouserOutboundTextChunkLimit(cfg, account.accountId), }); return buildChannelSendResult("zalouser", result); }, @@ -636,15 +653,7 @@ export const zalouserPlugin: ChannelPlugin = { lastError: null, }, collectStatusIssues: collectZalouserStatusIssues, - buildChannelSummary: ({ snapshot }) => ({ - configured: 
snapshot.configured ?? false, - running: snapshot.running ?? false, - lastStartAt: snapshot.lastStartAt ?? null, - lastStopAt: snapshot.lastStopAt ?? null, - lastError: snapshot.lastError ?? null, - probe: snapshot.probe, - lastProbeAt: snapshot.lastProbeAt ?? null, - }), + buildChannelSummary: ({ snapshot }) => buildPassiveProbedChannelStatusSummary(snapshot), probeAccount: async ({ account, timeoutMs }) => probeZalouser(account.profile, timeoutMs), buildAccountSnapshot: async ({ account, runtime }) => { const configured = await checkZcaAuthenticated(account.profile); @@ -682,6 +691,10 @@ export const zalouserPlugin: ChannelPlugin = { } catch { // ignore probe errors } + const statusSink = createAccountStatusSink({ + accountId: ctx.accountId, + setStatus: ctx.setStatus, + }); ctx.log?.info(`[${account.accountId}] starting zalouser provider${userLabel}`); const { monitorZalouserProvider } = await import("./monitor.js"); return monitorZalouserProvider({ @@ -689,7 +702,7 @@ export const zalouserPlugin: ChannelPlugin = { config: ctx.cfg, runtime: ctx.runtime, abortSignal: ctx.abortSignal, - statusSink: (patch) => ctx.setStatus({ accountId: ctx.accountId, ...patch }), + statusSink, }); }, loginWithQrStart: async (params) => { diff --git a/extensions/zalouser/src/config-schema.ts b/extensions/zalouser/src/config-schema.ts index e5cb64d012e..1ff115876c4 100644 --- a/extensions/zalouser/src/config-schema.ts +++ b/extensions/zalouser/src/config-schema.ts @@ -1,6 +1,8 @@ import { - AllowFromEntrySchema, + AllowFromListSchema, buildCatchallMultiAccountChannelSchema, + DmPolicySchema, + GroupPolicySchema, } from "openclaw/plugin-sdk/compat"; import { MarkdownConfigSchema, ToolPolicySchema } from "openclaw/plugin-sdk/zalouser"; import { z } from "zod"; @@ -17,11 +19,12 @@ const zalouserAccountSchema = z.object({ enabled: z.boolean().optional(), markdown: MarkdownConfigSchema, profile: z.string().optional(), - dmPolicy: z.enum(["pairing", "allowlist", "open", 
"disabled"]).optional(), - allowFrom: z.array(AllowFromEntrySchema).optional(), + dangerouslyAllowNameMatching: z.boolean().optional(), + dmPolicy: DmPolicySchema.optional(), + allowFrom: AllowFromListSchema, historyLimit: z.number().int().min(0).optional(), - groupAllowFrom: z.array(AllowFromEntrySchema).optional(), - groupPolicy: z.enum(["disabled", "allowlist", "open"]).optional(), + groupAllowFrom: AllowFromListSchema, + groupPolicy: GroupPolicySchema.optional(), groups: z.object({}).catchall(groupConfigSchema).optional(), messagePrefix: z.string().optional(), responsePrefix: z.string().optional(), diff --git a/extensions/zalouser/src/group-policy.test.ts b/extensions/zalouser/src/group-policy.test.ts index 0ab0e01d763..adbeffbe86f 100644 --- a/extensions/zalouser/src/group-policy.test.ts +++ b/extensions/zalouser/src/group-policy.test.ts @@ -23,6 +23,18 @@ describe("zalouser group policy helpers", () => { ).toEqual(["123", "group:123", "chan-1", "Team Alpha", "team-alpha", "*"]); }); + it("builds id-only candidates when name matching is disabled", () => { + expect( + buildZalouserGroupCandidates({ + groupId: "123", + groupChannel: "chan-1", + groupName: "Team Alpha", + includeGroupIdAlias: true, + allowNameMatching: false, + }), + ).toEqual(["123", "group:123", "*"]); + }); + it("finds the first matching group entry", () => { const groups = { "group:123": { allow: true }, diff --git a/extensions/zalouser/src/group-policy.ts b/extensions/zalouser/src/group-policy.ts index 1b6ca8e200e..4d116f15bf2 100644 --- a/extensions/zalouser/src/group-policy.ts +++ b/extensions/zalouser/src/group-policy.ts @@ -23,6 +23,7 @@ export function buildZalouserGroupCandidates(params: { groupName?: string | null; includeGroupIdAlias?: boolean; includeWildcard?: boolean; + allowNameMatching?: boolean; }): string[] { const seen = new Set(); const out: string[] = []; @@ -43,10 +44,12 @@ export function buildZalouserGroupCandidates(params: { if (params.includeGroupIdAlias === true && 
groupId) { push(`group:${groupId}`); } - push(groupChannel); - push(groupName); - if (groupName) { - push(normalizeZalouserGroupSlug(groupName)); + if (params.allowNameMatching !== false) { + push(groupChannel); + push(groupName); + if (groupName) { + push(normalizeZalouserGroupSlug(groupName)); + } } if (params.includeWildcard !== false) { push("*"); diff --git a/extensions/zalouser/src/monitor.account-scope.test.ts b/extensions/zalouser/src/monitor.account-scope.test.ts index 919bd25887c..ff8884282ac 100644 --- a/extensions/zalouser/src/monitor.account-scope.test.ts +++ b/extensions/zalouser/src/monitor.account-scope.test.ts @@ -4,6 +4,7 @@ import "./monitor.send-mocks.js"; import { __testing } from "./monitor.js"; import { sendMessageZalouserMock } from "./monitor.send-mocks.js"; import { setZalouserRuntime } from "./runtime.js"; +import { createZalouserRuntimeEnv } from "./test-helpers.js"; import type { ResolvedZalouserAccount, ZaloInboundMessage } from "./types.js"; describe("zalouser monitor pairing account scoping", () => { @@ -80,19 +81,11 @@ describe("zalouser monitor pairing account scoping", () => { raw: { source: "test" }, }; - const runtime: RuntimeEnv = { - log: vi.fn(), - error: vi.fn(), - exit: ((code: number): never => { - throw new Error(`exit ${code}`); - }) as RuntimeEnv["exit"], - }; - await __testing.processMessage({ message, account, config, - runtime, + runtime: createZalouserRuntimeEnv(), }); expect(readAllowFromStore).toHaveBeenCalledWith( diff --git a/extensions/zalouser/src/monitor.group-gating.test.ts b/extensions/zalouser/src/monitor.group-gating.test.ts index b3e38efecd6..ef68d6f2529 100644 --- a/extensions/zalouser/src/monitor.group-gating.test.ts +++ b/extensions/zalouser/src/monitor.group-gating.test.ts @@ -9,6 +9,7 @@ import { sendTypingZalouserMock, } from "./monitor.send-mocks.js"; import { setZalouserRuntime } from "./runtime.js"; +import { createZalouserRuntimeEnv } from "./test-helpers.js"; import type { 
ResolvedZalouserAccount, ZaloInboundMessage } from "./types.js"; function createAccount(): ResolvedZalouserAccount { @@ -39,18 +40,11 @@ function createConfig(): OpenClawConfig { }; } -function createRuntimeEnv(): RuntimeEnv { - return { - log: vi.fn(), - error: vi.fn(), - exit: ((code: number): never => { - throw new Error(`exit ${code}`); - }) as RuntimeEnv["exit"], - }; -} +const createRuntimeEnv = () => createZalouserRuntimeEnv(); function installRuntime(params: { commandAuthorized?: boolean; + replyPayload?: { text?: string; mediaUrl?: string; mediaUrls?: string[] }; resolveCommandAuthorizedFromAuthorizers?: (params: { useAccessGroups: boolean; authorizers: Array<{ configured: boolean; allowed: boolean }>; @@ -58,6 +52,9 @@ function installRuntime(params: { }) { const dispatchReplyWithBufferedBlockDispatcher = vi.fn(async ({ dispatcherOptions, ctx }) => { await dispatcherOptions.typingCallbacks?.onReplyStart?.(); + if (params.replyPayload) { + await dispatcherOptions.deliver(params.replyPayload); + } return { queuedFinal: false, counts: { tool: 0, block: 0, final: 0 }, ctx }; }); const resolveCommandAuthorizedFromAuthorizers = vi.fn( @@ -166,7 +163,8 @@ function installRuntime(params: { text: { resolveMarkdownTableMode: vi.fn(() => "code"), convertMarkdownTables: vi.fn((text: string) => text), - resolveChunkMode: vi.fn(() => "line"), + resolveChunkMode: vi.fn(() => "length"), + resolveTextChunkLimit: vi.fn(() => 1200), chunkMarkdownTextWithMode: vi.fn((text: string) => [text]), }, }, @@ -182,6 +180,31 @@ function installRuntime(params: { }; } +function installGroupCommandAuthRuntime() { + return installRuntime({ + resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) => + useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed), + }); +} + +async function processGroupControlCommand(params: { + account: ResolvedZalouserAccount; + content?: string; + commandContent?: string; +}) { + await __testing.processMessage({ 
+ message: createGroupMessage({ + content: params.content ?? "/new", + commandContent: params.commandContent ?? "/new", + hasAnyMention: true, + wasExplicitlyMentioned: true, + }), + account: params.account, + config: createConfig(), + runtime: createRuntimeEnv(), + }); +} + function createGroupMessage(overrides: Partial = {}): ZaloInboundMessage { return { threadId: "g-1", @@ -224,57 +247,152 @@ describe("zalouser monitor group mention gating", () => { sendSeenZalouserMock.mockClear(); }); - it("skips unmentioned group messages when requireMention=true", async () => { + async function processMessageWithDefaults(params: { + message: ZaloInboundMessage; + account?: ResolvedZalouserAccount; + historyState?: { + historyLimit: number; + groupHistories: Map< + string, + Array<{ sender: string; body: string; timestamp?: number; messageId?: string }> + >; + }; + }) { + await __testing.processMessage({ + message: params.message, + account: params.account ?? createAccount(), + config: createConfig(), + runtime: createZalouserRuntimeEnv(), + historyState: params.historyState, + }); + } + + async function expectSkippedGroupMessage(message?: Partial) { const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ commandAuthorized: false, }); - await __testing.processMessage({ - message: createGroupMessage(), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), + await processMessageWithDefaults({ + message: createGroupMessage(message), }); - expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); expect(sendTypingZalouserMock).not.toHaveBeenCalled(); - }); + } - it("fails closed when requireMention=true but mention detection is unavailable", async () => { + async function expectGroupCommandAuthorizers(params: { + accountConfig: ResolvedZalouserAccount["config"]; + expectedAuthorizers: Array<{ configured: boolean; allowed: boolean }>; + }) { + const { dispatchReplyWithBufferedBlockDispatcher, 
resolveCommandAuthorizedFromAuthorizers } = + installGroupCommandAuthRuntime(); + await processGroupControlCommand({ + account: { + ...createAccount(), + config: params.accountConfig, + }, + }); + expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); + const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0]; + expect(authCall?.authorizers).toEqual(params.expectedAuthorizers); + } + + async function processOpenDmMessage(params?: { + message?: Partial; + readSessionUpdatedAt?: (input?: { + storePath: string; + sessionKey: string; + }) => number | undefined; + }) { + const runtime = installRuntime({ + commandAuthorized: false, + }); + if (params?.readSessionUpdatedAt) { + runtime.readSessionUpdatedAt.mockImplementation(params.readSessionUpdatedAt); + } + const account = createAccount(); + await processMessageWithDefaults({ + message: createDmMessage(params?.message), + account: { + ...account, + config: { + ...account.config, + dmPolicy: "open", + }, + }, + }); + return runtime; + } + + async function expectDangerousNameMatching(params: { + dangerouslyAllowNameMatching?: boolean; + expectedDispatches: number; + }) { const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ commandAuthorized: false, }); - await __testing.processMessage({ - message: createGroupMessage({ - canResolveExplicitMention: false, - hasAnyMention: false, - wasExplicitlyMentioned: false, - }), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), - }); - - expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); - expect(sendTypingZalouserMock).not.toHaveBeenCalled(); - }); - - it("dispatches explicitly-mentioned group messages and marks WasMentioned", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ - commandAuthorized: false, - }); - await __testing.processMessage({ + await processMessageWithDefaults({ message: createGroupMessage({ + threadId: 
"g-attacker-001", + groupName: "Trusted Team", + senderId: "666", hasAnyMention: true, wasExplicitlyMentioned: true, content: "ping @bot", }), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), + account: { + ...createAccount(), + config: { + ...createAccount().config, + ...(params.dangerouslyAllowNameMatching ? { dangerouslyAllowNameMatching: true } : {}), + groupPolicy: "allowlist", + groupAllowFrom: ["*"], + groups: { + "group:g-trusted-001": { allow: true }, + "Trusted Team": { allow: true }, + }, + }, + }, }); + expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes( + params.expectedDispatches, + ); + return dispatchReplyWithBufferedBlockDispatcher; + } + async function dispatchGroupMessage(params: { + commandAuthorized: boolean; + message: Partial; + }) { + const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ + commandAuthorized: params.commandAuthorized, + }); + await processMessageWithDefaults({ + message: createGroupMessage(params.message), + }); expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; + return dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; + } + + it("skips unmentioned group messages when requireMention=true", async () => { + await expectSkippedGroupMessage(); + }); + + it("fails closed when requireMention=true but mention detection is unavailable", async () => { + await expectSkippedGroupMessage({ + canResolveExplicitMention: false, + hasAnyMention: false, + wasExplicitlyMentioned: false, + }); + }); + + it("dispatches explicitly-mentioned group messages and marks WasMentioned", async () => { + const callArg = await dispatchGroupMessage({ + commandAuthorized: false, + message: { + hasAnyMention: true, + wasExplicitlyMentioned: true, + content: "ping @bot", + }, + }); expect(callArg?.ctx?.WasMentioned).toBe(true); 
expect(callArg?.ctx?.To).toBe("zalouser:group:g-1"); expect(callArg?.ctx?.OriginatingTo).toBe("zalouser:group:g-1"); @@ -285,77 +403,78 @@ describe("zalouser monitor group mention gating", () => { }); it("allows authorized control commands to bypass mention gating", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ + const callArg = await dispatchGroupMessage({ commandAuthorized: true, - }); - await __testing.processMessage({ - message: createGroupMessage({ + message: { content: "/status", hasAnyMention: false, wasExplicitlyMentioned: false, - }), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), + }, }); - - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; expect(callArg?.ctx?.WasMentioned).toBe(true); }); - it("uses commandContent for mention-prefixed control commands", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ - commandAuthorized: true, - }); - await __testing.processMessage({ - message: createGroupMessage({ - content: "@Bot /new", - commandContent: "/new", - hasAnyMention: true, - wasExplicitlyMentioned: true, - }), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), + it("passes long markdown replies through once so formatting happens before chunking", async () => { + const replyText = `**${"a".repeat(2501)}**`; + installRuntime({ + commandAuthorized: false, + replyPayload: { text: replyText }, }); - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; - expect(callArg?.ctx?.CommandBody).toBe("/new"); - expect(callArg?.ctx?.BodyForCommands).toBe("/new"); - }); - - it("allows group control commands when only allowFrom is configured", async () => { - const { dispatchReplyWithBufferedBlockDispatcher, 
resolveCommandAuthorizedFromAuthorizers } = - installRuntime({ - resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) => - useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed), - }); await __testing.processMessage({ - message: createGroupMessage({ - content: "/new", - commandContent: "/new", - hasAnyMention: true, - wasExplicitlyMentioned: true, + message: createDmMessage({ + content: "hello", }), account: { ...createAccount(), config: { ...createAccount().config, - allowFrom: ["123"], + dmPolicy: "open", }, }, config: createConfig(), runtime: createRuntimeEnv(), }); - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0]; - expect(authCall?.authorizers).toEqual([ - { configured: true, allowed: true }, - { configured: true, allowed: true }, - ]); + expect(sendMessageZalouserMock).toHaveBeenCalledTimes(1); + expect(sendMessageZalouserMock).toHaveBeenCalledWith( + "u-1", + replyText, + expect.objectContaining({ + isGroup: false, + profile: "default", + textMode: "markdown", + textChunkMode: "length", + textChunkLimit: 1200, + }), + ); + }); + + it("uses commandContent for mention-prefixed control commands", async () => { + const callArg = await dispatchGroupMessage({ + commandAuthorized: true, + message: { + content: "@Bot /new", + commandContent: "/new", + hasAnyMention: true, + wasExplicitlyMentioned: true, + }, + }); + expect(callArg?.ctx?.CommandBody).toBe("/new"); + expect(callArg?.ctx?.BodyForCommands).toBe("/new"); + }); + + it("allows group control commands when only allowFrom is configured", async () => { + await expectGroupCommandAuthorizers({ + accountConfig: { + ...createAccount().config, + allowFrom: ["123"], + }, + expectedAuthorizers: [ + { configured: true, allowed: true }, + { configured: true, allowed: true }, + ], + }); }); it("blocks group messages when sender is not in groupAllowFrom/allowFrom", 
async () => { @@ -383,57 +502,36 @@ describe("zalouser monitor group mention gating", () => { expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); }); - it("allows group control commands when sender is in groupAllowFrom", async () => { - const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } = - installRuntime({ - resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) => - useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed), - }); - await __testing.processMessage({ - message: createGroupMessage({ - content: "/new", - commandContent: "/new", - hasAnyMention: true, - wasExplicitlyMentioned: true, - }), - account: { - ...createAccount(), - config: { - ...createAccount().config, - allowFrom: ["999"], - groupAllowFrom: ["123"], - }, - }, - config: createConfig(), - runtime: createRuntimeEnv(), - }); + it("does not accept a different group id by matching only the mutable group name by default", async () => { + await expectDangerousNameMatching({ expectedDispatches: 0 }); + }); - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0]; - expect(authCall?.authorizers).toEqual([ - { configured: true, allowed: false }, - { configured: true, allowed: true }, - ]); + it("accepts mutable group-name matches only when dangerouslyAllowNameMatching is enabled", async () => { + const dispatchReplyWithBufferedBlockDispatcher = await expectDangerousNameMatching({ + dangerouslyAllowNameMatching: true, + expectedDispatches: 1, + }); + const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; + expect(callArg?.ctx?.To).toBe("zalouser:group:g-attacker-001"); + }); + + it("allows group control commands when sender is in groupAllowFrom", async () => { + await expectGroupCommandAuthorizers({ + accountConfig: { + ...createAccount().config, + allowFrom: ["999"], + 
groupAllowFrom: ["123"], + }, + expectedAuthorizers: [ + { configured: true, allowed: false }, + { configured: true, allowed: true }, + ], + }); }); it("routes DM messages with direct peer kind", async () => { const { dispatchReplyWithBufferedBlockDispatcher, resolveAgentRoute, buildAgentSessionKey } = - installRuntime({ - commandAuthorized: false, - }); - const account = createAccount(); - await __testing.processMessage({ - message: createDmMessage(), - account: { - ...account, - config: { - ...account.config, - dmPolicy: "open", - }, - }, - config: createConfig(), - runtime: createRuntimeEnv(), - }); + await processOpenDmMessage(); expect(resolveAgentRoute).toHaveBeenCalledWith( expect.objectContaining({ @@ -451,24 +549,9 @@ describe("zalouser monitor group mention gating", () => { }); it("reuses the legacy DM session key when only the old group-shaped session exists", async () => { - const { dispatchReplyWithBufferedBlockDispatcher, readSessionUpdatedAt } = installRuntime({ - commandAuthorized: false, - }); - readSessionUpdatedAt.mockImplementation((input?: { storePath: string; sessionKey: string }) => - input?.sessionKey === "agent:main:zalouser:group:321" ? 123 : undefined, - ); - const account = createAccount(); - await __testing.processMessage({ - message: createDmMessage(), - account: { - ...account, - config: { - ...account.config, - dmPolicy: "open", - }, - }, - config: createConfig(), - runtime: createRuntimeEnv(), + const { dispatchReplyWithBufferedBlockDispatcher } = await processOpenDmMessage({ + readSessionUpdatedAt: (input?: { storePath: string; sessionKey: string }) => + input?.sessionKey === "agent:main:zalouser:group:321" ? 
123 : undefined, }); const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; diff --git a/extensions/zalouser/src/monitor.ts b/extensions/zalouser/src/monitor.ts index 6590082e830..2bfa1be8aa4 100644 --- a/extensions/zalouser/src/monitor.ts +++ b/extensions/zalouser/src/monitor.ts @@ -19,6 +19,7 @@ import { createScopedPairingAccess, createReplyPrefixOptions, evaluateGroupRouteAccessForPolicy, + isDangerousNameMatchingEnabled, issuePairingChallenge, resolveOutboundMediaUrls, mergeAllowlist, @@ -30,6 +31,7 @@ import { summarizeMapping, warnMissingProviderGroupPolicyFallbackOnce, } from "openclaw/plugin-sdk/zalouser"; +import { createDeferred } from "../../shared/deferred.js"; import { buildZalouserGroupCandidates, findZalouserGroupEntry, @@ -128,16 +130,6 @@ function resolveInboundQueueKey(message: ZaloInboundMessage): string { return `direct:${senderId || threadId}`; } -function createDeferred() { - let resolve!: (value: T | PromiseLike) => void; - let reject!: (reason?: unknown) => void; - const promise = new Promise((res, rej) => { - resolve = res; - reject = rej; - }); - return { promise, resolve, reject }; -} - function resolveZalouserDmSessionScope(config: OpenClawConfig) { const configured = config.session?.dmScope; return configured === "main" || !configured ? "per-channel-peer" : configured; @@ -212,6 +204,7 @@ function resolveGroupRequireMention(params: { groupId: string; groupName?: string | null; groups: Record; + allowNameMatching?: boolean; }): boolean { const entry = findZalouserGroupEntry( params.groups ?? {}, @@ -220,6 +213,7 @@ function resolveGroupRequireMention(params: { groupName: params.groupName, includeGroupIdAlias: true, includeWildcard: true, + allowNameMatching: params.allowNameMatching, }), ); if (typeof entry?.requireMention === "boolean") { @@ -316,6 +310,7 @@ async function processMessage( }); const groups = account.config.groups ?? 
{}; + const allowNameMatching = isDangerousNameMatchingEnabled(account.config); if (isGroup) { const groupEntry = findZalouserGroupEntry( groups, @@ -324,6 +319,7 @@ async function processMessage( groupName, includeGroupIdAlias: true, includeWildcard: true, + allowNameMatching, }), ); const routeAccess = evaluateGroupRouteAccessForPolicy({ @@ -466,6 +462,7 @@ async function processMessage( groupId: chatId, groupName, groups, + allowNameMatching, }) : false; const mentionRegexes = core.channel.mentions.buildMentionRegexes(config, route.agentId); @@ -703,6 +700,10 @@ async function deliverZalouserReply(params: { params; const tableMode = params.tableMode ?? "code"; const text = core.channel.text.convertMarkdownTables(payload.text ?? "", tableMode); + const chunkMode = core.channel.text.resolveChunkMode(config, "zalouser", accountId); + const textChunkLimit = core.channel.text.resolveTextChunkLimit(config, "zalouser", accountId, { + fallbackLimit: ZALOUSER_TEXT_LIMIT, + }); const sentMedia = await sendMediaWithLeadingCaption({ mediaUrls: resolveOutboundMediaUrls(payload), @@ -713,6 +714,9 @@ async function deliverZalouserReply(params: { profile, mediaUrl, isGroup, + textMode: "markdown", + textChunkMode: chunkMode, + textChunkLimit, }); statusSink?.({ lastOutboundAt: Date.now() }); }, @@ -725,20 +729,17 @@ async function deliverZalouserReply(params: { } if (text) { - const chunkMode = core.channel.text.resolveChunkMode(config, "zalouser", accountId); - const chunks = core.channel.text.chunkMarkdownTextWithMode( - text, - ZALOUSER_TEXT_LIMIT, - chunkMode, - ); - logVerbose(core, runtime, `Sending ${chunks.length} text chunk(s) to ${chatId}`); - for (const chunk of chunks) { - try { - await sendMessageZalouser(chatId, chunk, { profile, isGroup }); - statusSink?.({ lastOutboundAt: Date.now() }); - } catch (err) { - runtime.error(`Zalouser message send failed: ${String(err)}`); - } + try { + await sendMessageZalouser(chatId, text, { + profile, + isGroup, + textMode: 
"markdown", + textChunkMode: chunkMode, + textChunkLimit, + }); + statusSink?.({ lastOutboundAt: Date.now() }); + } catch (err) { + runtime.error(`Zalouser message send failed: ${String(err)}`); } } } diff --git a/extensions/zalouser/src/onboarding.ts b/extensions/zalouser/src/onboarding.ts index ae8f53bf0d5..d5b828b6711 100644 --- a/extensions/zalouser/src/onboarding.ts +++ b/extensions/zalouser/src/onboarding.ts @@ -9,6 +9,7 @@ import { formatResolvedUnresolvedNote, mergeAllowFromEntries, normalizeAccountId, + patchScopedAccountConfig, promptChannelAccessConfig, resolveAccountIdForConfigure, setTopLevelChannelDmPolicyWithAllowFrom, @@ -36,37 +37,13 @@ function setZalouserAccountScopedConfig( defaultPatch: Record, accountPatch: Record = defaultPatch, ): OpenClawConfig { - if (accountId === DEFAULT_ACCOUNT_ID) { - return { - ...cfg, - channels: { - ...cfg.channels, - zalouser: { - ...cfg.channels?.zalouser, - enabled: true, - ...defaultPatch, - }, - }, - } as OpenClawConfig; - } - return { - ...cfg, - channels: { - ...cfg.channels, - zalouser: { - ...cfg.channels?.zalouser, - enabled: true, - accounts: { - ...cfg.channels?.zalouser?.accounts, - [accountId]: { - ...cfg.channels?.zalouser?.accounts?.[accountId], - enabled: cfg.channels?.zalouser?.accounts?.[accountId]?.enabled ?? 
true, - ...accountPatch, - }, - }, - }, - }, - } as OpenClawConfig; + return patchScopedAccountConfig({ + cfg, + channelKey: channel, + accountId, + patch: defaultPatch, + accountPatch, + }) as OpenClawConfig; } function setZalouserDmPolicy( diff --git a/extensions/zalouser/src/send.test.ts b/extensions/zalouser/src/send.test.ts index 92b3cec25f2..cc920e6be7e 100644 --- a/extensions/zalouser/src/send.test.ts +++ b/extensions/zalouser/src/send.test.ts @@ -8,6 +8,7 @@ import { sendSeenZalouser, sendTypingZalouser, } from "./send.js"; +import { parseZalouserTextStyles } from "./text-styles.js"; import { sendZaloDeliveredEvent, sendZaloLink, @@ -16,6 +17,7 @@ import { sendZaloTextMessage, sendZaloTypingEvent, } from "./zalo-js.js"; +import { TextStyle } from "./zca-client.js"; vi.mock("./zalo-js.js", () => ({ sendZaloTextMessage: vi.fn(), @@ -43,36 +45,272 @@ describe("zalouser send helpers", () => { mockSendSeen.mockReset(); }); - it("delegates text send to JS transport", async () => { + it("keeps plain text literal by default", async () => { mockSendText.mockResolvedValueOnce({ ok: true, messageId: "mid-1" }); - const result = await sendMessageZalouser("thread-1", "hello", { + const result = await sendMessageZalouser("thread-1", "**hello**", { profile: "default", isGroup: true, }); - expect(mockSendText).toHaveBeenCalledWith("thread-1", "hello", { - profile: "default", - isGroup: true, - }); + expect(mockSendText).toHaveBeenCalledWith( + "thread-1", + "**hello**", + expect.objectContaining({ + profile: "default", + isGroup: true, + }), + ); expect(result).toEqual({ ok: true, messageId: "mid-1" }); }); - it("maps image helper to media send", async () => { + it("formats markdown text when markdown mode is enabled", async () => { + mockSendText.mockResolvedValueOnce({ ok: true, messageId: "mid-1b" }); + + await sendMessageZalouser("thread-1", "**hello**", { + profile: "default", + isGroup: true, + textMode: "markdown", + }); + + 
expect(mockSendText).toHaveBeenCalledWith( + "thread-1", + "hello", + expect.objectContaining({ + profile: "default", + isGroup: true, + textMode: "markdown", + textStyles: [{ start: 0, len: 5, st: TextStyle.Bold }], + }), + ); + }); + + it("formats image captions in markdown mode", async () => { mockSendText.mockResolvedValueOnce({ ok: true, messageId: "mid-2" }); await sendImageZalouser("thread-2", "https://example.com/a.png", { profile: "p2", - caption: "cap", + caption: "_cap_", isGroup: false, + textMode: "markdown", }); - expect(mockSendText).toHaveBeenCalledWith("thread-2", "cap", { + expect(mockSendText).toHaveBeenCalledWith( + "thread-2", + "cap", + expect.objectContaining({ + profile: "p2", + caption: undefined, + isGroup: false, + mediaUrl: "https://example.com/a.png", + textMode: "markdown", + textStyles: [{ start: 0, len: 3, st: TextStyle.Italic }], + }), + ); + }); + + it("does not keep the raw markdown caption as a media fallback after formatting", async () => { + mockSendText.mockResolvedValueOnce({ ok: true, messageId: "mid-2b" }); + + await sendImageZalouser("thread-2", "https://example.com/a.png", { profile: "p2", - caption: "cap", + caption: "```\n```", isGroup: false, - mediaUrl: "https://example.com/a.png", + textMode: "markdown", }); + + expect(mockSendText).toHaveBeenCalledWith( + "thread-2", + "", + expect.objectContaining({ + profile: "p2", + caption: undefined, + isGroup: false, + mediaUrl: "https://example.com/a.png", + textMode: "markdown", + textStyles: undefined, + }), + ); + }); + + it("rechunks normalized markdown text before sending to avoid transport truncation", async () => { + const text = "\t".repeat(500) + "a".repeat(1500); + const formatted = parseZalouserTextStyles(text); + mockSendText + .mockResolvedValueOnce({ ok: true, messageId: "mid-2c-1" }) + .mockResolvedValueOnce({ ok: true, messageId: "mid-2c-2" }); + + const result = await sendMessageZalouser("thread-2c", text, { + profile: "p2c", + isGroup: false, + textMode: 
"markdown", + }); + + expect(formatted.text.length).toBeGreaterThan(2000); + expect(mockSendText).toHaveBeenCalledTimes(2); + expect(mockSendText.mock.calls.map((call) => call[1]).join("")).toBe(formatted.text); + expect(mockSendText.mock.calls.every((call) => (call[1] as string).length <= 2000)).toBe(true); + expect(result).toEqual({ ok: true, messageId: "mid-2c-2" }); + }); + + it("preserves text styles when splitting long formatted markdown", async () => { + const text = `**${"a".repeat(2501)}**`; + mockSendText + .mockResolvedValueOnce({ ok: true, messageId: "mid-2d-1" }) + .mockResolvedValueOnce({ ok: true, messageId: "mid-2d-2" }); + + const result = await sendMessageZalouser("thread-2d", text, { + profile: "p2d", + isGroup: false, + textMode: "markdown", + }); + + expect(mockSendText).toHaveBeenNthCalledWith( + 1, + "thread-2d", + "a".repeat(2000), + expect.objectContaining({ + profile: "p2d", + isGroup: false, + textMode: "markdown", + textStyles: [{ start: 0, len: 2000, st: TextStyle.Bold }], + }), + ); + expect(mockSendText).toHaveBeenNthCalledWith( + 2, + "thread-2d", + "a".repeat(501), + expect.objectContaining({ + profile: "p2d", + isGroup: false, + textMode: "markdown", + textStyles: [{ start: 0, len: 501, st: TextStyle.Bold }], + }), + ); + expect(result).toEqual({ ok: true, messageId: "mid-2d-2" }); + }); + + it("preserves formatted text and styles when newline chunk mode splits after parsing", async () => { + const text = `**${"a".repeat(1995)}**\n\nsecond paragraph`; + const formatted = parseZalouserTextStyles(text); + mockSendText + .mockResolvedValueOnce({ ok: true, messageId: "mid-2d-3" }) + .mockResolvedValueOnce({ ok: true, messageId: "mid-2d-4" }); + + const result = await sendMessageZalouser("thread-2d-2", text, { + profile: "p2d-2", + isGroup: false, + textMode: "markdown", + textChunkMode: "newline", + }); + + expect(mockSendText).toHaveBeenCalledTimes(2); + expect(mockSendText.mock.calls.map((call) => 
call[1]).join("")).toBe(formatted.text); + expect(mockSendText).toHaveBeenNthCalledWith( + 1, + "thread-2d-2", + `${"a".repeat(1995)}\n\n`, + expect.objectContaining({ + profile: "p2d-2", + isGroup: false, + textMode: "markdown", + textChunkMode: "newline", + textStyles: [{ start: 0, len: 1995, st: TextStyle.Bold }], + }), + ); + expect(mockSendText).toHaveBeenNthCalledWith( + 2, + "thread-2d-2", + "second paragraph", + expect.objectContaining({ + profile: "p2d-2", + isGroup: false, + textMode: "markdown", + textChunkMode: "newline", + textStyles: undefined, + }), + ); + expect(result).toEqual({ ok: true, messageId: "mid-2d-4" }); + }); + + it("respects an explicit text chunk limit when splitting formatted markdown", async () => { + const text = `**${"a".repeat(1501)}**`; + mockSendText + .mockResolvedValueOnce({ ok: true, messageId: "mid-2d-5" }) + .mockResolvedValueOnce({ ok: true, messageId: "mid-2d-6" }); + + const result = await sendMessageZalouser("thread-2d-3", text, { + profile: "p2d-3", + isGroup: false, + textMode: "markdown", + textChunkLimit: 1200, + } as never); + + expect(mockSendText).toHaveBeenCalledTimes(2); + expect(mockSendText).toHaveBeenNthCalledWith( + 1, + "thread-2d-3", + "a".repeat(1200), + expect.objectContaining({ + profile: "p2d-3", + isGroup: false, + textMode: "markdown", + textChunkLimit: 1200, + textStyles: [{ start: 0, len: 1200, st: TextStyle.Bold }], + }), + ); + expect(mockSendText).toHaveBeenNthCalledWith( + 2, + "thread-2d-3", + "a".repeat(301), + expect.objectContaining({ + profile: "p2d-3", + isGroup: false, + textMode: "markdown", + textChunkLimit: 1200, + textStyles: [{ start: 0, len: 301, st: TextStyle.Bold }], + }), + ); + expect(result).toEqual({ ok: true, messageId: "mid-2d-6" }); + }); + + it("sends overflow markdown captions as follow-up text after the media message", async () => { + const caption = "\t".repeat(500) + "a".repeat(1500); + const formatted = parseZalouserTextStyles(caption); + mockSendText + 
.mockResolvedValueOnce({ ok: true, messageId: "mid-2e-1" }) + .mockResolvedValueOnce({ ok: true, messageId: "mid-2e-2" }); + + const result = await sendImageZalouser("thread-2e", "https://example.com/long.png", { + profile: "p2e", + caption, + isGroup: false, + textMode: "markdown", + }); + + expect(mockSendText).toHaveBeenCalledTimes(2); + expect(mockSendText.mock.calls.map((call) => call[1]).join("")).toBe(formatted.text); + expect(mockSendText).toHaveBeenNthCalledWith( + 1, + "thread-2e", + expect.any(String), + expect.objectContaining({ + profile: "p2e", + caption: undefined, + isGroup: false, + mediaUrl: "https://example.com/long.png", + textMode: "markdown", + }), + ); + expect(mockSendText).toHaveBeenNthCalledWith( + 2, + "thread-2e", + expect.any(String), + expect.not.objectContaining({ + mediaUrl: "https://example.com/long.png", + }), + ); + expect(result).toEqual({ ok: true, messageId: "mid-2e-2" }); }); it("delegates link helper to JS transport", async () => { diff --git a/extensions/zalouser/src/send.ts b/extensions/zalouser/src/send.ts index 07ae1408bff..55ff17df636 100644 --- a/extensions/zalouser/src/send.ts +++ b/extensions/zalouser/src/send.ts @@ -1,3 +1,4 @@ +import { parseZalouserTextStyles } from "./text-styles.js"; import type { ZaloEventMessage, ZaloSendOptions, ZaloSendResult } from "./types.js"; import { sendZaloDeliveredEvent, @@ -7,16 +8,58 @@ import { sendZaloTextMessage, sendZaloTypingEvent, } from "./zalo-js.js"; +import { TextStyle } from "./zca-client.js"; export type ZalouserSendOptions = ZaloSendOptions; export type ZalouserSendResult = ZaloSendResult; +const ZALO_TEXT_LIMIT = 2000; +const DEFAULT_TEXT_CHUNK_MODE = "length"; + +type StyledTextChunk = { + text: string; + styles?: ZaloSendOptions["textStyles"]; +}; + +type TextChunkMode = NonNullable; + export async function sendMessageZalouser( threadId: string, text: string, options: ZalouserSendOptions = {}, ): Promise { - return await sendZaloTextMessage(threadId, text, options); 
+ const prepared = + options.textMode === "markdown" + ? parseZalouserTextStyles(text) + : { text, styles: options.textStyles }; + const textChunkLimit = options.textChunkLimit ?? ZALO_TEXT_LIMIT; + const chunks = splitStyledText( + prepared.text, + (prepared.styles?.length ?? 0) > 0 ? prepared.styles : undefined, + textChunkLimit, + options.textChunkMode, + ); + + let lastResult: ZalouserSendResult | null = null; + for (const [index, chunk] of chunks.entries()) { + const chunkOptions = + index === 0 + ? { ...options, textStyles: chunk.styles } + : { + ...options, + caption: undefined, + mediaLocalRoots: undefined, + mediaUrl: undefined, + textStyles: chunk.styles, + }; + const result = await sendZaloTextMessage(threadId, chunk.text, chunkOptions); + if (!result.ok) { + return result; + } + lastResult = result; + } + + return lastResult ?? { ok: false, error: "No message content provided" }; } export async function sendImageZalouser( @@ -24,8 +67,9 @@ export async function sendImageZalouser( imageUrl: string, options: ZalouserSendOptions = {}, ): Promise { - return await sendZaloTextMessage(threadId, options.caption ?? "", { + return await sendMessageZalouser(threadId, options.caption ?? "", { ...options, + caption: undefined, mediaUrl: imageUrl, }); } @@ -85,3 +129,144 @@ export async function sendSeenZalouser(params: { }): Promise { await sendZaloSeenEvent(params); } + +function splitStyledText( + text: string, + styles: ZaloSendOptions["textStyles"], + limit: number, + mode: ZaloSendOptions["textChunkMode"], +): StyledTextChunk[] { + if (text.length === 0) { + return [{ text, styles: undefined }]; + } + + const chunks: StyledTextChunk[] = []; + for (const range of splitTextRanges(text, limit, mode ?? 
DEFAULT_TEXT_CHUNK_MODE)) { + const { start, end } = range; + chunks.push({ + text: text.slice(start, end), + styles: sliceTextStyles(styles, start, end), + }); + } + return chunks; +} + +function sliceTextStyles( + styles: ZaloSendOptions["textStyles"], + start: number, + end: number, +): ZaloSendOptions["textStyles"] { + if (!styles || styles.length === 0) { + return undefined; + } + + const chunkStyles = styles + .map((style) => { + const overlapStart = Math.max(style.start, start); + const overlapEnd = Math.min(style.start + style.len, end); + if (overlapEnd <= overlapStart) { + return null; + } + + if (style.st === TextStyle.Indent) { + return { + start: overlapStart - start, + len: overlapEnd - overlapStart, + st: style.st, + indentSize: style.indentSize, + }; + } + + return { + start: overlapStart - start, + len: overlapEnd - overlapStart, + st: style.st, + }; + }) + .filter((style): style is NonNullable => style !== null); + + return chunkStyles.length > 0 ? chunkStyles : undefined; +} + +function splitTextRanges( + text: string, + limit: number, + mode: TextChunkMode, +): Array<{ start: number; end: number }> { + if (mode === "newline") { + return splitTextRangesByPreferredBreaks(text, limit); + } + + const ranges: Array<{ start: number; end: number }> = []; + for (let start = 0; start < text.length; start += limit) { + ranges.push({ + start, + end: Math.min(text.length, start + limit), + }); + } + return ranges; +} + +function splitTextRangesByPreferredBreaks( + text: string, + limit: number, +): Array<{ start: number; end: number }> { + const ranges: Array<{ start: number; end: number }> = []; + let start = 0; + + while (start < text.length) { + const maxEnd = Math.min(text.length, start + limit); + let end = maxEnd; + if (maxEnd < text.length) { + end = + findParagraphBreak(text, start, maxEnd) ?? + findLastBreak(text, "\n", start, maxEnd) ?? + findLastWhitespaceBreak(text, start, maxEnd) ?? 
+ maxEnd; + } + + if (end <= start) { + end = maxEnd; + } + + ranges.push({ start, end }); + start = end; + } + + return ranges; +} + +function findParagraphBreak(text: string, start: number, end: number): number | undefined { + const slice = text.slice(start, end); + const matches = slice.matchAll(/\n[\t ]*\n+/g); + let lastMatch: RegExpMatchArray | undefined; + for (const match of matches) { + lastMatch = match; + } + if (!lastMatch || lastMatch.index === undefined) { + return undefined; + } + return start + lastMatch.index + lastMatch[0].length; +} + +function findLastBreak( + text: string, + marker: string, + start: number, + end: number, +): number | undefined { + const index = text.lastIndexOf(marker, end - 1); + if (index < start) { + return undefined; + } + return index + marker.length; +} + +function findLastWhitespaceBreak(text: string, start: number, end: number): number | undefined { + for (let index = end - 1; index > start; index -= 1) { + if (/\s/.test(text[index])) { + return index + 1; + } + } + return undefined; +} diff --git a/extensions/zalouser/src/status-issues.test.ts b/extensions/zalouser/src/status-issues.test.ts index 73f7277b2b9..c1e142c88e8 100644 --- a/extensions/zalouser/src/status-issues.test.ts +++ b/extensions/zalouser/src/status-issues.test.ts @@ -1,4 +1,5 @@ import { describe, expect, it } from "vitest"; +import { expectOpenDmPolicyConfigIssue } from "../../test-utils/status-issues.js"; import { collectZalouserStatusIssues } from "./status-issues.js"; describe("collectZalouserStatusIssues", () => { @@ -17,16 +18,15 @@ describe("collectZalouserStatusIssues", () => { }); it("warns when dmPolicy is open", () => { - const issues = collectZalouserStatusIssues([ - { + expectOpenDmPolicyConfigIssue({ + collectIssues: collectZalouserStatusIssues, + account: { accountId: "default", enabled: true, configured: true, dmPolicy: "open", }, - ]); - expect(issues).toHaveLength(1); - expect(issues[0]?.kind).toBe("config"); + }); }); it("skips 
disabled accounts", () => { diff --git a/extensions/zalouser/src/status-issues.ts b/extensions/zalouser/src/status-issues.ts index fca889a5115..b42c915e00a 100644 --- a/extensions/zalouser/src/status-issues.ts +++ b/extensions/zalouser/src/status-issues.ts @@ -1,42 +1,24 @@ import type { ChannelAccountSnapshot, ChannelStatusIssue } from "openclaw/plugin-sdk/zalouser"; +import { coerceStatusIssueAccountId, readStatusIssueFields } from "../../shared/status-issues.js"; -type ZalouserAccountStatus = { - accountId?: unknown; - enabled?: unknown; - configured?: unknown; - dmPolicy?: unknown; - lastError?: unknown; -}; - -const isRecord = (value: unknown): value is Record => - Boolean(value && typeof value === "object"); - -const asString = (value: unknown): string | undefined => - typeof value === "string" ? value : typeof value === "number" ? String(value) : undefined; - -function readZalouserAccountStatus(value: ChannelAccountSnapshot): ZalouserAccountStatus | null { - if (!isRecord(value)) { - return null; - } - return { - accountId: value.accountId, - enabled: value.enabled, - configured: value.configured, - dmPolicy: value.dmPolicy, - lastError: value.lastError, - }; -} +const ZALOUSER_STATUS_FIELDS = [ + "accountId", + "enabled", + "configured", + "dmPolicy", + "lastError", +] as const; export function collectZalouserStatusIssues( accounts: ChannelAccountSnapshot[], ): ChannelStatusIssue[] { const issues: ChannelStatusIssue[] = []; for (const entry of accounts) { - const account = readZalouserAccountStatus(entry); + const account = readStatusIssueFields(entry, ZALOUSER_STATUS_FIELDS); if (!account) { continue; } - const accountId = asString(account.accountId) ?? "default"; + const accountId = coerceStatusIssueAccountId(account.accountId) ?? 
"default"; const enabled = account.enabled !== false; if (!enabled) { continue; diff --git a/extensions/zalouser/src/test-helpers.ts b/extensions/zalouser/src/test-helpers.ts new file mode 100644 index 00000000000..8b43e182c54 --- /dev/null +++ b/extensions/zalouser/src/test-helpers.ts @@ -0,0 +1,26 @@ +import type { RuntimeEnv } from "openclaw/plugin-sdk/zalouser"; +import type { ResolvedZalouserAccount } from "./types.js"; + +export function createZalouserRuntimeEnv(): RuntimeEnv { + return { + log: () => {}, + error: () => {}, + exit: ((code: number): never => { + throw new Error(`exit ${code}`); + }) as RuntimeEnv["exit"], + }; +} + +export function createDefaultResolvedZalouserAccount( + overrides: Partial = {}, +): ResolvedZalouserAccount { + return { + accountId: "default", + profile: "default", + name: "test", + enabled: true, + authenticated: true, + config: {}, + ...overrides, + }; +} diff --git a/extensions/zalouser/src/text-styles.test.ts b/extensions/zalouser/src/text-styles.test.ts new file mode 100644 index 00000000000..01e6c2da86b --- /dev/null +++ b/extensions/zalouser/src/text-styles.test.ts @@ -0,0 +1,203 @@ +import { describe, expect, it } from "vitest"; +import { parseZalouserTextStyles } from "./text-styles.js"; +import { TextStyle } from "./zca-client.js"; + +describe("parseZalouserTextStyles", () => { + it("renders inline markdown emphasis as Zalo style ranges", () => { + expect(parseZalouserTextStyles("**bold** *italic* ~~strike~~")).toEqual({ + text: "bold italic strike", + styles: [ + { start: 0, len: 4, st: TextStyle.Bold }, + { start: 5, len: 6, st: TextStyle.Italic }, + { start: 12, len: 6, st: TextStyle.StrikeThrough }, + ], + }); + }); + + it("keeps inline code and plain math markers literal", () => { + expect(parseZalouserTextStyles("before `inline *code*` after\n2 * 3 * 4")).toEqual({ + text: "before `inline *code*` after\n2 * 3 * 4", + styles: [], + }); + }); + + it("preserves backslash escapes inside code spans and fenced code 
blocks", () => { + expect(parseZalouserTextStyles("before `\\*` after\n```ts\n\\*\\_\\\\\n```")).toEqual({ + text: "before `\\*` after\n\\*\\_\\\\", + styles: [], + }); + }); + + it("closes fenced code blocks when the input uses CRLF newlines", () => { + expect(parseZalouserTextStyles("```\r\n*code*\r\n```\r\n**after**")).toEqual({ + text: "*code*\nafter", + styles: [{ start: 7, len: 5, st: TextStyle.Bold }], + }); + }); + + it("maps headings, block quotes, and lists into line styles", () => { + expect(parseZalouserTextStyles(["# Title", "> quoted", " - nested"].join("\n"))).toEqual({ + text: "Title\nquoted\nnested", + styles: [ + { start: 0, len: 5, st: TextStyle.Bold }, + { start: 0, len: 5, st: TextStyle.Big }, + { start: 6, len: 6, st: TextStyle.Indent, indentSize: 1 }, + { start: 13, len: 6, st: TextStyle.UnorderedList }, + ], + }); + }); + + it("treats 1-3 leading spaces as markdown padding for headings and lists", () => { + expect(parseZalouserTextStyles(" # Title\n 1. item\n - bullet")).toEqual({ + text: "Title\nitem\nbullet", + styles: [ + { start: 0, len: 5, st: TextStyle.Bold }, + { start: 0, len: 5, st: TextStyle.Big }, + { start: 6, len: 4, st: TextStyle.OrderedList }, + { start: 11, len: 6, st: TextStyle.UnorderedList }, + ], + }); + }); + + it("strips fenced code markers and preserves leading indentation with nbsp", () => { + expect(parseZalouserTextStyles("```ts\n const x = 1\n\treturn x\n```")).toEqual({ + text: "\u00A0\u00A0const x = 1\n\u00A0\u00A0\u00A0\u00A0return x", + styles: [], + }); + }); + + it("treats tilde fences as literal code blocks", () => { + expect(parseZalouserTextStyles("~~~bash\n*cmd*\n~~~")).toEqual({ + text: "*cmd*", + styles: [], + }); + }); + + it("treats fences indented under list items as literal code blocks", () => { + expect(parseZalouserTextStyles(" ```\n*cmd*\n ```")).toEqual({ + text: "*cmd*", + styles: [], + }); + }); + + it("treats quoted backtick fences as literal code blocks", () => { + 
expect(parseZalouserTextStyles("> ```js\n> *cmd*\n> ```")).toEqual({ + text: "*cmd*", + styles: [], + }); + }); + + it("treats quoted tilde fences as literal code blocks", () => { + expect(parseZalouserTextStyles("> ~~~\n> *cmd*\n> ~~~")).toEqual({ + text: "*cmd*", + styles: [], + }); + }); + + it("preserves quote-prefixed lines inside normal fenced code blocks", () => { + expect(parseZalouserTextStyles("```\n> prompt\n```")).toEqual({ + text: "> prompt", + styles: [], + }); + }); + + it("does not treat quote-prefixed fence text inside code as a closing fence", () => { + expect(parseZalouserTextStyles("```\n> ```\n*still code*\n```")).toEqual({ + text: "> ```\n*still code*", + styles: [], + }); + }); + + it("treats indented blockquotes as quoted lines", () => { + expect(parseZalouserTextStyles(" > quoted")).toEqual({ + text: "quoted", + styles: [{ start: 0, len: 6, st: TextStyle.Indent, indentSize: 1 }], + }); + }); + + it("treats spaced nested blockquotes as deeper quoted lines", () => { + expect(parseZalouserTextStyles("> > quoted")).toEqual({ + text: "quoted", + styles: [{ start: 0, len: 6, st: TextStyle.Indent, indentSize: 2 }], + }); + }); + + it("treats indented quoted fences as literal code blocks", () => { + expect(parseZalouserTextStyles(" > ```\n > *cmd*\n > ```")).toEqual({ + text: "*cmd*", + styles: [], + }); + }); + + it("treats spaced nested quoted fences as literal code blocks", () => { + expect(parseZalouserTextStyles("> > ```\n> > code\n> > ```")).toEqual({ + text: "code", + styles: [], + }); + }); + + it("preserves inner quote markers inside quoted fenced code blocks", () => { + expect(parseZalouserTextStyles("> ```\n>> prompt\n> ```")).toEqual({ + text: "> prompt", + styles: [], + }); + }); + + it("keeps quote indentation on heading lines", () => { + expect(parseZalouserTextStyles("> # Title")).toEqual({ + text: "Title", + styles: [ + { start: 0, len: 5, st: TextStyle.Bold }, + { start: 0, len: 5, st: TextStyle.Big }, + { start: 0, len: 5, st: 
TextStyle.Indent, indentSize: 1 }, + ], + }); + }); + + it("keeps unmatched fences literal", () => { + expect(parseZalouserTextStyles("```python")).toEqual({ + text: "```python", + styles: [], + }); + }); + + it("keeps unclosed fenced blocks literal until eof", () => { + expect(parseZalouserTextStyles("```python\n\\*not italic*\n_next_")).toEqual({ + text: "```python\n\\*not italic*\n_next_", + styles: [], + }); + }); + + it("supports nested markdown and tag styles regardless of order", () => { + expect(parseZalouserTextStyles("**{red}x{/red}** {red}**y**{/red}")).toEqual({ + text: "x y", + styles: [ + { start: 0, len: 1, st: TextStyle.Bold }, + { start: 0, len: 1, st: TextStyle.Red }, + { start: 2, len: 1, st: TextStyle.Red }, + { start: 2, len: 1, st: TextStyle.Bold }, + ], + }); + }); + + it("treats small text tags as normal text", () => { + expect(parseZalouserTextStyles("{small}tiny{/small}")).toEqual({ + text: "tiny", + styles: [], + }); + }); + + it("keeps escaped markers literal", () => { + expect(parseZalouserTextStyles("\\*literal\\* \\{underline}tag{/underline}")).toEqual({ + text: "*literal* {underline}tag{/underline}", + styles: [], + }); + }); + + it("keeps indented code blocks literal", () => { + expect(parseZalouserTextStyles(" *cmd*")).toEqual({ + text: "\u00A0\u00A0\u00A0\u00A0*cmd*", + styles: [], + }); + }); +}); diff --git a/extensions/zalouser/src/text-styles.ts b/extensions/zalouser/src/text-styles.ts new file mode 100644 index 00000000000..cdfe8b492b5 --- /dev/null +++ b/extensions/zalouser/src/text-styles.ts @@ -0,0 +1,537 @@ +import { TextStyle, type Style } from "./zca-client.js"; + +type InlineStyle = (typeof TextStyle)[keyof typeof TextStyle]; + +type LineStyle = { + lineIndex: number; + style: InlineStyle; + indentSize?: number; +}; + +type Segment = { + text: string; + styles: InlineStyle[]; +}; + +type InlineMarker = { + pattern: RegExp; + extractText: (match: RegExpExecArray) => string; + resolveStyles?: (match: RegExpExecArray) => 
InlineStyle[]; + literal?: boolean; +}; + +type ResolvedInlineMatch = { + match: RegExpExecArray; + marker: InlineMarker; + styles: InlineStyle[]; + text: string; + priority: number; +}; + +type FenceMarker = { + char: "`" | "~"; + length: number; + indent: number; +}; + +type ActiveFence = FenceMarker & { + quoteIndent: number; +}; + +const TAG_STYLE_MAP: Record = { + red: TextStyle.Red, + orange: TextStyle.Orange, + yellow: TextStyle.Yellow, + green: TextStyle.Green, + small: null, + big: TextStyle.Big, + underline: TextStyle.Underline, +}; + +const INLINE_MARKERS: InlineMarker[] = [ + { + pattern: /`([^`\n]+)`/g, + extractText: (match) => match[0], + literal: true, + }, + { + pattern: /\\([*_~#\\{}>+\-`])/g, + extractText: (match) => match[1], + literal: true, + }, + { + pattern: new RegExp(`\\{(${Object.keys(TAG_STYLE_MAP).join("|")})\\}(.+?)\\{/\\1\\}`, "g"), + extractText: (match) => match[2], + resolveStyles: (match) => { + const style = TAG_STYLE_MAP[match[1]]; + return style ? [style] : []; + }, + }, + { + pattern: /(? match[1], + resolveStyles: () => [TextStyle.Bold, TextStyle.Italic], + }, + { + pattern: /(? match[1], + resolveStyles: () => [TextStyle.Bold], + }, + { + pattern: /(? match[1], + resolveStyles: () => [TextStyle.Bold], + }, + { + pattern: /(? match[1], + resolveStyles: () => [TextStyle.StrikeThrough], + }, + { + pattern: /(? match[1], + resolveStyles: () => [TextStyle.Italic], + }, + { + pattern: /(? 
match[1], + resolveStyles: () => [TextStyle.Italic], + }, +]; + +export function parseZalouserTextStyles(input: string): { text: string; styles: Style[] } { + const allStyles: Style[] = []; + + const escapeMap: string[] = []; + const lines = input.replace(/\r\n?/g, "\n").split("\n"); + const lineStyles: LineStyle[] = []; + const processedLines: string[] = []; + let activeFence: ActiveFence | null = null; + + for (let lineIndex = 0; lineIndex < lines.length; lineIndex += 1) { + const rawLine = lines[lineIndex]; + const { text: unquotedLine, indent: baseIndent } = stripQuotePrefix(rawLine); + + if (activeFence) { + const codeLine = + activeFence.quoteIndent > 0 + ? stripQuotePrefix(rawLine, activeFence.quoteIndent).text + : rawLine; + if (isClosingFence(codeLine, activeFence)) { + activeFence = null; + continue; + } + processedLines.push( + escapeLiteralText( + normalizeCodeBlockLeadingWhitespace(stripCodeFenceIndent(codeLine, activeFence.indent)), + escapeMap, + ), + ); + continue; + } + + let line = unquotedLine; + const openingFence = resolveOpeningFence(rawLine); + if (openingFence) { + const fenceLine = openingFence.quoteIndent > 0 ? 
unquotedLine : rawLine; + if (!hasClosingFence(lines, lineIndex + 1, openingFence)) { + processedLines.push(escapeLiteralText(fenceLine, escapeMap)); + activeFence = openingFence; + continue; + } + activeFence = openingFence; + continue; + } + + const outputLineIndex = processedLines.length; + if (isIndentedCodeBlockLine(line)) { + if (baseIndent > 0) { + lineStyles.push({ + lineIndex: outputLineIndex, + style: TextStyle.Indent, + indentSize: baseIndent, + }); + } + processedLines.push(escapeLiteralText(normalizeCodeBlockLeadingWhitespace(line), escapeMap)); + continue; + } + + const { text: markdownLine, size: markdownPadding } = stripOptionalMarkdownPadding(line); + + const headingMatch = markdownLine.match(/^(#{1,4})\s(.*)$/); + if (headingMatch) { + const depth = headingMatch[1].length; + lineStyles.push({ lineIndex: outputLineIndex, style: TextStyle.Bold }); + if (depth === 1) { + lineStyles.push({ lineIndex: outputLineIndex, style: TextStyle.Big }); + } + if (baseIndent > 0) { + lineStyles.push({ + lineIndex: outputLineIndex, + style: TextStyle.Indent, + indentSize: baseIndent, + }); + } + processedLines.push(headingMatch[2]); + continue; + } + + const indentMatch = markdownLine.match(/^(\s+)(.*)$/); + let indentLevel = 0; + let content = markdownLine; + if (indentMatch) { + indentLevel = clampIndent(indentMatch[1].length); + content = indentMatch[2]; + } + const totalIndent = Math.min(5, baseIndent + indentLevel); + + if (/^[-*+]\s\[[ xX]\]\s/.test(content)) { + if (totalIndent > 0) { + lineStyles.push({ + lineIndex: outputLineIndex, + style: TextStyle.Indent, + indentSize: totalIndent, + }); + } + processedLines.push(content); + continue; + } + + const orderedListMatch = content.match(/^(\d+)\.\s(.*)$/); + if (orderedListMatch) { + if (totalIndent > 0) { + lineStyles.push({ + lineIndex: outputLineIndex, + style: TextStyle.Indent, + indentSize: totalIndent, + }); + } + lineStyles.push({ lineIndex: outputLineIndex, style: TextStyle.OrderedList }); + 
processedLines.push(orderedListMatch[2]); + continue; + } + + const unorderedListMatch = content.match(/^[-*+]\s(.*)$/); + if (unorderedListMatch) { + if (totalIndent > 0) { + lineStyles.push({ + lineIndex: outputLineIndex, + style: TextStyle.Indent, + indentSize: totalIndent, + }); + } + lineStyles.push({ lineIndex: outputLineIndex, style: TextStyle.UnorderedList }); + processedLines.push(unorderedListMatch[1]); + continue; + } + + if (markdownPadding > 0) { + if (baseIndent > 0) { + lineStyles.push({ + lineIndex: outputLineIndex, + style: TextStyle.Indent, + indentSize: baseIndent, + }); + } + processedLines.push(line); + continue; + } + + if (totalIndent > 0) { + lineStyles.push({ + lineIndex: outputLineIndex, + style: TextStyle.Indent, + indentSize: totalIndent, + }); + processedLines.push(content); + continue; + } + + processedLines.push(line); + } + + const segments = parseInlineSegments(processedLines.join("\n")); + + let plainText = ""; + for (const segment of segments) { + const start = plainText.length; + plainText += segment.text; + for (const style of segment.styles) { + allStyles.push({ start, len: segment.text.length, st: style } as Style); + } + } + + if (escapeMap.length > 0) { + const escapeRegex = /\x01(\d+)\x02/g; + const shifts: Array<{ pos: number; delta: number }> = []; + let cumulativeDelta = 0; + + for (const match of plainText.matchAll(escapeRegex)) { + const escapeIndex = Number.parseInt(match[1], 10); + cumulativeDelta += match[0].length - escapeMap[escapeIndex].length; + shifts.push({ pos: (match.index ?? 
0) + match[0].length, delta: cumulativeDelta }); + } + + for (const style of allStyles) { + let startDelta = 0; + let endDelta = 0; + const end = style.start + style.len; + for (const shift of shifts) { + if (shift.pos <= style.start) { + startDelta = shift.delta; + } + if (shift.pos <= end) { + endDelta = shift.delta; + } + } + style.start -= startDelta; + style.len -= endDelta - startDelta; + } + + plainText = plainText.replace( + escapeRegex, + (_match, index) => escapeMap[Number.parseInt(index, 10)], + ); + } + + const finalLines = plainText.split("\n"); + let offset = 0; + for (let lineIndex = 0; lineIndex < finalLines.length; lineIndex += 1) { + const lineLength = finalLines[lineIndex].length; + if (lineLength > 0) { + for (const lineStyle of lineStyles) { + if (lineStyle.lineIndex !== lineIndex) { + continue; + } + + if (lineStyle.style === TextStyle.Indent) { + allStyles.push({ + start: offset, + len: lineLength, + st: TextStyle.Indent, + indentSize: lineStyle.indentSize, + }); + } else { + allStyles.push({ start: offset, len: lineLength, st: lineStyle.style } as Style); + } + } + } + offset += lineLength + 1; + } + + return { text: plainText, styles: allStyles }; +} + +function clampIndent(spaceCount: number): number { + return Math.min(5, Math.max(1, Math.floor(spaceCount / 2))); +} + +function stripOptionalMarkdownPadding(line: string): { text: string; size: number } { + const match = line.match(/^( {1,3})(?=\S)/); + if (!match) { + return { text: line, size: 0 }; + } + return { + text: line.slice(match[1].length), + size: match[1].length, + }; +} + +function hasClosingFence(lines: string[], startIndex: number, fence: ActiveFence): boolean { + for (let index = startIndex; index < lines.length; index += 1) { + const candidate = + fence.quoteIndent > 0 ? 
stripQuotePrefix(lines[index], fence.quoteIndent).text : lines[index]; + if (isClosingFence(candidate, fence)) { + return true; + } + } + return false; +} + +function resolveOpeningFence(line: string): ActiveFence | null { + const directFence = parseFenceMarker(line); + if (directFence) { + return { ...directFence, quoteIndent: 0 }; + } + + const quoted = stripQuotePrefix(line); + if (quoted.indent === 0) { + return null; + } + + const quotedFence = parseFenceMarker(quoted.text); + if (!quotedFence) { + return null; + } + + return { + ...quotedFence, + quoteIndent: quoted.indent, + }; +} + +function stripQuotePrefix( + line: string, + maxDepth = Number.POSITIVE_INFINITY, +): { text: string; indent: number } { + let cursor = 0; + while (cursor < line.length && cursor < 3 && line[cursor] === " ") { + cursor += 1; + } + + let removedDepth = 0; + let consumedCursor = cursor; + while (removedDepth < maxDepth && consumedCursor < line.length && line[consumedCursor] === ">") { + removedDepth += 1; + consumedCursor += 1; + if (line[consumedCursor] === " ") { + consumedCursor += 1; + } + } + + if (removedDepth === 0) { + return { text: line, indent: 0 }; + } + + return { + text: line.slice(consumedCursor), + indent: Math.min(5, removedDepth), + }; +} + +function parseFenceMarker(line: string): FenceMarker | null { + const match = line.match(/^([ ]{0,3})(`{3,}|~{3,})(.*)$/); + if (!match) { + return null; + } + + const marker = match[2]; + const char = marker[0]; + if (char !== "`" && char !== "~") { + return null; + } + + return { + char, + length: marker.length, + indent: match[1].length, + }; +} + +function isClosingFence(line: string, fence: FenceMarker): boolean { + const match = line.match(/^([ ]{0,3})(`{3,}|~{3,})[ \t]*$/); + if (!match) { + return false; + } + return match[2][0] === fence.char && match[2].length >= fence.length; +} + +function escapeLiteralText(input: string, escapeMap: string[]): string { + return input.replace(/[\\*_~{}`]/g, (ch) => { + const index 
= escapeMap.length; + escapeMap.push(ch); + return `\x01${index}\x02`; + }); +} + +function parseInlineSegments(text: string, inheritedStyles: InlineStyle[] = []): Segment[] { + const segments: Segment[] = []; + let cursor = 0; + + while (cursor < text.length) { + const nextMatch = findNextInlineMatch(text, cursor); + if (!nextMatch) { + pushSegment(segments, text.slice(cursor), inheritedStyles); + break; + } + + if (nextMatch.match.index > cursor) { + pushSegment(segments, text.slice(cursor, nextMatch.match.index), inheritedStyles); + } + + const combinedStyles = [...inheritedStyles, ...nextMatch.styles]; + if (nextMatch.marker.literal) { + pushSegment(segments, nextMatch.text, combinedStyles); + } else { + segments.push(...parseInlineSegments(nextMatch.text, combinedStyles)); + } + + cursor = nextMatch.match.index + nextMatch.match[0].length; + } + + return segments; +} + +function findNextInlineMatch(text: string, startIndex: number): ResolvedInlineMatch | null { + let bestMatch: ResolvedInlineMatch | null = null; + + for (const [priority, marker] of INLINE_MARKERS.entries()) { + const regex = new RegExp(marker.pattern.source, marker.pattern.flags); + regex.lastIndex = startIndex; + const match = regex.exec(text); + if (!match) { + continue; + } + + if ( + bestMatch && + (match.index > bestMatch.match.index || + (match.index === bestMatch.match.index && priority > bestMatch.priority)) + ) { + continue; + } + + bestMatch = { + match, + marker, + text: marker.extractText(match), + styles: marker.resolveStyles?.(match) ?? 
[], + priority, + }; + } + + return bestMatch; +} + +function pushSegment(segments: Segment[], text: string, styles: InlineStyle[]): void { + if (!text) { + return; + } + + const lastSegment = segments.at(-1); + if (lastSegment && sameStyles(lastSegment.styles, styles)) { + lastSegment.text += text; + return; + } + + segments.push({ + text, + styles: [...styles], + }); +} + +function sameStyles(left: InlineStyle[], right: InlineStyle[]): boolean { + return left.length === right.length && left.every((style, index) => style === right[index]); +} + +function normalizeCodeBlockLeadingWhitespace(line: string): string { + return line.replace(/^[ \t]+/, (leadingWhitespace) => + leadingWhitespace.replace(/\t/g, "\u00A0\u00A0\u00A0\u00A0").replace(/ /g, "\u00A0"), + ); +} + +function isIndentedCodeBlockLine(line: string): boolean { + return /^(?: {4,}|\t)/.test(line); +} + +function stripCodeFenceIndent(line: string, indent: number): string { + let consumed = 0; + let cursor = 0; + + while (cursor < line.length && consumed < indent && line[cursor] === " ") { + cursor += 1; + consumed += 1; + } + + return line.slice(cursor); +} diff --git a/extensions/zalouser/src/types.ts b/extensions/zalouser/src/types.ts index d704a1b3f78..08dc2fd8d12 100644 --- a/extensions/zalouser/src/types.ts +++ b/extensions/zalouser/src/types.ts @@ -1,3 +1,5 @@ +import type { Style } from "./zca-client.js"; + export type ZcaFriend = { userId: string; displayName: string; @@ -59,6 +61,10 @@ export type ZaloSendOptions = { caption?: string; isGroup?: boolean; mediaLocalRoots?: readonly string[]; + textMode?: "markdown" | "plain"; + textChunkMode?: "length" | "newline"; + textChunkLimit?: number; + textStyles?: Style[]; }; export type ZaloSendResult = { @@ -91,6 +97,7 @@ type ZalouserSharedConfig = { enabled?: boolean; name?: string; profile?: string; + dangerouslyAllowNameMatching?: boolean; dmPolicy?: "pairing" | "allowlist" | "open" | "disabled"; allowFrom?: Array; historyLimit?: number; diff --git 
a/extensions/zalouser/src/zalo-js.ts b/extensions/zalouser/src/zalo-js.ts index 25d263b7d6a..0e2d744232f 100644 --- a/extensions/zalouser/src/zalo-js.ts +++ b/extensions/zalouser/src/zalo-js.ts @@ -20,6 +20,7 @@ import type { } from "./types.js"; import { LoginQRCallbackEventType, + TextStyle, ThreadType, Zalo, type API, @@ -136,6 +137,39 @@ function toErrorMessage(error: unknown): string { return String(error); } +function clampTextStyles( + text: string, + styles?: ZaloSendOptions["textStyles"], +): ZaloSendOptions["textStyles"] { + if (!styles || styles.length === 0) { + return undefined; + } + const maxLength = text.length; + const clamped = styles + .map((style) => { + const start = Math.max(0, Math.min(style.start, maxLength)); + const end = Math.min(style.start + style.len, maxLength); + if (end <= start) { + return null; + } + if (style.st === TextStyle.Indent) { + return { + start, + len: end - start, + st: style.st, + indentSize: style.indentSize, + }; + } + return { + start, + len: end - start, + st: style.st, + }; + }) + .filter((style): style is NonNullable => style !== null); + return clamped.length > 0 ? clamped : undefined; +} + function toNumberId(value: unknown): string { if (typeof value === "number" && Number.isFinite(value)) { return String(Math.trunc(value)); @@ -1018,11 +1052,16 @@ export async function sendZaloTextMessage( kind: media.kind, }); const payloadText = (text || options.caption || "").slice(0, 2000); + const textStyles = clampTextStyles(payloadText, options.textStyles); if (media.kind === "audio") { let textMessageId: string | undefined; if (payloadText) { - const textResponse = await api.sendMessage(payloadText, trimmedThreadId, type); + const textResponse = await api.sendMessage( + textStyles ? 
{ msg: payloadText, styles: textStyles } : payloadText, + trimmedThreadId, + type, + ); textMessageId = extractSendMessageId(textResponse); } @@ -1055,6 +1094,7 @@ export async function sendZaloTextMessage( const response = await api.sendMessage( { msg: payloadText, + ...(textStyles ? { styles: textStyles } : {}), attachments: [ { data: media.buffer, @@ -1071,7 +1111,13 @@ export async function sendZaloTextMessage( return { ok: true, messageId: extractSendMessageId(response) }; } - const response = await api.sendMessage(text.slice(0, 2000), trimmedThreadId, type); + const payloadText = text.slice(0, 2000); + const textStyles = clampTextStyles(payloadText, options.textStyles); + const response = await api.sendMessage( + textStyles ? { msg: payloadText, styles: textStyles } : payloadText, + trimmedThreadId, + type, + ); return { ok: true, messageId: extractSendMessageId(response) }; } catch (error) { return { ok: false, error: toErrorMessage(error) }; diff --git a/extensions/zalouser/src/zca-client.ts b/extensions/zalouser/src/zca-client.ts index 57172eef64d..00a1c8c1be0 100644 --- a/extensions/zalouser/src/zca-client.ts +++ b/extensions/zalouser/src/zca-client.ts @@ -28,6 +28,39 @@ export const Reactions = ReactionsRuntime as Record & { NONE: string; }; +// Mirror zca-js sendMessage style constants locally because the package root +// typing surface does not consistently expose TextStyle/Style to tsgo. 
+export const TextStyle = { + Bold: "b", + Italic: "i", + Underline: "u", + StrikeThrough: "s", + Red: "c_db342e", + Orange: "c_f27806", + Yellow: "c_f7b503", + Green: "c_15a85f", + Small: "f_13", + Big: "f_18", + UnorderedList: "lst_1", + OrderedList: "lst_2", + Indent: "ind_$", +} as const; + +type TextStyleValue = (typeof TextStyle)[keyof typeof TextStyle]; + +export type Style = + | { + start: number; + len: number; + st: Exclude; + } + | { + start: number; + len: number; + st: typeof TextStyle.Indent; + indentSize?: number; + }; + export type Credentials = { imei: string; cookie: unknown; diff --git a/package.json b/package.json index bc625b74e71..567798c3b4a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openclaw", - "version": "2026.3.9", + "version": "2026.3.14", "description": "Multi-channel AI gateway with extensible messaging integrations", "keywords": [], "homepage": "https://github.com/openclaw/openclaw#readme", @@ -216,6 +216,7 @@ }, "scripts": { "android:assemble": "cd apps/android && ./gradlew :app:assembleDebug", + "android:bundle:release": "bun apps/android/scripts/build-release-aab.ts", "android:format": "cd apps/android && ./gradlew :app:ktlintFormat :benchmark:ktlintFormat", "android:install": "cd apps/android && ./gradlew :app:installDebug", "android:lint": "cd apps/android && ./gradlew :app:ktlintCheck :benchmark:ktlintCheck", @@ -262,10 +263,13 @@ "gateway:watch": "node scripts/watch-node.mjs gateway --force", "gen:host-env-policy:swift": "node scripts/generate-host-env-security-policy-swift.mjs --write", "ghsa:patch": "node scripts/ghsa-patch.mjs", - "ios:build": "bash -lc './scripts/ios-configure-signing.sh && cd apps/ios && xcodegen generate && xcodebuild -project OpenClaw.xcodeproj -scheme OpenClaw -destination \"${IOS_DEST:-platform=iOS Simulator,name=iPhone 17}\" -configuration Debug build'", - "ios:gen": "bash -lc './scripts/ios-configure-signing.sh && cd apps/ios && xcodegen generate'", - "ios:open": "bash 
-lc './scripts/ios-configure-signing.sh && cd apps/ios && xcodegen generate && open OpenClaw.xcodeproj'", - "ios:run": "bash -lc './scripts/ios-configure-signing.sh && cd apps/ios && xcodegen generate && xcodebuild -project OpenClaw.xcodeproj -scheme OpenClaw -destination \"${IOS_DEST:-platform=iOS Simulator,name=iPhone 17}\" -configuration Debug build && xcrun simctl boot \"${IOS_SIM:-iPhone 17}\" || true && xcrun simctl launch booted ai.openclaw.ios'", + "ios:beta": "bash scripts/ios-beta-release.sh", + "ios:beta:archive": "bash scripts/ios-beta-archive.sh", + "ios:beta:prepare": "bash scripts/ios-beta-prepare.sh", + "ios:build": "bash -lc './scripts/ios-configure-signing.sh && ./scripts/ios-write-version-xcconfig.sh && cd apps/ios && xcodegen generate && xcodebuild -project OpenClaw.xcodeproj -scheme OpenClaw -destination \"${IOS_DEST:-platform=iOS Simulator,name=iPhone 17}\" -configuration Debug build'", + "ios:gen": "bash -lc './scripts/ios-configure-signing.sh && ./scripts/ios-write-version-xcconfig.sh && cd apps/ios && xcodegen generate'", + "ios:open": "bash -lc './scripts/ios-configure-signing.sh && ./scripts/ios-write-version-xcconfig.sh && cd apps/ios && xcodegen generate && open OpenClaw.xcodeproj'", + "ios:run": "bash -lc './scripts/ios-configure-signing.sh && ./scripts/ios-write-version-xcconfig.sh && cd apps/ios && xcodegen generate && xcodebuild -project OpenClaw.xcodeproj -scheme OpenClaw -destination \"${IOS_DEST:-platform=iOS Simulator,name=iPhone 17}\" -configuration Debug build && xcrun simctl boot \"${IOS_SIM:-iPhone 17}\" || true && xcrun simctl launch booted ai.openclaw.ios'", "lint": "oxlint --type-aware", "lint:agent:ingress-owner": "node scripts/check-ingress-agent-owner-context.mjs", "lint:all": "pnpm lint && pnpm lint:swift", @@ -291,13 +295,15 @@ "plugins:sync": "node --import tsx scripts/sync-plugin-versions.ts", "prepack": "pnpm build && pnpm ui:build", "prepare": "command -v git >/dev/null 2>&1 && git rev-parse --is-inside-work-tree 
>/dev/null 2>&1 && git config core.hooksPath git-hooks || exit 0", - "protocol:check": "pnpm protocol:gen && pnpm protocol:gen:swift && git diff --exit-code -- dist/protocol.schema.json apps/macos/Sources/OpenClawProtocol/GatewayModels.swift", + "protocol:check": "pnpm protocol:gen && pnpm protocol:gen:swift && git diff --exit-code -- dist/protocol.schema.json apps/macos/Sources/OpenClawProtocol/GatewayModels.swift apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift", "protocol:gen": "node --import tsx scripts/protocol-gen.ts", "protocol:gen:swift": "node --import tsx scripts/protocol-gen-swift.ts", "release:check": "node --import tsx scripts/release-check.ts", + "release:openclaw:npm:check": "node --import tsx scripts/openclaw-npm-release-check.ts", "start": "node scripts/run-node.mjs", "test": "node scripts/test-parallel.mjs", "test:all": "pnpm lint && pnpm build && pnpm test && pnpm test:e2e && pnpm test:live && pnpm test:docker:all", + "test:auth:compat": "vitest run --config vitest.gateway.config.ts src/gateway/server.auth.compat-baseline.test.ts src/gateway/client.test.ts src/gateway/reconnect-gating.test.ts src/gateway/protocol/connect-error-details.test.ts", "test:channels": "vitest run --config vitest.channels.config.ts", "test:coverage": "vitest run --config vitest.unit.config.ts --coverage", "test:docker:all": "pnpm test:docker:live-models && pnpm test:docker:live-gateway && pnpm test:docker:onboard && pnpm test:docker:gateway-network && pnpm test:docker:qr && pnpm test:docker:doctor-switch && pnpm test:docker:plugins && pnpm test:docker:cleanup", @@ -320,6 +326,9 @@ "test:install:smoke": "bash scripts/test-install-sh-docker.sh", "test:live": "OPENCLAW_LIVE_TEST=1 CLAWDBOT_LIVE_TEST=1 vitest run --config vitest.live.config.ts", "test:macmini": "OPENCLAW_TEST_VM_FORKS=0 OPENCLAW_TEST_PROFILE=serial node scripts/test-parallel.mjs", + "test:parallels:linux": "bash scripts/e2e/parallels-linux-smoke.sh", + "test:parallels:macos": "bash 
scripts/e2e/parallels-macos-smoke.sh", + "test:parallels:windows": "bash scripts/e2e/parallels-windows-smoke.sh", "test:perf:budget": "node scripts/test-perf-budget.mjs", "test:perf:hotspots": "node scripts/test-hotspots.mjs", "test:sectriage": "pnpm exec vitest run --config vitest.gateway.config.ts && vitest run --config vitest.unit.config.ts --exclude src/daemon/launchd.integration.test.ts --exclude src/process/exec.test.ts", @@ -333,25 +342,26 @@ "ui:install": "node scripts/ui.js install" }, "dependencies": { - "@agentclientprotocol/sdk": "0.15.0", - "@aws-sdk/client-bedrock": "^3.1004.0", + "@agentclientprotocol/sdk": "0.16.1", + "@aws-sdk/client-bedrock": "^3.1009.0", "@buape/carbon": "0.0.0-beta-20260216184201", "@clack/prompts": "^1.1.0", - "@discordjs/voice": "^0.19.0", + "@discordjs/voice": "^0.19.1", "@grammyjs/runner": "^2.0.3", "@grammyjs/transformer-throttler": "^1.2.1", "@homebridge/ciao": "^1.3.5", "@larksuiteoapi/node-sdk": "^1.59.0", "@line/bot-sdk": "^10.6.0", "@lydell/node-pty": "1.2.0-beta.3", - "@mariozechner/pi-agent-core": "0.57.1", - "@mariozechner/pi-ai": "0.57.1", - "@mariozechner/pi-coding-agent": "0.57.1", - "@mariozechner/pi-tui": "0.57.1", + "@mariozechner/pi-agent-core": "0.58.0", + "@mariozechner/pi-ai": "0.58.0", + "@mariozechner/pi-coding-agent": "0.58.0", + "@mariozechner/pi-tui": "0.58.0", + "@modelcontextprotocol/sdk": "1.27.1", "@mozilla/readability": "^0.6.0", "@sinclair/typebox": "0.34.48", "@slack/bolt": "^4.6.0", - "@slack/web-api": "^7.14.1", + "@slack/web-api": "^7.15.0", "@whiskeysockets/baileys": "7.0.0-rc.9", "ajv": "^8.18.0", "chalk": "^5.6.2", @@ -359,12 +369,13 @@ "cli-highlight": "^2.1.11", "commander": "^14.0.3", "croner": "^10.0.1", - "discord-api-types": "^0.38.41", + "discord-api-types": "^0.38.42", "dotenv": "^17.3.1", "express": "^5.2.1", - "file-type": "^21.3.0", + "file-type": "^21.3.2", "grammy": "^1.41.1", - "https-proxy-agent": "^7.0.6", + "hono": "4.12.7", + "https-proxy-agent": "^8.0.0", "ipaddr.js": 
"^2.3.0", "jiti": "^2.6.1", "json5": "^2.2.3", @@ -382,7 +393,7 @@ "sqlite-vec": "0.1.7-alpha.2", "tar": "7.5.11", "tslog": "^4.10.2", - "undici": "^7.22.0", + "undici": "^7.24.1", "ws": "^8.19.0", "yaml": "^2.8.2", "zod": "^4.3.6" @@ -393,44 +404,51 @@ "@lit/context": "^1.1.6", "@types/express": "^5.0.6", "@types/markdown-it": "^14.1.2", - "@types/node": "^25.3.5", + "@types/node": "^25.5.0", "@types/qrcode-terminal": "^0.12.2", "@types/ws": "^8.18.1", - "@typescript/native-preview": "7.0.0-dev.20260308.1", - "@vitest/coverage-v8": "^4.0.18", + "@typescript/native-preview": "7.0.0-dev.20260313.1", + "@vitest/coverage-v8": "^4.1.0", "jscpd": "4.0.8", + "jsdom": "^28.1.0", "lit": "^3.3.2", - "oxfmt": "0.36.0", - "oxlint": "^1.51.0", + "oxfmt": "0.40.0", + "oxlint": "^1.55.0", "oxlint-tsgolint": "^0.16.0", "signal-utils": "0.21.1", - "tsdown": "0.21.0", + "tsdown": "0.21.2", "tsx": "^4.21.0", "typescript": "^5.9.3", - "vitest": "^4.0.18" + "vitest": "^4.1.0" }, "peerDependencies": { "@napi-rs/canvas": "^0.1.89", "node-llama-cpp": "3.16.2" }, + "peerDependenciesMeta": { + "node-llama-cpp": { + "optional": true + } + }, "engines": { - "node": ">=22.12.0" + "node": ">=22.16.0" }, "packageManager": "pnpm@10.23.0", "pnpm": { "minimumReleaseAge": 2880, "overrides": { - "hono": "4.12.5", + "hono": "4.12.7", "@hono/node-server": "1.19.10", "fast-xml-parser": "5.3.8", "request": "npm:@cypress/request@3.0.10", "request-promise": "npm:@cypress/request-promise@5.0.0", + "file-type": "21.3.2", "form-data": "2.5.4", "minimatch": "10.2.4", "qs": "6.14.2", "node-domexception": "npm:@nolyfill/domexception@^1.0.28", "@sinclair/typebox": "0.34.48", - "tar": "7.5.10", + "tar": "7.5.11", "tough-cookie": "4.1.3" }, "onlyBuiltDependencies": [ diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 3ae9ea71e0c..6460473fe84 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -5,17 +5,18 @@ settings: excludeLinksFromLockfile: false overrides: - hono: 4.12.5 + hono: 4.12.7 '@hono/node-server': 
1.19.10 fast-xml-parser: 5.3.8 request: npm:@cypress/request@3.0.10 request-promise: npm:@cypress/request-promise@5.0.0 + file-type: 21.3.2 form-data: 2.5.4 minimatch: 10.2.4 qs: 6.14.2 node-domexception: npm:@nolyfill/domexception@^1.0.28 '@sinclair/typebox': 0.34.48 - tar: 7.5.10 + tar: 7.5.11 tough-cookie: 4.1.3 packageExtensionsChecksum: sha256-n+P/SQo4Pf+dHYpYn1Y6wL4cJEVoVzZ835N0OEp4TM8= @@ -25,20 +26,20 @@ importers: .: dependencies: '@agentclientprotocol/sdk': - specifier: 0.15.0 - version: 0.15.0(zod@4.3.6) + specifier: 0.16.1 + version: 0.16.1(zod@4.3.6) '@aws-sdk/client-bedrock': - specifier: ^3.1004.0 - version: 3.1004.0 + specifier: ^3.1009.0 + version: 3.1009.0 '@buape/carbon': specifier: 0.0.0-beta-20260216184201 - version: 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.5)(opusscript@0.1.1) + version: 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.7)(opusscript@0.1.1) '@clack/prompts': specifier: ^1.1.0 version: 1.1.0 '@discordjs/voice': - specifier: ^0.19.0 - version: 0.19.0(@discordjs/opus@0.10.0)(opusscript@0.1.1) + specifier: ^0.19.1 + version: 0.19.1(@discordjs/opus@0.10.0)(opusscript@0.1.1) '@grammyjs/runner': specifier: ^2.0.3 version: 2.0.3(grammy@1.41.1) @@ -58,17 +59,20 @@ importers: specifier: 1.2.0-beta.3 version: 1.2.0-beta.3 '@mariozechner/pi-agent-core': - specifier: 0.57.1 - version: 0.57.1(ws@8.19.0)(zod@4.3.6) + specifier: 0.58.0 + version: 0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-ai': - specifier: 0.57.1 - version: 0.57.1(ws@8.19.0)(zod@4.3.6) + specifier: 0.58.0 + version: 0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-coding-agent': - specifier: 0.57.1 - version: 0.57.1(ws@8.19.0)(zod@4.3.6) + specifier: 0.58.0 + version: 0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-tui': - specifier: 0.57.1 - version: 0.57.1 + specifier: 0.58.0 + version: 0.58.0 + 
'@modelcontextprotocol/sdk': + specifier: 1.27.1 + version: 1.27.1(zod@4.3.6) '@mozilla/readability': specifier: ^0.6.0 version: 0.6.0 @@ -82,8 +86,8 @@ importers: specifier: ^4.6.0 version: 4.6.0(@types/express@5.0.6) '@slack/web-api': - specifier: ^7.14.1 - version: 7.14.1 + specifier: ^7.15.0 + version: 7.15.0 '@whiskeysockets/baileys': specifier: 7.0.0-rc.9 version: 7.0.0-rc.9(audio-decode@2.2.3)(sharp@0.34.5) @@ -106,8 +110,8 @@ importers: specifier: ^10.0.1 version: 10.0.1 discord-api-types: - specifier: ^0.38.41 - version: 0.38.41 + specifier: ^0.38.42 + version: 0.38.42 dotenv: specifier: ^17.3.1 version: 17.3.1 @@ -115,14 +119,17 @@ importers: specifier: ^5.2.1 version: 5.2.1 file-type: - specifier: ^21.3.0 - version: 21.3.0 + specifier: 21.3.2 + version: 21.3.2 grammy: specifier: ^1.41.1 version: 1.41.1 + hono: + specifier: 4.12.7 + version: 4.12.7 https-proxy-agent: - specifier: ^7.0.6 - version: 7.0.6 + specifier: ^8.0.0 + version: 8.0.0 ipaddr.js: specifier: ^2.3.0 version: 2.3.0 @@ -172,14 +179,14 @@ importers: specifier: 0.1.7-alpha.2 version: 0.1.7-alpha.2 tar: - specifier: 7.5.10 - version: 7.5.10 + specifier: 7.5.11 + version: 7.5.11 tslog: specifier: ^4.10.2 version: 4.10.2 undici: - specifier: ^7.22.0 - version: 7.22.0 + specifier: ^7.24.1 + version: 7.24.1 ws: specifier: ^8.19.0 version: 8.19.0 @@ -206,8 +213,8 @@ importers: specifier: ^14.1.2 version: 14.1.2 '@types/node': - specifier: ^25.3.5 - version: 25.3.5 + specifier: ^25.5.0 + version: 25.5.0 '@types/qrcode-terminal': specifier: ^0.12.2 version: 0.12.2 @@ -215,23 +222,26 @@ importers: specifier: ^8.18.1 version: 8.18.1 '@typescript/native-preview': - specifier: 7.0.0-dev.20260308.1 - version: 7.0.0-dev.20260308.1 + specifier: 7.0.0-dev.20260313.1 + version: 7.0.0-dev.20260313.1 '@vitest/coverage-v8': - specifier: ^4.0.18 - version: 4.0.18(@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18))(vitest@4.0.18) + 
specifier: ^4.1.0 + version: 4.1.0(@vitest/browser@4.1.0(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.1.0))(vitest@4.1.0) jscpd: specifier: 4.0.8 version: 4.0.8 + jsdom: + specifier: ^28.1.0 + version: 28.1.0(@noble/hashes@2.0.1) lit: specifier: ^3.3.2 version: 3.3.2 oxfmt: - specifier: 0.36.0 - version: 0.36.0 + specifier: 0.40.0 + version: 0.40.0 oxlint: - specifier: ^1.51.0 - version: 1.51.0(oxlint-tsgolint@0.16.0) + specifier: ^1.55.0 + version: 1.55.0(oxlint-tsgolint@0.16.0) oxlint-tsgolint: specifier: ^0.16.0 version: 0.16.0 @@ -239,8 +249,8 @@ importers: specifier: 0.21.1 version: 0.21.1(signal-polyfill@0.2.2) tsdown: - specifier: 0.21.0 - version: 0.21.0(@typescript/native-preview@7.0.0-dev.20260308.1)(typescript@5.9.3) + specifier: 0.21.2 + version: 0.21.2(@typescript/native-preview@7.0.0-dev.20260313.1)(typescript@5.9.3) tsx: specifier: ^4.21.0 version: 4.21.0 @@ -248,14 +258,14 @@ importers: specifier: ^5.9.3 version: 5.9.3 vitest: - specifier: ^4.0.18 - version: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + specifier: ^4.1.0 + version: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(@vitest/browser-playwright@4.1.0)(jsdom@28.1.0(@noble/hashes@2.0.1))(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) extensions/acpx: dependencies: acpx: - specifier: 0.1.15 - version: 0.1.15(zod@4.3.6) + specifier: 0.3.0 + version: 0.3.0(zod@4.3.6) extensions/bluebubbles: dependencies: @@ -304,8 +314,8 @@ importers: extensions/diffs: dependencies: '@pierre/diffs': - specifier: 1.0.11 - version: 1.0.11(react-dom@19.2.4(react@19.2.4))(react@19.2.4) + specifier: 1.1.0 + version: 1.1.0(react-dom@19.2.4(react@19.2.4))(react@19.2.4) '@sinclair/typebox': specifier: 0.34.48 version: 0.34.48 @@ -324,8 +334,8 @@ importers: specifier: 0.34.48 version: 0.34.48 https-proxy-agent: - specifier: ^7.0.6 - 
version: 7.0.6 + specifier: ^8.0.0 + version: 8.0.0 zod: specifier: ^4.3.6 version: 4.3.6 @@ -338,8 +348,8 @@ importers: specifier: ^10.6.1 version: 10.6.1 openclaw: - specifier: '>=2026.3.2' - version: 2026.3.2(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(hono@4.12.5)(node-llama-cpp@3.16.2(typescript@5.9.3)) + specifier: '>=2026.3.11' + version: 2026.3.13(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)) extensions/imessage: {} @@ -369,8 +379,8 @@ importers: extensions/matrix: dependencies: '@mariozechner/pi-agent-core': - specifier: 0.57.1 - version: 0.57.1(ws@8.19.0)(zod@4.3.6) + specifier: 0.58.0 + version: 0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@matrix-org/matrix-sdk-crypto-nodejs': specifier: ^0.4.0 version: 0.4.0 @@ -381,8 +391,8 @@ importers: specifier: 14.1.1 version: 14.1.1 music-metadata: - specifier: ^11.12.1 - version: 11.12.1 + specifier: ^11.12.3 + version: 11.12.3 zod: specifier: ^4.3.6 version: 4.3.6 @@ -399,8 +409,8 @@ importers: extensions/memory-core: dependencies: openclaw: - specifier: '>=2026.3.2' - version: 2026.3.2(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(hono@4.12.5)(node-llama-cpp@3.16.2(typescript@5.9.3)) + specifier: '>=2026.3.11' + version: 2026.3.13(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)) extensions/memory-lancedb: dependencies: @@ -411,8 +421,8 @@ importers: specifier: 0.34.48 version: 0.34.48 openai: - specifier: ^6.27.0 - version: 6.27.0(ws@8.19.0)(zod@4.3.6) + specifier: ^6.29.0 + version: 6.29.0(ws@8.19.0)(zod@4.3.6) extensions/minimax-portal-auth: {} @@ -440,8 +450,12 @@ importers: specifier: ^4.3.6 version: 4.3.6 + extensions/ollama: {} + extensions/open-prose: {} + extensions/sglang: {} + extensions/signal: {} extensions/slack: {} @@ -484,6 +498,8 @@ importers: specifier: 
^4.3.6 version: 4.3.6 + extensions/vllm: {} + extensions/voice-call: dependencies: '@sinclair/typebox': @@ -504,8 +520,8 @@ importers: extensions/zalo: dependencies: undici: - specifier: 7.22.0 - version: 7.22.0 + specifier: 7.24.1 + version: 7.24.1 zod: specifier: ^4.3.6 version: 4.3.6 @@ -546,8 +562,8 @@ importers: specifier: 3.0.0 version: 3.0.0 dompurify: - specifier: ^3.3.2 - version: 3.3.2 + specifier: ^3.3.3 + version: 3.3.3 lit: specifier: ^3.3.2 version: 3.3.2 @@ -561,31 +577,37 @@ importers: specifier: ^0.21.1 version: 0.21.1(signal-polyfill@0.2.2) vite: - specifier: 7.3.1 - version: 7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + specifier: 8.0.0 + version: 8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2) devDependencies: '@vitest/browser-playwright': - specifier: 4.0.18 - version: 4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + specifier: 4.1.0 + version: 4.1.0(playwright@1.58.2)(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.1.0) + jsdom: + specifier: ^28.1.0 + version: 28.1.0(@noble/hashes@2.0.1) playwright: specifier: ^1.58.2 version: 1.58.2 vitest: - specifier: 4.0.18 - version: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + specifier: 4.1.0 + version: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(@vitest/browser-playwright@4.1.0)(jsdom@28.1.0(@noble/hashes@2.0.1))(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) packages: - '@agentclientprotocol/sdk@0.14.1': - resolution: {integrity: sha512-b6r3PS3Nly+Wyw9U+0nOr47bV8tfS476EgyEMhoKvJCZLbgqoDFN7DJwkxL88RR0aiOqOYV1ZnESHqb+RmdH8w==} - peerDependencies: - zod: ^3.25.0 || ^4.0.0 + '@acemir/cssom@0.9.31': + resolution: {integrity: 
sha512-ZnR3GSaH+/vJ0YlHau21FjfLYjMpYVIzTD8M8vIEQvIGxeOXyXdzCI140rrCY862p/C/BbzWsjc1dgnM9mkoTA==} '@agentclientprotocol/sdk@0.15.0': resolution: {integrity: sha512-TH4utu23Ix8ec34srBHmDD4p3HI0cYleS1jN9lghRczPfhFlMBNrQgZWeBBe12DWy27L11eIrtciY2MXFSEiDg==} peerDependencies: zod: ^3.25.0 || ^4.0.0 + '@agentclientprotocol/sdk@0.16.1': + resolution: {integrity: sha512-1ad+Sc/0sCtZGHthxxvgEUo5Wsbw16I+aF+YwdiLnPwkZG8KAGUEAPK6LM6Pf69lCyJPt1Aomk1d+8oE3C4ZEw==} + peerDependencies: + zod: ^3.25.0 || ^4.0.0 + '@anthropic-ai/sdk@0.73.0': resolution: {integrity: sha512-URURVzhxXGJDGUGFunIOtBlSl7KWvZiAAKY/ttTkZAkXT9bTPqdk2eK0b8qqSxXpikh3QKPnPYpiyX98zf5ebw==} hasBin: true @@ -595,6 +617,16 @@ packages: zod: optional: true + '@asamuzakjp/css-color@5.0.1': + resolution: {integrity: sha512-2SZFvqMyvboVV1d15lMf7XiI3m7SDqXUuKaTymJYLN6dSGadqp+fVojqJlVoMlbZnlTmu3S0TLwLTJpvBMO1Aw==} + engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0} + + '@asamuzakjp/dom-selector@6.8.1': + resolution: {integrity: sha512-MvRz1nCqW0fsy8Qz4dnLIvhOlMzqDVBabZx6lH+YywFDdjXhMY37SmpV1XFX3JzG5GWHn63j6HX6QPr3lZXHvQ==} + + '@asamuzakjp/nwsapi@2.3.9': + resolution: {integrity: sha512-n8GuYSrI9bF7FFZ/SjhwevlHc8xaVlb/7HmHelnc/PZXBD2ZR49NnN9sMMuDdEGPeeRQ5d0hqlSlEpgCX3Wl0Q==} + '@aws-crypto/crc32@5.2.0': resolution: {integrity: sha512-nLbCWqQNgUiwwtFsen1AdzAtvuLRsQS8rYgMuxCrdKf9kOssamGLuPwyTY9wyYblNr9+1XM8v6zoDTPPSIeANg==} engines: {node: '>=16.0.0'} @@ -618,20 +650,12 @@ packages: '@aws-crypto/util@5.2.0': resolution: {integrity: sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ==} - '@aws-sdk/client-bedrock-runtime@3.1000.0': - resolution: {integrity: sha512-GA96wgTFB4Z5vhysm+hErbgiEWZ9JqAl09BxARajL7Oanpf0KvdIjxuLp2rD/XqEIks9yG/5Rh9XIAoCUUTZXw==} - engines: {node: '>=20.0.0'} - '@aws-sdk/client-bedrock-runtime@3.1004.0': resolution: {integrity: sha512-t8cl+bPLlHZQD2Sw1a4hSLUybqJZU71+m8znkyeU8CHntFqEp2mMbuLKdHKaAYQ1fAApXMsvzenCAkDzNeeJlw==} engines: {node: '>=20.0.0'} - 
'@aws-sdk/client-bedrock@3.1000.0': - resolution: {integrity: sha512-wGU8uJXrPW/hZuHdPNVe1kAFIBiKcslBcoDBN0eYBzS13um8p5jJiQJ9WsD1nSpKCmyx7qZXc6xjcbIQPyOrrA==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/client-bedrock@3.1004.0': - resolution: {integrity: sha512-JbfZSV85IL+43S7rPBmeMbvoOYXs1wmrfbEpHkDBjkvbukRQWtoetiPAXNSKDfFq1qVsoq8sWPdoerDQwlUO8w==} + '@aws-sdk/client-bedrock@3.1009.0': + resolution: {integrity: sha512-KzLNqSg1T59sSlQvEA4EL3oDIAMidM54AB1b+UGouPFuUrrwGp2uUlZUYzIIlCvqpf7wEDh8wypqXISRItkgdg==} engines: {node: '>=20.0.0'} '@aws-sdk/client-s3@3.1000.0': @@ -642,8 +666,8 @@ packages: resolution: {integrity: sha512-AlC0oQ1/mdJ8vCIqu524j5RB7M8i8E24bbkZmya1CuiQxkY7SdIZAyw7NDNMGaNINQFq/8oGRMX0HeOfCVsl/A==} engines: {node: '>=20.0.0'} - '@aws-sdk/core@3.973.18': - resolution: {integrity: sha512-GUIlegfcK2LO1J2Y98sCJy63rQSiLiDOgVw7HiHPRqfI2vb3XozTVqemwO0VSGXp54ngCnAQz0Lf0YPCBINNxA==} + '@aws-sdk/core@3.973.20': + resolution: {integrity: sha512-i3GuX+lowD892F3IuJf8o6AbyDupMTdyTxQrCJGcn71ni5hTZ82L4nQhcdumxZ7XPJRJJVHS/CR3uYOIIs0PVA==} engines: {node: '>=20.0.0'} '@aws-sdk/crc64-nvme@3.972.3': @@ -654,82 +678,74 @@ packages: resolution: {integrity: sha512-6ljXKIQ22WFKyIs1jbORIkGanySBHaPPTOI4OxACP5WXgbcR0nDYfqNJfXEGwCK7IzHdNbCSFsNKKs0qCexR8Q==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-env@3.972.16': - resolution: {integrity: sha512-HrdtnadvTGAQUr18sPzGlE5El3ICphnH6SU7UQOMOWFgRKbTRNN8msTxM4emzguUso9CzaHU2xy5ctSrmK5YNA==} + '@aws-sdk/credential-provider-env@3.972.18': + resolution: {integrity: sha512-X0B8AlQY507i5DwjLByeU2Af4ARsl9Vr84koDcXCbAkplmU+1xBFWxEPrWRAoh56waBne/yJqEloSwvRf4x6XA==} engines: {node: '>=20.0.0'} '@aws-sdk/credential-provider-http@3.972.15': resolution: {integrity: sha512-dJuSTreu/T8f24SHDNTjd7eQ4rabr0TzPh2UTCwYexQtzG3nTDKm1e5eIdhiroTMDkPEJeY+WPkA6F9wod/20A==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-http@3.972.18': - resolution: {integrity: 
sha512-NyB6smuZAixND5jZumkpkunQ0voc4Mwgkd+SZ6cvAzIB7gK8HV8Zd4rS8Kn5MmoGgusyNfVGG+RLoYc4yFiw+A==} + '@aws-sdk/credential-provider-http@3.972.20': + resolution: {integrity: sha512-ey9Lelj001+oOfrbKmS6R2CJAiXX7QKY4Vj9VJv6L2eE6/VjD8DocHIoYqztTm70xDLR4E1jYPTKfIui+eRNDA==} engines: {node: '>=20.0.0'} '@aws-sdk/credential-provider-ini@3.972.13': resolution: {integrity: sha512-JKSoGb7XeabZLBJptpqoZIFbROUIS65NuQnEHGOpuT9GuuZwag2qciKANiDLFiYk4u8nSrJC9JIOnWKVvPVjeA==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-ini@3.972.17': - resolution: {integrity: sha512-dFqh7nfX43B8dO1aPQHOcjC0SnCJ83H3F+1LoCh3X1P7E7N09I+0/taID0asU6GCddfDExqnEvQtDdkuMe5tKQ==} + '@aws-sdk/credential-provider-ini@3.972.20': + resolution: {integrity: sha512-5flXSnKHMloObNF+9N0cupKegnH1Z37cdVlpETVgx8/rAhCe+VNlkcZH3HDg2SDn9bI765S+rhNPXGDJJPfbtA==} engines: {node: '>=20.0.0'} '@aws-sdk/credential-provider-login@3.972.13': resolution: {integrity: sha512-RtYcrxdnJHKY8MFQGLltCURcjuMjnaQpAxPE6+/QEdDHHItMKZgabRe/KScX737F9vJMQsmJy9EmMOkCnoC1JQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-login@3.972.17': - resolution: {integrity: sha512-gf2E5b7LpKb+JX2oQsRIDxdRZjBFZt2olCGlWCdb3vBERbXIPgm2t1R5mEnwd4j0UEO/Tbg5zN2KJbHXttJqwA==} + '@aws-sdk/credential-provider-login@3.972.20': + resolution: {integrity: sha512-gEWo54nfqp2jABMu6HNsjVC4hDLpg9HC8IKSJnp0kqWtxIJYHTmiLSsIfI4ScQjxEwpB+jOOH8dOLax1+hy/Hw==} engines: {node: '>=20.0.0'} '@aws-sdk/credential-provider-node@3.972.14': resolution: {integrity: sha512-WqoC2aliIjQM/L3oFf6j+op/enT2i9Cc4UTxxMEKrJNECkq4/PlKE5BOjSYFcq6G9mz65EFbXJh7zOU4CvjSKQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-node@3.972.18': - resolution: {integrity: sha512-ZDJa2gd1xiPg/nBDGhUlat02O8obaDEnICBAVS8qieZ0+nDfaB0Z3ec6gjZj27OqFTjnB/Q5a0GwQwb7rMVViw==} + '@aws-sdk/credential-provider-node@3.972.21': + resolution: {integrity: sha512-hah8if3/B/Q+LBYN5FukyQ1Mym6PLPDsBOBsIgNEYD6wLyZg0UmUF/OKIVC3nX9XH8TfTPuITK+7N/jenVACWA==} engines: {node: '>=20.0.0'} 
'@aws-sdk/credential-provider-process@3.972.13': resolution: {integrity: sha512-rsRG0LQA4VR+jnDyuqtXi2CePYSmfm5GNL9KxiW8DSe25YwJSr06W8TdUfONAC+rjsTI+aIH2rBGG5FjMeANrw==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-process@3.972.16': - resolution: {integrity: sha512-n89ibATwnLEg0ZdZmUds5bq8AfBAdoYEDpqP3uzPLaRuGelsKlIvCYSNNvfgGLi8NaHPNNhs1HjJZYbqkW9b+g==} + '@aws-sdk/credential-provider-process@3.972.18': + resolution: {integrity: sha512-Tpl7SRaPoOLT32jbTWchPsn52hYYgJ0kpiFgnwk8pxTANQdUymVSZkzFvv1+oOgZm1CrbQUP9MBeoMZ9IzLZjA==} engines: {node: '>=20.0.0'} '@aws-sdk/credential-provider-sso@3.972.13': resolution: {integrity: sha512-fr0UU1wx8kNHDhTQBXioc/YviSW8iXuAxHvnH7eQUtn8F8o/FU3uu6EUMvAQgyvn7Ne5QFnC0Cj0BFlwCk+RFw==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-sso@3.972.17': - resolution: {integrity: sha512-wGtte+48xnhnhHMl/MsxzacBPs5A+7JJedjiP452IkHY7vsbYKcvQBqFye8LwdTJVeHtBHv+JFeTscnwepoWGg==} + '@aws-sdk/credential-provider-sso@3.972.20': + resolution: {integrity: sha512-p+R+PYR5Z7Gjqf/6pvbCnzEHcqPCpLzR7Yf127HjJ6EAb4hUcD+qsNRnuww1sB/RmSeCLxyay8FMyqREw4p1RA==} engines: {node: '>=20.0.0'} '@aws-sdk/credential-provider-web-identity@3.972.13': resolution: {integrity: sha512-a6iFMh1pgUH0TdcouBppLJUfPM7Yd3R9S1xFodPtCRoLqCz2RQFA3qjA8x4112PVYXEd4/pHX2eihapq39w0rA==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-web-identity@3.972.17': - resolution: {integrity: sha512-8aiVJh6fTdl8gcyL+sVNcNwTtWpmoFa1Sh7xlj6Z7L/cZ/tYMEBHq44wTYG8Kt0z/PpGNopD89nbj3FHl9QmTA==} + '@aws-sdk/credential-provider-web-identity@3.972.20': + resolution: {integrity: sha512-rWCmh8o7QY4CsUj63qopzMzkDq/yPpkrpb+CnjBEFSOg/02T/we7sSTVg4QsDiVS9uwZ8VyONhq98qt+pIh3KA==} engines: {node: '>=20.0.0'} '@aws-sdk/eventstream-handler-node@3.972.10': resolution: {integrity: sha512-g2Z9s6Y4iNh0wICaEqutgYgt/Pmhv5Ev9G3eKGFe2w9VuZDhc76vYdop6I5OocmpHV79d4TuLG+JWg5rQIVDVA==} engines: {node: '>=20.0.0'} - '@aws-sdk/eventstream-handler-node@3.972.9': - resolution: {integrity: 
sha512-mKPiiVssgFDWkAXdEDh8+wpr2pFSX/fBn2onXXnrfIAYbdZhYb4WilKbZ3SJMUnQi+Y48jZMam5J0RrgARluaA==} - engines: {node: '>=20.0.0'} - '@aws-sdk/middleware-bucket-endpoint@3.972.6': resolution: {integrity: sha512-3H2bhvb7Cb/S6WFsBy/Dy9q2aegC9JmGH1inO8Lb2sWirSqpLJlZmvQHPE29h2tIxzv6el/14X/tLCQ8BQU6ZQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/middleware-eventstream@3.972.6': - resolution: {integrity: sha512-mB2+3G/oxRC+y9WRk0KCdradE2rSfxxJpcOSmAm+vDh3ex3WQHVLZ1catNIe1j5NQ+3FLBsNMRPVGkZ43PRpjw==} - engines: {node: '>=20.0.0'} - '@aws-sdk/middleware-eventstream@3.972.7': resolution: {integrity: sha512-VWndapHYCfwLgPpCb/xwlMKG4imhFzKJzZcKOEioGn7OHY+6gdr0K7oqy1HZgbLa3ACznZ9fku+DzmAi8fUC0g==} engines: {node: '>=20.0.0'} @@ -746,8 +762,8 @@ packages: resolution: {integrity: sha512-5XHwjPH1lHB+1q4bfC7T8Z5zZrZXfaLcjSMwTd1HPSPrCmPFMbg3UQ5vgNWcVj0xoX4HWqTGkSf2byrjlnRg5w==} engines: {node: '>=20.0.0'} - '@aws-sdk/middleware-host-header@3.972.7': - resolution: {integrity: sha512-aHQZgztBFEpDU1BB00VWCIIm85JjGjQW1OG9+98BdmaOpguJvzmXBGbnAiYcciCd+IS4e9BEq664lhzGnWJHgQ==} + '@aws-sdk/middleware-host-header@3.972.8': + resolution: {integrity: sha512-wAr2REfKsqoKQ+OkNqvOShnBoh+nkPurDKW7uAeVSu6kUECnWlSJiPvnoqxGlfousEY/v9LfS9sNc46hjSYDIQ==} engines: {node: '>=20.0.0'} '@aws-sdk/middleware-location-constraint@3.972.6': @@ -758,16 +774,16 @@ packages: resolution: {integrity: sha512-iFnaMFMQdljAPrvsCVKYltPt2j40LQqukAbXvW7v0aL5I+1GO7bZ/W8m12WxW3gwyK5p5u1WlHg8TSAizC5cZw==} engines: {node: '>=20.0.0'} - '@aws-sdk/middleware-logger@3.972.7': - resolution: {integrity: sha512-LXhiWlWb26txCU1vcI9PneESSeRp/RYY/McuM4SpdrimQR5NgwaPb4VJCadVeuGWgh6QmqZ6rAKSoL1ob16W6w==} + '@aws-sdk/middleware-logger@3.972.8': + resolution: {integrity: sha512-CWl5UCM57WUFaFi5kB7IBY1UmOeLvNZAZ2/OZ5l20ldiJ3TiIz1pC65gYj8X0BCPWkeR1E32mpsCk1L1I4n+lA==} engines: {node: '>=20.0.0'} '@aws-sdk/middleware-recursion-detection@3.972.6': resolution: {integrity: 
sha512-dY4v3of5EEMvik6+UDwQ96KfUFDk8m1oZDdkSc5lwi4o7rFrjnv0A+yTV+gu230iybQZnKgDLg/rt2P3H+Vscw==} engines: {node: '>=20.0.0'} - '@aws-sdk/middleware-recursion-detection@3.972.7': - resolution: {integrity: sha512-l2VQdcBcYLzIzykCHtXlbpiVCZ94/xniLIkAj0jpnpjY4xlgZx7f56Ypn+uV1y3gG0tNVytJqo3K9bfMFee7SQ==} + '@aws-sdk/middleware-recursion-detection@3.972.8': + resolution: {integrity: sha512-BnnvYs2ZEpdlmZ2PNlV2ZyQ8j8AEkMTjN79y/YA475ER1ByFYrkVR85qmhni8oeTaJcDqbx364wDpitDAA/wCA==} engines: {node: '>=20.0.0'} '@aws-sdk/middleware-sdk-s3@3.972.15': @@ -782,32 +798,28 @@ packages: resolution: {integrity: sha512-ABlFVcIMmuRAwBT+8q5abAxOr7WmaINirDJBnqGY5b5jSDo00UMlg/G4a0xoAgwm6oAECeJcwkvDlxDwKf58fQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/middleware-user-agent@3.972.19': - resolution: {integrity: sha512-Km90fcXt3W/iqujHzuM6IaDkYCj73gsYufcuWXApWdzoTy6KGk8fnchAjePMARU0xegIR3K4N3yIo1vy7OVe8A==} + '@aws-sdk/middleware-user-agent@3.972.21': + resolution: {integrity: sha512-62XRl1GDYPpkt7cx1AX1SPy9wgNE9Iw/NPuurJu4lmhCWS7sGKO+kS53TQ8eRmIxy3skmvNInnk0ZbWrU5Dpyg==} engines: {node: '>=20.0.0'} - '@aws-sdk/middleware-websocket@3.972.10': - resolution: {integrity: sha512-uNqRpbL6djE+XXO4cQ+P8ra37cxNNBP+2IfkVOXu1xFdGMfW+uOTxBQuDPpP43i40PBRBXK5un79l/oYpbzYkA==} - engines: {node: '>= 14.0.0'} - '@aws-sdk/middleware-websocket@3.972.12': resolution: {integrity: sha512-iyPP6FVDKe/5wy5ojC0akpDFG1vX3FeCUU47JuwN8xfvT66xlEI8qUJZPtN55TJVFzzWZJpWL78eqUE31md08Q==} engines: {node: '>= 14.0.0'} - '@aws-sdk/nested-clients@3.996.3': - resolution: {integrity: sha512-AU5TY1V29xqwg/MxmA2odwysTez+ccFAhmfRJk+QZT5HNv90UTA9qKd1J9THlsQkvmH7HWTEV1lDNxkQO5PzNw==} + '@aws-sdk/nested-clients@3.996.10': + resolution: {integrity: sha512-SlDol5Z+C7Ivnc2rKGqiqfSUmUZzY1qHfVs9myt/nxVwswgfpjdKahyTzLTx802Zfq0NFRs7AejwKzzzl5Co2w==} engines: {node: '>=20.0.0'} - '@aws-sdk/nested-clients@3.996.7': - resolution: {integrity: sha512-MlGWA8uPaOs5AiTZ5JLM4uuWDm9EEAnm9cqwvqQIc6kEgel/8s1BaOWm9QgUcfc9K8qd7KkC3n43yDbeXOA2tg==} + 
'@aws-sdk/nested-clients@3.996.3': + resolution: {integrity: sha512-AU5TY1V29xqwg/MxmA2odwysTez+ccFAhmfRJk+QZT5HNv90UTA9qKd1J9THlsQkvmH7HWTEV1lDNxkQO5PzNw==} engines: {node: '>=20.0.0'} '@aws-sdk/region-config-resolver@3.972.6': resolution: {integrity: sha512-Aa5PusHLXAqLTX1UKDvI3pHQJtIsF7Q+3turCHqfz/1F61/zDMWfbTC8evjhrrYVAtz9Vsv3SJ/waSUeu7B6gw==} engines: {node: '>=20.0.0'} - '@aws-sdk/region-config-resolver@3.972.7': - resolution: {integrity: sha512-/Ev/6AI8bvt4HAAptzSjThGUMjcWaX3GX8oERkB0F0F9x2dLSBdgFDiyrRz3i0u0ZFZFQ1b28is4QhyqXTUsVA==} + '@aws-sdk/region-config-resolver@3.972.8': + resolution: {integrity: sha512-1eD4uhTDeambO/PNIDVG19A6+v4NdD7xzwLHDutHsUqz0B+i661MwQB2eYO4/crcCvCiQG4SRm1k81k54FEIvw==} engines: {node: '>=20.0.0'} '@aws-sdk/s3-request-presigner@3.1000.0': @@ -818,14 +830,14 @@ packages: resolution: {integrity: sha512-gQYI/Buwp0CAGQxY7mR5VzkP56rkWq2Y1ROkFuXh5XY94DsSjJw62B3I0N0lysQmtwiL2ht2KHI9NylM/RP4FA==} engines: {node: '>=20.0.0'} - '@aws-sdk/token-providers@3.1000.0': - resolution: {integrity: sha512-eOI+8WPtWpLdlYBGs8OCK3k5uIMUHVsNG3AFO4kaRaZcKReJ/2OO6+2O2Dd/3vTzM56kRjSKe7mBOCwa4PdYqg==} - engines: {node: '>=20.0.0'} - '@aws-sdk/token-providers@3.1004.0': resolution: {integrity: sha512-j9BwZZId9sFp+4GPhf6KrwO8Tben2sXibZA8D1vv2I1zBdvkUHcBA2g4pkqIpTRalMTLC0NPkBPX0gERxfy/iA==} engines: {node: '>=20.0.0'} + '@aws-sdk/token-providers@3.1009.0': + resolution: {integrity: sha512-KCPLuTqN9u0Rr38Arln78fRG9KXpzsPWmof+PZzfAHMMQq2QED6YjQrkrfiH7PDefLWEposY1o4/eGwrmKA4JA==} + engines: {node: '>=20.0.0'} + '@aws-sdk/token-providers@3.999.0': resolution: {integrity: sha512-cx0hHUlgXULfykx4rdu/ciNAJaa3AL5xz3rieCz7NKJ68MJwlj3664Y8WR5MGgxfyYJBdamnkjNSx5Kekuc0cg==} engines: {node: '>=20.0.0'} @@ -838,6 +850,10 @@ packages: resolution: {integrity: sha512-hl7BGwDCWsjH8NkZfx+HgS7H2LyM2lTMAI7ba9c8O0KqdBLTdNJivsHpqjg9rNlAlPyREb6DeDRXUl0s8uFdmQ==} engines: {node: '>=20.0.0'} + '@aws-sdk/types@3.973.6': + resolution: {integrity: 
sha512-Atfcy4E++beKtwJHiDln2Nby8W/mam64opFPTiHEqgsthqeydFS1pY+OUlN1ouNOmf8ArPU/6cDS65anOP3KQw==} + engines: {node: '>=20.0.0'} + '@aws-sdk/util-arn-parser@3.972.2': resolution: {integrity: sha512-VkykWbqMjlSgBFDyrY3nOSqupMc6ivXuGmvci6Q3NnLq5kC+mKQe2QBZ4nrWRE/jqOxeFP2uYzLtwncYYcvQDg==} engines: {node: '>=20.0.0'} @@ -846,8 +862,8 @@ packages: resolution: {integrity: sha512-yWIQSNiCjykLL+ezN5A+DfBb1gfXTytBxm57e64lYmwxDHNmInYHRJYYRAGWG1o77vKEiWaw4ui28e3yb1k5aQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/util-endpoints@3.996.4': - resolution: {integrity: sha512-Hek90FBmd4joCFj+Vc98KLJh73Zqj3s2W56gjAcTkrNLMDI5nIFkG9YpfcJiVI1YlE2Ne1uOQNe+IgQ/Vz2XRA==} + '@aws-sdk/util-endpoints@3.996.5': + resolution: {integrity: sha512-Uh93L5sXFNbyR5sEPMzUU8tJ++Ku97EY4udmC01nB8Zu+xfBPwpIwJ6F7snqQeq8h2pf+8SGN5/NoytfKgYPIw==} engines: {node: '>=20.0.0'} '@aws-sdk/util-format-url@3.972.6': @@ -862,11 +878,15 @@ packages: resolution: {integrity: sha512-H1onv5SkgPBK2P6JR2MjGgbOnttoNzSPIRoeZTNPZYyaplwGg50zS3amXvXqF0/qfXpWEC9rLWU564QTB9bSog==} engines: {node: '>=20.0.0'} + '@aws-sdk/util-locate-window@3.965.5': + resolution: {integrity: sha512-WhlJNNINQB+9qtLtZJcpQdgZw3SCDCpXdUJP7cToGwHbCWCnRckGlc6Bx/OhWwIYFNAn+FIydY8SZ0QmVu3xTQ==} + engines: {node: '>=20.0.0'} + '@aws-sdk/util-user-agent-browser@3.972.6': resolution: {integrity: sha512-Fwr/llD6GOrFgQnKaI2glhohdGuBDfHfora6iG9qsBBBR8xv1SdCSwbtf5CWlUdCw5X7g76G/9Hf0Inh0EmoxA==} - '@aws-sdk/util-user-agent-browser@3.972.7': - resolution: {integrity: sha512-7SJVuvhKhMF/BkNS1n0QAJYgvEwYbK2QLKBrzDiwQGiTRU6Yf1f3nehTzm/l21xdAOtWSfp2uWSddPnP2ZtsVw==} + '@aws-sdk/util-user-agent-browser@3.972.8': + resolution: {integrity: sha512-B3KGXJviV2u6Cdw2SDY2aDhoJkVfY/Q/Trwk2CMSkikE1Oi6gRzxhvhIfiRpHfmIsAhV4EA54TVEX8K6CbHbkA==} '@aws-sdk/util-user-agent-node@3.973.0': resolution: {integrity: sha512-A9J2G4Nf236e9GpaC1JnA8wRn6u6GjnOXiTwBLA6NUJhlBTIGfrTy+K1IazmF8y+4OFdW3O5TZlhyspJMqiqjA==} @@ -877,8 +897,8 @@ packages: aws-crt: optional: true - 
'@aws-sdk/util-user-agent-node@3.973.4': - resolution: {integrity: sha512-uqKeLqZ9D3nQjH7HGIERNXK9qnSpUK08l4MlJ5/NZqSSdeJsVANYp437EM9sEzwU28c2xfj2V6qlkqzsgtKs6Q==} + '@aws-sdk/util-user-agent-node@3.973.7': + resolution: {integrity: sha512-Hz6EZMUAEzqUd7e+vZ9LE7mn+5gMbxltXy18v+YSFY+9LBJz15wkNZvw5JqfX3z0FS9n3bgUtz3L5rAsfh4YlA==} engines: {node: '>=20.0.0'} peerDependencies: aws-crt: '>=1.0.0' @@ -886,8 +906,8 @@ packages: aws-crt: optional: true - '@aws-sdk/xml-builder@3.972.10': - resolution: {integrity: sha512-OnejAIVD+CxzyAUrVic7lG+3QRltyja9LoNqCE/1YVs8ichoTbJlVSaZ9iSMcnHLyzrSNtvaOGjSDRP+d/ouFA==} + '@aws-sdk/xml-builder@3.972.11': + resolution: {integrity: sha512-iitV/gZKQMvY9d7ovmyFnFuTHbBAtrmLnvaSb/3X8vOKyevwtpmEtyc8AdhVWZe0pI/1GsHxlEvQeOePFzy7KQ==} engines: {node: '>=20.0.0'} '@aws-sdk/xml-builder@3.972.8': @@ -898,6 +918,10 @@ packages: resolution: {integrity: sha512-oLvsaPMTBejkkmHhjf09xTgk71mOqyr/409NKhRIL08If7AhVfUsJhVsx386uJaqNd42v9kWamQ9lFbkoC2dYw==} engines: {node: '>=18.0.0'} + '@aws/lambda-invoke-store@0.2.4': + resolution: {integrity: sha512-iY8yvjE0y651BixKNPgmv1WrQc+GZ142sb0z4gYnChDDY2YqI4P/jsSopBWrKfAt7LOJAkOXt7rC/hms+WclQQ==} + engines: {node: '>=18.0.0'} + '@azure/abort-controller@2.1.2': resolution: {integrity: sha512-nBrLsEWm4J2u5LpAPjxADTlq3trDgVZZXHNKabeXZtpq3d3AbN/KGO82R87rdDz5/lYB024rtEf10/q0urNgsA==} engines: {node: '>=18.0.0'} @@ -964,8 +988,15 @@ packages: resolution: {integrity: sha512-6zABk/ECA/QYSCQ1NGiVwwbQerUCZ+TQbp64Q3AgmfNvurHH0j8TtXa1qbShXA6qqkpAj4V5W8pP6mLe1mcMqA==} engines: {node: '>=18'} - '@borewit/text-codec@0.2.1': - resolution: {integrity: sha512-k7vvKPbf7J2fZ5klGRD9AeKfUvojuZIQ3BT5u7Jfv+puwXkUBUT5PVyMDfJZpy30CBDXGMgw7fguK/lpOMBvgw==} + '@blazediff/core@1.9.1': + resolution: {integrity: sha512-ehg3jIkYKulZh+8om/O25vkvSsXXwC+skXmyA87FFx6A/45eqOkZsBltMw/TVteb0mloiGT8oGRTcjRAz66zaA==} + + '@borewit/text-codec@0.2.2': + resolution: {integrity: 
sha512-DDaRehssg1aNrH4+2hnj1B7vnUGEjU6OIlyRdkMd0aUdIUvKXrJfXsy8LVtXAy7DRvYVluWbMspsRhz2lcW0mQ==} + + '@bramus/specificity@2.4.2': + resolution: {integrity: sha512-ctxtJ/eA+t+6q2++vj5j7FYX3nRu311q1wfYH3xjlLOsczhlhxAg2FWNUXhpGvAw3BWo1xBcvOV6/YLc2r5FJw==} + hasBin: true '@buape/carbon@0.0.0-beta-20260216184201': resolution: {integrity: sha512-u5mgYcigfPVqT7D9gVTGd+3YSflTreQmrWog7ORbb0z5w9eT8ft4rJOdw9fGwr75zMu9kXpSBaAcY2eZoJFSdA==} @@ -980,15 +1011,9 @@ packages: '@cacheable/utils@2.3.4': resolution: {integrity: sha512-knwKUJEYgIfwShABS1BX6JyJJTglAFcEU7EXqzTdiGCXur4voqkiJkdgZIQtWNFhynzDWERcTYv/sETMu3uJWA==} - '@clack/core@1.0.1': - resolution: {integrity: sha512-WKeyK3NOBwDOzagPR5H08rFk9D/WuN705yEbuZvKqlkmoLM2woKtXb10OO2k1NoSU4SFG947i2/SCYh+2u5e4g==} - '@clack/core@1.1.0': resolution: {integrity: sha512-SVcm4Dqm2ukn64/8Gub2wnlA5nS2iWJyCkdNHcvNHPIeBTGojpdJ+9cZKwLfmqy7irD4N5qLteSilJlE0WLAtA==} - '@clack/prompts@1.0.1': - resolution: {integrity: sha512-/42G73JkuYdyWZ6m8d/CJtBrGl1Hegyc7Fy78m5Ob+jF85TOUmLR5XLce/U3LxYAw0kJ8CT5aI99RIvPHcGp/Q==} - '@clack/prompts@1.1.0': resolution: {integrity: sha512-pkqbPGtohJAvm4Dphs2M8xE29ggupihHdy1x84HNojZuMtFsHiUlRvqD24tM2+XmI+61LlfNceM3Wr7U5QES5g==} @@ -999,6 +1024,37 @@ packages: resolution: {integrity: sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==} engines: {node: '>=0.1.90'} + '@csstools/color-helpers@6.0.2': + resolution: {integrity: sha512-LMGQLS9EuADloEFkcTBR3BwV/CGHV7zyDxVRtVDTwdI2Ca4it0CCVTT9wCkxSgokjE5Ho41hEPgb8OEUwoXr6Q==} + engines: {node: '>=20.19.0'} + + '@csstools/css-calc@3.1.1': + resolution: {integrity: sha512-HJ26Z/vmsZQqs/o3a6bgKslXGFAungXGbinULZO3eMsOyNJHeBBZfup5FiZInOghgoM4Hwnmw+OgbJCNg1wwUQ==} + engines: {node: '>=20.19.0'} + peerDependencies: + '@csstools/css-parser-algorithms': ^4.0.0 + '@csstools/css-tokenizer': ^4.0.0 + + '@csstools/css-color-parser@4.0.2': + resolution: {integrity: 
sha512-0GEfbBLmTFf0dJlpsNU7zwxRIH0/BGEMuXLTCvFYxuL1tNhqzTbtnFICyJLTNK4a+RechKP75e7w42ClXSnJQw==} + engines: {node: '>=20.19.0'} + peerDependencies: + '@csstools/css-parser-algorithms': ^4.0.0 + '@csstools/css-tokenizer': ^4.0.0 + + '@csstools/css-parser-algorithms@4.0.0': + resolution: {integrity: sha512-+B87qS7fIG3L5h3qwJ/IFbjoVoOe/bpOdh9hAjXbvx0o8ImEmUsGXN0inFOnk2ChCFgqkkGFQ+TpM5rbhkKe4w==} + engines: {node: '>=20.19.0'} + peerDependencies: + '@csstools/css-tokenizer': ^4.0.0 + + '@csstools/css-syntax-patches-for-csstree@1.1.0': + resolution: {integrity: sha512-H4tuz2nhWgNKLt1inYpoVCfbJbMwX/lQKp3g69rrrIMIYlFD9+zTykOKhNR8uGrAmbS/kT9n6hTFkmDkxLgeTA==} + + '@csstools/css-tokenizer@4.0.0': + resolution: {integrity: sha512-QxULHAm7cNu72w97JUNCBFODFaXpbDg+dP8b/oWFAZ2MTRppA3U00Y2L1HqaS4J6yBqxwa/Y3nMBaxVKbB/NsA==} + engines: {node: '>=20.19.0'} + '@cypress/request-promise@5.0.0': resolution: {integrity: sha512-eKdYVpa9cBEw2kTBlHeu1PP16Blwtum6QHg/u9s/MoHkZfuo1pRGka1VlUHXF5kdew82BvOJVVGk0x8X0nbp+w==} engines: {node: '>=0.10.0'} @@ -1054,6 +1110,10 @@ packages: resolution: {integrity: sha512-UyX6rGEXzVyPzb1yvjHtPfTlnLvB5jX/stAMdiytHhfoydX+98hfympdOwsnTktzr+IRvphxTbdErgYDJkEsvw==} engines: {node: '>=22.12.0'} + '@discordjs/voice@0.19.1': + resolution: {integrity: sha512-XYbFVyUBB7zhRvrjREfiWDwio24nEp/vFaVe6u9aBIC5UYuT7HvoMt8LgNfZ5hOyaCW0flFr72pkhUGz+gWw4Q==} + engines: {node: '>=22.12.0'} + '@emnapi/core@1.8.1': resolution: {integrity: sha512-AvT9QFpxK0Zd8J0jopedNm+w/2fIzvtPKPjqyw9jwvBaReTTqPBk9Hixaz7KbjimP+QNz605/XnjFcDAL2pqBg==} @@ -1222,13 +1282,13 @@ packages: '@eshaz/web-worker@1.2.2': resolution: {integrity: sha512-WxXiHFmD9u/owrzempiDlBB1ZYqiLnm9s6aPc8AlFQalq2tKmqdmMr9GXOupDgzXtqnBipj8Un0gkIm7Sjf8mw==} - '@google/genai@1.43.0': - resolution: {integrity: sha512-hklCsJNdMlDM1IwcCVcGQFBg2izY0+t5BIGbRsxi2UnKi6AGKL7pqJqmBDNRbw0bYCs4y3NA7TB+fkKfP/Nrdw==} - engines: {node: '>=20.0.0'} + '@exodus/bytes@1.15.0': + resolution: {integrity: 
sha512-UY0nlA+feH81UGSHv92sLEPLCeZFjXOuHhrIo0HQydScuQc8s0A7kL/UdgwgDq8g8ilksmuoF35YVTNphV2aBQ==} + engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0} peerDependencies: - '@modelcontextprotocol/sdk': ^1.25.2 + '@noble/hashes': ^1.8.0 || ^2.0.0 peerDependenciesMeta: - '@modelcontextprotocol/sdk': + '@noble/hashes': optional: true '@google/genai@1.44.0': @@ -1278,7 +1338,7 @@ packages: resolution: {integrity: sha512-hZ7nOssGqRgyV3FVVQdfi+U4q02uB23bpnYpdvNXkYTRRyWx84b7yf1ans+dnJ/7h41sGL3CeQTfO+ZGxuO+Iw==} engines: {node: '>=18.14.1'} peerDependencies: - hono: 4.12.5 + hono: 4.12.7 '@huggingface/jinja@0.5.5': resolution: {integrity: sha512-xRlzazC+QZwr6z4ixEqYHo9fgwhTZ3xNSdljlKfUFGZSdlvt166DljRELFUfFytlYOYvo3vTisA/AFOuOAzFQQ==} @@ -1644,40 +1704,22 @@ packages: resolution: {integrity: sha512-faGUlTcXka5l7rv0lP3K3vGW/ejRuOS24RR2aSFWREUQqzjgdsuWNo/IiPqL3kWRGt6Ahl2+qcDAwtdeWeuGUw==} hasBin: true - '@mariozechner/pi-agent-core@0.55.3': - resolution: {integrity: sha512-rqbfpQ9BrP6BDiW+Ps3A8Z/p9+Md/pAfc/ECq8JP6cwnZL/jQgU355KWZKtF8zM9az1p0Q9hIWi9cQygVo6Auw==} + '@mariozechner/pi-agent-core@0.58.0': + resolution: {integrity: sha512-zhkwx3Wdo27snVfnJWi7l+wyU4XlazkeunTtz4e500GC+ufGOp4C3aIf0XiO5ZOtTE/0lvUiG2bWULR/i4lgUQ==} engines: {node: '>=20.0.0'} - '@mariozechner/pi-agent-core@0.57.1': - resolution: {integrity: sha512-WXsBbkNWOObFGHkhixaT8GXJpHDd3+fn8QntYF+4R8Sa9WB90ENXWidO6b7vcKX+JX0jjO5dIsQxmzosARJKlg==} - engines: {node: '>=20.0.0'} - - '@mariozechner/pi-ai@0.55.3': - resolution: {integrity: sha512-f9jWoDzJR9Wy/H8JPMbjoM4WvVUeFZ65QdYA9UHIfoOopDfwWE8F8JHQOj5mmmILMacXuzsqA3J7MYqNWZRvvQ==} + '@mariozechner/pi-ai@0.58.0': + resolution: {integrity: sha512-3TrkJ9QcBYFPo4NxYluhd+JQ4M+98RaEkNPMrLFU4wK4GMFVtsL3kp1YJ/oj7X0eqKuuDKbHj6MdoMZeT2TCvA==} engines: {node: '>=20.0.0'} hasBin: true - '@mariozechner/pi-ai@0.57.1': - resolution: {integrity: sha512-Bd/J4a3YpdzJVyHLih0vDSdB0QPL4ti0XsAwtHOK/8eVhB0fHM1CpcgIrcBFJ23TMcKXMi0qamz18ERfp8tmgg==} - engines: {node: '>=20.0.0'} - hasBin: true 
- - '@mariozechner/pi-coding-agent@0.55.3': - resolution: {integrity: sha512-5SFbB7/BIp/Crjre7UNjUeNfpoU1KSW/i6LXa+ikJTBqI5LukWq2avE5l0v0M8Pg/dt1go2XCLrNFlQJiQDSPQ==} - engines: {node: '>=20.0.0'} - hasBin: true - - '@mariozechner/pi-coding-agent@0.57.1': - resolution: {integrity: sha512-u5MQEduj68rwVIsRsqrWkJYiJCyPph/a6bMoJAQKo1sb+Pc17Y/ojwa+wGssnUMjEB38AQKofWTVe8NFEpSWNw==} + '@mariozechner/pi-coding-agent@0.58.0': + resolution: {integrity: sha512-aCoqIMfcFWwuZrLC4MC1EnHwUrqo+ppamXlNYk5+nANH8U+51AP8OUqOUqT9NSHO9ZdItheU9wCqt7wPf5Ah8A==} engines: {node: '>=20.6.0'} hasBin: true - '@mariozechner/pi-tui@0.55.3': - resolution: {integrity: sha512-Gh4wkYgiSPCJJaB/4wEWSL7Ga8bxSq1Crp1RPRT4vKybE/DG0W/MQr5VJDvktarxtJrD16ixScwE4dzdox/PIA==} - engines: {node: '>=20.0.0'} - - '@mariozechner/pi-tui@0.57.1': - resolution: {integrity: sha512-cjoRghLbeAHV0tTJeHgZXaryUi5zzBZofeZ7uJun1gztnckLLRjoVeaPTujNlc5BIfyKvFqhh1QWCZng/MXlpg==} + '@mariozechner/pi-tui@0.58.0': + resolution: {integrity: sha512-luRbQlk0ZCbYGCtCrKTqQX0ECKNYPj7OSlxKMXEY0B3bA6s4f/Xj0aLPiKlhsIynC2dPQmijA44ZDfrWFniWwA==} engines: {node: '>=20.0.0'} '@matrix-org/matrix-sdk-crypto-nodejs@0.4.0': @@ -1692,12 +1734,19 @@ packages: resolution: {integrity: sha512-570oJr93l1RcCNNaMVpOm+PgQkRgno/F65nH1aCWLIKLnw0o7iPoj+8Z5b7mnLMidg9lldVSCcf0dBxqTGE1/w==} engines: {node: '>=20.0.0'} - '@mistralai/mistralai@1.10.0': - resolution: {integrity: sha512-tdIgWs4Le8vpvPiUEWne6tK0qbVc+jMenujnvTqOjogrJUsCSQhus0tHTU1avDDh5//Rq2dFgP9mWRAdIEoBqg==} - '@mistralai/mistralai@1.14.1': resolution: {integrity: sha512-IiLmmZFCCTReQgPAT33r7KQ1nYo5JPdvGkrkZqA8qQ2qB1GHgs5LoP5K2ICyrjnpw2n8oSxMM/VP+liiKcGNlQ==} + '@modelcontextprotocol/sdk@1.27.1': + resolution: {integrity: sha512-sr6GbP+4edBwFndLbM60gf07z0FQ79gaExpnsjMGePXqFcSSb7t6iscpjk9DhFhwd+mTEQrzNafGP8/iGGFYaA==} + engines: {node: '>=18'} + peerDependencies: + '@cfworker/json-schema': ^4.1.1 + zod: ^3.25 || ^4.0 + peerDependenciesMeta: + '@cfworker/json-schema': + optional: true + 
'@mozilla/readability@0.6.0': resolution: {integrity: sha512-juG5VWh4qAivzTAeMzvY9xs9HY5rAcr2E4I7tiSSCokRFi7XIZCAu92ZkSTsIj1OPceCifL3cpfteP3pDT9/QQ==} engines: {node: '>=14.0.0'} @@ -2159,119 +2208,123 @@ packages: resolution: {integrity: sha512-cifvXDhcqMwwTlTK04GBNeIe7yyo28Mfby85QXFe1Yk8nmi36Ab/5UQwptOx84SsoGNRg+EVSjwzfSZMy6pmlw==} engines: {node: '>=14'} + '@oxc-project/runtime@0.115.0': + resolution: {integrity: sha512-Rg8Wlt5dCbXhQnsXPrkOjL1DTSvXLgb2R/KYfnf1/K+R0k6UMLEmbQXPM+kwrWqSmWA2t0B1EtHy2/3zikQpvQ==} + engines: {node: ^20.19.0 || >=22.12.0} + '@oxc-project/types@0.115.0': resolution: {integrity: sha512-4n91DKnebUS4yjUHl2g3/b2T+IUdCfmoZGhmwsovZCDaJSs+QkVAM+0AqqTxHSsHfeiMuueT75cZaZcT/m0pSw==} - '@oxfmt/binding-android-arm-eabi@0.36.0': - resolution: {integrity: sha512-Z4yVHJWx/swHHjtr0dXrBZb6LxS+qNz1qdza222mWwPTUK4L790+5i3LTgjx3KYGBzcYpjaiZBw4vOx94dH7MQ==} + '@oxfmt/binding-android-arm-eabi@0.40.0': + resolution: {integrity: sha512-S6zd5r1w/HmqR8t0CTnGjFTBLDq2QKORPwriCHxo4xFNuhmOTABGjPaNvCJJVnrKBLsohOeiDX3YqQfJPF+FXw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [android] - '@oxfmt/binding-android-arm64@0.36.0': - resolution: {integrity: sha512-3ElCJRFNPQl7jexf2CAa9XmAm8eC5JPrIDSjc9jSchkVSFTEqyL0NtZinBB2h1a4i4JgP1oGl/5G5n8YR4FN8Q==} + '@oxfmt/binding-android-arm64@0.40.0': + resolution: {integrity: sha512-/mbS9UUP/5Vbl2D6osIdcYiP0oie63LKMoTyGj5hyMCK/SFkl3EhtyRAfdjPvuvHC0SXdW6ePaTKkBSq1SNcIw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [android] - '@oxfmt/binding-darwin-arm64@0.36.0': - resolution: {integrity: sha512-nak4znWCqIExKhYSY/mz/lWsqWIpdsS7o0+SRzXR1Q0m7GrMcG1UrF1pS7TLGZhhkf7nTfEF7q6oZzJiodRDuw==} + '@oxfmt/binding-darwin-arm64@0.40.0': + resolution: {integrity: sha512-wRt8fRdfLiEhnRMBonlIbKrJWixoEmn6KCjKE9PElnrSDSXETGZfPb8ee+nQNTobXkCVvVLytp2o0obAsxl78Q==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [darwin] - '@oxfmt/binding-darwin-x64@0.36.0': - resolution: {integrity: 
sha512-V4GP96thDnpKx6ADnMDnhIXNdtV+Ql9D4HUU+a37VTeVbs5qQSF/s6hhUP1b3xUqU7iRcwh72jUU2Y12rtGHAw==} + '@oxfmt/binding-darwin-x64@0.40.0': + resolution: {integrity: sha512-fzowhqbOE/NRy+AE5ob0+Y4X243WbWzDb00W+pKwD7d9tOqsAFbtWUwIyqqCoCLxj791m2xXIEeLH/3uz7zCCg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [darwin] - '@oxfmt/binding-freebsd-x64@0.36.0': - resolution: {integrity: sha512-/xapWCADfI5wrhxpEUjhI9fnw7MV5BUZizVa8e24n3VSK6A3Y1TB/ClOP1tfxNspykFKXp4NBWl6NtDJP3osqQ==} + '@oxfmt/binding-freebsd-x64@0.40.0': + resolution: {integrity: sha512-agZ9ITaqdBjcerRRFEHB8s0OyVcQW8F9ZxsszjxzeSthQ4fcN2MuOtQFWec1ed8/lDa50jSLHVE2/xPmTgtCfQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [freebsd] - '@oxfmt/binding-linux-arm-gnueabihf@0.36.0': - resolution: {integrity: sha512-1lOmv61XMFIH5uNm27620kRRzWt/RK6tdn250BRDoG9W7OXGOQ5UyI1HVT+SFkoOoKztBiinWgi68+NA1MjBVQ==} + '@oxfmt/binding-linux-arm-gnueabihf@0.40.0': + resolution: {integrity: sha512-ZM2oQ47p28TP1DVIp7HL1QoMUgqlBFHey0ksHct7tMXoU5BqjNvPWw7888azzMt25lnyPODVuye1wvNbvVUFOA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@oxfmt/binding-linux-arm-musleabihf@0.36.0': - resolution: {integrity: sha512-vMH23AskdR1ujUS9sPck2Df9rBVoZUnCVY86jisILzIQ/QQ/yKUTi7tgnIvydPx7TyB/48wsQ5QMr5Knq5p/aw==} + '@oxfmt/binding-linux-arm-musleabihf@0.40.0': + resolution: {integrity: sha512-RBFPAxRAIsMisKM47Oe6Lwdv6agZYLz02CUhVCD1sOv5ajAcRMrnwCFBPWwGXpazToW2mjnZxFos8TuFjTU15A==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@oxfmt/binding-linux-arm64-gnu@0.36.0': - resolution: {integrity: sha512-Hy1V+zOBHpBiENRx77qrUTt5aPDHeCASRc8K5KwwAHkX2AKP0nV89eL17hsZrE9GmnXFjsNmd80lyf7aRTXsbw==} + '@oxfmt/binding-linux-arm64-gnu@0.40.0': + resolution: {integrity: sha512-Nb2XbQ+wV3W2jSIihXdPj7k83eOxeSgYP3N/SRXvQ6ZYPIk6Q86qEh5Gl/7OitX3bQoQrESqm1yMLvZV8/J7dA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@oxfmt/binding-linux-arm64-musl@0.36.0': - resolution: {integrity: 
sha512-SPGLJkOIHSIC6ABUQ5V8NqJpvYhMJueJv26NYqfCnwi/Mn6A61amkpJJ9Suy0Nmvs+OWESJpcebrBUbXPGZyQQ==} + '@oxfmt/binding-linux-arm64-musl@0.40.0': + resolution: {integrity: sha512-tGmWhLD/0YMotCdfezlT6tC/MJG/wKpo4vnQ3Cq+4eBk/BwNv7EmkD0VkD5F/dYkT3b8FNU01X2e8vvJuWoM1w==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@oxfmt/binding-linux-ppc64-gnu@0.36.0': - resolution: {integrity: sha512-3EuoyB8x9x8ysYJjbEO/M9fkSk72zQKnXCvpZMDHXlnY36/1qMp55Nm0PrCwjGO/1pen5hdOVkz9WmP3nAp2IQ==} + '@oxfmt/binding-linux-ppc64-gnu@0.40.0': + resolution: {integrity: sha512-rVbFyM3e7YhkVnp0IVYjaSHfrBWcTRWb60LEcdNAJcE2mbhTpbqKufx0FrhWfoxOrW/+7UJonAOShoFFLigDqQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ppc64] os: [linux] - '@oxfmt/binding-linux-riscv64-gnu@0.36.0': - resolution: {integrity: sha512-MpY3itLwpGh8dnywtrZtaZ604T1m715SydCKy0+qTxetv+IHzuA+aO/AGzrlzUNYZZmtWtmDBrChZGibvZxbRQ==} + '@oxfmt/binding-linux-riscv64-gnu@0.40.0': + resolution: {integrity: sha512-3ZqBw14JtWeEoLiioJcXSJz8RQyPE+3jLARnYM1HdPzZG4vk+Ua8CUupt2+d+vSAvMyaQBTN2dZK+kbBS/j5mA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] - '@oxfmt/binding-linux-riscv64-musl@0.36.0': - resolution: {integrity: sha512-mmDhe4Vtx+XwQPRPn/V25+APnkApYgZ23q+6GVsNYY98pf3aU0aI3Me96pbRs/AfJ1jIiGC+/6q71FEu8dHcHw==} + '@oxfmt/binding-linux-riscv64-musl@0.40.0': + resolution: {integrity: sha512-JJ4PPSdcbGBjPvb+O7xYm2FmAsKCyuEMYhqatBAHMp/6TA6rVlf9Z/sYPa4/3Bommb+8nndm15SPFRHEPU5qFA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] - '@oxfmt/binding-linux-s390x-gnu@0.36.0': - resolution: {integrity: sha512-AYXhU+DmNWLSnvVwkHM92fuYhogtVHab7UQrPNaDf1sxadugg9gWVmcgJDlIwxJdpk5CVW/TFvwUKwI432zhhA==} + '@oxfmt/binding-linux-s390x-gnu@0.40.0': + resolution: {integrity: sha512-Kp0zNJoX9Ik77wUya2tpBY3W9f40VUoMQLWVaob5SgCrblH/t2xr/9B2bWHfs0WCefuGmqXcB+t0Lq77sbBmZw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [s390x] os: [linux] - '@oxfmt/binding-linux-x64-gnu@0.36.0': - resolution: {integrity: 
sha512-H16QhhQ3usoakMleiAAQ2mg0NsBDAdyE9agUgfC8IHHh3jZEbr0rIKwjEqwbOHK5M0EmfhJmr+aGO/MgZPsneA==} + '@oxfmt/binding-linux-x64-gnu@0.40.0': + resolution: {integrity: sha512-7YTCNzleWTaQTqNGUNQ66qVjpoV6DjbCOea+RnpMBly2bpzrI/uu7Rr+2zcgRfNxyjXaFTVQKaRKjqVdeUfeVA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@oxfmt/binding-linux-x64-musl@0.36.0': - resolution: {integrity: sha512-EFFGkixA39BcmHiCe2ECdrq02D6FCve5ka6ObbvrheXl4V+R0U/E+/uLyVx1X65LW8TA8QQHdnbdDallRekohw==} + '@oxfmt/binding-linux-x64-musl@0.40.0': + resolution: {integrity: sha512-hWnSzJ0oegeOwfOEeejYXfBqmnRGHusgtHfCPzmvJvHTwy1s3Neo59UKc1CmpE3zxvrCzJoVHos0rr97GHMNPw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@oxfmt/binding-openharmony-arm64@0.36.0': - resolution: {integrity: sha512-zr/t369wZWFOj1qf06Z5gGNjFymfUNDrxKMmr7FKiDRVI1sNsdKRCuRL4XVjtcptKQ+ao3FfxLN1vrynivmCYg==} + '@oxfmt/binding-openharmony-arm64@0.40.0': + resolution: {integrity: sha512-28sJC1lR4qtBJGzSRRbPnSW3GxU2+4YyQFE6rCmsUYqZ5XYH8jg0/w+CvEzQ8TuAQz5zLkcA25nFQGwoU0PT3Q==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [openharmony] - '@oxfmt/binding-win32-arm64-msvc@0.36.0': - resolution: {integrity: sha512-FxO7UksTv8h4olzACgrqAXNF6BP329+H322323iDrMB5V/+a1kcAw07fsOsUmqNrb9iJBsCQgH/zqcqp5903ag==} + '@oxfmt/binding-win32-arm64-msvc@0.40.0': + resolution: {integrity: sha512-cDkRnyT0dqwF5oIX1Cv59HKCeZQFbWWdUpXa3uvnHFT2iwYSSZspkhgjXjU6iDp5pFPaAEAe9FIbMoTgkTmKPg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [win32] - '@oxfmt/binding-win32-ia32-msvc@0.36.0': - resolution: {integrity: sha512-OjoMQ89H01M0oLMfr/CPNH1zi48ZIwxAKObUl57oh7ssUBNDp/2Vjf7E1TQ8M4oj4VFQ/byxl2SmcPNaI2YNDg==} + '@oxfmt/binding-win32-ia32-msvc@0.40.0': + resolution: {integrity: sha512-7rPemBJjqm5Gkv6ZRCPvK8lE6AqQ/2z31DRdWazyx2ZvaSgL7QGofHXHNouRpPvNsT9yxRNQJgigsWkc+0qg4w==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ia32] os: [win32] - '@oxfmt/binding-win32-x64-msvc@0.36.0': - resolution: {integrity: 
sha512-MoyeQ9S36ZTz/4bDhOKJgOBIDROd4dQ5AkT9iezhEaUBxAPdNX9Oq0jD8OSnCj3G4wam/XNxVWKMA52kmzmPtQ==} + '@oxfmt/binding-win32-x64-msvc@0.40.0': + resolution: {integrity: sha512-/Zmj0yTYSvmha6TG1QnoLqVT7ZMRDqXvFXXBQpIjteEwx9qvUYMBH2xbiOFhDeMUJkGwC3D6fdKsFtaqUvkwNA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [win32] @@ -2306,126 +2359,130 @@ packages: cpu: [x64] os: [win32] - '@oxlint/binding-android-arm-eabi@1.51.0': - resolution: {integrity: sha512-jJYIqbx4sX+suIxWstc4P7SzhEwb4ArWA2KVrmEuu9vH2i0qM6QIHz/ehmbGE4/2fZbpuMuBzTl7UkfNoqiSgw==} + '@oxlint/binding-android-arm-eabi@1.55.0': + resolution: {integrity: sha512-NhvgAhncTSOhRahQSCnkK/4YIGPjTmhPurQQ2dwt2IvwCMTvZRW5vF2K10UBOxFve4GZDMw6LtXZdC2qeuYIVQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [android] - '@oxlint/binding-android-arm64@1.51.0': - resolution: {integrity: sha512-GtXyBCcH4ti98YdiMNCrpBNGitx87EjEWxevnyhcBK12k/Vu4EzSB45rzSC4fGFUD6sQgeaxItRCEEWeVwPafw==} + '@oxlint/binding-android-arm64@1.55.0': + resolution: {integrity: sha512-P9iWRh+Ugqhg+D7rkc7boHX8o3H2h7YPcZHQIgvVBgnua5tk4LR2L+IBlreZs58/95cd2x3/004p5VsQM9z4SA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [android] - '@oxlint/binding-darwin-arm64@1.51.0': - resolution: {integrity: sha512-3QJbeYaMHn6Bh2XeBXuITSsbnIctyTjvHf5nRjKYrT9pPeErNIpp5VDEeAXC0CZSwSVTsc8WOSDwgrAI24JolQ==} + '@oxlint/binding-darwin-arm64@1.55.0': + resolution: {integrity: sha512-esakkJIt7WFAhT30P/Qzn96ehFpzdZ1mNuzpOb8SCW7lI4oB8VsyQnkSHREM671jfpuBb/o2ppzBCx5l0jpgMA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [darwin] - '@oxlint/binding-darwin-x64@1.51.0': - resolution: {integrity: sha512-NzErhMaTEN1cY0E8C5APy74lw5VwsNfJfVPBMWPVQLqAbO0k4FFLjvHURvkUL+Y18Wu+8Vs1kbqPh2hjXYA4pg==} + '@oxlint/binding-darwin-x64@1.55.0': + resolution: {integrity: sha512-xDMFRCCAEK9fOH6As2z8ELsC+VDGSFRHwIKVSilw+xhgLwTDFu37rtmRbmUlx8rRGS6cWKQPTc47AVxAZEVVPQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [darwin] - '@oxlint/binding-freebsd-x64@1.51.0': 
- resolution: {integrity: sha512-msAIh3vPAoKoHlOE/oe6Q5C/n9umypv/k81lED82ibrJotn+3YG2Qp1kiR8o/Dg5iOEU97c6tl0utxcyFenpFw==} + '@oxlint/binding-freebsd-x64@1.55.0': + resolution: {integrity: sha512-mYZqnwUD7ALCRxGenyLd1uuG+rHCL+OTT6S8FcAbVm/ZT2AZMGjvibp3F6k1SKOb2aeqFATmwRykrE41Q0GWVw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [freebsd] - '@oxlint/binding-linux-arm-gnueabihf@1.51.0': - resolution: {integrity: sha512-CqQPcvqYyMe9ZBot2stjGogEzk1z8gGAngIX7srSzrzexmXixwVxBdFZyxTVM0CjGfDeV+Ru0w25/WNjlMM2Hw==} + '@oxlint/binding-linux-arm-gnueabihf@1.55.0': + resolution: {integrity: sha512-LcX6RYcF9vL9ESGwJW3yyIZ/d/ouzdOKXxCdey1q0XJOW1asrHsIg5MmyKdEBR4plQx+shvYeQne7AzW5f3T1w==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@oxlint/binding-linux-arm-musleabihf@1.51.0': - resolution: {integrity: sha512-dstrlYQgZMnyOssxSbolGCge/sDbko12N/35RBNuqLpoPbft2aeBidBAb0dvQlyBd9RJ6u8D4o4Eh8Un6iTgyQ==} + '@oxlint/binding-linux-arm-musleabihf@1.55.0': + resolution: {integrity: sha512-C+8GS1rPtK+dI7mJFkqoRBkDuqbrNihnyYQsJPS9ez+8zF9JzfvU19lawqt4l/Y23o5uQswE/DORa8aiXUih3w==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@oxlint/binding-linux-arm64-gnu@1.51.0': - resolution: {integrity: sha512-QEjUpXO7d35rP1/raLGGbAsBLLGZIzV3ZbeSjqWlD3oRnxpRIZ6iL4o51XQHkconn3uKssc+1VKdtHJ81BBhDA==} + '@oxlint/binding-linux-arm64-gnu@1.55.0': + resolution: {integrity: sha512-ErLE4XbmcCopA4/CIDiH6J1IAaDOMnf/KSx/aFObs4/OjAAM3sFKWGZ57pNOMxhhyBdcmcXwYymph9GwcpcqgQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@oxlint/binding-linux-arm64-musl@1.51.0': - resolution: {integrity: sha512-YSJua5irtG4DoMAjUapDTPhkQLHhBIY0G9JqlZS6/SZPzqDkPku/1GdWs0D6h/wyx0Iz31lNCfIaWKBQhzP0wQ==} + '@oxlint/binding-linux-arm64-musl@1.55.0': + resolution: {integrity: sha512-/kp65avi6zZfqEng56TTuhiy3P/3pgklKIdf38yvYeJ9/PgEeRA2A2AqKAKbZBNAqUzrzHhz9jF6j/PZvhJzTQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - 
'@oxlint/binding-linux-ppc64-gnu@1.51.0': - resolution: {integrity: sha512-7L4Wj2IEUNDETKssB9IDYt16T6WlF+X2jgC/hBq3diGHda9vJLpAgb09+D3quFq7TdkFtI7hwz/jmuQmQFPc1Q==} + '@oxlint/binding-linux-ppc64-gnu@1.55.0': + resolution: {integrity: sha512-A6pTdXwcEEwL/nmz0eUJ6WxmxcoIS+97GbH96gikAyre3s5deC7sts38ZVVowjS2QQFuSWkpA4ZmQC0jZSNvJQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ppc64] os: [linux] - '@oxlint/binding-linux-riscv64-gnu@1.51.0': - resolution: {integrity: sha512-cBUHqtOXy76G41lOB401qpFoKx1xq17qYkhWrLSM7eEjiHM9sOtYqpr6ZdqCnN9s6ZpzudX4EkeHOFH2E9q0vA==} + '@oxlint/binding-linux-riscv64-gnu@1.55.0': + resolution: {integrity: sha512-clj0lnIN+V52G9tdtZl0LbdTSurnZ1NZj92Je5X4lC7gP5jiCSW+Y/oiDiSauBAD4wrHt2S7nN3pA0zfKYK/6Q==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] - '@oxlint/binding-linux-riscv64-musl@1.51.0': - resolution: {integrity: sha512-WKbg8CysgZcHfZX0ixQFBRSBvFZUHa3SBnEjHY2FVYt2nbNJEjzTxA3ZR5wMU0NOCNKIAFUFvAh5/XJKPRJuJg==} + '@oxlint/binding-linux-riscv64-musl@1.55.0': + resolution: {integrity: sha512-NNu08pllN5x/O94/sgR3DA8lbrGBnTHsINZZR0hcav1sj79ksTiKKm1mRzvZvacwQ0hUnGinFo+JO75ok2PxYg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] - '@oxlint/binding-linux-s390x-gnu@1.51.0': - resolution: {integrity: sha512-N1QRUvJTxqXNSu35YOufdjsAVmKVx5bkrggOWAhTWBc3J4qjcBwr1IfyLh/6YCg8sYRSR1GraldS9jUgJL/U4A==} + '@oxlint/binding-linux-s390x-gnu@1.55.0': + resolution: {integrity: sha512-BvfQz3PRlWZRoEZ17dZCqgQsMRdpzGZomJkVATwCIGhHVVeHJMQdmdXPSjcT1DCNUrOjXnVyj1RGDj5+/Je2+Q==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [s390x] os: [linux] - '@oxlint/binding-linux-x64-gnu@1.51.0': - resolution: {integrity: sha512-e0Mz0DizsCoqNIjeOg6OUKe8JKJWZ5zZlwsd05Bmr51Jo3AOL4UJnPvwKumr4BBtBrDZkCmOLhCvDGm95nJM2g==} + '@oxlint/binding-linux-x64-gnu@1.55.0': + resolution: {integrity: sha512-ngSOoFCSBMKVQd24H8zkbcBNc7EHhjnF1sv3mC9NNXQ/4rRjI/4Dj9+9XoDZeFEkF1SX1COSBXF1b2Pr9rqdEw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - 
'@oxlint/binding-linux-x64-musl@1.51.0': - resolution: {integrity: sha512-wD8HGTWhYBKXvRDvoBVB1y+fEYV01samhWQSy1Zkxq2vpezvMnjaFKRuiP6tBNITLGuffbNDEXOwcAhJ3gI5Ug==} + '@oxlint/binding-linux-x64-musl@1.55.0': + resolution: {integrity: sha512-BDpP7W8GlaG7BR6QjGZAleYzxoyKc/D24spZIF2mB3XsfALQJJT/OBmP8YpeTb1rveFSBHzl8T7l0aqwkWNdGA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@oxlint/binding-openharmony-arm64@1.51.0': - resolution: {integrity: sha512-5NSwQ2hDEJ0GPXqikjWtwzgAQCsS7P9aLMNenjjKa+gknN3lTCwwwERsT6lKXSirfU3jLjexA2XQvQALh5h27w==} + '@oxlint/binding-openharmony-arm64@1.55.0': + resolution: {integrity: sha512-PS6GFvmde/pc3fCA2Srt51glr8Lcxhpf6WIBFfLphndjRrD34NEcses4TSxQrEcxYo6qVywGfylM0ZhSCF2gGA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [openharmony] - '@oxlint/binding-win32-arm64-msvc@1.51.0': - resolution: {integrity: sha512-JEZyah1M0RHMw8d+jjSSJmSmO8sABA1J1RtrHYujGPeCkYg1NeH0TGuClpe2h5QtioRTaF57y/TZfn/2IFV6fA==} + '@oxlint/binding-win32-arm64-msvc@1.55.0': + resolution: {integrity: sha512-P6JcLJGs/q1UOvDLzN8otd9JsH4tsuuPDv+p7aHqHM3PrKmYdmUvkNj4K327PTd35AYcznOCN+l4ZOaq76QzSw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [win32] - '@oxlint/binding-win32-ia32-msvc@1.51.0': - resolution: {integrity: sha512-q3cEoKH6kwjz/WRyHwSf0nlD2F5Qw536kCXvmlSu+kaShzgrA0ojmh45CA81qL+7udfCaZL2SdKCZlLiGBVFlg==} + '@oxlint/binding-win32-ia32-msvc@1.55.0': + resolution: {integrity: sha512-gzkk4zE2zsE+WmRxFOiAZHpCpUNDFytEakqNXoNHW+PnYEOTPKDdW6nrzgSeTbGKVPXNAKQnRnMgrh7+n3Xueg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ia32] os: [win32] - '@oxlint/binding-win32-x64-msvc@1.51.0': - resolution: {integrity: sha512-Q14+fOGb9T28nWF/0EUsYqERiRA7cl1oy4TJrGmLaqhm+aO2cV+JttboHI3CbdeMCAyDI1+NoSlrM7Melhp/cw==} + '@oxlint/binding-win32-x64-msvc@1.55.0': + resolution: {integrity: sha512-ZFALNow2/og75gvYzNP7qe+rREQ5xunktwA+lgykoozHZ6hw9bqg4fn5j2UvG4gIn1FXqrZHkOAXuPf5+GOYTQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [win32] - 
'@pierre/diffs@1.0.11': - resolution: {integrity: sha512-j6zIEoyImQy1HfcJqbrDwP0O5I7V2VNXAaw53FqQ+SykRfaNwABeZHs9uibXO4supaXPmTx6LEH9Lffr03e1Tw==} + '@pierre/diffs@1.1.0': + resolution: {integrity: sha512-wbxrzcmanJuHZb81iir09j42uU9AnKxXDtAuEQJbAnti5f2UfYdCQYejawuHZStFrlsMacCZLh/dDHmqvAaQCw==} peerDependencies: react: ^18.3.1 || ^19.0.0 react-dom: ^18.3.1 || ^19.0.0 + '@pierre/theme@0.0.22': + resolution: {integrity: sha512-ePUIdQRNGjrveELTU7fY89Xa7YGHHEy5Po5jQy/18lm32eRn96+tnYJEtFooGdffrx55KBUtOXfvVy/7LDFFhA==} + engines: {vscode: ^1.0.0} + '@pinojs/redact@0.4.0': resolution: {integrity: sha512-k2ENnmBugE/rzQfEcdWHcCY+/FM3VLzH9cYEsbdsoqrvzAKRhUZeRNhAZvB8OitQJ1TBed3yqWtdjzS6wJKBwg==} @@ -2521,222 +2578,97 @@ packages: resolution: {integrity: sha512-DmCG8GzysnCZ15bres3N5AHCmwBwYgp0As6xjhQ47rAUTUXxJiK+lLUxaGsX3hd/30qUpVElh05PbGuxRPgJwA==} engines: {node: '>= 10'} - '@rolldown/binding-android-arm64@1.0.0-rc.7': - resolution: {integrity: sha512-/uadfNUaMLFFBGvcIOiq8NnlhvTZTjOyybJaJnhGxD0n9k5vZRJfTaitH5GHnbwmc6T2PC+ZpS1FQH+vXyS/UA==} + '@rolldown/binding-android-arm64@1.0.0-rc.9': + resolution: {integrity: sha512-lcJL0bN5hpgJfSIz/8PIf02irmyL43P+j1pTCfbD1DbLkmGRuFIA4DD3B3ZOvGqG0XiVvRznbKtN0COQVaKUTg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [android] - '@rolldown/binding-darwin-arm64@1.0.0-rc.7': - resolution: {integrity: sha512-zokYr1KgRn0hRA89dmgtPj/BmKp9DxgrfAJvOEFfXa8nfYWW2nmgiYIBGpSIAJrEg7Qc/Qznovy6xYwmKh0M8g==} + '@rolldown/binding-darwin-arm64@1.0.0-rc.9': + resolution: {integrity: sha512-J7Zk3kLYFsLtuH6U+F4pS2sYVzac0qkjcO5QxHS7OS7yZu2LRs+IXo+uvJ/mvpyUljDJ3LROZPoQfgBIpCMhdQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [darwin] - '@rolldown/binding-darwin-x64@1.0.0-rc.7': - resolution: {integrity: sha512-eZFjbmrapCBVgMmuLALH3pmQQQStHFuRhsFceJHk6KISW8CkI2e9OPLp9V4qXksrySQcD8XM8fpvGLs5l5C7LQ==} + '@rolldown/binding-darwin-x64@1.0.0-rc.9': + resolution: {integrity: 
sha512-iwtmmghy8nhfRGeNAIltcNXzD0QMNaaA5U/NyZc1Ia4bxrzFByNMDoppoC+hl7cDiUq5/1CnFthpT9n+UtfFyg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [darwin] - '@rolldown/binding-freebsd-x64@1.0.0-rc.7': - resolution: {integrity: sha512-xjMrh8Dmu2DNwdY6DZsrF6YPGeesc3PaTlkh8v9cqmkSCNeTxnhX3ErhVnuv1j3n8t2IuuhQIwM9eZDINNEt5Q==} + '@rolldown/binding-freebsd-x64@1.0.0-rc.9': + resolution: {integrity: sha512-DLFYI78SCiZr5VvdEplsVC2Vx53lnA4/Ga5C65iyldMVaErr86aiqCoNBLl92PXPfDtUYjUh+xFFor40ueNs4Q==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [freebsd] - '@rolldown/binding-linux-arm-gnueabihf@1.0.0-rc.7': - resolution: {integrity: sha512-mOvftrHiXg4/xFdxJY3T9Wl1/zDAOSlMN8z9an2bXsCwuvv3RdyhYbSMZDuDO52S04w9z7+cBd90lvQSPTAQtw==} + '@rolldown/binding-linux-arm-gnueabihf@1.0.0-rc.9': + resolution: {integrity: sha512-CsjTmTwd0Hri6iTw/DRMK7kOZ7FwAkrO4h8YWKoX/kcj833e4coqo2wzIFywtch/8Eb5enQ/lwLM7w6JX1W5RQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@rolldown/binding-linux-arm64-gnu@1.0.0-rc.7': - resolution: {integrity: sha512-TuUkeuEEPRyXMBbJ86NRhAiPNezxHW8merl3Om2HASA9Pl1rI+VZcTtsVQ6v/P0MDIFpSl0k0+tUUze9HIXyEw==} + '@rolldown/binding-linux-arm64-gnu@1.0.0-rc.9': + resolution: {integrity: sha512-2x9O2JbSPxpxMDhP9Z74mahAStibTlrBMW0520+epJH5sac7/LwZW5Bmg/E6CXuEF53JJFW509uP+lSedaUNxg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@rolldown/binding-linux-arm64-musl@1.0.0-rc.7': - resolution: {integrity: sha512-G43ZElEvaby+YSOgrXfBgpeQv42LdS0ivFFYQufk2tBDWeBfzE/+ob5DmO8Izbyn4Y8k6GgLF11jFDYNnmU/3w==} + '@rolldown/binding-linux-arm64-musl@1.0.0-rc.9': + resolution: {integrity: sha512-JA1QRW31ogheAIRhIg9tjMfsYbglXXYGNPLdPEYrwFxdbkQCAzvpSCSHCDWNl4hTtrol8WeboCSEpjdZK8qrCg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] - '@rolldown/binding-linux-ppc64-gnu@1.0.0-rc.7': - resolution: {integrity: sha512-Y48ShVxGE2zUTt0A0PR3grCLNxW4DWtAfe5lxf6L3uYEQujwo/LGuRogMsAtOJeYLCPTJo2i714LOdnK34cHpw==} + 
'@rolldown/binding-linux-ppc64-gnu@1.0.0-rc.9': + resolution: {integrity: sha512-aOKU9dJheda8Kj8Y3w9gnt9QFOO+qKPAl8SWd7JPHP+Cu0EuDAE5wokQubLzIDQWg2myXq2XhTpOVS07qqvT+w==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ppc64] os: [linux] - '@rolldown/binding-linux-s390x-gnu@1.0.0-rc.7': - resolution: {integrity: sha512-KU5DUYvX3qI8/TX6D3RA4awXi4Ge/1+M6Jqv7kRiUndpqoVGgD765xhV3Q6QvtABnYjLJenrWDl3S1B5U56ixA==} + '@rolldown/binding-linux-s390x-gnu@1.0.0-rc.9': + resolution: {integrity: sha512-OalO94fqj7IWRn3VdXWty75jC5dk4C197AWEuMhIpvVv2lw9fiPhud0+bW2ctCxb3YoBZor71QHbY+9/WToadA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [s390x] os: [linux] - '@rolldown/binding-linux-x64-gnu@1.0.0-rc.7': - resolution: {integrity: sha512-1THb6FdBkAEL12zvUue2bmK4W1+P+tz8Pgu5uEzq+xrtYa3iBzmmKNlyfUzCFNCqsPd8WJEQrYdLcw4iMW4AVw==} + '@rolldown/binding-linux-x64-gnu@1.0.0-rc.9': + resolution: {integrity: sha512-cVEl1vZtBsBZna3YMjGXNvnYYrOJ7RzuWvZU0ffvJUexWkukMaDuGhUXn0rjnV0ptzGVkvc+vW9Yqy6h8YX4pg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@rolldown/binding-linux-x64-musl@1.0.0-rc.7': - resolution: {integrity: sha512-12o73atFNWDgYnLyA52QEUn9AH8pHIe12W28cmqjyHt4bIEYRzMICvYVCPa2IQm6DJBvCBrEhD9K+ct4wr2hwg==} + '@rolldown/binding-linux-x64-musl@1.0.0-rc.9': + resolution: {integrity: sha512-UzYnKCIIc4heAKgI4PZ3dfBGUZefGCJ1TPDuLHoCzgrMYPb5Rv6TLFuYtyM4rWyHM7hymNdsg5ik2C+UD9VDbA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] - '@rolldown/binding-openharmony-arm64@1.0.0-rc.7': - resolution: {integrity: sha512-+uUgGwvuUCXl894MTsmTS2J0BnCZccFsmzV7y1jFxW5pTSxkuwL5agyPuDvDOztPeS6RrdqWkn7sT0jRd0ECkg==} + '@rolldown/binding-openharmony-arm64@1.0.0-rc.9': + resolution: {integrity: sha512-+6zoiF+RRyf5cdlFQP7nm58mq7+/2PFaY2DNQeD4B87N36JzfF/l9mdBkkmTvSYcYPE8tMh/o3cRlsx1ldLfog==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [openharmony] - '@rolldown/binding-wasm32-wasi@1.0.0-rc.7': - resolution: {integrity: 
sha512-53p2L/NSy21UiFOqUGlC11kJDZS2Nx2GJRz1QvbkXovypA3cOHbsyZHLkV72JsLSbiEQe+kg4tndUhSiC31UEA==} + '@rolldown/binding-wasm32-wasi@1.0.0-rc.9': + resolution: {integrity: sha512-rgFN6sA/dyebil3YTlL2evvi/M+ivhfnyxec7AccTpRPccno/rPoNlqybEZQBkcbZu8Hy+eqNJCqfBR8P7Pg8g==} engines: {node: '>=14.0.0'} cpu: [wasm32] - '@rolldown/binding-win32-arm64-msvc@1.0.0-rc.7': - resolution: {integrity: sha512-K6svNRljO6QrL6VTKxwh4yThhlR9DT/tK0XpaFQMnJwwQKng+NYcVEtUkAM0WsoiZHw+Hnh3DGnn3taf/pNYGg==} + '@rolldown/binding-win32-arm64-msvc@1.0.0-rc.9': + resolution: {integrity: sha512-lHVNUG/8nlF1IQk1C0Ci574qKYyty2goMiPlRqkC5R+3LkXDkL5Dhx8ytbxq35m+pkHVIvIxviD+TWLdfeuadA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [win32] - '@rolldown/binding-win32-x64-msvc@1.0.0-rc.7': - resolution: {integrity: sha512-3ZJBT47VWLKVKIyvHhUSUgVwHzzZW761YAIkM3tOT+8ZTjFVp0acCM0Y2Z2j3jCl+XYi2d9y2uEWQ8H0PvvpPw==} + '@rolldown/binding-win32-x64-msvc@1.0.0-rc.9': + resolution: {integrity: sha512-G0oA4+w1iY5AGi5HcDTxWsoxF509hrFIPB2rduV5aDqS9FtDg1CAfa7V34qImbjfhIcA8C+RekocJZA96EarwQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [win32] - '@rolldown/pluginutils@1.0.0-rc.7': - resolution: {integrity: sha512-qujRfC8sFVInYSPPMLQByRh7zhwkGFS4+tyMQ83srV1qrxL4g8E2tyxVVyxd0+8QeBM1mIk9KbWxkegRr76XzA==} - - '@rollup/rollup-android-arm-eabi@4.59.0': - resolution: {integrity: sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==} - cpu: [arm] - os: [android] - - '@rollup/rollup-android-arm64@4.59.0': - resolution: {integrity: sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==} - cpu: [arm64] - os: [android] - - '@rollup/rollup-darwin-arm64@4.59.0': - resolution: {integrity: sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==} - cpu: [arm64] - os: [darwin] - - '@rollup/rollup-darwin-x64@4.59.0': - resolution: {integrity: 
sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==} - cpu: [x64] - os: [darwin] - - '@rollup/rollup-freebsd-arm64@4.59.0': - resolution: {integrity: sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==} - cpu: [arm64] - os: [freebsd] - - '@rollup/rollup-freebsd-x64@4.59.0': - resolution: {integrity: sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==} - cpu: [x64] - os: [freebsd] - - '@rollup/rollup-linux-arm-gnueabihf@4.59.0': - resolution: {integrity: sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==} - cpu: [arm] - os: [linux] - - '@rollup/rollup-linux-arm-musleabihf@4.59.0': - resolution: {integrity: sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==} - cpu: [arm] - os: [linux] - - '@rollup/rollup-linux-arm64-gnu@4.59.0': - resolution: {integrity: sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==} - cpu: [arm64] - os: [linux] - - '@rollup/rollup-linux-arm64-musl@4.59.0': - resolution: {integrity: sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==} - cpu: [arm64] - os: [linux] - - '@rollup/rollup-linux-loong64-gnu@4.59.0': - resolution: {integrity: sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==} - cpu: [loong64] - os: [linux] - - '@rollup/rollup-linux-loong64-musl@4.59.0': - resolution: {integrity: sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==} - cpu: [loong64] - os: [linux] - - '@rollup/rollup-linux-ppc64-gnu@4.59.0': - resolution: {integrity: sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==} - cpu: [ppc64] - os: [linux] - - '@rollup/rollup-linux-ppc64-musl@4.59.0': - resolution: {integrity: 
sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==} - cpu: [ppc64] - os: [linux] - - '@rollup/rollup-linux-riscv64-gnu@4.59.0': - resolution: {integrity: sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==} - cpu: [riscv64] - os: [linux] - - '@rollup/rollup-linux-riscv64-musl@4.59.0': - resolution: {integrity: sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==} - cpu: [riscv64] - os: [linux] - - '@rollup/rollup-linux-s390x-gnu@4.59.0': - resolution: {integrity: sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==} - cpu: [s390x] - os: [linux] - - '@rollup/rollup-linux-x64-gnu@4.59.0': - resolution: {integrity: sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==} - cpu: [x64] - os: [linux] - - '@rollup/rollup-linux-x64-musl@4.59.0': - resolution: {integrity: sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==} - cpu: [x64] - os: [linux] - - '@rollup/rollup-openbsd-x64@4.59.0': - resolution: {integrity: sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==} - cpu: [x64] - os: [openbsd] - - '@rollup/rollup-openharmony-arm64@4.59.0': - resolution: {integrity: sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==} - cpu: [arm64] - os: [openharmony] - - '@rollup/rollup-win32-arm64-msvc@4.59.0': - resolution: {integrity: sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==} - cpu: [arm64] - os: [win32] - - '@rollup/rollup-win32-ia32-msvc@4.59.0': - resolution: {integrity: sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==} - cpu: [ia32] - os: [win32] - - '@rollup/rollup-win32-x64-gnu@4.59.0': - resolution: {integrity: 
sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==} - cpu: [x64] - os: [win32] - - '@rollup/rollup-win32-x64-msvc@4.59.0': - resolution: {integrity: sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==} - cpu: [x64] - os: [win32] + '@rolldown/pluginutils@1.0.0-rc.9': + resolution: {integrity: sha512-w6oiRWgEBl04QkFZgmW+jnU1EC9b57Oihi2ot3HNWIQRqgHp5PnYDia5iZ5FF7rpa4EQdiqMDXjlqKGXBhsoXw==} '@scure/base@2.0.0': resolution: {integrity: sha512-3E1kpuZginKkek01ovG8krQ0Z44E3DHPjc5S2rjJw9lZn3KSQOs8S7wqikF/AH7iRanHypj85uGyxk0XAyC37w==} @@ -2790,6 +2722,10 @@ packages: resolution: {integrity: sha512-Wz7QYfPAlG/DR+DfABddUZeNgoeY7d1J39OCR2jR+v7VBsB8ezulDK5szTnDDPDwLH5IWhLvXIHlCFZV7MSKgA==} engines: {node: '>= 18', npm: '>= 8.6.0'} + '@slack/logger@4.0.1': + resolution: {integrity: sha512-6cmdPrV/RYfd2U0mDGiMK8S7OJqpCTm7enMLRR3edccsPX8j7zXTLnaEF4fhxxJJTAIOil6+qZrnUPTuaLvwrQ==} + engines: {node: '>= 18', npm: '>= 8.6.0'} + '@slack/oauth@3.0.4': resolution: {integrity: sha512-+8H0g7mbrHndEUbYCP7uYyBCbwqmm3E6Mo3nfsDvZZW74zKk1ochfH/fWSvGInYNCVvaBUbg3RZBbTp0j8yJCg==} engines: {node: '>=18', npm: '>=8.6.0'} @@ -2802,16 +2738,20 @@ packages: resolution: {integrity: sha512-PVF6P6nxzDMrzPC8fSCsnwaI+kF8YfEpxf3MqXmdyjyWTYsZQURpkK7WWUWvP5QpH55pB7zyYL9Qem/xSgc5VA==} engines: {node: '>= 12.13.0', npm: '>= 6.12.0'} - '@slack/web-api@7.14.1': - resolution: {integrity: sha512-RoygyteJeFswxDPJjUMESn9dldWVMD2xUcHHd9DenVavSfVC6FeVnSdDerOO7m8LLvw4Q132nQM4hX8JiF7dng==} + '@slack/types@2.20.1': + resolution: {integrity: sha512-eWX2mdt1ktpn8+40iiMc404uGrih+2fxiky3zBcPjtXKj6HLRdYlmhrPkJi7JTJm8dpXR6BWVWEDBXtaWMKD6A==} + engines: {node: '>= 12.13.0', npm: '>= 6.12.0'} + + '@slack/web-api@7.15.0': + resolution: {integrity: sha512-va7zYIt3QHG1x9M/jqXXRPFMoOVlVSSRHC5YH+DzKYsrz5xUKOA3lR4THsu/Zxha9N1jOndbKFKLtr0WOPW1Vw==} engines: {node: '>= 18', npm: '>= 8.6.0'} '@smithy/abort-controller@4.2.10': resolution: {integrity: 
sha512-qocxM/X4XGATqQtUkbE9SPUB6wekBi+FyJOMbPj0AhvyvFGYEmOlz6VB22iMePCQsFmMIvFSeViDvA7mZJG47g==} engines: {node: '>=18.0.0'} - '@smithy/abort-controller@4.2.11': - resolution: {integrity: sha512-Hj4WoYWMJnSpM6/kchsm4bUNTL9XiSyhvoMb2KIq4VJzyDt7JpGHUZHkVNPZVC7YE1tf8tPeVauxpFBKGW4/KQ==} + '@smithy/abort-controller@4.2.12': + resolution: {integrity: sha512-xolrFw6b+2iYGl6EcOL7IJY71vvyZ0DJ3mcKtpykqPe2uscwtzDZJa1uVQXyP7w9Dd+kGwYnPbMsJrGISKiY/Q==} engines: {node: '>=18.0.0'} '@smithy/chunked-blob-reader-native@4.2.2': @@ -2822,28 +2762,28 @@ packages: resolution: {integrity: sha512-y5d4xRiD6TzeP5BWlb+Ig/VFqF+t9oANNhGeMqyzU7obw7FYgTgVi50i5JqBTeKp+TABeDIeeXFZdz65RipNtA==} engines: {node: '>=18.0.0'} - '@smithy/config-resolver@4.4.10': - resolution: {integrity: sha512-IRTkd6ps0ru+lTWnfnsbXzW80A8Od8p3pYiZnW98K2Hb20rqfsX7VTlfUwhrcOeSSy68Gn9WBofwPuw3e5CCsg==} + '@smithy/config-resolver@4.4.11': + resolution: {integrity: sha512-YxFiiG4YDAtX7WMN7RuhHZLeTmRRAOyCbr+zB8e3AQzHPnUhS8zXjB1+cniPVQI3xbWsQPM0X2aaIkO/ME0ymw==} engines: {node: '>=18.0.0'} '@smithy/config-resolver@4.4.9': resolution: {integrity: sha512-ejQvXqlcU30h7liR9fXtj7PIAau1t/sFbJpgWPfiYDs7zd16jpH0IsSXKcba2jF6ChTXvIjACs27kNMc5xxE2Q==} engines: {node: '>=18.0.0'} - '@smithy/core@3.23.6': - resolution: {integrity: sha512-4xE+0L2NrsFKpEVFlFELkIHQddBvMbQ41LRIP74dGCXnY1zQ9DgksrBcRBDJT+iOzGy4VEJIeU3hkUK5mn06kg==} + '@smithy/core@3.23.11': + resolution: {integrity: sha512-952rGf7hBRnhUIaeLp6q4MptKW8sPFe5VvkoZ5qIzFAtx6c/QZ/54FS3yootsyUSf9gJX/NBqEBNdNR7jMIlpQ==} engines: {node: '>=18.0.0'} - '@smithy/core@3.23.9': - resolution: {integrity: sha512-1Vcut4LEL9HZsdpI0vFiRYIsaoPwZLjAxnVQDUMQK8beMS+EYPLDQCXtbzfxmM5GzSgjfe2Q9M7WaXwIMQllyQ==} + '@smithy/core@3.23.6': + resolution: {integrity: sha512-4xE+0L2NrsFKpEVFlFELkIHQddBvMbQ41LRIP74dGCXnY1zQ9DgksrBcRBDJT+iOzGy4VEJIeU3hkUK5mn06kg==} engines: {node: '>=18.0.0'} '@smithy/credential-provider-imds@4.2.10': resolution: {integrity: 
sha512-3bsMLJJLTZGZqVGGeBVFfLzuRulVsGTj12BzRKODTHqUABpIr0jMN1vN3+u6r2OfyhAQ2pXaMZWX/swBK5I6PQ==} engines: {node: '>=18.0.0'} - '@smithy/credential-provider-imds@4.2.11': - resolution: {integrity: sha512-lBXrS6ku0kTj3xLmsJW0WwqWbGQ6ueooYyp/1L9lkyT0M02C+DWwYwc5aTyXFbRaK38ojALxNixg+LxKSHZc0g==} + '@smithy/credential-provider-imds@4.2.12': + resolution: {integrity: sha512-cr2lR792vNZcYMriSIj+Um3x9KWrjcu98kn234xA6reOAFMmbRpQMOv8KPgEmLLtx3eldU6c5wALKFqNOhugmg==} engines: {node: '>=18.0.0'} '@smithy/eventstream-codec@4.2.10': @@ -2890,8 +2830,8 @@ packages: resolution: {integrity: sha512-wbTRjOxdFuyEg0CpumjZO0hkUl+fetJFqxNROepuLIoijQh51aMBmzFLfoQdwRjxsuuS2jizzIUTjPWgd8pd7g==} engines: {node: '>=18.0.0'} - '@smithy/fetch-http-handler@5.3.13': - resolution: {integrity: sha512-U2Hcfl2s3XaYjikN9cT4mPu8ybDbImV3baXR0PkVlC0TTx808bRP3FaPGAzPtB8OByI+JqJ1kyS+7GEgae7+qQ==} + '@smithy/fetch-http-handler@5.3.15': + resolution: {integrity: sha512-T4jFU5N/yiIfrtrsb9uOQn7RdELdM/7HbyLNr6uO/mpkj1ctiVs7CihVr51w4LyQlXWDpXFn4BElf1WmQvZu/A==} engines: {node: '>=18.0.0'} '@smithy/hash-blob-browser@4.2.11': @@ -2902,8 +2842,8 @@ packages: resolution: {integrity: sha512-1VzIOI5CcsvMDvP3iv1vG/RfLJVVVc67dCRyLSB2Hn9SWCZrDO3zvcIzj3BfEtqRW5kcMg5KAeVf1K3dR6nD3w==} engines: {node: '>=18.0.0'} - '@smithy/hash-node@4.2.11': - resolution: {integrity: sha512-T+p1pNynRkydpdL015ruIoyPSRw9e/SQOWmSAMmmprfswMrd5Ow5igOWNVlvyVFZlxXqGmyH3NQwfwy8r5Jx0A==} + '@smithy/hash-node@4.2.12': + resolution: {integrity: sha512-QhBYbGrbxTkZ43QoTPrK72DoYviDeg6YKDrHTMJbbC+A0sml3kSjzFtXP7BtbyJnXojLfTQldGdUR0RGD8dA3w==} engines: {node: '>=18.0.0'} '@smithy/hash-stream-node@4.2.10': @@ -2914,8 +2854,8 @@ packages: resolution: {integrity: sha512-vy9KPNSFUU0ajFYk0sDZIYiUlAWGEAhRfehIr5ZkdFrRFTAuXEPUd41USuqHU6vvLX4r6Q9X7MKBco5+Il0Org==} engines: {node: '>=18.0.0'} - '@smithy/invalid-dependency@4.2.11': - resolution: {integrity: sha512-cGNMrgykRmddrNhYy1yBdrp5GwIgEkniS7k9O1VLB38yxQtlvrxpZtUVvo6T4cKpeZsriukBuuxfJcdZQc/f/g==} + 
'@smithy/invalid-dependency@4.2.12': + resolution: {integrity: sha512-/4F1zb7Z8LOu1PalTdESFHR0RbPwHd3FcaG1sI3UEIriQTWakysgJr65lc1jj6QY5ye7aFsisajotH6UhWfm/g==} engines: {node: '>=18.0.0'} '@smithy/is-array-buffer@2.2.0': @@ -2938,132 +2878,136 @@ packages: resolution: {integrity: sha512-TQZ9kX5c6XbjhaEBpvhSvMEZ0klBs1CFtOdPFwATZSbC9UeQfKHPLPN9Y+I6wZGMOavlYTOlHEPDrt42PMSH9w==} engines: {node: '>=18.0.0'} - '@smithy/middleware-content-length@4.2.11': - resolution: {integrity: sha512-UvIfKYAKhCzr4p6jFevPlKhQwyQwlJ6IeKLDhmV1PlYfcW3RL4ROjNEDtSik4NYMi9kDkH7eSwyTP3vNJ/u/Dw==} + '@smithy/middleware-content-length@4.2.12': + resolution: {integrity: sha512-YE58Yz+cvFInWI/wOTrB+DbvUVz/pLn5mC5MvOV4fdRUc6qGwygyngcucRQjAhiCEbmfLOXX0gntSIcgMvAjmA==} engines: {node: '>=18.0.0'} '@smithy/middleware-endpoint@4.4.20': resolution: {integrity: sha512-9W6Np4ceBP3XCYAGLoMCmn8t2RRVzuD1ndWPLBbv7H9CrwM9Bprf6Up6BM9ZA/3alodg0b7Kf6ftBK9R1N04vw==} engines: {node: '>=18.0.0'} - '@smithy/middleware-endpoint@4.4.23': - resolution: {integrity: sha512-UEFIejZy54T1EJn2aWJ45voB7RP2T+IRzUqocIdM6GFFa5ClZncakYJfcYnoXt3UsQrZZ9ZRauGm77l9UCbBLw==} + '@smithy/middleware-endpoint@4.4.25': + resolution: {integrity: sha512-dqjLwZs2eBxIUG6Qtw8/YZ4DvzHGIf0DA18wrgtfP6a50UIO7e2nY0FPdcbv5tVJKqWCCU5BmGMOUwT7Puan+A==} engines: {node: '>=18.0.0'} '@smithy/middleware-retry@4.4.37': resolution: {integrity: sha512-/1psZZllBBSQ7+qo5+hhLz7AEPGLx3Z0+e3ramMBEuPK2PfvLK4SrncDB9VegX5mBn+oP/UTDrM6IHrFjvX1ZA==} engines: {node: '>=18.0.0'} - '@smithy/middleware-retry@4.4.40': - resolution: {integrity: sha512-YhEMakG1Ae57FajERdHNZ4ShOPIY7DsgV+ZoAxo/5BT0KIe+f6DDU2rtIymNNFIj22NJfeeI6LWIifrwM0f+rA==} + '@smithy/middleware-retry@4.4.42': + resolution: {integrity: sha512-vbwyqHRIpIZutNXZpLAozakzamcINaRCpEy1MYmK6xBeW3xN+TyPRA123GjXnuxZIjc9848MRRCugVMTXxC4Eg==} engines: {node: '>=18.0.0'} '@smithy/middleware-serde@4.2.11': resolution: {integrity: 
sha512-STQdONGPwbbC7cusL60s7vOa6He6A9w2jWhoapL0mgVjmR19pr26slV+yoSP76SIssMTX/95e5nOZ6UQv6jolg==} engines: {node: '>=18.0.0'} - '@smithy/middleware-serde@4.2.12': - resolution: {integrity: sha512-W9g1bOLui7Xn5FABRVS0o3rXL0gfN37d/8I/W7i0N7oxjx9QecUmXEMSUMADTODwdtka9cN43t5BI2CodLJpng==} + '@smithy/middleware-serde@4.2.14': + resolution: {integrity: sha512-+CcaLoLa5apzSRtloOyG7lQvkUw2ZDml3hRh4QiG9WyEPfW5Ke/3tPOPiPjUneuT59Tpn8+c3RVaUvvkkwqZwg==} engines: {node: '>=18.0.0'} '@smithy/middleware-stack@4.2.10': resolution: {integrity: sha512-pmts/WovNcE/tlyHa8z/groPeOtqtEpp61q3W0nW1nDJuMq/x+hWa/OVQBtgU0tBqupeXq0VBOLA4UZwE8I0YA==} engines: {node: '>=18.0.0'} - '@smithy/middleware-stack@4.2.11': - resolution: {integrity: sha512-s+eenEPW6RgliDk2IhjD2hWOxIx1NKrOHxEwNUaUXxYBxIyCcDfNULZ2Mu15E3kwcJWBedTET/kEASPV1A1Akg==} + '@smithy/middleware-stack@4.2.12': + resolution: {integrity: sha512-kruC5gRHwsCOuyCd4ouQxYjgRAym2uDlCvQ5acuMtRrcdfg7mFBg6blaxcJ09STpt3ziEkis6bhg1uwrWU7txw==} engines: {node: '>=18.0.0'} '@smithy/node-config-provider@4.3.10': resolution: {integrity: sha512-UALRbJtVX34AdP2VECKVlnNgidLHA2A7YgcJzwSBg1hzmnO/bZBHl/LDQQyYifzUwp1UOODnl9JJ3KNawpUJ9w==} engines: {node: '>=18.0.0'} - '@smithy/node-config-provider@4.3.11': - resolution: {integrity: sha512-xD17eE7kaLgBBGf5CZQ58hh2YmwK1Z0O8YhffwB/De2jsL0U3JklmhVYJ9Uf37OtUDLF2gsW40Xwwag9U869Gg==} + '@smithy/node-config-provider@4.3.12': + resolution: {integrity: sha512-tr2oKX2xMcO+rBOjobSwVAkV05SIfUKz8iI53rzxEmgW3GOOPOv0UioSDk+J8OpRQnpnhsO3Af6IEBabQBVmiw==} engines: {node: '>=18.0.0'} '@smithy/node-http-handler@4.4.12': resolution: {integrity: sha512-zo1+WKJkR9x7ZtMeMDAAsq2PufwiLDmkhcjpWPRRkmeIuOm6nq1qjFICSZbnjBvD09ei8KMo26BWxsu2BUU+5w==} engines: {node: '>=18.0.0'} - '@smithy/node-http-handler@4.4.14': - resolution: {integrity: sha512-DamSqaU8nuk0xTJDrYnRzZndHwwRnyj/n/+RqGGCcBKB4qrQem0mSDiWdupaNWdwxzyMU91qxDmHOCazfhtO3A==} + '@smithy/node-http-handler@4.4.16': + resolution: {integrity: 
sha512-ULC8UCS/HivdCB3jhi+kLFYe4B5gxH2gi9vHBfEIiRrT2jfKiZNiETJSlzRtE6B26XbBHjPtc8iZKSNqMol9bw==} engines: {node: '>=18.0.0'} '@smithy/property-provider@4.2.10': resolution: {integrity: sha512-5jm60P0CU7tom0eNrZ7YrkgBaoLFXzmqB0wVS+4uK8PPGmosSrLNf6rRd50UBvukztawZ7zyA8TxlrKpF5z9jw==} engines: {node: '>=18.0.0'} - '@smithy/property-provider@4.2.11': - resolution: {integrity: sha512-14T1V64o6/ndyrnl1ze1ZhyLzIeYNN47oF/QU6P5m82AEtyOkMJTb0gO1dPubYjyyKuPD6OSVMPDKe+zioOnCg==} + '@smithy/property-provider@4.2.12': + resolution: {integrity: sha512-jqve46eYU1v7pZ5BM+fmkbq3DerkSluPr5EhvOcHxygxzD05ByDRppRwRPPpFrsFo5yDtCYLKu+kreHKVrvc7A==} engines: {node: '>=18.0.0'} '@smithy/protocol-http@5.3.10': resolution: {integrity: sha512-2NzVWpYY0tRdfeCJLsgrR89KE3NTWT2wGulhNUxYlRmtRmPwLQwKzhrfVaiNlA9ZpJvbW7cjTVChYKgnkqXj1A==} engines: {node: '>=18.0.0'} - '@smithy/protocol-http@5.3.11': - resolution: {integrity: sha512-hI+barOVDJBkNt4y0L2mu3Ugc0w7+BpJ2CZuLwXtSltGAAwCb3IvnalGlbDV/UCS6a9ZuT3+exd1WxNdLb5IlQ==} + '@smithy/protocol-http@5.3.12': + resolution: {integrity: sha512-fit0GZK9I1xoRlR4jXmbLhoN0OdEpa96ul8M65XdmXnxXkuMxM0Y8HDT0Fh0Xb4I85MBvBClOzgSrV1X2s1Hxw==} engines: {node: '>=18.0.0'} '@smithy/querystring-builder@4.2.10': resolution: {integrity: sha512-HeN7kEvuzO2DmAzLukE9UryiUvejD3tMp9a1D1NJETerIfKobBUCLfviP6QEk500166eD2IATaXM59qgUI+YDA==} engines: {node: '>=18.0.0'} - '@smithy/querystring-builder@4.2.11': - resolution: {integrity: sha512-7spdikrYiljpket6u0up2Ck2mxhy7dZ0+TDd+S53Dg2DHd6wg+YNJrTCHiLdgZmEXZKI7LJZcwL3721ZRDFiqA==} + '@smithy/querystring-builder@4.2.12': + resolution: {integrity: sha512-6wTZjGABQufekycfDGMEB84BgtdOE/rCVTov+EDXQ8NHKTUNIp/j27IliwP7tjIU9LR+sSzyGBOXjeEtVgzCHg==} engines: {node: '>=18.0.0'} '@smithy/querystring-parser@4.2.10': resolution: {integrity: sha512-4Mh18J26+ao1oX5wXJfWlTT+Q1OpDR8ssiC9PDOuEgVBGloqg18Fw7h5Ct8DyT9NBYwJgtJ2nLjKKFU6RP1G1Q==} engines: {node: '>=18.0.0'} - '@smithy/querystring-parser@4.2.11': - resolution: {integrity: 
sha512-nE3IRNjDltvGcoThD2abTozI1dkSy8aX+a2N1Rs55en5UsdyyIXgGEmevUL3okZFoJC77JgRGe99xYohhsjivQ==} + '@smithy/querystring-parser@4.2.12': + resolution: {integrity: sha512-P2OdvrgiAKpkPNKlKUtWbNZKB1XjPxM086NeVhK+W+wI46pIKdWBe5QyXvhUm3MEcyS/rkLvY8rZzyUdmyDZBw==} engines: {node: '>=18.0.0'} '@smithy/service-error-classification@4.2.10': resolution: {integrity: sha512-0R/+/Il5y8nB/By90o8hy/bWVYptbIfvoTYad0igYQO5RefhNCDmNzqxaMx7K1t/QWo0d6UynqpqN5cCQt1MCg==} engines: {node: '>=18.0.0'} - '@smithy/service-error-classification@4.2.11': - resolution: {integrity: sha512-HkMFJZJUhzU3HvND1+Yw/kYWXp4RPDLBWLcK1n+Vqw8xn4y2YiBhdww8IxhkQjP/QlZun5bwm3vcHc8AqIU3zw==} + '@smithy/service-error-classification@4.2.12': + resolution: {integrity: sha512-LlP29oSQN0Tw0b6D0Xo6BIikBswuIiGYbRACy5ujw/JgWSzTdYj46U83ssf6Ux0GyNJVivs2uReU8pt7Eu9okQ==} engines: {node: '>=18.0.0'} '@smithy/shared-ini-file-loader@4.4.5': resolution: {integrity: sha512-pHgASxl50rrtOztgQCPmOXFjRW+mCd7ALr/3uXNzRrRoGV5G2+78GOsQ3HlQuBVHCh9o6xqMNvlIKZjWn4Euug==} engines: {node: '>=18.0.0'} - '@smithy/shared-ini-file-loader@4.4.6': - resolution: {integrity: sha512-IB/M5I8G0EeXZTHsAxpx51tMQ5R719F3aq+fjEB6VtNcCHDc0ajFDIGDZw+FW9GxtEkgTduiPpjveJdA/CX7sw==} + '@smithy/shared-ini-file-loader@4.4.7': + resolution: {integrity: sha512-HrOKWsUb+otTeo1HxVWeEb99t5ER1XrBi/xka2Wv6NVmTbuCUC1dvlrksdvxFtODLBjsC+PHK+fuy2x/7Ynyiw==} engines: {node: '>=18.0.0'} '@smithy/signature-v4@5.3.10': resolution: {integrity: sha512-Wab3wW8468WqTKIxI+aZe3JYO52/RYT/8sDOdzkUhjnLakLe9qoQqIcfih/qxcF4qWEFoWBszY0mj5uxffaVXA==} engines: {node: '>=18.0.0'} - '@smithy/signature-v4@5.3.11': - resolution: {integrity: sha512-V1L6N9aKOBAN4wEHLyqjLBnAz13mtILU0SeDrjOaIZEeN6IFa6DxwRt1NNpOdmSpQUfkBj0qeD3m6P77uzMhgQ==} + '@smithy/signature-v4@5.3.12': + resolution: {integrity: sha512-B/FBwO3MVOL00DaRSXfXfa/TRXRheagt/q5A2NM13u7q+sHS59EOVGQNfG7DkmVtdQm5m3vOosoKAXSqn/OEgw==} engines: {node: '>=18.0.0'} '@smithy/smithy-client@4.12.0': resolution: {integrity: 
sha512-R8bQ9K3lCcXyZmBnQqUZJF4ChZmtWT5NLi6x5kgWx5D+/j0KorXcA0YcFg/X5TOgnTCy1tbKc6z2g2y4amFupQ==} engines: {node: '>=18.0.0'} - '@smithy/smithy-client@4.12.3': - resolution: {integrity: sha512-7k4UxjSpHmPN2AxVhvIazRSzFQjWnud3sOsXcFStzagww17j1cFQYqTSiQ8xuYK3vKLR1Ni8FzuT3VlKr3xCNw==} + '@smithy/smithy-client@4.12.5': + resolution: {integrity: sha512-UqwYawyqSr/aog8mnLnfbPurS0gi4G7IYDcD28cUIBhsvWs1+rQcL2IwkUQ+QZ7dibaoRzhNF99fAQ9AUcO00w==} engines: {node: '>=18.0.0'} '@smithy/types@4.13.0': resolution: {integrity: sha512-COuLsZILbbQsdrwKQpkkpyep7lCsByxwj7m0Mg5v66/ZTyenlfBc40/QFQ5chO0YN/PNEH1Bi3fGtfXPnYNeDw==} engines: {node: '>=18.0.0'} + '@smithy/types@4.13.1': + resolution: {integrity: sha512-787F3yzE2UiJIQ+wYW1CVg2odHjmaWLGksnKQHUrK/lYZSEcy1msuLVvxaR/sI2/aDe9U+TBuLsXnr3vod1g0g==} + engines: {node: '>=18.0.0'} + '@smithy/url-parser@4.2.10': resolution: {integrity: sha512-uypjF7fCDsRk26u3qHmFI/ePL7bxxB9vKkE+2WKEciHhz+4QtbzWiHRVNRJwU3cKhrYDYQE3b0MRFtqfLYdA4A==} engines: {node: '>=18.0.0'} - '@smithy/url-parser@4.2.11': - resolution: {integrity: sha512-oTAGGHo8ZYc5VZsBREzuf5lf2pAurJQsccMusVZ85wDkX66ojEc/XauiGjzCj50A61ObFTPe6d7Pyt6UBYaing==} + '@smithy/url-parser@4.2.12': + resolution: {integrity: sha512-wOPKPEpso+doCZGIlr+e1lVI6+9VAKfL4kZWFgzVgGWY2hZxshNKod4l2LXS3PRC9otH/JRSjtEHqQ/7eLciRA==} engines: {node: '>=18.0.0'} '@smithy/util-base64@4.3.1': @@ -3114,24 +3058,24 @@ packages: resolution: {integrity: sha512-R0smq7EHQXRVMxkAxtH5akJ/FvgAmNF6bUy/GwY/N20T4GrwjT633NFm0VuRpC+8Bbv8R9A0DoJ9OiZL/M3xew==} engines: {node: '>=18.0.0'} - '@smithy/util-defaults-mode-browser@4.3.39': - resolution: {integrity: sha512-ui7/Ho/+VHqS7Km2wBw4/Ab4RktoiSshgcgpJzC4keFPs6tLJS4IQwbeahxQS3E/w98uq6E1mirCH/id9xIXeQ==} + '@smithy/util-defaults-mode-browser@4.3.41': + resolution: {integrity: sha512-M1w1Ux0rSVvBOxIIiqbxvZvhnjQ+VUjJrugtORE90BbadSTH+jsQL279KRL3Hv0w69rE7EuYkV/4Lepz/NBW9g==} engines: {node: '>=18.0.0'} '@smithy/util-defaults-mode-node@4.2.39': resolution: {integrity: 
sha512-otWuoDm35btJV1L8MyHrPl462B07QCdMTktKc7/yM+Psv6KbED/ziXiHnmr7yPHUjfIwE9S8Max0LO24Mo3ZVg==} engines: {node: '>=18.0.0'} - '@smithy/util-defaults-mode-node@4.2.42': - resolution: {integrity: sha512-QDA84CWNe8Akpj15ofLO+1N3Rfg8qa2K5uX0y6HnOp4AnRYRgWrKx/xzbYNbVF9ZsyJUYOfcoaN3y93wA/QJ2A==} + '@smithy/util-defaults-mode-node@4.2.44': + resolution: {integrity: sha512-YPze3/lD1KmWuZsl9JlfhcgGLX7AXhSoaCDtiPntUjNW5/YY0lOHjkcgxyE9x/h5vvS1fzDifMGjzqnNlNiqOQ==} engines: {node: '>=18.0.0'} '@smithy/util-endpoints@3.3.1': resolution: {integrity: sha512-xyctc4klmjmieQiF9I1wssBWleRV0RhJ2DpO8+8yzi2LO1Z+4IWOZNGZGNj4+hq9kdo+nyfrRLmQTzc16Op2Vg==} engines: {node: '>=18.0.0'} - '@smithy/util-endpoints@3.3.2': - resolution: {integrity: sha512-+4HFLpE5u29AbFlTdlKIT7jfOzZ8PDYZKTb3e+AgLz986OYwqTourQ5H+jg79/66DB69Un1+qKecLnkZdAsYcA==} + '@smithy/util-endpoints@3.3.3': + resolution: {integrity: sha512-VACQVe50j0HZPjpwWcjyT51KUQ4AnsvEaQ2lKHOSL4mNLD0G9BjEniQ+yCt1qqfKfiAHRAts26ud7hBjamrwig==} engines: {node: '>=18.0.0'} '@smithy/util-hex-encoding@4.2.1': @@ -3146,24 +3090,24 @@ packages: resolution: {integrity: sha512-LxaQIWLp4y0r72eA8mwPNQ9va4h5KeLM0I3M/HV9klmFaY2kN766wf5vsTzmaOpNNb7GgXAd9a25P3h8T49PSA==} engines: {node: '>=18.0.0'} - '@smithy/util-middleware@4.2.11': - resolution: {integrity: sha512-r3dtF9F+TpSZUxpOVVtPfk09Rlo4lT6ORBqEvX3IBT6SkQAdDSVKR5GcfmZbtl7WKhKnmb3wbDTQ6ibR2XHClw==} + '@smithy/util-middleware@4.2.12': + resolution: {integrity: sha512-Er805uFUOvgc0l8nv0e0su0VFISoxhJ/AwOn3gL2NWNY2LUEldP5WtVcRYSQBcjg0y9NfG8JYrCJaYDpupBHJQ==} engines: {node: '>=18.0.0'} '@smithy/util-retry@4.2.10': resolution: {integrity: sha512-HrBzistfpyE5uqTwiyLsFHscgnwB0kgv8vySp7q5kZ0Eltn/tjosaSGGDj/jJ9ys7pWzIP/icE2d+7vMKXLv7A==} engines: {node: '>=18.0.0'} - '@smithy/util-retry@4.2.11': - resolution: {integrity: sha512-XSZULmL5x6aCTTii59wJqKsY1l3eMIAomRAccW7Tzh9r8s7T/7rdo03oektuH5jeYRlJMPcNP92EuRDvk9aXbw==} + '@smithy/util-retry@4.2.12': + resolution: {integrity: 
sha512-1zopLDUEOwumjcHdJ1mwBHddubYF8GMQvstVCLC54Y46rqoHwlIU+8ZzUeaBcD+WCJHyDGSeZ2ml9YSe9aqcoQ==} engines: {node: '>=18.0.0'} '@smithy/util-stream@4.5.15': resolution: {integrity: sha512-OlOKnaqnkU9X+6wEkd7mN+WB7orPbCVDauXOj22Q7VtiTkvy7ZdSsOg4QiNAZMgI4OkvNf+/VLUC3VXkxuWJZw==} engines: {node: '>=18.0.0'} - '@smithy/util-stream@4.5.17': - resolution: {integrity: sha512-793BYZ4h2JAQkNHcEnyFxDTcZbm9bVybD0UV/LEWmZ5bkTms7JqjfrLMi2Qy0E5WFcCzLwCAPgcvcvxoeALbAQ==} + '@smithy/util-stream@4.5.19': + resolution: {integrity: sha512-v4sa+3xTweL1CLO2UP0p7tvIMH/Rq1X4KKOxd568mpe6LSLMQCnDHs4uv7m3ukpl3HvcN2JH6jiCS0SNRXKP/w==} engines: {node: '>=18.0.0'} '@smithy/util-uri-escape@4.2.1': @@ -3198,91 +3142,91 @@ packages: resolution: {integrity: sha512-O/IEdcCUKkubz60tFbGA7ceITTAJsty+lBjNoorP4Z6XRqaFb/OjQjZODophEcuq68nKm6/0r+6/lLQ+XVpk8g==} engines: {node: '>=18.0.0'} - '@snazzah/davey-android-arm-eabi@0.1.9': - resolution: {integrity: sha512-Dq0WyeVGBw+uQbisV/6PeCQV2ndJozfhZqiNIfQxu6ehIdXB7iHILv+oY+AQN2n+qxiFmLh/MOX9RF+pIWdPbA==} + '@snazzah/davey-android-arm-eabi@0.1.10': + resolution: {integrity: sha512-7bwHxSNEI2wVXOT6xnmpnO9SHb2xwAnf9oEdL45dlfVHTgU1Okg5rwGwRvZ2aLVFFbTyecfC8EVZyhpyTkjLSw==} engines: {node: '>= 10'} cpu: [arm] os: [android] - '@snazzah/davey-android-arm64@0.1.9': - resolution: {integrity: sha512-OE16OZjv7F/JrD7Mzw5eL2gY2vXRPC8S7ZrmkcMyz/sHHJsGHlT+L7X5s56Bec1YDTVmzAsH4UBuvVBoXuIWEQ==} + '@snazzah/davey-android-arm64@0.1.10': + resolution: {integrity: sha512-68WUf2LQwQTP9MgPcCqTWwJztJSIk0keGfF2Y/b+MihSDh29fYJl7C0rbz69aUrVCvCC2lYkB/46P8X1kBz7yg==} engines: {node: '>= 10'} cpu: [arm64] os: [android] - '@snazzah/davey-darwin-arm64@0.1.9': - resolution: {integrity: sha512-z7oORvAPExikFkH6tvHhbUdZd77MYZp9VqbCpKEiI+sisWFVXgHde7F7iH3G4Bz6gUYJfgvKhWXiDRc+0SC4dg==} + '@snazzah/davey-darwin-arm64@0.1.10': + resolution: {integrity: sha512-nYC+DWCGUC1jUGEenCNQE/jJpL/02m0ebY/NvTCQbul5ktI/ShVzgA3kzssEhZvhf6jbH048Rs39wDhp/b24Jg==} engines: {node: '>= 10'} cpu: [arm64] os: [darwin] - 
'@snazzah/davey-darwin-x64@0.1.9': - resolution: {integrity: sha512-f1LzGyRGlM414KpXml3OgWVSd7CgylcdYaFj/zDBb8bvWjxyvsI9iMeuPfe/cduloxRj8dELde/yCDZtFR6PdQ==} + '@snazzah/davey-darwin-x64@0.1.10': + resolution: {integrity: sha512-0q5Rrcs+O9sSSnPX+A3R3djEQs2nTAtMe5N3lApO6lZas/QNMl6wkEWCvTbDc2cfAYBMSk2jgc1awlRXi4LX3Q==} engines: {node: '>= 10'} cpu: [x64] os: [darwin] - '@snazzah/davey-freebsd-x64@0.1.9': - resolution: {integrity: sha512-k6p3JY2b8rD6j0V9Ql7kBUMR4eJdcpriNwiHltLzmtGuz/nK5RGQdkEP68gTLc+Uj3xs5Cy0jRKmv2xJQBR4sA==} + '@snazzah/davey-freebsd-x64@0.1.10': + resolution: {integrity: sha512-/Gq5YDD6Oz8iBqVJLswUnetCv9JCRo1quYX5ujzpAG8zPCNItZo4g4h5p9C+h4Yoay2quWBYhoaVqQKT96bm8g==} engines: {node: '>= 10'} cpu: [x64] os: [freebsd] - '@snazzah/davey-linux-arm-gnueabihf@0.1.9': - resolution: {integrity: sha512-xDaAFUC/1+n/YayNwKsqKOBMuW0KI6F0SjgWU+krYTQTVmAKNjOM80IjemrVoqTpBOxBsT80zEtct2wj11CE3Q==} + '@snazzah/davey-linux-arm-gnueabihf@0.1.10': + resolution: {integrity: sha512-0Z7Vrt0WIbgxws9CeHB9qlueYJlvltI44rUuZmysdi70UcHGxlr7nE3MnzYCr9nRWRegohn8EQPWHMKMDJH2GA==} engines: {node: '>= 10'} cpu: [arm] os: [linux] - '@snazzah/davey-linux-arm64-gnu@0.1.9': - resolution: {integrity: sha512-t1VxFBzWExPNpsNY/9oStdAAuHqFvwZvIO2YPYyVNstxfi2KmAbHMweHUW7xb2ppXuhVQZ4VGmmeXiXcXqhPBw==} + '@snazzah/davey-linux-arm64-gnu@0.1.10': + resolution: {integrity: sha512-xhZQycn4QB+qXhqm/QmZ+kb9MHMXcbjjoPfvcIL4WMQXFG/zUWHW8EiBk7ZTEGMOpeab3F9D1+MlgumglYByUQ==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@snazzah/davey-linux-arm64-musl@0.1.9': - resolution: {integrity: sha512-Xvlr+nBPzuFV4PXHufddlt08JsEyu0p8mX2DpqdPxdpysYIH4I8V86yJiS4tk04a6pLBDd8IxTbBwvXJKqd/LQ==} + '@snazzah/davey-linux-arm64-musl@0.1.10': + resolution: {integrity: sha512-pudzQCP9rZItwW4qHHvciMwtNd9kWH4l73g6Id1LRpe6sc8jiFBV7W+YXITj2PZbI0by6XPfkRP6Dk5IkGOuAw==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@snazzah/davey-linux-x64-gnu@0.1.9': - resolution: {integrity: 
sha512-6Uunc/NxiEkg1reroAKZAGfOtjl1CGa7hfTTVClb2f+DiA8ZRQWBh+3lgkq/0IeL262B4F14X8QRv5Bsv128qw==} + '@snazzah/davey-linux-x64-gnu@0.1.10': + resolution: {integrity: sha512-DC8qRmk+xJEFNqjxKB46cETKeDQqgUqE5p39KXS2k6Vl/XTi8pw8pXOxrPfYte5neoqlWAVQzbxuLnwpyRJVEQ==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@snazzah/davey-linux-x64-musl@0.1.9': - resolution: {integrity: sha512-fFQ/n3aWt1lXhxSdy+Ge3gi5bR3VETMVsWhH0gwBALUKrbo3ZzgSktm4lNrXE9i0ncMz/CDpZ5i0wt/N3XphEQ==} + '@snazzah/davey-linux-x64-musl@0.1.10': + resolution: {integrity: sha512-wPR5/2QmsF7sR0WUaCwbk4XI3TLcxK9PVK8mhgcAYyuRpbhcVgNGWXs8ulcyMSXve5pFRJAFAuMTGCEb014peg==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@snazzah/davey-wasm32-wasi@0.1.9': - resolution: {integrity: sha512-xWvzej8YCVlUvzlpmqJMIf0XmLlHqulKZ2e7WNe2TxQmsK+o0zTZqiQYs2MwaEbrNXBhYlHDkdpuwoXkJdscNQ==} + '@snazzah/davey-wasm32-wasi@0.1.10': + resolution: {integrity: sha512-SfQavU+eKTDbRmPeLRodrVSfsWq25PYTmH1nIZW3B27L6IkijzjXZZuxiU1ZG1gdI5fB7mwXrOTtx34t+vAG7Q==} engines: {node: '>=14.0.0'} cpu: [wasm32] - '@snazzah/davey-win32-arm64-msvc@0.1.9': - resolution: {integrity: sha512-sTqry/DfltX2OdW1CTLKa3dFYN5FloAEb2yhGsY1i5+Bms6OhwByXfALvyMHYVo61Th2+sD+9BJpQffHFKDA3w==} + '@snazzah/davey-win32-arm64-msvc@0.1.10': + resolution: {integrity: sha512-Raafk53smYs67wZCY9bQXHXzbaiRMS5QCdjTdin3D9fF5A06T/0Zv1z7/YnaN+O3GSL/Ou3RvynF7SziToYiFQ==} engines: {node: '>= 10'} cpu: [arm64] os: [win32] - '@snazzah/davey-win32-ia32-msvc@0.1.9': - resolution: {integrity: sha512-twD3LwlkGnSwphsCtpGb5ztpBIWEvGdc0iujoVkdzZ6nJiq5p8iaLjJMO4hBm9h3s28fc+1Qd7AMVnagiOasnA==} + '@snazzah/davey-win32-ia32-msvc@0.1.10': + resolution: {integrity: sha512-pAs43l/DiZ+icqBwxIwNePzuYxFM1ZblVuf7t6vwwSLxvova7vnREnU7qDVjbc5/YTUHOsqYy3S6TpZMzDo2lw==} engines: {node: '>= 10'} cpu: [ia32] os: [win32] - '@snazzah/davey-win32-x64-msvc@0.1.9': - resolution: {integrity: sha512-eMnXbv4GoTngWYY538i/qHz2BS+RgSXFsvKltPzKqnqzPzhQZIY7TemEJn3D5yWGfW4qHve9u23rz93FQqnQMA==} + 
'@snazzah/davey-win32-x64-msvc@0.1.10': + resolution: {integrity: sha512-kr6148VVBoUT4CtD+5hYshTFRny7R/xQZxXFhFc0fYjtmdMVM8Px9M91olg1JFNxuNzdfMfTufR58Q3wfBocug==} engines: {node: '>= 10'} cpu: [x64] os: [win32] - '@snazzah/davey@0.1.9': - resolution: {integrity: sha512-vNZk5y+IsxjwzTAXikvzz5pqMLb35YytC64nVF2MAFVhjpXu9ITOKUriZ0JG/llwzCAi56jb5x0cXDRIyE2A2A==} + '@snazzah/davey@0.1.10': + resolution: {integrity: sha512-J5f7vV5/tnj0xGnqufFRd6qiWn3FcR3iXjpjpEmO2Ok+Io0AASkMaZ3I39TsL45as0Qo5bq9wWuamFQ77PjJ+g==} engines: {node: '>= 10'} '@standard-schema/spec@1.1.0': @@ -3451,11 +3395,11 @@ packages: '@types/node@20.19.37': resolution: {integrity: sha512-8kzdPJ3FsNsVIurqBs7oodNnCEVbni9yUEkaHbgptDACOPW04jimGagZ51E6+lXUwJjgnBw+hyko/lkFWCldqw==} - '@types/node@24.11.0': - resolution: {integrity: sha512-fPxQqz4VTgPI/IQ+lj9r0h+fDR66bzoeMGHp8ASee+32OSGIkeASsoZuJixsQoVef1QJbeubcPBxKk22QVoWdw==} + '@types/node@24.12.0': + resolution: {integrity: sha512-GYDxsZi3ChgmckRT9HPU0WEhKLP08ev/Yfcq2AstjrDASOYCSXeyjDsHg4v5t4jOj7cyDX3vmprafKlWIG9MXQ==} - '@types/node@25.3.5': - resolution: {integrity: sha512-oX8xrhvpiyRCQkG1MFchB09f+cXftgIXb3a7UUa4Y3wpmZPw5tyZGTLWhlESOLq1Rq6oDlc8npVU2/9xiCuXMA==} + '@types/node@25.5.0': + resolution: {integrity: sha512-jp2P3tQMSxWugkCUKLRPVUpGaL5MVFwF8RDuSRztfwgN1wmqJeMSbKlnEtQqU8UrhTmzEmZdu2I6v2dpp7XIxw==} '@types/qrcode-terminal@0.12.2': resolution: {integrity: sha512-v+RcIEJ+Uhd6ygSQ0u5YYY7ZM+la7GgPbs0V/7l/kFs2uO4S8BcIUEMoP7za4DNIqNnUD5npf0A/7kBhrCKG5Q==} @@ -3502,43 +3446,43 @@ packages: '@types/yauzl@2.10.3': resolution: {integrity: sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==} - '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-mywkctYr45fUBUYD35poInc9HEjup0zyCO5z3ZU2QC9eCQShpwYSDceoSCwxVKB/b/f/CU6H3LqINFeIz5CvrQ==} + '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260313.1': + resolution: {integrity: 
sha512-/fU2IvlRQWOy63xSzkejW7tTQpsL5dQ/ATIsJFlK75vS941CnNJY8dAx3iQYLkHMhS45hhCIR+bbJPRaacq/fw==} cpu: [arm64] os: [darwin] - '@typescript/native-preview-darwin-x64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-iF+Y4USbCiD5BxmXI6xYuy+S6d2BhxKDb3YHjchzqg3AgleDNTd2rqSzlWv4ku26V2iOSfpM9t1H/xluL9pgNw==} + '@typescript/native-preview-darwin-x64@7.0.0-dev.20260313.1': + resolution: {integrity: sha512-oy7Ew1J3+YtO9QsqVGkncQ8bCwVPxNk8nSO2q1sHLccyYq0f4eDaZTlJ+u9Ynry548NwNucLh9wE+DWfWhzU3Q==} cpu: [x64] os: [darwin] - '@typescript/native-preview-linux-arm64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-uEIIbW1JYPGEesVh/P5xA+xox7pQ6toeFPeke2X2H2bs5YkWHVaUQtVZuKNmGelw+2PCG6XRrXvMgMp056ebuQ==} + '@typescript/native-preview-linux-arm64@7.0.0-dev.20260313.1': + resolution: {integrity: sha512-KkbAweTnBpmQ8wCGHjrLzPX+FuwhSrVERNqyGPaq/267Sxt0UwbIO3rZduXlq5UUln1+/z7uT/BNJiuoFW3iLw==} cpu: [arm64] os: [linux] - '@typescript/native-preview-linux-arm@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-vg8hwfwIhT8CmYJI5lG3PP8IoNzKKBGbq1cKjxQabSZTPuQKwVFVity2XKTKZKd+qRGL7xW4UWMJZLFgSx3b2Q==} + '@typescript/native-preview-linux-arm@7.0.0-dev.20260313.1': + resolution: {integrity: sha512-IAx0ajfEiL1tJg1N6+/nHXJKebNe72yanY2N5bicwIB3t2BmydnrEPG+/OFVqc+prfJngxSx/61mvkXScZePzg==} cpu: [arm] os: [linux] - '@typescript/native-preview-linux-x64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-Yd/ht0CGE4NYUAjuHa1u4VbiJbyUgvDh+b2o+Zcb2h5t8B761DIzDm24QqVXh+KhvGUoEodXWg3g3APxLHqj8Q==} + '@typescript/native-preview-linux-x64@7.0.0-dev.20260313.1': + resolution: {integrity: sha512-9LCNgXVNoArHlMuL6yFKJxSdshiiadTfW/pU4tz4Vbg+Dg9La1VE9mLlBdijy5ZIg4nsOFpR8JTDURcA1RoHXw==} cpu: [x64] os: [linux] - '@typescript/native-preview-win32-arm64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-Klk6BoiHegfPmkO0YYrXmbYVdPjOfN25lRkzenqDIwbyzPlABHvICCyo5YRvWD3HU4EeDfLisIFU9wEd/0duCw==} + '@typescript/native-preview-win32-arm64@7.0.0-dev.20260313.1': + resolution: {integrity: 
sha512-cP2y5hb2xhfEDIgxdhxhPXa/D5Lq3yj6zxVuhh9ZkUariF+ZAmF4pySlIA+7NdprgTQqvNY5Mp70cPUiYD3yUg==} cpu: [arm64] os: [win32] - '@typescript/native-preview-win32-x64@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-4LrXmaMfzedwczANIkD/M9guPD4EWuQnCxOJsJkdYi3ExWQDjIFwfmxTtAmfPBWxVExLfn7UUkz/yCtcv2Wd+w==} + '@typescript/native-preview-win32-x64@7.0.0-dev.20260313.1': + resolution: {integrity: sha512-8KDfi7U1enFo4z6F0qe4Rd5QzBhk+4cwpZtOGAT9lgyR4pF/mo8zQd0t+Hlkj6d87W057RP8lgCGTGfclGWxUg==} cpu: [x64] os: [win32] - '@typescript/native-preview@7.0.0-dev.20260308.1': - resolution: {integrity: sha512-8a3oe5IAfBkEfMouRheNhOXUScBSHIUknPvUdsbxx7s+Ja1lxFNA1X1TTl2T18vu72Q/mM86vxefw5eW8/ps3g==} + '@typescript/native-preview@7.0.0-dev.20260313.1': + resolution: {integrity: sha512-x+ZrFAEq+c7bF4Ml8+abYZ9vW6mzu22fmcPbDcBmUl/4uGFCYXXww0FS3+me9MfdSOCAPtqcZtwApx1RQO2X/w==} hasBin: true '@typespec/ts-http-runtime@0.3.3': @@ -3559,54 +3503,54 @@ packages: resolution: {integrity: sha512-2FFo/Kz2vTnOZDv59Q0s803LHf7KzuQ2EwOYYAtO0zUKJ8pV5CPsVC/IHyFb+Fsxl3R9XWFiX529yhslb4v9cQ==} engines: {node: '>=22.0.0'} - '@vitest/browser-playwright@4.0.18': - resolution: {integrity: sha512-gfajTHVCiwpxRj1qh0Sh/5bbGLG4F/ZH/V9xvFVoFddpITfMta9YGow0W6ZpTTORv2vdJuz9TnrNSmjKvpOf4g==} + '@vitest/browser-playwright@4.1.0': + resolution: {integrity: sha512-2RU7pZELY9/aVMLmABNy1HeZ4FX23FXGY1jRuHLHgWa2zaAE49aNW2GLzebW+BmbTZIKKyFF1QXvk7DEWViUCQ==} peerDependencies: playwright: '*' - vitest: 4.0.18 + vitest: 4.1.0 - '@vitest/browser@4.0.18': - resolution: {integrity: sha512-gVQqh7paBz3gC+ZdcCmNSWJMk70IUjDeVqi+5m5vYpEHsIwRgw3Y545jljtajhkekIpIp5Gg8oK7bctgY0E2Ng==} + '@vitest/browser@4.1.0': + resolution: {integrity: sha512-tG/iOrgbiHQks0ew7CdelUyNEHkv8NLrt+CqdTivIuoSnXvO7scWMn4Kqo78/UGY1NJ6Hv+vp8BvRnED/bjFdQ==} peerDependencies: - vitest: 4.0.18 + vitest: 4.1.0 - '@vitest/coverage-v8@4.0.18': - resolution: {integrity: sha512-7i+N2i0+ME+2JFZhfuz7Tg/FqKtilHjGyGvoHYQ6iLV0zahbsJ9sljC9OcFcPDbhYKCet+sG8SsVqlyGvPflZg==} + 
'@vitest/coverage-v8@4.1.0': + resolution: {integrity: sha512-nDWulKeik2bL2Va/Wl4x7DLuTKAXa906iRFooIRPR+huHkcvp9QDkPQ2RJdmjOFrqOqvNfoSQLF68deE3xC3CQ==} peerDependencies: - '@vitest/browser': 4.0.18 - vitest: 4.0.18 + '@vitest/browser': 4.1.0 + vitest: 4.1.0 peerDependenciesMeta: '@vitest/browser': optional: true - '@vitest/expect@4.0.18': - resolution: {integrity: sha512-8sCWUyckXXYvx4opfzVY03EOiYVxyNrHS5QxX3DAIi5dpJAAkyJezHCP77VMX4HKA2LDT/Jpfo8i2r5BE3GnQQ==} + '@vitest/expect@4.1.0': + resolution: {integrity: sha512-EIxG7k4wlWweuCLG9Y5InKFwpMEOyrMb6ZJ1ihYu02LVj/bzUwn2VMU+13PinsjRW75XnITeFrQBMH5+dLvCDA==} - '@vitest/mocker@4.0.18': - resolution: {integrity: sha512-HhVd0MDnzzsgevnOWCBj5Otnzobjy5wLBe4EdeeFGv8luMsGcYqDuFRMcttKWZA5vVO8RFjexVovXvAM4JoJDQ==} + '@vitest/mocker@4.1.0': + resolution: {integrity: sha512-evxREh+Hork43+Y4IOhTo+h5lGmVRyjqI739Rz4RlUPqwrkFFDF6EMvOOYjTx4E8Tl6gyCLRL8Mu7Ry12a13Tw==} peerDependencies: msw: ^2.4.9 - vite: ^6.0.0 || ^7.0.0-0 + vite: ^6.0.0 || ^7.0.0 || ^8.0.0-0 peerDependenciesMeta: msw: optional: true vite: optional: true - '@vitest/pretty-format@4.0.18': - resolution: {integrity: sha512-P24GK3GulZWC5tz87ux0m8OADrQIUVDPIjjj65vBXYG17ZeU3qD7r+MNZ1RNv4l8CGU2vtTRqixrOi9fYk/yKw==} + '@vitest/pretty-format@4.1.0': + resolution: {integrity: sha512-3RZLZlh88Ib0J7NQTRATfc/3ZPOnSUn2uDBUoGNn5T36+bALixmzphN26OUD3LRXWkJu4H0s5vvUeqBiw+kS0A==} - '@vitest/runner@4.0.18': - resolution: {integrity: sha512-rpk9y12PGa22Jg6g5M3UVVnTS7+zycIGk9ZNGN+m6tZHKQb7jrP7/77WfZy13Y/EUDd52NDsLRQhYKtv7XfPQw==} + '@vitest/runner@4.1.0': + resolution: {integrity: sha512-Duvx2OzQ7d6OjchL+trw+aSrb9idh7pnNfxrklo14p3zmNL4qPCDeIJAK+eBKYjkIwG96Bc6vYuxhqDXQOWpoQ==} - '@vitest/snapshot@4.0.18': - resolution: {integrity: sha512-PCiV0rcl7jKQjbgYqjtakly6T1uwv/5BQ9SwBLekVg/EaYeQFPiXcgrC2Y7vDMA8dM1SUEAEV82kgSQIlXNMvA==} + '@vitest/snapshot@4.1.0': + resolution: {integrity: sha512-0Vy9euT1kgsnj1CHttwi9i9o+4rRLEaPRSOJ5gyv579GJkNpgJK+B4HSv/rAWixx2wdAFci1X4CEPjiu2bXIMg==} - 
'@vitest/spy@4.0.18': - resolution: {integrity: sha512-cbQt3PTSD7P2OARdVW3qWER5EGq7PHlvE+QfzSC0lbwO+xnt7+XH06ZzFjFRgzUX//JmpxrCu92VdwvEPlWSNw==} + '@vitest/spy@4.1.0': + resolution: {integrity: sha512-pz77k+PgNpyMDv2FV6qmk5ZVau6c3R8HC8v342T2xlFxQKTrSeYw9waIJG8KgV9fFwAtTu4ceRzMivPTH6wSxw==} - '@vitest/utils@4.0.18': - resolution: {integrity: sha512-msMRKLMVLWygpK3u2Hybgi4MNjcYJvwTb0Ru09+fOyCXIgT5raYP041DRRdiJiI3k/2U6SEbAETB3YtBrUkCFA==} + '@vitest/utils@4.1.0': + resolution: {integrity: sha512-XfPXT6a8TZY3dcGY8EdwsBulFCIw+BeeX0RZn2x/BtiY/75YGh8FeWGG8QISN/WhaqSrE2OrlDgtF8q5uhOTmw==} '@wasm-audio-decoders/common@9.0.7': resolution: {integrity: sha512-WRaUuWSKV7pkttBygml/a6dIEpatq2nnZGFIoPTc5yPLkxL6Wk4YaslPM98OPQvWacvNZ+Py9xROGDtrFBDzag==} @@ -3670,9 +3614,9 @@ packages: engines: {node: '>=0.4.0'} hasBin: true - acpx@0.1.15: - resolution: {integrity: sha512-1r+tmPT9Oe2Ulv5b4r7O2hCCq5CHVru/H2tcPeTpZek9jR1zBQoBfZ/RcK+9sC9/mnDvWYO5R7Iae64v2LMO+A==} - engines: {node: '>=18'} + acpx@0.3.0: + resolution: {integrity: sha512-5F3GRojIqXyMCzWZ6fT3+mgXXS0sRR7Phc6VyAdEUyfjQQTVeJHr81+XQ/Z4jHrP3pbjtqwlRC6E0O5Glc8lOg==} + engines: {node: '>=22.12.0'} hasBin: true agent-base@6.0.2: @@ -3683,6 +3627,10 @@ packages: resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} engines: {node: '>= 14'} + agent-base@8.0.0: + resolution: {integrity: sha512-QT8i0hCz6C/KQ+KTAbSNwCHDGdmUJl2tp2ZpNlGSWCfhUNVbYG2WLE3MdZGBAgXPV4GAvjGMxo+C1hroyxmZEg==} + engines: {node: '>= 14'} + ajv-formats@3.0.1: resolution: {integrity: sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==} peerDependencies: @@ -3779,8 +3727,8 @@ packages: resolution: {integrity: sha512-x1FCFnFifvYDDzTaLII71vG5uvDwgtmDTEVWAxrgeiR8VjMONcCXJx7E+USjDtHlwFmt9MysbqgF9b9Vjr6w+w==} engines: {node: '>=4'} - ast-v8-to-istanbul@0.3.11: - resolution: {integrity: 
sha512-Qya9fkoofMjCBNVdWINMjB5KZvkYfaO9/anwkWnjxibpWUxo5iHl2sOdP7/uAqaRuUYuoo8rDwnbaaKVFxoUvw==} + ast-v8-to-istanbul@1.0.0: + resolution: {integrity: sha512-1fSfIwuDICFA4LKkCzRPO7F0hzFf0B7+Xqrl27ynQaa+Rh0e1Es0v6kWHPott3lU10AyAr7oKHa65OppjLn3Rg==} async-lock@1.4.1: resolution: {integrity: sha512-Az2ZTpuytrtqENulXwO3GGv1Bztugx6TT37NIo7imr/Qo0gsYiGtSdBa2B6fsXhTpVZDNfu1Qn3pk531e3q+nQ==} @@ -3817,6 +3765,9 @@ packages: axios@1.13.5: resolution: {integrity: sha512-cz4ur7Vb0xS4/KUN0tPWe44eqxrIu31me+fbang3ijiNscE129POzipJJA6zniq2C/Z6sJCjMimjS8Lc/GAs8Q==} + axios@1.13.6: + resolution: {integrity: sha512-ChTCHMouEe2kn713WHbQGcuYrr6fXTBiu460OTwWrWob16g1bXn4vtz07Ope7ewMozJAnEquLk5lWQWtBig9DQ==} + b4a@1.8.0: resolution: {integrity: sha512-qRuSmNSkGQaHwNbM7J78Wwy+ghLEYF1zNrSeMxj4Kgw6y33O3mXcQ6Ie9fRvfU/YnxWkOchPXbaLb73TkIsfdg==} peerDependencies: @@ -3844,6 +3795,36 @@ packages: bare-abort-controller: optional: true + bare-fs@4.5.5: + resolution: {integrity: sha512-XvwYM6VZqKoqDll8BmSww5luA5eflDzY0uEFfBJtFKe4PAAtxBjU3YIxzIBzhyaEQBy1VXEQBto4cpN5RZJw+w==} + engines: {bare: '>=1.16.0'} + peerDependencies: + bare-buffer: '*' + peerDependenciesMeta: + bare-buffer: + optional: true + + bare-os@3.7.1: + resolution: {integrity: sha512-ebvMaS5BgZKmJlvuWh14dg9rbUI84QeV3WlWn6Ph6lFI8jJoh7ADtVTyD2c93euwbe+zgi0DVrl4YmqXeM9aIA==} + engines: {bare: '>=1.14.0'} + + bare-path@3.0.0: + resolution: {integrity: sha512-tyfW2cQcB5NN8Saijrhqn0Zh7AnFNsnczRcuWODH0eYAXBsJ5gVxAUuNr7tsHSC6IZ77cA0SitzT+s47kot8Mw==} + + bare-stream@2.8.1: + resolution: {integrity: sha512-bSeR8RfvbRwDpD7HWZvn8M3uYNDrk7m9DQjYOFkENZlXW8Ju/MPaqUPQq5LqJ3kyjEm07siTaAQ7wBKCU59oHg==} + peerDependencies: + bare-buffer: '*' + bare-events: '*' + peerDependenciesMeta: + bare-buffer: + optional: true + bare-events: + optional: true + + bare-url@2.3.2: + resolution: {integrity: sha512-ZMq4gd9ngV5aTMa5p9+UfY0b3skwhHELaDkhEHetMdX0LRkW9kzaym4oo/Eh+Ghm0CCDuMTsRIGM/ytUc1ZYmw==} + base64-js@1.5.1: resolution: {integrity: 
sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} @@ -3861,6 +3842,9 @@ packages: before-after-hook@4.0.0: resolution: {integrity: sha512-q6tR3RPqIB1pMiTRMFcZwuG5T8vwp+vUvEG0vuI6B+Rikh5BfPp2fQ82c925FOs+b0lcFQ8CFrL+KbilfZFhOQ==} + bidi-js@1.0.3: + resolution: {integrity: sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==} + big-integer@1.6.52: resolution: {integrity: sha512-QxD8cf2eVqJOOz63z6JIN9BzvVs/dlySa5HGSBH5xtR8dPteIRQnBxxKqkNTiT6jbDTF6jAfrd4oMcND9RGbQg==} engines: {node: '>=0.6'} @@ -4059,10 +4043,6 @@ packages: resolution: {integrity: sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==} engines: {node: '>=14'} - commander@13.1.0: - resolution: {integrity: sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw==} - engines: {node: '>=18'} - commander@14.0.3: resolution: {integrity: sha512-H+y0Jo/T1RZ9qPP4Eh1pkcQcLRglraJaSLoyOtHxu6AapkjWVCy2Sit1QQ4x3Dng8qDlSsZEet7g5Pq06MvTgw==} engines: {node: '>=20'} @@ -4089,6 +4069,9 @@ packages: resolution: {integrity: sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==} engines: {node: '>= 0.6'} + convert-source-map@2.0.0: + resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==} + cookie-signature@1.0.7: resolution: {integrity: sha512-NXdYc3dLr47pBkpUCHtKSwIOQXLVn8dZEuywboCOJY/osA0wFSLlSawr3KN8qXJEyX66FcONTH8EIlVuK0yyFA==} @@ -4106,6 +4089,10 @@ packages: core-util-is@1.0.3: resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} + cors@2.8.6: + resolution: {integrity: sha512-tJtZBBHA6vjIAaF6EnIaq6laBBP9aq/Y3ouVJjEfoHbRBcHBAHYcMh/w8LDrk2PvIMMq8gmopa5D4V8RmbrxGw==} + engines: {node: '>= 0.10'} + croner@10.0.1: resolution: {integrity: 
sha512-ixNtAJndqh173VQ4KodSdJEI6nuioBWI0V1ITNKhZZsO0pEMoDxz539T4FTTbSZ/xIOSuDnzxLVRqBVSvPNE2g==} engines: {node: '>=18.0'} @@ -4120,6 +4107,10 @@ packages: css-select@5.2.2: resolution: {integrity: sha512-TizTzUddG/xYLA3NXodFM0fSbNizXjOKhqiQQwvhlspadZokn1KDy0NZFS0wuEubIYAV5/c1/lAr0TaaFXEXzw==} + css-tree@3.2.1: + resolution: {integrity: sha512-X7sjQzceUhu1u7Y/ylrRZFU2FS6LRiFVp6rKLPg23y3x3c3DOKAwuXGDp+PAGjh6CSnCjYeAul8pcT8bAl+lSA==} + engines: {node: ^10 || ^12.20.0 || ^14.13.0 || >=15.0.0} + css-what@6.2.2: resolution: {integrity: sha512-u/O3vwbptzhMs3L1fQE82ZSLHQQfto5gyZzwteVIEyeaY5Fc7R4dapF/BvRoSYFeqfBk4m0V1Vafq5Pjv25wvA==} engines: {node: '>= 6'} @@ -4127,6 +4118,10 @@ packages: cssom@0.5.0: resolution: {integrity: sha512-iKuQcq+NdHqlAcwUY0o/HL69XQrUaQdMjmStJ8JFmUaiiQErlhrmuigkg/CU4E2J0IyUKUrMAgl36TvN67MqTw==} + cssstyle@6.2.0: + resolution: {integrity: sha512-Fm5NvhYathRnXNVndkUsCCuR63DCLVVwGOOwQw782coXFi5HhkXdu289l59HlXZBawsyNccXfWRYvLzcDCdDig==} + engines: {node: '>=20'} + curve25519-js@0.0.4: resolution: {integrity: sha512-axn2UMEnkhyDUPWOwVKBMVIzSQy2ejH2xRGy1wq81dqRwApXfIzfbE3hIX0ZRFBIihf/KDqK158DLwESu4AK1w==} @@ -4142,6 +4137,10 @@ packages: resolution: {integrity: sha512-7hvf7/GW8e86rW0ptuwS3OcBGDjIi6SZva7hCyWC0yYry2cOPmLIjXAUHI6DK2HsnwJd9ifmt57i8eV2n4YNpw==} engines: {node: '>= 14'} + data-urls@7.0.0: + resolution: {integrity: sha512-23XHcCF+coGYevirZceTVD7NdJOqVn+49IHyxgszm+JIiHLoB2TkmPtsYkNWT1pvRSGkc35L6NHs0yHkN2SumA==} + engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0} + date-fns@3.6.0: resolution: {integrity: sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==} @@ -4162,6 +4161,9 @@ packages: supports-color: optional: true + decimal.js@10.6.0: + resolution: {integrity: sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==} + deep-extend@0.6.0: resolution: {integrity: sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==} 
engines: {node: '>=4.0.0'} @@ -4210,11 +4212,8 @@ packages: discord-api-types@0.38.37: resolution: {integrity: sha512-Cv47jzY1jkGkh5sv0bfHYqGgKOWO1peOrGMkDFM4UmaGMOTgOW8QSexhvixa9sVOiz8MnVOBryWYyw/CEVhj7w==} - discord-api-types@0.38.40: - resolution: {integrity: sha512-P/His8cotqZgQqrt+hzrocp9L8RhQQz1GkrCnC9TMJ8Uw2q0tg8YyqJyGULxhXn/8kxHETN4IppmOv+P2m82lQ==} - - discord-api-types@0.38.41: - resolution: {integrity: sha512-yMECyR8j9c2fVTvCQ+Qc24pweYFIZk/XoxDOmt1UvPeSw5tK6gXBd/2hhP+FEAe9Y6ny8pRMaf618XDK4U53OQ==} + discord-api-types@0.38.42: + resolution: {integrity: sha512-qs1kya7S84r5RR8m9kgttywGrmmoHaRifU1askAoi+wkoSefLpZP6aGXusjNw5b0jD3zOg3LTwUa3Tf2iHIceQ==} doctypes@1.1.0: resolution: {integrity: sha512-LLBi6pEqS6Do3EKQ3J0NqHWV5hhb78Pi8vvESYwyOy2c31ZEZVdtitdzsQsKb7878PEERhzUk0ftqGhG6Mz+pQ==} @@ -4229,9 +4228,8 @@ packages: resolution: {integrity: sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==} engines: {node: '>= 4'} - dompurify@3.3.2: - resolution: {integrity: sha512-6obghkliLdmKa56xdbLOpUZ43pAR6xFy1uOrxBaIDjT+yaRuuybLjGS9eVBoSR/UPU5fq3OXClEHLJNGvbxKpQ==} - engines: {node: '>=20'} + dompurify@3.3.3: + resolution: {integrity: sha512-Oj6pzI2+RqBfFG+qOaOLbFXLQ90ARpcGG6UePL82bJLtdsa6CYJD7nmiU8MW9nQNOtCHV3lZ/Bzq1X0QYbBZCA==} domutils@3.2.2: resolution: {integrity: sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==} @@ -4289,6 +4287,10 @@ packages: resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} engines: {node: '>=0.12'} + entities@6.0.1: + resolution: {integrity: sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==} + engines: {node: '>=0.12'} + entities@7.0.1: resolution: {integrity: sha512-TWrgLOFUQTH994YUyl1yT4uyavY5nNB5muff+RtWaqNVCAK408b5ZnnbNAUEWLTCpum9w6arT70i1XdQ4UeOPA==} engines: {node: '>=0.12'} @@ -4305,8 +4307,8 @@ packages: resolution: {integrity: 
sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} engines: {node: '>= 0.4'} - es-module-lexer@1.7.0: - resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==} + es-module-lexer@2.0.0: + resolution: {integrity: sha512-5POEcUuZybH7IdmGsD8wlf0AI55wMecM9rVBTI/qEAy2c1kTOm3DjFYjrBdI2K3BaJjJYfYFeRtM0t9ssnRuxw==} es-object-atoms@1.1.1: resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} @@ -4370,6 +4372,14 @@ packages: events-universal@1.0.1: resolution: {integrity: sha512-LUd5euvbMLpwOF8m6ivPCbhQeSiYVNb8Vs0fQ8QjXo0JTkEHpz8pxdQf0gStltaPpw0Cca8b39KxvK9cfKRiAw==} + eventsource-parser@3.0.6: + resolution: {integrity: sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==} + engines: {node: '>=18.0.0'} + + eventsource@3.0.7: + resolution: {integrity: sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==} + engines: {node: '>=18.0.0'} + execa@4.1.0: resolution: {integrity: sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==} engines: {node: '>=10'} @@ -4381,6 +4391,12 @@ packages: exponential-backoff@3.1.3: resolution: {integrity: sha512-ZgEeZXj30q+I0EN+CbSSpIyPaJ5HVQD18Z1m+u1FXbAeT94mr1zw50q4q6jiiC447Nl/YTcIYSAftiGqetwXCA==} + express-rate-limit@8.3.1: + resolution: {integrity: sha512-D1dKN+cmyPWuvB+G2SREQDzPY1agpBIcTa9sJxOPMCNeH3gwzhqJRDWCXW3gg0y//+LQ/8j52JbMROWyrKdMdw==} + engines: {node: '>= 16'} + peerDependencies: + express: '>= 4.11' + express@4.22.1: resolution: {integrity: sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==} engines: {node: '>= 0.10.0'} @@ -4440,8 +4456,8 @@ packages: resolution: {integrity: sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==} engines: {node: 
^12.20 || >= 14.13} - file-type@21.3.0: - resolution: {integrity: sha512-8kPJMIGz1Yt/aPEwOsrR97ZyZaD1Iqm8PClb1nYFclUCkBi0Ma5IsYNQzvSFS9ib51lWyIw5mIT9rWzI/xjpzA==} + file-type@21.3.2: + resolution: {integrity: sha512-DLkUvGwep3poOV2wpzbHCOnSKGk1LzyXTv+aHFgN2VFl96wnp8YA9YjO2qPzg5PuL8q/SW9Pdi6WTkYOIh995w==} engines: {node: '>=20'} filename-reserved-regex@3.0.0: @@ -4614,10 +4630,6 @@ packages: graceful-fs@4.2.11: resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} - grammy@1.41.0: - resolution: {integrity: sha512-CAAu74SLT+/QCg40FBhUuYJalVsxxCN3D0c31TzhFBsWWTdXrMXYjGsKngBdfvN6hQ/VzHczluj/ugZVetFNCQ==} - engines: {node: ^12.20.0 || >=14.13.1} - grammy@1.41.1: resolution: {integrity: sha512-wcHAQ1e7svL3fJMpDchcQVcWUmywhuepOOjHUHmMmWAwUJEIyK5ea5sbSjZd+Gy1aMpZeP8VYJa+4tP+j1YptQ==} engines: {node: ^12.20.0 || >=14.13.1} @@ -4661,8 +4673,8 @@ packages: highlight.js@10.7.3: resolution: {integrity: sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==} - hono@4.12.5: - resolution: {integrity: sha512-3qq+FUBtlTHhtYxbxheZgY8NIFnkkC/MR8u5TTsr7YZ3wixryQ3cCwn3iZbg8p8B88iDBBAYSfZDS75t8MN7Vg==} + hono@4.12.7: + resolution: {integrity: sha512-jq9l1DM0zVIvsm3lv9Nw9nlJnMNPOcAtsbsgiUhWcFzPE99Gvo6yRTlszSLLYacMeQ6quHD6hMfId8crVHvexw==} engines: {node: '>=16.9.0'} hookable@6.0.1: @@ -4675,6 +4687,10 @@ packages: resolution: {integrity: sha512-M422h7o/BR3rmCQ8UHi7cyyMqKltdP9Uo+J2fXK+RSAY+wTcKOIRyhTuKv4qn+DJf3g+PL890AzId5KZpX+CBg==} engines: {node: ^20.17.0 || >=22.9.0} + html-encoding-sniffer@6.0.0: + resolution: {integrity: sha512-CV9TW3Y3f8/wT0BRFc1/KAVQ3TUHiXmaAb6VW9vtiMFf7SLoMd1PdAc4W3KFOFETBJUb90KatHqlsZMWV+R9Gg==} + engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0} + html-escaper@2.0.2: resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==} @@ -4717,6 +4733,10 @@ packages: resolution: {integrity: 
sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==} engines: {node: '>= 14'} + https-proxy-agent@8.0.0: + resolution: {integrity: sha512-YYeW+iCnAS3xhvj2dvVoWgsbca3RfQy/IlaNHHOtDmU0jMqPI9euIq3Y9BJETdxk16h9NHHCKqp/KB9nIMStCQ==} + engines: {node: '>= 14'} + human-signals@1.1.1: resolution: {integrity: sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw==} engines: {node: '>=8.12.0'} @@ -4815,6 +4835,9 @@ packages: resolution: {integrity: sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==} engines: {node: '>=0.10.0'} + is-potential-custom-element-name@1.0.1: + resolution: {integrity: sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==} + is-promise@2.2.2: resolution: {integrity: sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==} @@ -4871,6 +4894,9 @@ packages: jose@4.15.9: resolution: {integrity: sha512-1vUQX+IdDMVPj4k8kOxgUqlcK518yluMuGZwqlr44FS1ppZB/5GWh4rZG89erpOBOJjU/OBsnCVFfapsRz6nEA==} + jose@6.2.1: + resolution: {integrity: sha512-jUaKr1yrbfaImV7R2TN/b3IcZzsw38/chqMpo2XJ7i2F8AfM/lA4G1goC3JVEwg0H7UldTmSt3P68nt31W7/mw==} + js-stringify@1.0.2: resolution: {integrity: sha512-rtS5ATOo2Q5k1G+DADISilDA6lv79zIiwFd6CcjuIxGKLFm5C+RLImRscVap9k55i+MOZwgliw+NejvkLuGD5g==} @@ -4887,6 +4913,15 @@ packages: resolution: {integrity: sha512-d2VNT/2Hv4dxT2/59He8Lyda4DYOxPRyRG9zBaOpTZAqJCVf2xLrBlZkT8Va6Lo9u3X2qz8Bpq4HrDi4JsrQhA==} hasBin: true + jsdom@28.1.0: + resolution: {integrity: sha512-0+MoQNYyr2rBHqO1xilltfDjV9G7ymYGlAUazgcDLQaUf8JDHbuGwsxN6U9qWaElZ4w1B2r7yEGIL3GdeW3Rug==} + engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0} + peerDependencies: + canvas: ^3.0.0 + peerDependenciesMeta: + canvas: + optional: true + jsesc@3.1.0: resolution: {integrity: sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==} engines: 
{node: '>=6'} @@ -4906,6 +4941,9 @@ packages: json-schema-traverse@1.0.0: resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} + json-schema-typed@8.0.2: + resolution: {integrity: sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA==} + json-schema@0.4.0: resolution: {integrity: sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==} @@ -4972,74 +5010,74 @@ packages: lifecycle-utils@3.1.1: resolution: {integrity: sha512-gNd3OvhFNjHykJE3uGntz7UuPzWlK9phrIdXxU9Adis0+ExkwnZibfxCJWiWWZ+a6VbKiZrb+9D9hCQWd4vjTg==} - lightningcss-android-arm64@1.30.2: - resolution: {integrity: sha512-BH9sEdOCahSgmkVhBLeU7Hc9DWeZ1Eb6wNS6Da8igvUwAe0sqROHddIlvU06q3WyXVEOYDZ6ykBZQnjTbmo4+A==} + lightningcss-android-arm64@1.32.0: + resolution: {integrity: sha512-YK7/ClTt4kAK0vo6w3X+Pnm0D2cf2vPHbhOXdoNti1Ga0al1P4TBZhwjATvjNwLEBCnKvjJc2jQgHXH0NEwlAg==} engines: {node: '>= 12.0.0'} cpu: [arm64] os: [android] - lightningcss-darwin-arm64@1.30.2: - resolution: {integrity: sha512-ylTcDJBN3Hp21TdhRT5zBOIi73P6/W0qwvlFEk22fkdXchtNTOU4Qc37SkzV+EKYxLouZ6M4LG9NfZ1qkhhBWA==} + lightningcss-darwin-arm64@1.32.0: + resolution: {integrity: sha512-RzeG9Ju5bag2Bv1/lwlVJvBE3q6TtXskdZLLCyfg5pt+HLz9BqlICO7LZM7VHNTTn/5PRhHFBSjk5lc4cmscPQ==} engines: {node: '>= 12.0.0'} cpu: [arm64] os: [darwin] - lightningcss-darwin-x64@1.30.2: - resolution: {integrity: sha512-oBZgKchomuDYxr7ilwLcyms6BCyLn0z8J0+ZZmfpjwg9fRVZIR5/GMXd7r9RH94iDhld3UmSjBM6nXWM2TfZTQ==} + lightningcss-darwin-x64@1.32.0: + resolution: {integrity: sha512-U+QsBp2m/s2wqpUYT/6wnlagdZbtZdndSmut/NJqlCcMLTWp5muCrID+K5UJ6jqD2BFshejCYXniPDbNh73V8w==} engines: {node: '>= 12.0.0'} cpu: [x64] os: [darwin] - lightningcss-freebsd-x64@1.30.2: - resolution: {integrity: sha512-c2bH6xTrf4BDpK8MoGG4Bd6zAMZDAXS569UxCAGcA7IKbHNMlhGQ89eRmvpIUGfKWNVdbhSbkQaWhEoMGmGslA==} + lightningcss-freebsd-x64@1.32.0: + resolution: {integrity: 
sha512-JCTigedEksZk3tHTTthnMdVfGf61Fky8Ji2E4YjUTEQX14xiy/lTzXnu1vwiZe3bYe0q+SpsSH/CTeDXK6WHig==} engines: {node: '>= 12.0.0'} cpu: [x64] os: [freebsd] - lightningcss-linux-arm-gnueabihf@1.30.2: - resolution: {integrity: sha512-eVdpxh4wYcm0PofJIZVuYuLiqBIakQ9uFZmipf6LF/HRj5Bgm0eb3qL/mr1smyXIS1twwOxNWndd8z0E374hiA==} + lightningcss-linux-arm-gnueabihf@1.32.0: + resolution: {integrity: sha512-x6rnnpRa2GL0zQOkt6rts3YDPzduLpWvwAF6EMhXFVZXD4tPrBkEFqzGowzCsIWsPjqSK+tyNEODUBXeeVHSkw==} engines: {node: '>= 12.0.0'} cpu: [arm] os: [linux] - lightningcss-linux-arm64-gnu@1.30.2: - resolution: {integrity: sha512-UK65WJAbwIJbiBFXpxrbTNArtfuznvxAJw4Q2ZGlU8kPeDIWEX1dg3rn2veBVUylA2Ezg89ktszWbaQnxD/e3A==} + lightningcss-linux-arm64-gnu@1.32.0: + resolution: {integrity: sha512-0nnMyoyOLRJXfbMOilaSRcLH3Jw5z9HDNGfT/gwCPgaDjnx0i8w7vBzFLFR1f6CMLKF8gVbebmkUN3fa/kQJpQ==} engines: {node: '>= 12.0.0'} cpu: [arm64] os: [linux] - lightningcss-linux-arm64-musl@1.30.2: - resolution: {integrity: sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA==} + lightningcss-linux-arm64-musl@1.32.0: + resolution: {integrity: sha512-UpQkoenr4UJEzgVIYpI80lDFvRmPVg6oqboNHfoH4CQIfNA+HOrZ7Mo7KZP02dC6LjghPQJeBsvXhJod/wnIBg==} engines: {node: '>= 12.0.0'} cpu: [arm64] os: [linux] - lightningcss-linux-x64-gnu@1.30.2: - resolution: {integrity: sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w==} + lightningcss-linux-x64-gnu@1.32.0: + resolution: {integrity: sha512-V7Qr52IhZmdKPVr+Vtw8o+WLsQJYCTd8loIfpDaMRWGUZfBOYEJeyJIkqGIDMZPwPx24pUMfwSxxI8phr/MbOA==} engines: {node: '>= 12.0.0'} cpu: [x64] os: [linux] - lightningcss-linux-x64-musl@1.30.2: - resolution: {integrity: sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA==} + lightningcss-linux-x64-musl@1.32.0: + resolution: {integrity: sha512-bYcLp+Vb0awsiXg/80uCRezCYHNg1/l3mt0gzHnWV9XP1W5sKa5/TCdGWaR/zBM2PeF/HbsQv/j2URNOiVuxWg==} engines: 
{node: '>= 12.0.0'} cpu: [x64] os: [linux] - lightningcss-win32-arm64-msvc@1.30.2: - resolution: {integrity: sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ==} + lightningcss-win32-arm64-msvc@1.32.0: + resolution: {integrity: sha512-8SbC8BR40pS6baCM8sbtYDSwEVQd4JlFTOlaD3gWGHfThTcABnNDBda6eTZeqbofalIJhFx0qKzgHJmcPTnGdw==} engines: {node: '>= 12.0.0'} cpu: [arm64] os: [win32] - lightningcss-win32-x64-msvc@1.30.2: - resolution: {integrity: sha512-5g1yc73p+iAkid5phb4oVFMB45417DkRevRbt/El/gKXJk4jid+vPFF/AXbxn05Aky8PapwzZrdJShv5C0avjw==} + lightningcss-win32-x64-msvc@1.32.0: + resolution: {integrity: sha512-Amq9B/SoZYdDi1kFrojnoqPLxYhQ4Wo5XiL8EVJrVsB8ARoC1PWW6VGtT0WKCemjy8aC+louJnjS7U18x3b06Q==} engines: {node: '>= 12.0.0'} cpu: [x64] os: [win32] - lightningcss@1.30.2: - resolution: {integrity: sha512-utfs7Pr5uJyyvDETitgsaqSyjCb2qNRAtuqUeWIAKztsOYdcACf2KtARYXg2pSvhkt+9NfoaNY7fxjl6nuMjIQ==} + lightningcss@1.32.0: + resolution: {integrity: sha512-NXYBzinNrblfraPGyrbPoD19C1h9lfI/1mzgWYvXUTe414Gz/X1FD2XBZSZM7rRTrMA8JL3OtAaGifrIKhQ5yQ==} engines: {node: '>= 12.0.0'} limiter@1.1.5: @@ -5185,6 +5223,9 @@ packages: mdast-util-to-hast@13.2.1: resolution: {integrity: sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==} + mdn-data@2.27.1: + resolution: {integrity: sha512-9Yubnt3e8A0OKwxYSXyhLymGW4sCufcLG6VdiDdUGVkPhpqLxlvP5vl1983gQjJl3tqbrM731mjaZaP68AgosQ==} + mdurl@2.0.0: resolution: {integrity: sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==} @@ -5305,8 +5346,8 @@ packages: ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} - music-metadata@11.12.1: - resolution: {integrity: sha512-j++ltLxHDb5VCXET9FzQ8bnueiLHwQKgCO7vcbkRH/3F7fRjPkv6qncGEJ47yFhmemcYtgvsOAlcQ1dRBTkDjg==} + music-metadata@11.12.3: + resolution: {integrity: 
sha512-n6hSTZkuD59qWgHh6IP5dtDlDZQXoxk/bcA85Jywg8Z1iFrlNgl2+GTFgjZyn52W5UgQpV42V4XqrQZZAMbZTQ==} engines: {node: '>=18'} mz@2.7.0: @@ -5466,18 +5507,6 @@ packages: oniguruma-to-es@4.3.4: resolution: {integrity: sha512-3VhUGN3w2eYxnTzHn+ikMI+fp/96KoRSVK9/kMTcFqj1NRDh2IhQCKvYxDnWePKRXY/AqH+Fuiyb7VHSzBjHfA==} - openai@6.10.0: - resolution: {integrity: sha512-ITxOGo7rO3XRMiKA5l7tQ43iNNu+iXGFAcf2t+aWVzzqRaS0i7m1K2BhxNdaveB+5eENhO0VY1FkiZzhBk4v3A==} - hasBin: true - peerDependencies: - ws: ^8.18.0 - zod: ^3.25 || ^4.0 - peerDependenciesMeta: - ws: - optional: true - zod: - optional: true - openai@6.26.0: resolution: {integrity: sha512-zd23dbWTjiJ6sSAX6s0HrCZi41JwTA1bQVs0wLQPZ2/5o2gxOJA5wh7yOAUgwYybfhDXyhwlpeQf7Mlgx8EOCA==} hasBin: true @@ -5490,8 +5519,8 @@ packages: zod: optional: true - openai@6.27.0: - resolution: {integrity: sha512-osTKySlrdYrLYTt0zjhY8yp0JUBmWDCN+Q+QxsV4xMQnnoVFpylgKGgxwN8sSdTNw0G4y+WUXs4eCMWpyDNWZQ==} + openai@6.29.0: + resolution: {integrity: sha512-YxoArl2BItucdO89/sN6edksV0x47WUTgkgVfCgX7EuEMhbirENsgYe5oO4LTjBL9PtdKtk2WqND1gSLcTd2yw==} hasBin: true peerDependencies: ws: ^8.18.0 @@ -5502,13 +5531,16 @@ packages: zod: optional: true - openclaw@2026.3.2: - resolution: {integrity: sha512-Gkqx24m7PF1DUXPI968DuC9n52lTZ5hI3X5PIi0HosC7J7d6RLkgVppj1mxvgiQAWMp41E41elvoi/h4KBjFcQ==} - engines: {node: '>=22.12.0'} + openclaw@2026.3.13: + resolution: {integrity: sha512-/juSUb070Xz8K8CnShjaZQr7CVtRaW4FbR93lgr1hLepcRSbyz2PQR+V4w5giVWkea61opXWPA6Vb8dybaztFg==} + engines: {node: '>=22.16.0'} hasBin: true peerDependencies: '@napi-rs/canvas': ^0.1.89 node-llama-cpp: 3.16.2 + peerDependenciesMeta: + node-llama-cpp: + optional: true opus-decoder@0.7.11: resolution: {integrity: sha512-+e+Jz3vGQLxRTBHs8YJQPRPc1Tr+/aC6coV/DlZylriA29BdHQAYXhvNRKtjftof17OFng0+P4wsFIqQu3a48A==} @@ -5524,8 +5556,8 @@ packages: resolution: {integrity: sha512-4/8JfsetakdeEa4vAYV45FW20aY+B/+K8NEXp5Eiar3wR8726whgHrbSg5Ar/ZY1FLJ/AGtUqV7W2IVF+Gvp9A==} engines: {node: '>=20'} - oxfmt@0.36.0: - 
resolution: {integrity: sha512-/ejJ+KoSW6J9bcNT9a9UtJSJNWhJ3yOLSBLbkoFHJs/8CZjmaZVZAJe4YgO1KMJlKpNQasrn/G9JQUEZI3p0EQ==} + oxfmt@0.40.0: + resolution: {integrity: sha512-g0C3I7xUj4b4DcagevM9kgH6+pUHytikxUcn3/VUkvzTNaaXBeyZqb7IBsHwojeXm4mTBEC/aBjBTMVUkZwWUQ==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true @@ -5533,8 +5565,8 @@ packages: resolution: {integrity: sha512-4RuJK2jP08XwqtUu+5yhCbxEauCm6tv2MFHKEMsjbosK2+vy5us82oI3VLuHwbNyZG7ekZA26U2LLHnGR4frIA==} hasBin: true - oxlint@1.51.0: - resolution: {integrity: sha512-g6DNPaV9/WI9MoX2XllafxQuxwY1TV++j7hP8fTJByVBuCoVtm3dy9f/2vtH/HU40JztcgWF4G7ua+gkainklQ==} + oxlint@1.55.0: + resolution: {integrity: sha512-T+FjepiyWpaZMhekqRpH8Z3I4vNM610p6w+Vjfqgj5TZUxHXl7N8N5IPvmOU8U4XdTRxqtNNTh9Y4hLtr7yvFg==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true peerDependencies: @@ -5604,6 +5636,9 @@ packages: parse5@6.0.1: resolution: {integrity: sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==} + parse5@8.0.0: + resolution: {integrity: sha512-9m4m5GSgXjL4AjumKzq1Fgfp3Z8rsvjRNbnkVwfu2ImRqE5D0LnY2QfDen18FSY9C573YU5XxSapdHZTZ2WolA==} + parseley@0.12.1: resolution: {integrity: sha512-e6qHKe3a9HWr0oMRVDTRhKce+bRO8VGQR3NyVwcjwrbhMmFCX9KszEV35+rn4AdilFAq9VPxP/Fe1wC9Qjd2lw==} @@ -5680,9 +5715,9 @@ packages: resolution: {integrity: sha512-8OEwKp5juEvb/MjpIc4hjqfgCNysrS94RIOMXYvpYCdm/jglrKEiAYmiumbmGhCvs+IcInsphYDFwqrjr7398w==} hasBin: true - pixelmatch@7.1.0: - resolution: {integrity: sha512-1wrVzJ2STrpmONHKBy228LM1b84msXDUoAzVEl0R8Mz4Ce6EPr+IVtxm8+yvrqLYMHswREkjYFaMxnyGnaY3Ng==} - hasBin: true + pkce-challenge@5.0.1: + resolution: {integrity: sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ==} + engines: {node: '>=16.20.0'} playwright-core@1.58.2: resolution: {integrity: sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg==} @@ -5702,6 +5737,10 @@ packages: resolution: {integrity: 
sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} engines: {node: ^10 || ^12 || >=14} + postcss@8.5.8: + resolution: {integrity: sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==} + engines: {node: ^10 || ^12 || >=14} + postgres@3.4.8: resolution: {integrity: sha512-d+JFcLM17njZaOLkv6SCev7uoLaBtfK86vMUXhW1Z4glPWh4jozno9APvW/XKFJ3CCxVoC7OL38BqRydtu5nGg==} engines: {node: '>=12'} @@ -5959,8 +5998,8 @@ packages: resolution: {integrity: sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==} hasBin: true - rolldown-plugin-dts@0.22.4: - resolution: {integrity: sha512-pueqTPyN1N6lWYivyDGad+j+GO3DT67pzpct8s8e6KGVIezvnrDjejuw1AXFeyDRas3xTq4Ja6Lj5R5/04C5GQ==} + rolldown-plugin-dts@0.22.5: + resolution: {integrity: sha512-M/HXfM4cboo+jONx9Z0X+CUf3B5tCi7ni+kR5fUW50Fp9AlZk0oVLesibGWgCXDKFp5lpgQ9yhKoImUFjl3VZw==} engines: {node: '>=20.19.0'} peerDependencies: '@ts-macro/tsc': ^0.3.6 @@ -5978,16 +6017,11 @@ packages: vue-tsc: optional: true - rolldown@1.0.0-rc.7: - resolution: {integrity: sha512-5X0zEeQFzDpB3MqUWQZyO2TUQqP9VnT7CqXHF2laTFRy487+b6QZyotCazOySAuZLAvplCaOVsg1tVn/Zlmwfg==} + rolldown@1.0.0-rc.9: + resolution: {integrity: sha512-9EbgWge7ZH+yqb4d2EnELAntgPTWbfL8ajiTW+SyhJEC4qhBbkCKbqFV4Ge4zmu5ziQuVbWxb/XwLZ+RIO7E8Q==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true - rollup@4.59.0: - resolution: {integrity: sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==} - engines: {node: '>=18.0.0', npm: '>=8.0.0'} - hasBin: true - router@2.2.0: resolution: {integrity: sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==} engines: {node: '>= 18'} @@ -6011,6 +6045,10 @@ packages: sanitize-html@2.17.1: resolution: {integrity: sha512-ehFCW+q1a4CSOWRAdX97BX/6/PDEkCqw7/0JXZAGQV57FQB3YOkTa/rrzHPeJ+Aghy4vZAFfWMYyfxIiB7F/gw==} + saxes@6.0.0: + resolution: {integrity: 
sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==} + engines: {node: '>=v12.22.7'} + scheduler@0.27.0: resolution: {integrity: sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==} @@ -6211,6 +6249,9 @@ packages: std-env@3.10.0: resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} + std-env@4.0.0: + resolution: {integrity: sha512-zUMPtQ/HBY3/50VbpkupYHbRroTRZJPRLvreamgErJVys0ceuzMkD44J/QjqhHjOzK42GQ3QZIeFG1OYfOtKqQ==} + stdin-discarder@0.3.1: resolution: {integrity: sha512-reExS1kSGoElkextOcPkel4NE99S0BWxjUHQeDFnR8S993JxpPX7KU4MNmO19NXhlJp+8dmdCbKQVNgLJh2teA==} engines: {node: '>=18'} @@ -6289,17 +6330,23 @@ packages: resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} engines: {node: '>= 0.4'} + symbol-tree@3.2.4: + resolution: {integrity: sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==} + table-layout@4.1.1: resolution: {integrity: sha512-iK5/YhZxq5GO5z8wb0bY1317uDF3Zjpha0QFFLA8/trAoiLbQD0HUbMesEaxyzUgDxi2QlcbM8IvqOlEjgoXBA==} engines: {node: '>=12.17'} - tar-stream@3.1.7: - resolution: {integrity: sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ==} + tar-stream@3.1.8: + resolution: {integrity: sha512-U6QpVRyCGHva435KoNWy9PRoi2IFYCgtEhq9nmrPPpbRacPs9IH4aJ3gbrFC8dPcXvdSZ4XXfXT5Fshbp2MtlQ==} - tar@7.5.10: - resolution: {integrity: sha512-8mOPs1//5q/rlkNSPcCegA6hiHJYDmSLEI8aMH/CdSQJNWztHC9WHNam5zdQlfpTwB9Xp7IBEsHfV5LKMJGVAw==} + tar@7.5.11: + resolution: {integrity: sha512-ChjMH33/KetonMTAtpYdgUFr0tbz69Fp2v7zWxQfYZX4g5ZN2nOBXm1R2xyA+lMIKrLKIoKAwFj93jE/avX9cQ==} engines: {node: '>=18'} + teex@1.0.1: + resolution: {integrity: sha512-eYE6iEI62Ni1H8oIa7KlDU6uQBtqr4Eajni3wX7rpfXD8ysFx8z0+dri+KWEPWpBsxXfxu58x/0jvTVT1ekOSg==} + text-decoder@1.2.7: resolution: 
{integrity: sha512-vlLytXkeP4xvEq2otHeJfSQIRyWxo/oZGEbXrtEEF9Hnmrdly59sUbzZ/QgyWuLYHctCHxFF4tRQZNQ9k60ExQ==} @@ -6328,8 +6375,8 @@ packages: resolution: {integrity: sha512-Pugqs6M0m7Lv1I7FtxN4aoyToKg1C4tu+/381vH35y8oENM/Ai7f7C4StcoK4/+BSw9ebcS8jRiVrORFKCALLw==} engines: {node: ^20.0.0 || >=22.0.0} - tinyrainbow@3.0.3: - resolution: {integrity: sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==} + tinyrainbow@3.1.0: + resolution: {integrity: sha512-Bf+ILmBgretUrdJxzXM0SgXLZ3XfiaUuOj/IKQHuTXip+05Xn+uyEYdVg0kYDipTBcLrCVyUzAPz7QmArb0mmw==} engines: {node: '>=14.0.0'} to-regex-range@5.0.1: @@ -6362,6 +6409,10 @@ packages: tr46@0.0.3: resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==} + tr46@6.0.0: + resolution: {integrity: sha512-bLVMLPtstlZ4iMQHpFHTR7GAGj2jxi8Dg0s2h2MafAE4uSWF98FC/3MomU51iQAMf8/qDUbKWf5GxuvvVcXEhw==} + engines: {node: '>=20'} + tree-kill@1.2.2: resolution: {integrity: sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==} hasBin: true @@ -6372,14 +6423,14 @@ packages: ts-algebra@2.0.0: resolution: {integrity: sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw==} - tsdown@0.21.0: - resolution: {integrity: sha512-Sw/ehzVhjYLD7HVBPybJHDxpcaeyFjPcaDCME23o9O4fyuEl6ibYEdrnB8W8UchYAGoayKqzWQqx/oIp3jn/Vg==} + tsdown@0.21.2: + resolution: {integrity: sha512-pP8eAcd1XAWjl5gjosuJs0BAuVoheUe3V8VDHx31QK7YOgXjcCMsBSyFWO3CMh/CSUkjRUzR96JtGH3WJFTExQ==} engines: {node: '>=20.19.0'} hasBin: true peerDependencies: '@arethetypeswrong/core': ^0.18.1 - '@tsdown/css': 0.21.0 - '@tsdown/exe': 0.21.0 + '@tsdown/css': 0.21.2 + '@tsdown/exe': 0.21.2 '@vitejs/devtools': '*' publint: ^0.3.0 typescript: ^5.0.0 @@ -6465,8 +6516,8 @@ packages: undici-types@7.18.2: resolution: {integrity: sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==} - 
undici@7.22.0: - resolution: {integrity: sha512-RqslV2Us5BrllB+JeiZnK4peryVTndy9Dnqq62S3yYRRTj0tFQCwEniUy2167skdGOy3vqRzEvl1Dm4sV2ReDg==} + undici@7.24.1: + resolution: {integrity: sha512-5xoBibbmnjlcR3jdqtY2Lnx7WbrD/tHlT01TmvqZUFVc9Q1w4+j5hbnapTqbcXITMH1ovjq/W7BkqBilHiVAaA==} engines: {node: '>=20.18.1'} unist-util-is@6.0.1: @@ -6502,8 +6553,8 @@ packages: resolution: {integrity: sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==} engines: {node: '>= 0.8'} - unrun@0.2.30: - resolution: {integrity: sha512-a4W1wDADI0gvDDr14T0ho1FgMhmfjq6M8Iz8q234EnlxgH/9cMHDueUSLwTl1fwSBs5+mHrLFYH+7B8ao36EBA==} + unrun@0.2.32: + resolution: {integrity: sha512-opd3z6791rf281JdByf0RdRQrpcc7WyzqittqIXodM/5meNWdTwrVxeyzbaCp4/Rgls/um14oUaif1gomO8YGg==} engines: {node: '>=20.19.0'} hasBin: true peerDependencies: @@ -6555,15 +6606,16 @@ packages: vfile@6.0.3: resolution: {integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==} - vite@7.3.1: - resolution: {integrity: sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==} + vite@8.0.0: + resolution: {integrity: sha512-fPGaRNj9Zytaf8LEiBhY7Z6ijnFKdzU/+mL8EFBaKr7Vw1/FWcTBAMW0wLPJAGMPX38ZPVCVgLceWiEqeoqL2Q==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true peerDependencies: '@types/node': ^20.19.0 || >=22.12.0 + '@vitejs/devtools': ^0.0.0-alpha.31 + esbuild: ^0.27.0 jiti: '>=1.21.0' less: ^4.0.0 - lightningcss: ^1.21.0 sass: ^1.70.0 sass-embedded: ^1.70.0 stylus: '>=0.54.8' @@ -6574,12 +6626,14 @@ packages: peerDependenciesMeta: '@types/node': optional: true + '@vitejs/devtools': + optional: true + esbuild: + optional: true jiti: optional: true less: optional: true - lightningcss: - optional: true sass: optional: true sass-embedded: @@ -6595,20 +6649,21 @@ packages: yaml: optional: true - vitest@4.0.18: - resolution: {integrity: 
sha512-hOQuK7h0FGKgBAas7v0mSAsnvrIgAvWmRFjmzpJ7SwFHH3g1k2u37JtYwOwmEKhK6ZO3v9ggDBBm0La1LCK4uQ==} + vitest@4.1.0: + resolution: {integrity: sha512-YbDrMF9jM2Lqc++2530UourxZHmkKLxrs4+mYhEwqWS97WJ7wOYEkcr+QfRgJ3PW9wz3odRijLZjHEaRLTNbqw==} engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} hasBin: true peerDependencies: '@edge-runtime/vm': '*' '@opentelemetry/api': ^1.9.0 '@types/node': ^20.0.0 || ^22.0.0 || >=24.0.0 - '@vitest/browser-playwright': 4.0.18 - '@vitest/browser-preview': 4.0.18 - '@vitest/browser-webdriverio': 4.0.18 - '@vitest/ui': 4.0.18 + '@vitest/browser-playwright': 4.1.0 + '@vitest/browser-preview': 4.1.0 + '@vitest/browser-webdriverio': 4.1.0 + '@vitest/ui': 4.1.0 happy-dom: '*' jsdom: '*' + vite: ^6.0.0 || ^7.0.0 || ^8.0.0-0 peerDependenciesMeta: '@edge-runtime/vm': optional: true @@ -6633,6 +6688,10 @@ packages: resolution: {integrity: sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w==} engines: {node: '>=0.10.0'} + w3c-xmlserializer@5.0.0: + resolution: {integrity: sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==} + engines: {node: '>=18'} + web-streams-polyfill@3.3.3: resolution: {integrity: sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==} engines: {node: '>= 8'} @@ -6640,6 +6699,18 @@ packages: webidl-conversions@3.0.1: resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} + webidl-conversions@8.0.1: + resolution: {integrity: sha512-BMhLD/Sw+GbJC21C/UgyaZX41nPt8bUTg+jWyDeg7e7YN4xOM05YPSIXceACnXVtqyEw/LMClUQMtMZ+PGGpqQ==} + engines: {node: '>=20'} + + whatwg-mimetype@5.0.0: + resolution: {integrity: sha512-sXcNcHOC51uPGF0P/D4NVtrkjSU2fNsm9iog4ZvZJsL3rjoDAzXZhkm2MWt1y+PUdggKAYVoMAIYcs78wJ51Cw==} + engines: {node: '>=20'} + + whatwg-url@16.0.1: + resolution: {integrity: 
sha512-1to4zXBxmXHV3IiSSEInrreIlu02vUOvrhxJJH5vcxYTBDAx51cqZiKdyTxlecdKNSjj8EcxGBxNf6Vg+945gw==} + engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0} + whatwg-url@5.0.0: resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} @@ -6695,6 +6766,13 @@ packages: utf-8-validate: optional: true + xml-name-validator@5.0.0: + resolution: {integrity: sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==} + engines: {node: '>=18'} + + xmlchars@2.2.0: + resolution: {integrity: sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==} + y18n@5.0.8: resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} engines: {node: '>=10'} @@ -6746,9 +6824,6 @@ packages: zod@3.25.75: resolution: {integrity: sha512-OhpzAmVzabPOL6C3A3gpAifqr9MqihV/Msx3gor2b2kviCgcb+HM9SEOpMWwwNp9MRunWnhtAKUoo0AHhjyPPg==} - zod@3.25.76: - resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==} - zod@4.3.6: resolution: {integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==} @@ -6757,11 +6832,13 @@ packages: snapshots: - '@agentclientprotocol/sdk@0.14.1(zod@4.3.6)': + '@acemir/cssom@0.9.31': {} + + '@agentclientprotocol/sdk@0.15.0(zod@4.3.6)': dependencies: zod: 4.3.6 - '@agentclientprotocol/sdk@0.15.0(zod@4.3.6)': + '@agentclientprotocol/sdk@0.16.1(zod@4.3.6)': dependencies: zod: 4.3.6 @@ -6771,6 +6848,24 @@ snapshots: optionalDependencies: zod: 4.3.6 + '@asamuzakjp/css-color@5.0.1': + dependencies: + '@csstools/css-calc': 3.1.1(@csstools/css-parser-algorithms@4.0.0(@csstools/css-tokenizer@4.0.0))(@csstools/css-tokenizer@4.0.0) + '@csstools/css-color-parser': 4.0.2(@csstools/css-parser-algorithms@4.0.0(@csstools/css-tokenizer@4.0.0))(@csstools/css-tokenizer@4.0.0) + 
'@csstools/css-parser-algorithms': 4.0.0(@csstools/css-tokenizer@4.0.0) + '@csstools/css-tokenizer': 4.0.0 + lru-cache: 11.2.6 + + '@asamuzakjp/dom-selector@6.8.1': + dependencies: + '@asamuzakjp/nwsapi': 2.3.9 + bidi-js: 1.0.3 + css-tree: 3.2.1 + is-potential-custom-element-name: 1.0.1 + lru-cache: 11.2.6 + + '@asamuzakjp/nwsapi@2.3.9': {} + '@aws-crypto/crc32@5.2.0': dependencies: '@aws-crypto/util': 5.2.0 @@ -6797,15 +6892,15 @@ snapshots: '@aws-crypto/sha256-js': 5.2.0 '@aws-crypto/supports-web-crypto': 5.2.0 '@aws-crypto/util': 5.2.0 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-locate-window': 3.965.4 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-locate-window': 3.965.5 '@smithy/util-utf8': 2.3.0 tslib: 2.8.1 '@aws-crypto/sha256-js@5.2.0': dependencies: '@aws-crypto/util': 5.2.0 - '@aws-sdk/types': 3.973.5 + '@aws-sdk/types': 3.973.6 tslib: 2.8.1 '@aws-crypto/supports-web-crypto@5.2.0': @@ -6818,195 +6913,98 @@ snapshots: '@smithy/util-utf8': 2.3.0 tslib: 2.8.1 - '@aws-sdk/client-bedrock-runtime@3.1000.0': - dependencies: - '@aws-crypto/sha256-browser': 5.2.0 - '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.15 - '@aws-sdk/credential-provider-node': 3.972.14 - '@aws-sdk/eventstream-handler-node': 3.972.9 - '@aws-sdk/middleware-eventstream': 3.972.6 - '@aws-sdk/middleware-host-header': 3.972.6 - '@aws-sdk/middleware-logger': 3.972.6 - '@aws-sdk/middleware-recursion-detection': 3.972.6 - '@aws-sdk/middleware-user-agent': 3.972.15 - '@aws-sdk/middleware-websocket': 3.972.10 - '@aws-sdk/region-config-resolver': 3.972.6 - '@aws-sdk/token-providers': 3.1000.0 - '@aws-sdk/types': 3.973.4 - '@aws-sdk/util-endpoints': 3.996.3 - '@aws-sdk/util-user-agent-browser': 3.972.6 - '@aws-sdk/util-user-agent-node': 3.973.0 - '@smithy/config-resolver': 4.4.9 - '@smithy/core': 3.23.6 - '@smithy/eventstream-serde-browser': 4.2.10 - '@smithy/eventstream-serde-config-resolver': 4.3.10 - '@smithy/eventstream-serde-node': 4.2.10 - '@smithy/fetch-http-handler': 5.3.11 - 
'@smithy/hash-node': 4.2.10 - '@smithy/invalid-dependency': 4.2.10 - '@smithy/middleware-content-length': 4.2.10 - '@smithy/middleware-endpoint': 4.4.20 - '@smithy/middleware-retry': 4.4.37 - '@smithy/middleware-serde': 4.2.11 - '@smithy/middleware-stack': 4.2.10 - '@smithy/node-config-provider': 4.3.10 - '@smithy/node-http-handler': 4.4.12 - '@smithy/protocol-http': 5.3.10 - '@smithy/smithy-client': 4.12.0 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.10 - '@smithy/util-base64': 4.3.1 - '@smithy/util-body-length-browser': 4.2.1 - '@smithy/util-body-length-node': 4.2.2 - '@smithy/util-defaults-mode-browser': 4.3.36 - '@smithy/util-defaults-mode-node': 4.2.39 - '@smithy/util-endpoints': 3.3.1 - '@smithy/util-middleware': 4.2.10 - '@smithy/util-retry': 4.2.10 - '@smithy/util-stream': 4.5.15 - '@smithy/util-utf8': 4.2.1 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - '@aws-sdk/client-bedrock-runtime@3.1004.0': dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.18 - '@aws-sdk/credential-provider-node': 3.972.18 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/credential-provider-node': 3.972.21 '@aws-sdk/eventstream-handler-node': 3.972.10 '@aws-sdk/middleware-eventstream': 3.972.7 - '@aws-sdk/middleware-host-header': 3.972.7 - '@aws-sdk/middleware-logger': 3.972.7 - '@aws-sdk/middleware-recursion-detection': 3.972.7 - '@aws-sdk/middleware-user-agent': 3.972.19 + '@aws-sdk/middleware-host-header': 3.972.8 + '@aws-sdk/middleware-logger': 3.972.8 + '@aws-sdk/middleware-recursion-detection': 3.972.8 + '@aws-sdk/middleware-user-agent': 3.972.21 '@aws-sdk/middleware-websocket': 3.972.12 - '@aws-sdk/region-config-resolver': 3.972.7 + '@aws-sdk/region-config-resolver': 3.972.8 '@aws-sdk/token-providers': 3.1004.0 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-endpoints': 3.996.4 - '@aws-sdk/util-user-agent-browser': 3.972.7 - '@aws-sdk/util-user-agent-node': 3.973.4 - '@smithy/config-resolver': 4.4.10 - 
'@smithy/core': 3.23.9 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-endpoints': 3.996.5 + '@aws-sdk/util-user-agent-browser': 3.972.8 + '@aws-sdk/util-user-agent-node': 3.973.7 + '@smithy/config-resolver': 4.4.11 + '@smithy/core': 3.23.11 '@smithy/eventstream-serde-browser': 4.2.11 '@smithy/eventstream-serde-config-resolver': 4.3.11 '@smithy/eventstream-serde-node': 4.2.11 - '@smithy/fetch-http-handler': 5.3.13 - '@smithy/hash-node': 4.2.11 - '@smithy/invalid-dependency': 4.2.11 - '@smithy/middleware-content-length': 4.2.11 - '@smithy/middleware-endpoint': 4.4.23 - '@smithy/middleware-retry': 4.4.40 - '@smithy/middleware-serde': 4.2.12 - '@smithy/middleware-stack': 4.2.11 - '@smithy/node-config-provider': 4.3.11 - '@smithy/node-http-handler': 4.4.14 - '@smithy/protocol-http': 5.3.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.11 + '@smithy/fetch-http-handler': 5.3.15 + '@smithy/hash-node': 4.2.12 + '@smithy/invalid-dependency': 4.2.12 + '@smithy/middleware-content-length': 4.2.12 + '@smithy/middleware-endpoint': 4.4.25 + '@smithy/middleware-retry': 4.4.42 + '@smithy/middleware-serde': 4.2.14 + '@smithy/middleware-stack': 4.2.12 + '@smithy/node-config-provider': 4.3.12 + '@smithy/node-http-handler': 4.4.16 + '@smithy/protocol-http': 5.3.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 + '@smithy/url-parser': 4.2.12 '@smithy/util-base64': 4.3.2 '@smithy/util-body-length-browser': 4.2.2 '@smithy/util-body-length-node': 4.2.3 - '@smithy/util-defaults-mode-browser': 4.3.39 - '@smithy/util-defaults-mode-node': 4.2.42 - '@smithy/util-endpoints': 3.3.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-retry': 4.2.11 - '@smithy/util-stream': 4.5.17 + '@smithy/util-defaults-mode-browser': 4.3.41 + '@smithy/util-defaults-mode-node': 4.2.44 + '@smithy/util-endpoints': 3.3.3 + '@smithy/util-middleware': 4.2.12 + '@smithy/util-retry': 4.2.12 + '@smithy/util-stream': 4.5.19 '@smithy/util-utf8': 4.2.2 tslib: 2.8.1 
transitivePeerDependencies: - aws-crt - '@aws-sdk/client-bedrock@3.1000.0': + '@aws-sdk/client-bedrock@3.1009.0': dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.15 - '@aws-sdk/credential-provider-node': 3.972.14 - '@aws-sdk/middleware-host-header': 3.972.6 - '@aws-sdk/middleware-logger': 3.972.6 - '@aws-sdk/middleware-recursion-detection': 3.972.6 - '@aws-sdk/middleware-user-agent': 3.972.15 - '@aws-sdk/region-config-resolver': 3.972.6 - '@aws-sdk/token-providers': 3.1000.0 - '@aws-sdk/types': 3.973.4 - '@aws-sdk/util-endpoints': 3.996.3 - '@aws-sdk/util-user-agent-browser': 3.972.6 - '@aws-sdk/util-user-agent-node': 3.973.0 - '@smithy/config-resolver': 4.4.9 - '@smithy/core': 3.23.6 - '@smithy/fetch-http-handler': 5.3.11 - '@smithy/hash-node': 4.2.10 - '@smithy/invalid-dependency': 4.2.10 - '@smithy/middleware-content-length': 4.2.10 - '@smithy/middleware-endpoint': 4.4.20 - '@smithy/middleware-retry': 4.4.37 - '@smithy/middleware-serde': 4.2.11 - '@smithy/middleware-stack': 4.2.10 - '@smithy/node-config-provider': 4.3.10 - '@smithy/node-http-handler': 4.4.12 - '@smithy/protocol-http': 5.3.10 - '@smithy/smithy-client': 4.12.0 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.10 - '@smithy/util-base64': 4.3.1 - '@smithy/util-body-length-browser': 4.2.1 - '@smithy/util-body-length-node': 4.2.2 - '@smithy/util-defaults-mode-browser': 4.3.36 - '@smithy/util-defaults-mode-node': 4.2.39 - '@smithy/util-endpoints': 3.3.1 - '@smithy/util-middleware': 4.2.10 - '@smithy/util-retry': 4.2.10 - '@smithy/util-utf8': 4.2.1 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/client-bedrock@3.1004.0': - dependencies: - '@aws-crypto/sha256-browser': 5.2.0 - '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.18 - '@aws-sdk/credential-provider-node': 3.972.18 - '@aws-sdk/middleware-host-header': 3.972.7 - '@aws-sdk/middleware-logger': 3.972.7 - '@aws-sdk/middleware-recursion-detection': 3.972.7 - 
'@aws-sdk/middleware-user-agent': 3.972.19 - '@aws-sdk/region-config-resolver': 3.972.7 - '@aws-sdk/token-providers': 3.1004.0 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-endpoints': 3.996.4 - '@aws-sdk/util-user-agent-browser': 3.972.7 - '@aws-sdk/util-user-agent-node': 3.973.4 - '@smithy/config-resolver': 4.4.10 - '@smithy/core': 3.23.9 - '@smithy/fetch-http-handler': 5.3.13 - '@smithy/hash-node': 4.2.11 - '@smithy/invalid-dependency': 4.2.11 - '@smithy/middleware-content-length': 4.2.11 - '@smithy/middleware-endpoint': 4.4.23 - '@smithy/middleware-retry': 4.4.40 - '@smithy/middleware-serde': 4.2.12 - '@smithy/middleware-stack': 4.2.11 - '@smithy/node-config-provider': 4.3.11 - '@smithy/node-http-handler': 4.4.14 - '@smithy/protocol-http': 5.3.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.11 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/credential-provider-node': 3.972.21 + '@aws-sdk/middleware-host-header': 3.972.8 + '@aws-sdk/middleware-logger': 3.972.8 + '@aws-sdk/middleware-recursion-detection': 3.972.8 + '@aws-sdk/middleware-user-agent': 3.972.21 + '@aws-sdk/region-config-resolver': 3.972.8 + '@aws-sdk/token-providers': 3.1009.0 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-endpoints': 3.996.5 + '@aws-sdk/util-user-agent-browser': 3.972.8 + '@aws-sdk/util-user-agent-node': 3.973.7 + '@smithy/config-resolver': 4.4.11 + '@smithy/core': 3.23.11 + '@smithy/fetch-http-handler': 5.3.15 + '@smithy/hash-node': 4.2.12 + '@smithy/invalid-dependency': 4.2.12 + '@smithy/middleware-content-length': 4.2.12 + '@smithy/middleware-endpoint': 4.4.25 + '@smithy/middleware-retry': 4.4.42 + '@smithy/middleware-serde': 4.2.14 + '@smithy/middleware-stack': 4.2.12 + '@smithy/node-config-provider': 4.3.12 + '@smithy/node-http-handler': 4.4.16 + '@smithy/protocol-http': 5.3.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 + '@smithy/url-parser': 4.2.12 '@smithy/util-base64': 4.3.2 '@smithy/util-body-length-browser': 4.2.2 
'@smithy/util-body-length-node': 4.2.3 - '@smithy/util-defaults-mode-browser': 4.3.39 - '@smithy/util-defaults-mode-node': 4.2.42 - '@smithy/util-endpoints': 3.3.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-retry': 4.2.11 + '@smithy/util-defaults-mode-browser': 4.3.41 + '@smithy/util-defaults-mode-node': 4.2.44 + '@smithy/util-endpoints': 3.3.3 + '@smithy/util-middleware': 4.2.12 + '@smithy/util-retry': 4.2.12 '@smithy/util-utf8': 4.2.2 tslib: 2.8.1 transitivePeerDependencies: @@ -7088,19 +7086,19 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 - '@aws-sdk/core@3.973.18': + '@aws-sdk/core@3.973.20': dependencies: - '@aws-sdk/types': 3.973.5 - '@aws-sdk/xml-builder': 3.972.10 - '@smithy/core': 3.23.9 - '@smithy/node-config-provider': 4.3.11 - '@smithy/property-provider': 4.2.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/signature-v4': 5.3.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/xml-builder': 3.972.11 + '@smithy/core': 3.23.11 + '@smithy/node-config-provider': 4.3.12 + '@smithy/property-provider': 4.2.12 + '@smithy/protocol-http': 5.3.12 + '@smithy/signature-v4': 5.3.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 '@smithy/util-base64': 4.3.2 - '@smithy/util-middleware': 4.2.11 + '@smithy/util-middleware': 4.2.12 '@smithy/util-utf8': 4.2.2 tslib: 2.8.1 @@ -7117,12 +7115,12 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/credential-provider-env@3.972.16': + '@aws-sdk/credential-provider-env@3.972.18': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/types': 4.13.0 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/types': 3.973.6 + '@smithy/property-provider': 4.2.12 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@aws-sdk/credential-provider-http@3.972.15': @@ -7138,17 +7136,17 @@ snapshots: '@smithy/util-stream': 4.5.15 tslib: 2.8.1 - '@aws-sdk/credential-provider-http@3.972.18': + 
'@aws-sdk/credential-provider-http@3.972.20': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/types': 3.973.5 - '@smithy/fetch-http-handler': 5.3.13 - '@smithy/node-http-handler': 4.4.14 - '@smithy/property-provider': 4.2.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - '@smithy/util-stream': 4.5.17 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/types': 3.973.6 + '@smithy/fetch-http-handler': 5.3.15 + '@smithy/node-http-handler': 4.4.16 + '@smithy/property-provider': 4.2.12 + '@smithy/protocol-http': 5.3.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 + '@smithy/util-stream': 4.5.19 tslib: 2.8.1 '@aws-sdk/credential-provider-ini@3.972.13': @@ -7170,21 +7168,21 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-ini@3.972.17': + '@aws-sdk/credential-provider-ini@3.972.20': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/credential-provider-env': 3.972.16 - '@aws-sdk/credential-provider-http': 3.972.18 - '@aws-sdk/credential-provider-login': 3.972.17 - '@aws-sdk/credential-provider-process': 3.972.16 - '@aws-sdk/credential-provider-sso': 3.972.17 - '@aws-sdk/credential-provider-web-identity': 3.972.17 - '@aws-sdk/nested-clients': 3.996.7 - '@aws-sdk/types': 3.973.5 - '@smithy/credential-provider-imds': 4.2.11 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/credential-provider-env': 3.972.18 + '@aws-sdk/credential-provider-http': 3.972.20 + '@aws-sdk/credential-provider-login': 3.972.20 + '@aws-sdk/credential-provider-process': 3.972.18 + '@aws-sdk/credential-provider-sso': 3.972.20 + '@aws-sdk/credential-provider-web-identity': 3.972.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/types': 3.973.6 + '@smithy/credential-provider-imds': 4.2.12 + '@smithy/property-provider': 4.2.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 tslib: 2.8.1 
transitivePeerDependencies: - aws-crt @@ -7202,15 +7200,15 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-login@3.972.17': + '@aws-sdk/credential-provider-login@3.972.20': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/nested-clients': 3.996.7 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/types': 3.973.6 + '@smithy/property-provider': 4.2.12 + '@smithy/protocol-http': 5.3.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 tslib: 2.8.1 transitivePeerDependencies: - aws-crt @@ -7232,19 +7230,19 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-node@3.972.18': + '@aws-sdk/credential-provider-node@3.972.21': dependencies: - '@aws-sdk/credential-provider-env': 3.972.16 - '@aws-sdk/credential-provider-http': 3.972.18 - '@aws-sdk/credential-provider-ini': 3.972.17 - '@aws-sdk/credential-provider-process': 3.972.16 - '@aws-sdk/credential-provider-sso': 3.972.17 - '@aws-sdk/credential-provider-web-identity': 3.972.17 - '@aws-sdk/types': 3.973.5 - '@smithy/credential-provider-imds': 4.2.11 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 + '@aws-sdk/credential-provider-env': 3.972.18 + '@aws-sdk/credential-provider-http': 3.972.20 + '@aws-sdk/credential-provider-ini': 3.972.20 + '@aws-sdk/credential-provider-process': 3.972.18 + '@aws-sdk/credential-provider-sso': 3.972.20 + '@aws-sdk/credential-provider-web-identity': 3.972.20 + '@aws-sdk/types': 3.973.6 + '@smithy/credential-provider-imds': 4.2.12 + '@smithy/property-provider': 4.2.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 tslib: 2.8.1 transitivePeerDependencies: - aws-crt @@ -7258,13 +7256,13 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - 
'@aws-sdk/credential-provider-process@3.972.16': + '@aws-sdk/credential-provider-process@3.972.18': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/types': 3.973.6 + '@smithy/property-provider': 4.2.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@aws-sdk/credential-provider-sso@3.972.13': @@ -7280,15 +7278,15 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-sso@3.972.17': + '@aws-sdk/credential-provider-sso@3.972.20': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/nested-clients': 3.996.7 - '@aws-sdk/token-providers': 3.1004.0 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/token-providers': 3.1009.0 + '@aws-sdk/types': 3.973.6 + '@smithy/property-provider': 4.2.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 tslib: 2.8.1 transitivePeerDependencies: - aws-crt @@ -7305,30 +7303,23 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-web-identity@3.972.17': + '@aws-sdk/credential-provider-web-identity@3.972.20': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/nested-clients': 3.996.7 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/types': 3.973.6 + '@smithy/property-provider': 4.2.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 tslib: 2.8.1 transitivePeerDependencies: - aws-crt '@aws-sdk/eventstream-handler-node@3.972.10': dependencies: - '@aws-sdk/types': 3.973.5 + '@aws-sdk/types': 3.973.6 '@smithy/eventstream-codec': 
4.2.11 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - - '@aws-sdk/eventstream-handler-node@3.972.9': - dependencies: - '@aws-sdk/types': 3.973.4 - '@smithy/eventstream-codec': 4.2.10 - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@aws-sdk/middleware-bucket-endpoint@3.972.6': @@ -7341,18 +7332,11 @@ snapshots: '@smithy/util-config-provider': 4.2.1 tslib: 2.8.1 - '@aws-sdk/middleware-eventstream@3.972.6': - dependencies: - '@aws-sdk/types': 3.973.4 - '@smithy/protocol-http': 5.3.10 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@aws-sdk/middleware-eventstream@3.972.7': dependencies: - '@aws-sdk/types': 3.973.5 - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 + '@aws-sdk/types': 3.973.6 + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@aws-sdk/middleware-expect-continue@3.972.6': @@ -7386,11 +7370,11 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/middleware-host-header@3.972.7': + '@aws-sdk/middleware-host-header@3.972.8': dependencies: - '@aws-sdk/types': 3.973.5 - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 + '@aws-sdk/types': 3.973.6 + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@aws-sdk/middleware-location-constraint@3.972.6': @@ -7405,10 +7389,10 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/middleware-logger@3.972.7': + '@aws-sdk/middleware-logger@3.972.8': dependencies: - '@aws-sdk/types': 3.973.5 - '@smithy/types': 4.13.0 + '@aws-sdk/types': 3.973.6 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@aws-sdk/middleware-recursion-detection@3.972.6': @@ -7419,12 +7403,12 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/middleware-recursion-detection@3.972.7': + '@aws-sdk/middleware-recursion-detection@3.972.8': dependencies: - '@aws-sdk/types': 3.973.5 - '@aws/lambda-invoke-store': 0.2.3 - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 + '@aws-sdk/types': 3.973.6 + '@aws/lambda-invoke-store': 0.2.4 + '@smithy/protocol-http': 
5.3.12 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@aws-sdk/middleware-sdk-s3@3.972.15': @@ -7460,47 +7444,75 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/middleware-user-agent@3.972.19': + '@aws-sdk/middleware-user-agent@3.972.21': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-endpoints': 3.996.4 - '@smithy/core': 3.23.9 - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 - '@smithy/util-retry': 4.2.11 - tslib: 2.8.1 - - '@aws-sdk/middleware-websocket@3.972.10': - dependencies: - '@aws-sdk/types': 3.973.4 - '@aws-sdk/util-format-url': 3.972.6 - '@smithy/eventstream-codec': 4.2.10 - '@smithy/eventstream-serde-browser': 4.2.10 - '@smithy/fetch-http-handler': 5.3.11 - '@smithy/protocol-http': 5.3.10 - '@smithy/signature-v4': 5.3.10 - '@smithy/types': 4.13.0 - '@smithy/util-base64': 4.3.1 - '@smithy/util-hex-encoding': 4.2.1 - '@smithy/util-utf8': 4.2.1 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-endpoints': 3.996.5 + '@smithy/core': 3.23.11 + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 + '@smithy/util-retry': 4.2.12 tslib: 2.8.1 '@aws-sdk/middleware-websocket@3.972.12': dependencies: - '@aws-sdk/types': 3.973.5 + '@aws-sdk/types': 3.973.6 '@aws-sdk/util-format-url': 3.972.7 '@smithy/eventstream-codec': 4.2.11 '@smithy/eventstream-serde-browser': 4.2.11 - '@smithy/fetch-http-handler': 5.3.13 - '@smithy/protocol-http': 5.3.11 - '@smithy/signature-v4': 5.3.11 - '@smithy/types': 4.13.0 + '@smithy/fetch-http-handler': 5.3.15 + '@smithy/protocol-http': 5.3.12 + '@smithy/signature-v4': 5.3.12 + '@smithy/types': 4.13.1 '@smithy/util-base64': 4.3.2 '@smithy/util-hex-encoding': 4.2.2 '@smithy/util-utf8': 4.2.2 tslib: 2.8.1 + '@aws-sdk/nested-clients@3.996.10': + dependencies: + '@aws-crypto/sha256-browser': 5.2.0 + '@aws-crypto/sha256-js': 5.2.0 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/middleware-host-header': 3.972.8 + '@aws-sdk/middleware-logger': 3.972.8 + 
'@aws-sdk/middleware-recursion-detection': 3.972.8 + '@aws-sdk/middleware-user-agent': 3.972.21 + '@aws-sdk/region-config-resolver': 3.972.8 + '@aws-sdk/types': 3.973.6 + '@aws-sdk/util-endpoints': 3.996.5 + '@aws-sdk/util-user-agent-browser': 3.972.8 + '@aws-sdk/util-user-agent-node': 3.973.7 + '@smithy/config-resolver': 4.4.11 + '@smithy/core': 3.23.11 + '@smithy/fetch-http-handler': 5.3.15 + '@smithy/hash-node': 4.2.12 + '@smithy/invalid-dependency': 4.2.12 + '@smithy/middleware-content-length': 4.2.12 + '@smithy/middleware-endpoint': 4.4.25 + '@smithy/middleware-retry': 4.4.42 + '@smithy/middleware-serde': 4.2.14 + '@smithy/middleware-stack': 4.2.12 + '@smithy/node-config-provider': 4.3.12 + '@smithy/node-http-handler': 4.4.16 + '@smithy/protocol-http': 5.3.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 + '@smithy/url-parser': 4.2.12 + '@smithy/util-base64': 4.3.2 + '@smithy/util-body-length-browser': 4.2.2 + '@smithy/util-body-length-node': 4.2.3 + '@smithy/util-defaults-mode-browser': 4.3.41 + '@smithy/util-defaults-mode-node': 4.2.44 + '@smithy/util-endpoints': 3.3.3 + '@smithy/util-middleware': 4.2.12 + '@smithy/util-retry': 4.2.12 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/nested-clients@3.996.3': dependencies: '@aws-crypto/sha256-browser': 5.2.0 @@ -7544,49 +7556,6 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/nested-clients@3.996.7': - dependencies: - '@aws-crypto/sha256-browser': 5.2.0 - '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.18 - '@aws-sdk/middleware-host-header': 3.972.7 - '@aws-sdk/middleware-logger': 3.972.7 - '@aws-sdk/middleware-recursion-detection': 3.972.7 - '@aws-sdk/middleware-user-agent': 3.972.19 - '@aws-sdk/region-config-resolver': 3.972.7 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-endpoints': 3.996.4 - '@aws-sdk/util-user-agent-browser': 3.972.7 - '@aws-sdk/util-user-agent-node': 3.973.4 - '@smithy/config-resolver': 4.4.10 - 
'@smithy/core': 3.23.9 - '@smithy/fetch-http-handler': 5.3.13 - '@smithy/hash-node': 4.2.11 - '@smithy/invalid-dependency': 4.2.11 - '@smithy/middleware-content-length': 4.2.11 - '@smithy/middleware-endpoint': 4.4.23 - '@smithy/middleware-retry': 4.4.40 - '@smithy/middleware-serde': 4.2.12 - '@smithy/middleware-stack': 4.2.11 - '@smithy/node-config-provider': 4.3.11 - '@smithy/node-http-handler': 4.4.14 - '@smithy/protocol-http': 5.3.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.11 - '@smithy/util-base64': 4.3.2 - '@smithy/util-body-length-browser': 4.2.2 - '@smithy/util-body-length-node': 4.2.3 - '@smithy/util-defaults-mode-browser': 4.3.39 - '@smithy/util-defaults-mode-node': 4.2.42 - '@smithy/util-endpoints': 3.3.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-retry': 4.2.11 - '@smithy/util-utf8': 4.2.2 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - '@aws-sdk/region-config-resolver@3.972.6': dependencies: '@aws-sdk/types': 3.973.4 @@ -7595,12 +7564,12 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/region-config-resolver@3.972.7': + '@aws-sdk/region-config-resolver@3.972.8': dependencies: - '@aws-sdk/types': 3.973.5 - '@smithy/config-resolver': 4.4.10 - '@smithy/node-config-provider': 4.3.11 - '@smithy/types': 4.13.0 + '@aws-sdk/types': 3.973.6 + '@smithy/config-resolver': 4.4.11 + '@smithy/node-config-provider': 4.3.12 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@aws-sdk/s3-request-presigner@3.1000.0': @@ -7623,26 +7592,26 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/token-providers@3.1000.0': + '@aws-sdk/token-providers@3.1004.0': dependencies: - '@aws-sdk/core': 3.973.15 - '@aws-sdk/nested-clients': 3.996.3 - '@aws-sdk/types': 3.973.4 - '@smithy/property-provider': 4.2.10 - '@smithy/shared-ini-file-loader': 4.4.5 - '@smithy/types': 4.13.0 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/types': 3.973.6 + '@smithy/property-provider': 4.2.12 
+ '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 tslib: 2.8.1 transitivePeerDependencies: - aws-crt - '@aws-sdk/token-providers@3.1004.0': + '@aws-sdk/token-providers@3.1009.0': dependencies: - '@aws-sdk/core': 3.973.18 - '@aws-sdk/nested-clients': 3.996.7 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 + '@aws-sdk/core': 3.973.20 + '@aws-sdk/nested-clients': 3.996.10 + '@aws-sdk/types': 3.973.6 + '@smithy/property-provider': 4.2.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 tslib: 2.8.1 transitivePeerDependencies: - aws-crt @@ -7669,6 +7638,11 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 + '@aws-sdk/types@3.973.6': + dependencies: + '@smithy/types': 4.13.1 + tslib: 2.8.1 + '@aws-sdk/util-arn-parser@3.972.2': dependencies: tslib: 2.8.1 @@ -7681,12 +7655,12 @@ snapshots: '@smithy/util-endpoints': 3.3.1 tslib: 2.8.1 - '@aws-sdk/util-endpoints@3.996.4': + '@aws-sdk/util-endpoints@3.996.5': dependencies: - '@aws-sdk/types': 3.973.5 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.11 - '@smithy/util-endpoints': 3.3.2 + '@aws-sdk/types': 3.973.6 + '@smithy/types': 4.13.1 + '@smithy/url-parser': 4.2.12 + '@smithy/util-endpoints': 3.3.3 tslib: 2.8.1 '@aws-sdk/util-format-url@3.972.6': @@ -7698,15 +7672,19 @@ snapshots: '@aws-sdk/util-format-url@3.972.7': dependencies: - '@aws-sdk/types': 3.973.5 - '@smithy/querystring-builder': 4.2.11 - '@smithy/types': 4.13.0 + '@aws-sdk/types': 3.973.6 + '@smithy/querystring-builder': 4.2.12 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@aws-sdk/util-locate-window@3.965.4': dependencies: tslib: 2.8.1 + '@aws-sdk/util-locate-window@3.965.5': + dependencies: + tslib: 2.8.1 + '@aws-sdk/util-user-agent-browser@3.972.6': dependencies: '@aws-sdk/types': 3.973.4 @@ -7714,10 +7692,10 @@ snapshots: bowser: 2.14.1 tslib: 2.8.1 - '@aws-sdk/util-user-agent-browser@3.972.7': + 
'@aws-sdk/util-user-agent-browser@3.972.8': dependencies: - '@aws-sdk/types': 3.973.5 - '@smithy/types': 4.13.0 + '@aws-sdk/types': 3.973.6 + '@smithy/types': 4.13.1 bowser: 2.14.1 tslib: 2.8.1 @@ -7729,17 +7707,18 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/util-user-agent-node@3.973.4': + '@aws-sdk/util-user-agent-node@3.973.7': dependencies: - '@aws-sdk/middleware-user-agent': 3.972.19 - '@aws-sdk/types': 3.973.5 - '@smithy/node-config-provider': 4.3.11 - '@smithy/types': 4.13.0 + '@aws-sdk/middleware-user-agent': 3.972.21 + '@aws-sdk/types': 3.973.6 + '@smithy/node-config-provider': 4.3.12 + '@smithy/types': 4.13.1 + '@smithy/util-config-provider': 4.2.2 tslib: 2.8.1 - '@aws-sdk/xml-builder@3.972.10': + '@aws-sdk/xml-builder@3.972.11': dependencies: - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 fast-xml-parser: 5.3.8 tslib: 2.8.1 @@ -7751,6 +7730,8 @@ snapshots: '@aws/lambda-invoke-store@0.2.3': {} + '@aws/lambda-invoke-store@0.2.4': {} + '@azure/abort-controller@2.1.2': dependencies: tslib: 2.8.1 @@ -7818,16 +7799,22 @@ snapshots: '@bcoe/v8-coverage@1.0.2': {} - '@borewit/text-codec@0.2.1': {} + '@blazediff/core@1.9.1': {} - '@buape/carbon@0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.5)(opusscript@0.1.1)': + '@borewit/text-codec@0.2.2': {} + + '@bramus/specificity@2.4.2': dependencies: - '@types/node': 25.3.5 + css-tree: 3.2.1 + + '@buape/carbon@0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.7)(opusscript@0.1.1)': + dependencies: + '@types/node': 25.5.0 discord-api-types: 0.38.37 optionalDependencies: '@cloudflare/workers-types': 4.20260120.0 '@discordjs/voice': 0.19.0(@discordjs/opus@0.10.0)(opusscript@0.1.1) - '@hono/node-server': 1.19.10(hono@4.12.5) + '@hono/node-server': 1.19.10(hono@4.12.7) '@types/bun': 1.3.9 '@types/ws': 8.18.1 ws: 8.19.0 @@ -7858,21 +7845,10 @@ snapshots: hashery: 1.5.0 keyv: 5.6.0 - '@clack/core@1.0.1': - dependencies: - picocolors: 1.1.1 - sisteransi: 1.0.5 - '@clack/core@1.1.0': 
dependencies: sisteransi: 1.0.5 - '@clack/prompts@1.0.1': - dependencies: - '@clack/core': 1.0.1 - picocolors: 1.1.1 - sisteransi: 1.0.5 - '@clack/prompts@1.1.0': dependencies: '@clack/core': 1.1.0 @@ -7884,6 +7860,28 @@ snapshots: '@colors/colors@1.5.0': optional: true + '@csstools/color-helpers@6.0.2': {} + + '@csstools/css-calc@3.1.1(@csstools/css-parser-algorithms@4.0.0(@csstools/css-tokenizer@4.0.0))(@csstools/css-tokenizer@4.0.0)': + dependencies: + '@csstools/css-parser-algorithms': 4.0.0(@csstools/css-tokenizer@4.0.0) + '@csstools/css-tokenizer': 4.0.0 + + '@csstools/css-color-parser@4.0.2(@csstools/css-parser-algorithms@4.0.0(@csstools/css-tokenizer@4.0.0))(@csstools/css-tokenizer@4.0.0)': + dependencies: + '@csstools/color-helpers': 6.0.2 + '@csstools/css-calc': 3.1.1(@csstools/css-parser-algorithms@4.0.0(@csstools/css-tokenizer@4.0.0))(@csstools/css-tokenizer@4.0.0) + '@csstools/css-parser-algorithms': 4.0.0(@csstools/css-tokenizer@4.0.0) + '@csstools/css-tokenizer': 4.0.0 + + '@csstools/css-parser-algorithms@4.0.0(@csstools/css-tokenizer@4.0.0)': + dependencies: + '@csstools/css-tokenizer': 4.0.0 + + '@csstools/css-syntax-patches-for-csstree@1.1.0': {} + + '@csstools/css-tokenizer@4.0.0': {} + '@cypress/request-promise@5.0.0(@cypress/request@3.0.10)(@cypress/request@3.0.10)': dependencies: '@cypress/request': 3.0.10 @@ -7973,7 +7971,7 @@ snapshots: npmlog: 5.0.1 rimraf: 3.0.2 semver: 7.7.4 - tar: 7.5.10 + tar: 7.5.11 transitivePeerDependencies: - encoding - supports-color @@ -7991,7 +7989,24 @@ snapshots: '@discordjs/voice@0.19.0(@discordjs/opus@0.10.0)(opusscript@0.1.1)': dependencies: '@types/ws': 8.18.1 - discord-api-types: 0.38.41 + discord-api-types: 0.38.42 + prism-media: 1.3.5(@discordjs/opus@0.10.0)(opusscript@0.1.1) + tslib: 2.8.1 + ws: 8.19.0 + transitivePeerDependencies: + - '@discordjs/opus' + - bufferutil + - ffmpeg-static + - node-opus + - opusscript + - utf-8-validate + optional: true + + 
'@discordjs/voice@0.19.1(@discordjs/opus@0.10.0)(opusscript@0.1.1)': + dependencies: + '@snazzah/davey': 0.1.10 + '@types/ws': 8.18.1 + discord-api-types: 0.38.42 prism-media: 1.3.5(@discordjs/opus@0.10.0)(opusscript@0.1.1) tslib: 2.8.1 ws: 8.19.0 @@ -8100,43 +8115,28 @@ snapshots: '@eshaz/web-worker@1.2.2': optional: true - '@google/genai@1.43.0': + '@exodus/bytes@1.15.0(@noble/hashes@2.0.1)': + optionalDependencies: + '@noble/hashes': 2.0.1 + + '@google/genai@1.44.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))': dependencies: google-auth-library: 10.6.1 p-retry: 4.6.2 protobufjs: 7.5.4 ws: 8.19.0 + optionalDependencies: + '@modelcontextprotocol/sdk': 1.27.1(zod@4.3.6) transitivePeerDependencies: - bufferutil - supports-color - utf-8-validate - '@google/genai@1.44.0': - dependencies: - google-auth-library: 10.6.1 - p-retry: 4.6.2 - protobufjs: 7.5.4 - ws: 8.19.0 - transitivePeerDependencies: - - bufferutil - - supports-color - - utf-8-validate - - '@grammyjs/runner@2.0.3(grammy@1.41.0)': - dependencies: - abort-controller: 3.0.0 - grammy: 1.41.0 - '@grammyjs/runner@2.0.3(grammy@1.41.1)': dependencies: abort-controller: 3.0.0 grammy: 1.41.1 - '@grammyjs/transformer-throttler@1.2.1(grammy@1.41.0)': - dependencies: - bottleneck: 2.19.5 - grammy: 1.41.0 - '@grammyjs/transformer-throttler@1.2.1(grammy@1.41.1)': dependencies: bottleneck: 2.19.5 @@ -8171,10 +8171,9 @@ snapshots: transitivePeerDependencies: - supports-color - '@hono/node-server@1.19.10(hono@4.12.5)': + '@hono/node-server@1.19.10(hono@4.12.7)': dependencies: - hono: 4.12.5 - optional: true + hono: 4.12.7 '@huggingface/jinja@0.5.5': {} @@ -8404,7 +8403,7 @@ snapshots: '@line/bot-sdk@10.6.0': dependencies: - '@types/node': 24.11.0 + '@types/node': 24.12.0 optionalDependencies: axios: 1.13.5 transitivePeerDependencies: @@ -8501,9 +8500,9 @@ snapshots: std-env: 3.10.0 yoctocolors: 2.1.2 - '@mariozechner/pi-agent-core@0.55.3(ws@8.19.0)(zod@4.3.6)': + 
'@mariozechner/pi-agent-core@0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)': dependencies: - '@mariozechner/pi-ai': 0.55.3(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) transitivePeerDependencies: - '@modelcontextprotocol/sdk' - aws-crt @@ -8513,47 +8512,11 @@ snapshots: - ws - zod - '@mariozechner/pi-agent-core@0.57.1(ws@8.19.0)(zod@4.3.6)': - dependencies: - '@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6) - transitivePeerDependencies: - - '@modelcontextprotocol/sdk' - - aws-crt - - bufferutil - - supports-color - - utf-8-validate - - ws - - zod - - '@mariozechner/pi-ai@0.55.3(ws@8.19.0)(zod@4.3.6)': - dependencies: - '@anthropic-ai/sdk': 0.73.0(zod@4.3.6) - '@aws-sdk/client-bedrock-runtime': 3.1000.0 - '@google/genai': 1.43.0 - '@mistralai/mistralai': 1.10.0 - '@sinclair/typebox': 0.34.48 - ajv: 8.18.0 - ajv-formats: 3.0.1(ajv@8.18.0) - chalk: 5.6.2 - openai: 6.10.0(ws@8.19.0)(zod@4.3.6) - partial-json: 0.1.7 - proxy-agent: 6.5.0 - undici: 7.22.0 - zod-to-json-schema: 3.25.1(zod@4.3.6) - transitivePeerDependencies: - - '@modelcontextprotocol/sdk' - - aws-crt - - bufferutil - - supports-color - - utf-8-validate - - ws - - zod - - '@mariozechner/pi-ai@0.57.1(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-ai@0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)': dependencies: '@anthropic-ai/sdk': 0.73.0(zod@4.3.6) '@aws-sdk/client-bedrock-runtime': 3.1004.0 - '@google/genai': 1.44.0 + '@google/genai': 1.44.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6)) '@mistralai/mistralai': 1.14.1 '@sinclair/typebox': 0.34.48 ajv: 8.18.0 @@ -8562,7 +8525,7 @@ snapshots: openai: 6.26.0(ws@8.19.0)(zod@4.3.6) partial-json: 0.1.7 proxy-agent: 6.5.0 - undici: 7.22.0 + undici: 7.24.1 zod-to-json-schema: 3.25.1(zod@4.3.6) transitivePeerDependencies: - '@modelcontextprotocol/sdk' @@ -8573,18 +8536,18 @@ snapshots: - ws - zod - 
'@mariozechner/pi-coding-agent@0.55.3(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-coding-agent@0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)': dependencies: '@mariozechner/jiti': 2.6.5 - '@mariozechner/pi-agent-core': 0.55.3(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-ai': 0.55.3(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-tui': 0.55.3 + '@mariozechner/pi-agent-core': 0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-tui': 0.58.0 '@silvia-odwyer/photon-node': 0.3.4 chalk: 5.6.2 cli-highlight: 2.1.11 diff: 8.0.3 extract-zip: 2.0.1 - file-type: 21.3.0 + file-type: 21.3.2 glob: 13.0.6 hosted-git-info: 9.0.2 ignore: 7.0.5 @@ -8592,6 +8555,7 @@ snapshots: minimatch: 10.2.4 proper-lockfile: 4.1.2 strip-ansi: 7.2.0 + undici: 7.24.1 yaml: 2.8.2 optionalDependencies: '@mariozechner/clipboard': 0.3.2 @@ -8604,48 +8568,7 @@ snapshots: - ws - zod - '@mariozechner/pi-coding-agent@0.57.1(ws@8.19.0)(zod@4.3.6)': - dependencies: - '@mariozechner/jiti': 2.6.5 - '@mariozechner/pi-agent-core': 0.57.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-tui': 0.57.1 - '@silvia-odwyer/photon-node': 0.3.4 - chalk: 5.6.2 - cli-highlight: 2.1.11 - diff: 8.0.3 - extract-zip: 2.0.1 - file-type: 21.3.0 - glob: 13.0.6 - hosted-git-info: 9.0.2 - ignore: 7.0.5 - marked: 15.0.12 - minimatch: 10.2.4 - proper-lockfile: 4.1.2 - strip-ansi: 7.2.0 - undici: 7.22.0 - yaml: 2.8.2 - optionalDependencies: - '@mariozechner/clipboard': 0.3.2 - transitivePeerDependencies: - - '@modelcontextprotocol/sdk' - - aws-crt - - bufferutil - - supports-color - - utf-8-validate - - ws - - zod - - '@mariozechner/pi-tui@0.55.3': - dependencies: - '@types/mime-types': 2.1.4 - chalk: 5.6.2 - get-east-asian-width: 1.5.0 - koffi: 2.15.1 - marked: 15.0.12 - mime-types: 3.0.2 - - '@mariozechner/pi-tui@0.57.1': + 
'@mariozechner/pi-tui@0.58.0': dependencies: '@types/mime-types': 2.1.4 chalk: 5.6.2 @@ -8684,11 +8607,6 @@ snapshots: - debug - supports-color - '@mistralai/mistralai@1.10.0': - dependencies: - zod: 3.25.76 - zod-to-json-schema: 3.25.1(zod@3.25.76) - '@mistralai/mistralai@1.14.1': dependencies: ws: 8.19.0 @@ -8698,6 +8616,28 @@ snapshots: - bufferutil - utf-8-validate + '@modelcontextprotocol/sdk@1.27.1(zod@4.3.6)': + dependencies: + '@hono/node-server': 1.19.10(hono@4.12.7) + ajv: 8.18.0 + ajv-formats: 3.0.1(ajv@8.18.0) + content-type: 1.0.5 + cors: 2.8.6 + cross-spawn: 7.0.6 + eventsource: 3.0.7 + eventsource-parser: 3.0.6 + express: 5.2.1 + express-rate-limit: 8.3.1(express@5.2.1) + hono: 4.12.7 + jose: 6.2.1 + json-schema-typed: 8.0.2 + pkce-challenge: 5.0.1 + raw-body: 3.0.2 + zod: 4.3.6 + zod-to-json-schema: 3.25.1(zod@4.3.6) + transitivePeerDependencies: + - supports-color + '@mozilla/readability@0.6.0': {} '@napi-rs/canvas-android-arm64@0.1.95': @@ -9202,63 +9142,65 @@ snapshots: '@opentelemetry/semantic-conventions@1.40.0': {} + '@oxc-project/runtime@0.115.0': {} + '@oxc-project/types@0.115.0': {} - '@oxfmt/binding-android-arm-eabi@0.36.0': + '@oxfmt/binding-android-arm-eabi@0.40.0': optional: true - '@oxfmt/binding-android-arm64@0.36.0': + '@oxfmt/binding-android-arm64@0.40.0': optional: true - '@oxfmt/binding-darwin-arm64@0.36.0': + '@oxfmt/binding-darwin-arm64@0.40.0': optional: true - '@oxfmt/binding-darwin-x64@0.36.0': + '@oxfmt/binding-darwin-x64@0.40.0': optional: true - '@oxfmt/binding-freebsd-x64@0.36.0': + '@oxfmt/binding-freebsd-x64@0.40.0': optional: true - '@oxfmt/binding-linux-arm-gnueabihf@0.36.0': + '@oxfmt/binding-linux-arm-gnueabihf@0.40.0': optional: true - '@oxfmt/binding-linux-arm-musleabihf@0.36.0': + '@oxfmt/binding-linux-arm-musleabihf@0.40.0': optional: true - '@oxfmt/binding-linux-arm64-gnu@0.36.0': + '@oxfmt/binding-linux-arm64-gnu@0.40.0': optional: true - '@oxfmt/binding-linux-arm64-musl@0.36.0': + 
'@oxfmt/binding-linux-arm64-musl@0.40.0': optional: true - '@oxfmt/binding-linux-ppc64-gnu@0.36.0': + '@oxfmt/binding-linux-ppc64-gnu@0.40.0': optional: true - '@oxfmt/binding-linux-riscv64-gnu@0.36.0': + '@oxfmt/binding-linux-riscv64-gnu@0.40.0': optional: true - '@oxfmt/binding-linux-riscv64-musl@0.36.0': + '@oxfmt/binding-linux-riscv64-musl@0.40.0': optional: true - '@oxfmt/binding-linux-s390x-gnu@0.36.0': + '@oxfmt/binding-linux-s390x-gnu@0.40.0': optional: true - '@oxfmt/binding-linux-x64-gnu@0.36.0': + '@oxfmt/binding-linux-x64-gnu@0.40.0': optional: true - '@oxfmt/binding-linux-x64-musl@0.36.0': + '@oxfmt/binding-linux-x64-musl@0.40.0': optional: true - '@oxfmt/binding-openharmony-arm64@0.36.0': + '@oxfmt/binding-openharmony-arm64@0.40.0': optional: true - '@oxfmt/binding-win32-arm64-msvc@0.36.0': + '@oxfmt/binding-win32-arm64-msvc@0.40.0': optional: true - '@oxfmt/binding-win32-ia32-msvc@0.36.0': + '@oxfmt/binding-win32-ia32-msvc@0.40.0': optional: true - '@oxfmt/binding-win32-x64-msvc@0.36.0': + '@oxfmt/binding-win32-x64-msvc@0.40.0': optional: true '@oxlint-tsgolint/darwin-arm64@0.16.0': @@ -9279,67 +9221,66 @@ snapshots: '@oxlint-tsgolint/win32-x64@0.16.0': optional: true - '@oxlint/binding-android-arm-eabi@1.51.0': + '@oxlint/binding-android-arm-eabi@1.55.0': optional: true - '@oxlint/binding-android-arm64@1.51.0': + '@oxlint/binding-android-arm64@1.55.0': optional: true - '@oxlint/binding-darwin-arm64@1.51.0': + '@oxlint/binding-darwin-arm64@1.55.0': optional: true - '@oxlint/binding-darwin-x64@1.51.0': + '@oxlint/binding-darwin-x64@1.55.0': optional: true - '@oxlint/binding-freebsd-x64@1.51.0': + '@oxlint/binding-freebsd-x64@1.55.0': optional: true - '@oxlint/binding-linux-arm-gnueabihf@1.51.0': + '@oxlint/binding-linux-arm-gnueabihf@1.55.0': optional: true - '@oxlint/binding-linux-arm-musleabihf@1.51.0': + '@oxlint/binding-linux-arm-musleabihf@1.55.0': optional: true - '@oxlint/binding-linux-arm64-gnu@1.51.0': + 
'@oxlint/binding-linux-arm64-gnu@1.55.0': optional: true - '@oxlint/binding-linux-arm64-musl@1.51.0': + '@oxlint/binding-linux-arm64-musl@1.55.0': optional: true - '@oxlint/binding-linux-ppc64-gnu@1.51.0': + '@oxlint/binding-linux-ppc64-gnu@1.55.0': optional: true - '@oxlint/binding-linux-riscv64-gnu@1.51.0': + '@oxlint/binding-linux-riscv64-gnu@1.55.0': optional: true - '@oxlint/binding-linux-riscv64-musl@1.51.0': + '@oxlint/binding-linux-riscv64-musl@1.55.0': optional: true - '@oxlint/binding-linux-s390x-gnu@1.51.0': + '@oxlint/binding-linux-s390x-gnu@1.55.0': optional: true - '@oxlint/binding-linux-x64-gnu@1.51.0': + '@oxlint/binding-linux-x64-gnu@1.55.0': optional: true - '@oxlint/binding-linux-x64-musl@1.51.0': + '@oxlint/binding-linux-x64-musl@1.55.0': optional: true - '@oxlint/binding-openharmony-arm64@1.51.0': + '@oxlint/binding-openharmony-arm64@1.55.0': optional: true - '@oxlint/binding-win32-arm64-msvc@1.51.0': + '@oxlint/binding-win32-arm64-msvc@1.55.0': optional: true - '@oxlint/binding-win32-ia32-msvc@1.51.0': + '@oxlint/binding-win32-ia32-msvc@1.55.0': optional: true - '@oxlint/binding-win32-x64-msvc@1.51.0': + '@oxlint/binding-win32-x64-msvc@1.55.0': optional: true - '@pierre/diffs@1.0.11(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': + '@pierre/diffs@1.1.0(react-dom@19.2.4(react@19.2.4))(react@19.2.4)': dependencies: - '@shikijs/core': 3.23.0 - '@shikijs/engine-javascript': 3.23.0 + '@pierre/theme': 0.0.22 '@shikijs/transformers': 3.23.0 diff: 8.0.3 hast-util-to-html: 9.0.5 @@ -9348,6 +9289,8 @@ snapshots: react-dom: 19.2.4(react@19.2.4) shiki: 3.23.0 + '@pierre/theme@0.0.22': {} + '@pinojs/redact@0.4.0': {} '@pkgjs/parseargs@0.11.0': @@ -9418,129 +9361,54 @@ snapshots: '@reflink/reflink-win32-x64-msvc': 0.1.19 optional: true - '@rolldown/binding-android-arm64@1.0.0-rc.7': + '@rolldown/binding-android-arm64@1.0.0-rc.9': optional: true - '@rolldown/binding-darwin-arm64@1.0.0-rc.7': + '@rolldown/binding-darwin-arm64@1.0.0-rc.9': optional: true - 
'@rolldown/binding-darwin-x64@1.0.0-rc.7': + '@rolldown/binding-darwin-x64@1.0.0-rc.9': optional: true - '@rolldown/binding-freebsd-x64@1.0.0-rc.7': + '@rolldown/binding-freebsd-x64@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-arm-gnueabihf@1.0.0-rc.7': + '@rolldown/binding-linux-arm-gnueabihf@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-arm64-gnu@1.0.0-rc.7': + '@rolldown/binding-linux-arm64-gnu@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-arm64-musl@1.0.0-rc.7': + '@rolldown/binding-linux-arm64-musl@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-ppc64-gnu@1.0.0-rc.7': + '@rolldown/binding-linux-ppc64-gnu@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-s390x-gnu@1.0.0-rc.7': + '@rolldown/binding-linux-s390x-gnu@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-x64-gnu@1.0.0-rc.7': + '@rolldown/binding-linux-x64-gnu@1.0.0-rc.9': optional: true - '@rolldown/binding-linux-x64-musl@1.0.0-rc.7': + '@rolldown/binding-linux-x64-musl@1.0.0-rc.9': optional: true - '@rolldown/binding-openharmony-arm64@1.0.0-rc.7': + '@rolldown/binding-openharmony-arm64@1.0.0-rc.9': optional: true - '@rolldown/binding-wasm32-wasi@1.0.0-rc.7': + '@rolldown/binding-wasm32-wasi@1.0.0-rc.9': dependencies: '@napi-rs/wasm-runtime': 1.1.1 optional: true - '@rolldown/binding-win32-arm64-msvc@1.0.0-rc.7': + '@rolldown/binding-win32-arm64-msvc@1.0.0-rc.9': optional: true - '@rolldown/binding-win32-x64-msvc@1.0.0-rc.7': + '@rolldown/binding-win32-x64-msvc@1.0.0-rc.9': optional: true - '@rolldown/pluginutils@1.0.0-rc.7': {} - - '@rollup/rollup-android-arm-eabi@4.59.0': - optional: true - - '@rollup/rollup-android-arm64@4.59.0': - optional: true - - '@rollup/rollup-darwin-arm64@4.59.0': - optional: true - - '@rollup/rollup-darwin-x64@4.59.0': - optional: true - - '@rollup/rollup-freebsd-arm64@4.59.0': - optional: true - - '@rollup/rollup-freebsd-x64@4.59.0': - optional: true - - '@rollup/rollup-linux-arm-gnueabihf@4.59.0': - optional: true - - 
'@rollup/rollup-linux-arm-musleabihf@4.59.0': - optional: true - - '@rollup/rollup-linux-arm64-gnu@4.59.0': - optional: true - - '@rollup/rollup-linux-arm64-musl@4.59.0': - optional: true - - '@rollup/rollup-linux-loong64-gnu@4.59.0': - optional: true - - '@rollup/rollup-linux-loong64-musl@4.59.0': - optional: true - - '@rollup/rollup-linux-ppc64-gnu@4.59.0': - optional: true - - '@rollup/rollup-linux-ppc64-musl@4.59.0': - optional: true - - '@rollup/rollup-linux-riscv64-gnu@4.59.0': - optional: true - - '@rollup/rollup-linux-riscv64-musl@4.59.0': - optional: true - - '@rollup/rollup-linux-s390x-gnu@4.59.0': - optional: true - - '@rollup/rollup-linux-x64-gnu@4.59.0': - optional: true - - '@rollup/rollup-linux-x64-musl@4.59.0': - optional: true - - '@rollup/rollup-openbsd-x64@4.59.0': - optional: true - - '@rollup/rollup-openharmony-arm64@4.59.0': - optional: true - - '@rollup/rollup-win32-arm64-msvc@4.59.0': - optional: true - - '@rollup/rollup-win32-ia32-msvc@4.59.0': - optional: true - - '@rollup/rollup-win32-x64-gnu@4.59.0': - optional: true - - '@rollup/rollup-win32-x64-msvc@4.59.0': - optional: true + '@rolldown/pluginutils@1.0.0-rc.9': {} '@scure/base@2.0.0': {} @@ -9608,7 +9476,7 @@ snapshots: '@slack/oauth': 3.0.4 '@slack/socket-mode': 2.0.5 '@slack/types': 2.20.0 - '@slack/web-api': 7.14.1 + '@slack/web-api': 7.15.0 '@types/express': 5.0.6 axios: 1.13.5 express: 5.2.1 @@ -9623,14 +9491,18 @@ snapshots: '@slack/logger@4.0.0': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.5.0 + + '@slack/logger@4.0.1': + dependencies: + '@types/node': 25.5.0 '@slack/oauth@3.0.4': dependencies: '@slack/logger': 4.0.0 - '@slack/web-api': 7.14.1 + '@slack/web-api': 7.15.0 '@types/jsonwebtoken': 9.0.10 - '@types/node': 25.3.5 + '@types/node': 25.5.0 jsonwebtoken: 9.0.3 transitivePeerDependencies: - debug @@ -9638,8 +9510,8 @@ snapshots: '@slack/socket-mode@2.0.5': dependencies: '@slack/logger': 4.0.0 - '@slack/web-api': 7.14.1 - '@types/node': 25.3.5 + 
'@slack/web-api': 7.15.0 + '@types/node': 25.5.0 '@types/ws': 8.18.1 eventemitter3: 5.0.4 ws: 8.19.0 @@ -9650,13 +9522,15 @@ snapshots: '@slack/types@2.20.0': {} - '@slack/web-api@7.14.1': + '@slack/types@2.20.1': {} + + '@slack/web-api@7.15.0': dependencies: - '@slack/logger': 4.0.0 - '@slack/types': 2.20.0 - '@types/node': 25.3.5 + '@slack/logger': 4.0.1 + '@slack/types': 2.20.1 + '@types/node': 25.5.0 '@types/retry': 0.12.0 - axios: 1.13.5 + axios: 1.13.6 eventemitter3: 5.0.4 form-data: 2.5.4 is-electron: 2.2.2 @@ -9672,9 +9546,9 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/abort-controller@4.2.11': + '@smithy/abort-controller@4.2.12': dependencies: - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/chunked-blob-reader-native@4.2.2': @@ -9686,13 +9560,13 @@ snapshots: dependencies: tslib: 2.8.1 - '@smithy/config-resolver@4.4.10': + '@smithy/config-resolver@4.4.11': dependencies: - '@smithy/node-config-provider': 4.3.11 - '@smithy/types': 4.13.0 + '@smithy/node-config-provider': 4.3.12 + '@smithy/types': 4.13.1 '@smithy/util-config-provider': 4.2.2 - '@smithy/util-endpoints': 3.3.2 - '@smithy/util-middleware': 4.2.11 + '@smithy/util-endpoints': 3.3.3 + '@smithy/util-middleware': 4.2.12 tslib: 2.8.1 '@smithy/config-resolver@4.4.9': @@ -9704,6 +9578,19 @@ snapshots: '@smithy/util-middleware': 4.2.10 tslib: 2.8.1 + '@smithy/core@3.23.11': + dependencies: + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 + '@smithy/url-parser': 4.2.12 + '@smithy/util-base64': 4.3.2 + '@smithy/util-body-length-browser': 4.2.2 + '@smithy/util-middleware': 4.2.12 + '@smithy/util-stream': 4.5.19 + '@smithy/util-utf8': 4.2.2 + '@smithy/uuid': 1.1.2 + tslib: 2.8.1 + '@smithy/core@3.23.6': dependencies: '@smithy/middleware-serde': 4.2.11 @@ -9717,19 +9604,6 @@ snapshots: '@smithy/uuid': 1.1.1 tslib: 2.8.1 - '@smithy/core@3.23.9': - dependencies: - '@smithy/middleware-serde': 4.2.12 - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 
- '@smithy/util-base64': 4.3.2 - '@smithy/util-body-length-browser': 4.2.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-stream': 4.5.17 - '@smithy/util-utf8': 4.2.2 - '@smithy/uuid': 1.1.2 - tslib: 2.8.1 - '@smithy/credential-provider-imds@4.2.10': dependencies: '@smithy/node-config-provider': 4.3.10 @@ -9738,12 +9612,12 @@ snapshots: '@smithy/url-parser': 4.2.10 tslib: 2.8.1 - '@smithy/credential-provider-imds@4.2.11': + '@smithy/credential-provider-imds@4.2.12': dependencies: - '@smithy/node-config-provider': 4.3.11 - '@smithy/property-provider': 4.2.11 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.11 + '@smithy/node-config-provider': 4.3.12 + '@smithy/property-provider': 4.2.12 + '@smithy/types': 4.13.1 + '@smithy/url-parser': 4.2.12 tslib: 2.8.1 '@smithy/eventstream-codec@4.2.10': @@ -9756,7 +9630,7 @@ snapshots: '@smithy/eventstream-codec@4.2.11': dependencies: '@aws-crypto/crc32': 5.2.0 - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 '@smithy/util-hex-encoding': 4.2.2 tslib: 2.8.1 @@ -9769,7 +9643,7 @@ snapshots: '@smithy/eventstream-serde-browser@4.2.11': dependencies: '@smithy/eventstream-serde-universal': 4.2.11 - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/eventstream-serde-config-resolver@4.3.10': @@ -9779,7 +9653,7 @@ snapshots: '@smithy/eventstream-serde-config-resolver@4.3.11': dependencies: - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/eventstream-serde-node@4.2.10': @@ -9791,7 +9665,7 @@ snapshots: '@smithy/eventstream-serde-node@4.2.11': dependencies: '@smithy/eventstream-serde-universal': 4.2.11 - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/eventstream-serde-universal@4.2.10': @@ -9803,7 +9677,7 @@ snapshots: '@smithy/eventstream-serde-universal@4.2.11': dependencies: '@smithy/eventstream-codec': 4.2.11 - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/fetch-http-handler@5.3.11': @@ -9814,11 +9688,11 @@ snapshots: 
'@smithy/util-base64': 4.3.1 tslib: 2.8.1 - '@smithy/fetch-http-handler@5.3.13': + '@smithy/fetch-http-handler@5.3.15': dependencies: - '@smithy/protocol-http': 5.3.11 - '@smithy/querystring-builder': 4.2.11 - '@smithy/types': 4.13.0 + '@smithy/protocol-http': 5.3.12 + '@smithy/querystring-builder': 4.2.12 + '@smithy/types': 4.13.1 '@smithy/util-base64': 4.3.2 tslib: 2.8.1 @@ -9836,9 +9710,9 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 - '@smithy/hash-node@4.2.11': + '@smithy/hash-node@4.2.12': dependencies: - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 '@smithy/util-buffer-from': 4.2.2 '@smithy/util-utf8': 4.2.2 tslib: 2.8.1 @@ -9854,9 +9728,9 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/invalid-dependency@4.2.11': + '@smithy/invalid-dependency@4.2.12': dependencies: - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/is-array-buffer@2.2.0': @@ -9883,10 +9757,10 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/middleware-content-length@4.2.11': + '@smithy/middleware-content-length@4.2.12': dependencies: - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/middleware-endpoint@4.4.20': @@ -9900,15 +9774,15 @@ snapshots: '@smithy/util-middleware': 4.2.10 tslib: 2.8.1 - '@smithy/middleware-endpoint@4.4.23': + '@smithy/middleware-endpoint@4.4.25': dependencies: - '@smithy/core': 3.23.9 - '@smithy/middleware-serde': 4.2.12 - '@smithy/node-config-provider': 4.3.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.11 - '@smithy/util-middleware': 4.2.11 + '@smithy/core': 3.23.11 + '@smithy/middleware-serde': 4.2.14 + '@smithy/node-config-provider': 4.3.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 + '@smithy/url-parser': 4.2.12 + '@smithy/util-middleware': 4.2.12 tslib: 2.8.1 '@smithy/middleware-retry@4.4.37': @@ -9923,15 +9797,15 @@ snapshots: 
'@smithy/uuid': 1.1.1 tslib: 2.8.1 - '@smithy/middleware-retry@4.4.40': + '@smithy/middleware-retry@4.4.42': dependencies: - '@smithy/node-config-provider': 4.3.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/service-error-classification': 4.2.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-retry': 4.2.11 + '@smithy/node-config-provider': 4.3.12 + '@smithy/protocol-http': 5.3.12 + '@smithy/service-error-classification': 4.2.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 + '@smithy/util-middleware': 4.2.12 + '@smithy/util-retry': 4.2.12 '@smithy/uuid': 1.1.2 tslib: 2.8.1 @@ -9941,10 +9815,11 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/middleware-serde@4.2.12': + '@smithy/middleware-serde@4.2.14': dependencies: - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 + '@smithy/core': 3.23.11 + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/middleware-stack@4.2.10': @@ -9952,9 +9827,9 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/middleware-stack@4.2.11': + '@smithy/middleware-stack@4.2.12': dependencies: - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/node-config-provider@4.3.10': @@ -9964,11 +9839,11 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/node-config-provider@4.3.11': + '@smithy/node-config-provider@4.3.12': dependencies: - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 + '@smithy/property-provider': 4.2.12 + '@smithy/shared-ini-file-loader': 4.4.7 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/node-http-handler@4.4.12': @@ -9979,12 +9854,12 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/node-http-handler@4.4.14': + '@smithy/node-http-handler@4.4.16': dependencies: - '@smithy/abort-controller': 4.2.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/querystring-builder': 4.2.11 - '@smithy/types': 
4.13.0 + '@smithy/abort-controller': 4.2.12 + '@smithy/protocol-http': 5.3.12 + '@smithy/querystring-builder': 4.2.12 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/property-provider@4.2.10': @@ -9992,9 +9867,9 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/property-provider@4.2.11': + '@smithy/property-provider@4.2.12': dependencies: - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/protocol-http@5.3.10': @@ -10002,9 +9877,9 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/protocol-http@5.3.11': + '@smithy/protocol-http@5.3.12': dependencies: - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/querystring-builder@4.2.10': @@ -10013,9 +9888,9 @@ snapshots: '@smithy/util-uri-escape': 4.2.1 tslib: 2.8.1 - '@smithy/querystring-builder@4.2.11': + '@smithy/querystring-builder@4.2.12': dependencies: - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 '@smithy/util-uri-escape': 4.2.2 tslib: 2.8.1 @@ -10024,27 +9899,27 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/querystring-parser@4.2.11': + '@smithy/querystring-parser@4.2.12': dependencies: - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/service-error-classification@4.2.10': dependencies: '@smithy/types': 4.13.0 - '@smithy/service-error-classification@4.2.11': + '@smithy/service-error-classification@4.2.12': dependencies: - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 '@smithy/shared-ini-file-loader@4.4.5': dependencies: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/shared-ini-file-loader@4.4.6': + '@smithy/shared-ini-file-loader@4.4.7': dependencies: - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/signature-v4@5.3.10': @@ -10058,13 +9933,13 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 - '@smithy/signature-v4@5.3.11': + '@smithy/signature-v4@5.3.12': dependencies: '@smithy/is-array-buffer': 4.2.2 - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 + 
'@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 '@smithy/util-hex-encoding': 4.2.2 - '@smithy/util-middleware': 4.2.11 + '@smithy/util-middleware': 4.2.12 '@smithy/util-uri-escape': 4.2.2 '@smithy/util-utf8': 4.2.2 tslib: 2.8.1 @@ -10079,30 +9954,34 @@ snapshots: '@smithy/util-stream': 4.5.15 tslib: 2.8.1 - '@smithy/smithy-client@4.12.3': + '@smithy/smithy-client@4.12.5': dependencies: - '@smithy/core': 3.23.9 - '@smithy/middleware-endpoint': 4.4.23 - '@smithy/middleware-stack': 4.2.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/types': 4.13.0 - '@smithy/util-stream': 4.5.17 + '@smithy/core': 3.23.11 + '@smithy/middleware-endpoint': 4.4.25 + '@smithy/middleware-stack': 4.2.12 + '@smithy/protocol-http': 5.3.12 + '@smithy/types': 4.13.1 + '@smithy/util-stream': 4.5.19 tslib: 2.8.1 '@smithy/types@4.13.0': dependencies: tslib: 2.8.1 + '@smithy/types@4.13.1': + dependencies: + tslib: 2.8.1 + '@smithy/url-parser@4.2.10': dependencies: '@smithy/querystring-parser': 4.2.10 '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/url-parser@4.2.11': + '@smithy/url-parser@4.2.12': dependencies: - '@smithy/querystring-parser': 4.2.11 - '@smithy/types': 4.13.0 + '@smithy/querystring-parser': 4.2.12 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/util-base64@4.3.1': @@ -10163,11 +10042,11 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/util-defaults-mode-browser@4.3.39': + '@smithy/util-defaults-mode-browser@4.3.41': dependencies: - '@smithy/property-provider': 4.2.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 + '@smithy/property-provider': 4.2.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/util-defaults-mode-node@4.2.39': @@ -10180,14 +10059,14 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/util-defaults-mode-node@4.2.42': + '@smithy/util-defaults-mode-node@4.2.44': dependencies: - '@smithy/config-resolver': 4.4.10 - '@smithy/credential-provider-imds': 4.2.11 - '@smithy/node-config-provider': 
4.3.11 - '@smithy/property-provider': 4.2.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 + '@smithy/config-resolver': 4.4.11 + '@smithy/credential-provider-imds': 4.2.12 + '@smithy/node-config-provider': 4.3.12 + '@smithy/property-provider': 4.2.12 + '@smithy/smithy-client': 4.12.5 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/util-endpoints@3.3.1': @@ -10196,10 +10075,10 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/util-endpoints@3.3.2': + '@smithy/util-endpoints@3.3.3': dependencies: - '@smithy/node-config-provider': 4.3.11 - '@smithy/types': 4.13.0 + '@smithy/node-config-provider': 4.3.12 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/util-hex-encoding@4.2.1': @@ -10215,9 +10094,9 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/util-middleware@4.2.11': + '@smithy/util-middleware@4.2.12': dependencies: - '@smithy/types': 4.13.0 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/util-retry@4.2.10': @@ -10226,10 +10105,10 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@smithy/util-retry@4.2.11': + '@smithy/util-retry@4.2.12': dependencies: - '@smithy/service-error-classification': 4.2.11 - '@smithy/types': 4.13.0 + '@smithy/service-error-classification': 4.2.12 + '@smithy/types': 4.13.1 tslib: 2.8.1 '@smithy/util-stream@4.5.15': @@ -10243,11 +10122,11 @@ snapshots: '@smithy/util-utf8': 4.2.1 tslib: 2.8.1 - '@smithy/util-stream@4.5.17': + '@smithy/util-stream@4.5.19': dependencies: - '@smithy/fetch-http-handler': 5.3.13 - '@smithy/node-http-handler': 4.4.14 - '@smithy/types': 4.13.0 + '@smithy/fetch-http-handler': 5.3.15 + '@smithy/node-http-handler': 4.4.16 + '@smithy/types': 4.13.1 '@smithy/util-base64': 4.3.2 '@smithy/util-buffer-from': 4.2.2 '@smithy/util-hex-encoding': 4.2.2 @@ -10291,66 +10170,66 @@ snapshots: dependencies: tslib: 2.8.1 - '@snazzah/davey-android-arm-eabi@0.1.9': + '@snazzah/davey-android-arm-eabi@0.1.10': optional: true - '@snazzah/davey-android-arm64@0.1.9': + 
'@snazzah/davey-android-arm64@0.1.10': optional: true - '@snazzah/davey-darwin-arm64@0.1.9': + '@snazzah/davey-darwin-arm64@0.1.10': optional: true - '@snazzah/davey-darwin-x64@0.1.9': + '@snazzah/davey-darwin-x64@0.1.10': optional: true - '@snazzah/davey-freebsd-x64@0.1.9': + '@snazzah/davey-freebsd-x64@0.1.10': optional: true - '@snazzah/davey-linux-arm-gnueabihf@0.1.9': + '@snazzah/davey-linux-arm-gnueabihf@0.1.10': optional: true - '@snazzah/davey-linux-arm64-gnu@0.1.9': + '@snazzah/davey-linux-arm64-gnu@0.1.10': optional: true - '@snazzah/davey-linux-arm64-musl@0.1.9': + '@snazzah/davey-linux-arm64-musl@0.1.10': optional: true - '@snazzah/davey-linux-x64-gnu@0.1.9': + '@snazzah/davey-linux-x64-gnu@0.1.10': optional: true - '@snazzah/davey-linux-x64-musl@0.1.9': + '@snazzah/davey-linux-x64-musl@0.1.10': optional: true - '@snazzah/davey-wasm32-wasi@0.1.9': + '@snazzah/davey-wasm32-wasi@0.1.10': dependencies: '@napi-rs/wasm-runtime': 1.1.1 optional: true - '@snazzah/davey-win32-arm64-msvc@0.1.9': + '@snazzah/davey-win32-arm64-msvc@0.1.10': optional: true - '@snazzah/davey-win32-ia32-msvc@0.1.9': + '@snazzah/davey-win32-ia32-msvc@0.1.10': optional: true - '@snazzah/davey-win32-x64-msvc@0.1.9': + '@snazzah/davey-win32-x64-msvc@0.1.10': optional: true - '@snazzah/davey@0.1.9': + '@snazzah/davey@0.1.10': optionalDependencies: - '@snazzah/davey-android-arm-eabi': 0.1.9 - '@snazzah/davey-android-arm64': 0.1.9 - '@snazzah/davey-darwin-arm64': 0.1.9 - '@snazzah/davey-darwin-x64': 0.1.9 - '@snazzah/davey-freebsd-x64': 0.1.9 - '@snazzah/davey-linux-arm-gnueabihf': 0.1.9 - '@snazzah/davey-linux-arm64-gnu': 0.1.9 - '@snazzah/davey-linux-arm64-musl': 0.1.9 - '@snazzah/davey-linux-x64-gnu': 0.1.9 - '@snazzah/davey-linux-x64-musl': 0.1.9 - '@snazzah/davey-wasm32-wasi': 0.1.9 - '@snazzah/davey-win32-arm64-msvc': 0.1.9 - '@snazzah/davey-win32-ia32-msvc': 0.1.9 - '@snazzah/davey-win32-x64-msvc': 0.1.9 + '@snazzah/davey-android-arm-eabi': 0.1.10 + '@snazzah/davey-android-arm64': 
0.1.10 + '@snazzah/davey-darwin-arm64': 0.1.10 + '@snazzah/davey-darwin-x64': 0.1.10 + '@snazzah/davey-freebsd-x64': 0.1.10 + '@snazzah/davey-linux-arm-gnueabihf': 0.1.10 + '@snazzah/davey-linux-arm64-gnu': 0.1.10 + '@snazzah/davey-linux-arm64-musl': 0.1.10 + '@snazzah/davey-linux-x64-gnu': 0.1.10 + '@snazzah/davey-linux-x64-musl': 0.1.10 + '@snazzah/davey-wasm32-wasi': 0.1.10 + '@snazzah/davey-win32-arm64-msvc': 0.1.10 + '@snazzah/davey-win32-ia32-msvc': 0.1.10 + '@snazzah/davey-win32-x64-msvc': 0.1.10 '@standard-schema/spec@1.1.0': {} @@ -10479,7 +10358,7 @@ snapshots: '@types/body-parser@1.19.6': dependencies: '@types/connect': 3.4.38 - '@types/node': 25.3.5 + '@types/node': 25.5.0 '@types/bun@1.3.9': dependencies: @@ -10499,7 +10378,7 @@ snapshots: '@types/connect@3.4.38': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.5.0 '@types/deep-eql@4.0.2': {} @@ -10507,14 +10386,14 @@ snapshots: '@types/express-serve-static-core@4.19.8': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.5.0 '@types/qs': 6.14.0 '@types/range-parser': 1.2.7 '@types/send': 1.2.1 '@types/express-serve-static-core@5.1.1': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.5.0 '@types/qs': 6.14.0 '@types/range-parser': 1.2.7 '@types/send': 1.2.1 @@ -10543,7 +10422,7 @@ snapshots: '@types/jsonwebtoken@9.0.10': dependencies: '@types/ms': 2.1.0 - '@types/node': 25.3.5 + '@types/node': 25.5.0 '@types/linkify-it@5.0.0': {} @@ -10572,11 +10451,11 @@ snapshots: dependencies: undici-types: 6.21.0 - '@types/node@24.11.0': + '@types/node@24.12.0': dependencies: undici-types: 7.16.0 - '@types/node@25.3.5': + '@types/node@25.5.0': dependencies: undici-types: 7.18.2 @@ -10589,7 +10468,7 @@ snapshots: '@types/request@2.48.13': dependencies: '@types/caseless': 0.12.5 - '@types/node': 25.3.5 + '@types/node': 25.5.0 '@types/tough-cookie': 4.0.5 form-data: 2.5.4 @@ -10600,22 +10479,22 @@ snapshots: '@types/send@0.17.6': dependencies: '@types/mime': 1.3.5 - '@types/node': 25.3.5 + 
'@types/node': 25.5.0 '@types/send@1.2.1': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.5.0 '@types/serve-static@1.15.10': dependencies: '@types/http-errors': 2.0.5 - '@types/node': 25.3.5 + '@types/node': 25.5.0 '@types/send': 0.17.6 '@types/serve-static@2.2.0': dependencies: '@types/http-errors': 2.0.5 - '@types/node': 25.3.5 + '@types/node': 25.5.0 '@types/tough-cookie@4.0.5': {} @@ -10625,43 +10504,43 @@ snapshots: '@types/ws@8.18.1': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.5.0 '@types/yauzl@2.10.3': dependencies: - '@types/node': 25.3.5 + '@types/node': 25.5.0 optional: true - '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260308.1': + '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260313.1': optional: true - '@typescript/native-preview-darwin-x64@7.0.0-dev.20260308.1': + '@typescript/native-preview-darwin-x64@7.0.0-dev.20260313.1': optional: true - '@typescript/native-preview-linux-arm64@7.0.0-dev.20260308.1': + '@typescript/native-preview-linux-arm64@7.0.0-dev.20260313.1': optional: true - '@typescript/native-preview-linux-arm@7.0.0-dev.20260308.1': + '@typescript/native-preview-linux-arm@7.0.0-dev.20260313.1': optional: true - '@typescript/native-preview-linux-x64@7.0.0-dev.20260308.1': + '@typescript/native-preview-linux-x64@7.0.0-dev.20260313.1': optional: true - '@typescript/native-preview-win32-arm64@7.0.0-dev.20260308.1': + '@typescript/native-preview-win32-arm64@7.0.0-dev.20260313.1': optional: true - '@typescript/native-preview-win32-x64@7.0.0-dev.20260308.1': + '@typescript/native-preview-win32-x64@7.0.0-dev.20260313.1': optional: true - '@typescript/native-preview@7.0.0-dev.20260308.1': + '@typescript/native-preview@7.0.0-dev.20260313.1': optionalDependencies: - '@typescript/native-preview-darwin-arm64': 7.0.0-dev.20260308.1 - '@typescript/native-preview-darwin-x64': 7.0.0-dev.20260308.1 - '@typescript/native-preview-linux-arm': 7.0.0-dev.20260308.1 - '@typescript/native-preview-linux-arm64': 
7.0.0-dev.20260308.1 - '@typescript/native-preview-linux-x64': 7.0.0-dev.20260308.1 - '@typescript/native-preview-win32-arm64': 7.0.0-dev.20260308.1 - '@typescript/native-preview-win32-x64': 7.0.0-dev.20260308.1 + '@typescript/native-preview-darwin-arm64': 7.0.0-dev.20260313.1 + '@typescript/native-preview-darwin-x64': 7.0.0-dev.20260313.1 + '@typescript/native-preview-linux-arm': 7.0.0-dev.20260313.1 + '@typescript/native-preview-linux-arm64': 7.0.0-dev.20260313.1 + '@typescript/native-preview-linux-x64': 7.0.0-dev.20260313.1 + '@typescript/native-preview-win32-arm64': 7.0.0-dev.20260313.1 + '@typescript/native-preview-win32-x64': 7.0.0-dev.20260313.1 '@typespec/ts-http-runtime@0.3.3': dependencies: @@ -10702,29 +10581,29 @@ snapshots: - '@cypress/request' - supports-color - '@vitest/browser-playwright@4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18)': + '@vitest/browser-playwright@4.1.0(playwright@1.58.2)(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.1.0)': dependencies: - '@vitest/browser': 4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) - '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/browser': 4.1.0(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.1.0) + '@vitest/mocker': 4.1.0(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) playwright: 1.58.2 - tinyrainbow: 3.0.3 - vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + tinyrainbow: 3.1.0 + vitest: 
4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(@vitest/browser-playwright@4.1.0)(jsdom@28.1.0(@noble/hashes@2.0.1))(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) transitivePeerDependencies: - bufferutil - msw - utf-8-validate - vite - '@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18)': + '@vitest/browser@4.1.0(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.1.0)': dependencies: - '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) - '@vitest/utils': 4.0.18 + '@blazediff/core': 1.9.1 + '@vitest/mocker': 4.1.0(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/utils': 4.1.0 magic-string: 0.30.21 - pixelmatch: 7.1.0 pngjs: 7.0.0 sirv: 3.0.2 - tinyrainbow: 3.0.3 - vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + tinyrainbow: 3.1.0 + vitest: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(@vitest/browser-playwright@4.1.0)(jsdom@28.1.0(@noble/hashes@2.0.1))(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) ws: 8.19.0 transitivePeerDependencies: - bufferutil @@ -10732,60 +10611,62 @@ snapshots: - utf-8-validate - vite - '@vitest/coverage-v8@4.0.18(@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18))(vitest@4.0.18)': + '@vitest/coverage-v8@4.1.0(@vitest/browser@4.1.0(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.1.0))(vitest@4.1.0)': dependencies: '@bcoe/v8-coverage': 1.0.2 - '@vitest/utils': 4.0.18 - ast-v8-to-istanbul: 0.3.11 + '@vitest/utils': 4.1.0 + ast-v8-to-istanbul: 1.0.0 istanbul-lib-coverage: 3.2.2 istanbul-lib-report: 3.0.1 
istanbul-reports: 3.2.0 magicast: 0.5.2 obug: 2.1.1 - std-env: 3.10.0 - tinyrainbow: 3.0.3 - vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + std-env: 4.0.0 + tinyrainbow: 3.1.0 + vitest: 4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(@vitest/browser-playwright@4.1.0)(jsdom@28.1.0(@noble/hashes@2.0.1))(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) optionalDependencies: - '@vitest/browser': 4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + '@vitest/browser': 4.1.0(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.1.0) - '@vitest/expect@4.0.18': + '@vitest/expect@4.1.0': dependencies: '@standard-schema/spec': 1.1.0 '@types/chai': 5.2.3 - '@vitest/spy': 4.0.18 - '@vitest/utils': 4.0.18 + '@vitest/spy': 4.1.0 + '@vitest/utils': 4.1.0 chai: 6.2.2 - tinyrainbow: 3.0.3 + tinyrainbow: 3.1.0 - '@vitest/mocker@4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))': + '@vitest/mocker@4.1.0(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))': dependencies: - '@vitest/spy': 4.0.18 + '@vitest/spy': 4.1.0 estree-walker: 3.0.3 magic-string: 0.30.21 optionalDependencies: - vite: 7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + vite: 8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2) - '@vitest/pretty-format@4.0.18': + '@vitest/pretty-format@4.1.0': dependencies: - tinyrainbow: 3.0.3 + tinyrainbow: 3.1.0 - '@vitest/runner@4.0.18': + '@vitest/runner@4.1.0': dependencies: - '@vitest/utils': 4.0.18 + '@vitest/utils': 4.1.0 pathe: 2.0.3 - '@vitest/snapshot@4.0.18': + '@vitest/snapshot@4.1.0': dependencies: - '@vitest/pretty-format': 4.0.18 + '@vitest/pretty-format': 4.1.0 + '@vitest/utils': 4.1.0 
magic-string: 0.30.21 pathe: 2.0.3 - '@vitest/spy@4.0.18': {} + '@vitest/spy@4.1.0': {} - '@vitest/utils@4.0.18': + '@vitest/utils@4.1.0': dependencies: - '@vitest/pretty-format': 4.0.18 - tinyrainbow: 3.0.3 + '@vitest/pretty-format': 4.1.0 + convert-source-map: 2.0.0 + tinyrainbow: 3.1.0 '@wasm-audio-decoders/common@9.0.7': dependencies: @@ -10817,7 +10698,7 @@ snapshots: async-mutex: 0.5.0 libsignal: '@whiskeysockets/libsignal-node@https://codeload.github.com/whiskeysockets/libsignal-node/tar.gz/1c30d7d7e76a3b0aa120b04dc6a26f5a12dccf67' lru-cache: 11.2.6 - music-metadata: 11.12.1 + music-metadata: 11.12.3 p-queue: 9.1.0 pino: 9.14.0 protobufjs: 7.5.4 @@ -10860,13 +10741,14 @@ snapshots: acorn@8.16.0: {} - acpx@0.1.15(zod@4.3.6): + acpx@0.3.0(zod@4.3.6): dependencies: - '@agentclientprotocol/sdk': 0.14.1(zod@4.3.6) - commander: 13.1.0 + '@agentclientprotocol/sdk': 0.15.0(zod@4.3.6) + commander: 14.0.3 skillflag: 0.1.4 transitivePeerDependencies: - bare-abort-controller + - bare-buffer - react-native-b4a - zod @@ -10879,6 +10761,8 @@ snapshots: agent-base@7.1.4: {} + agent-base@8.0.0: {} + ajv-formats@3.0.1(ajv@8.18.0): optionalDependencies: ajv: 8.18.0 @@ -10961,7 +10845,7 @@ snapshots: dependencies: tslib: 2.8.1 - ast-v8-to-istanbul@0.3.11: + ast-v8-to-istanbul@1.0.0: dependencies: '@jridgewell/trace-mapping': 0.3.31 estree-walker: 3.0.3 @@ -11011,6 +10895,14 @@ snapshots: transitivePeerDependencies: - debug + axios@1.13.6: + dependencies: + follow-redirects: 1.15.11 + form-data: 2.5.4 + proxy-from-env: 1.1.0 + transitivePeerDependencies: + - debug + b4a@1.8.0: {} babel-walk@3.0.0-canary-5: @@ -11023,6 +10915,37 @@ snapshots: bare-events@2.8.2: {} + bare-fs@4.5.5: + dependencies: + bare-events: 2.8.2 + bare-path: 3.0.0 + bare-stream: 2.8.1(bare-events@2.8.2) + bare-url: 2.3.2 + fast-fifo: 1.3.2 + transitivePeerDependencies: + - bare-abort-controller + - react-native-b4a + + bare-os@3.7.1: {} + + bare-path@3.0.0: + dependencies: + bare-os: 3.7.1 + + 
bare-stream@2.8.1(bare-events@2.8.2): + dependencies: + streamx: 2.23.0 + teex: 1.0.1 + optionalDependencies: + bare-events: 2.8.2 + transitivePeerDependencies: + - bare-abort-controller + - react-native-b4a + + bare-url@2.3.2: + dependencies: + bare-path: 3.0.0 + base64-js@1.5.1: {} basic-auth@2.0.1: @@ -11037,6 +10960,10 @@ snapshots: before-after-hook@4.0.0: {} + bidi-js@1.0.3: + dependencies: + require-from-string: 2.0.2 + big-integer@1.6.52: {} bignumber.js@9.3.1: {} @@ -11110,7 +11037,7 @@ snapshots: bun-types@1.3.9: dependencies: - '@types/node': 25.3.5 + '@types/node': 25.5.0 optional: true bytes@3.1.2: {} @@ -11214,7 +11141,7 @@ snapshots: node-api-headers: 1.8.0 rc: 1.2.8 semver: 7.7.4 - tar: 7.5.10 + tar: 7.5.11 url-join: 4.0.1 which: 6.0.1 yargs: 17.7.2 @@ -11257,8 +11184,6 @@ snapshots: commander@10.0.1: {} - commander@13.1.0: {} - commander@14.0.3: {} commander@5.1.0: {} @@ -11279,6 +11204,8 @@ snapshots: content-type@1.0.5: {} + convert-source-map@2.0.0: {} + cookie-signature@1.0.7: {} cookie-signature@1.2.2: {} @@ -11289,6 +11216,11 @@ snapshots: core-util-is@1.0.3: {} + cors@2.8.6: + dependencies: + object-assign: 4.1.1 + vary: 1.1.2 + croner@10.0.1: {} cross-spawn@7.0.6: @@ -11307,10 +11239,22 @@ snapshots: domutils: 3.2.2 nth-check: 2.1.1 + css-tree@3.2.1: + dependencies: + mdn-data: 2.27.1 + source-map-js: 1.2.1 + css-what@6.2.2: {} cssom@0.5.0: {} + cssstyle@6.2.0: + dependencies: + '@asamuzakjp/css-color': 5.0.1 + '@csstools/css-syntax-patches-for-csstree': 1.1.0 + css-tree: 3.2.1 + lru-cache: 11.2.6 + curve25519-js@0.0.4: {} dashdash@1.14.1: @@ -11321,6 +11265,13 @@ snapshots: data-uri-to-buffer@6.0.2: {} + data-urls@7.0.0(@noble/hashes@2.0.1): + dependencies: + whatwg-mimetype: 5.0.0 + whatwg-url: 16.0.1(@noble/hashes@2.0.1) + transitivePeerDependencies: + - '@noble/hashes' + date-fns@3.6.0: {} debug@2.6.9: @@ -11331,6 +11282,8 @@ snapshots: dependencies: ms: 2.1.3 + decimal.js@10.6.0: {} + deep-extend@0.6.0: {} deepmerge@4.3.1: {} @@ 
-11364,9 +11317,7 @@ snapshots: discord-api-types@0.38.37: {} - discord-api-types@0.38.40: {} - - discord-api-types@0.38.41: {} + discord-api-types@0.38.42: {} doctypes@1.1.0: {} @@ -11382,7 +11333,7 @@ snapshots: dependencies: domelementtype: 2.3.0 - dompurify@3.3.2: + dompurify@3.3.3: optionalDependencies: '@types/trusted-types': 2.0.7 @@ -11431,6 +11382,8 @@ snapshots: entities@4.5.0: {} + entities@6.0.1: {} + entities@7.0.1: {} env-var@7.5.0: {} @@ -11439,7 +11392,7 @@ snapshots: es-errors@1.3.0: {} - es-module-lexer@1.7.0: {} + es-module-lexer@2.0.0: {} es-object-atoms@1.1.1: dependencies: @@ -11519,6 +11472,12 @@ snapshots: transitivePeerDependencies: - bare-abort-controller + eventsource-parser@3.0.6: {} + + eventsource@3.0.7: + dependencies: + eventsource-parser: 3.0.6 + execa@4.1.0: dependencies: cross-spawn: 7.0.6 @@ -11535,6 +11494,11 @@ snapshots: exponential-backoff@3.1.3: {} + express-rate-limit@8.3.1(express@5.2.1): + dependencies: + express: 5.2.1 + ip-address: 10.1.0 + express@4.22.1: dependencies: accepts: 1.3.8 @@ -11655,7 +11619,7 @@ snapshots: node-domexception: '@nolyfill/domexception@1.0.28' web-streams-polyfill: 3.3.3 - file-type@21.3.0: + file-type@21.3.2: dependencies: '@tokenizer/inflate': 0.4.1 strtok3: 10.3.4 @@ -11876,16 +11840,6 @@ snapshots: graceful-fs@4.2.11: {} - grammy@1.41.0: - dependencies: - '@grammyjs/types': 3.25.0 - abort-controller: 3.0.0 - debug: 4.4.3 - node-fetch: 2.7.0 - transitivePeerDependencies: - - encoding - - supports-color - grammy@1.41.1: dependencies: '@grammyjs/types': 3.25.0 @@ -11942,8 +11896,7 @@ snapshots: highlight.js@10.7.3: {} - hono@4.12.5: - optional: true + hono@4.12.7: {} hookable@6.0.1: {} @@ -11953,6 +11906,12 @@ snapshots: dependencies: lru-cache: 11.2.6 + html-encoding-sniffer@6.0.0(@noble/hashes@2.0.1): + dependencies: + '@exodus/bytes': 1.15.0(@noble/hashes@2.0.1) + transitivePeerDependencies: + - '@noble/hashes' + html-escaper@2.0.2: {} html-escaper@3.0.3: {} @@ -12019,6 +11978,13 @@ 
snapshots: transitivePeerDependencies: - supports-color + https-proxy-agent@8.0.0: + dependencies: + agent-base: 8.0.0 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + human-signals@1.1.1: {} iconv-lite@0.4.24: @@ -12126,6 +12092,8 @@ snapshots: is-plain-object@5.0.0: {} + is-potential-custom-element-name@1.0.1: {} + is-promise@2.2.2: {} is-promise@4.0.0: {} @@ -12174,6 +12142,8 @@ snapshots: jose@4.15.9: {} + jose@6.2.1: {} + js-stringify@1.0.2: {} js-tokens@10.0.0: {} @@ -12199,6 +12169,33 @@ snapshots: gitignore-to-glob: 0.3.0 jscpd-sarif-reporter: 4.0.6 + jsdom@28.1.0(@noble/hashes@2.0.1): + dependencies: + '@acemir/cssom': 0.9.31 + '@asamuzakjp/dom-selector': 6.8.1 + '@bramus/specificity': 2.4.2 + '@exodus/bytes': 1.15.0(@noble/hashes@2.0.1) + cssstyle: 6.2.0 + data-urls: 7.0.0(@noble/hashes@2.0.1) + decimal.js: 10.6.0 + html-encoding-sniffer: 6.0.0(@noble/hashes@2.0.1) + http-proxy-agent: 7.0.2 + https-proxy-agent: 7.0.6 + is-potential-custom-element-name: 1.0.1 + parse5: 8.0.0 + saxes: 6.0.0 + symbol-tree: 3.2.4 + tough-cookie: 4.1.3 + undici: 7.24.1 + w3c-xmlserializer: 5.0.0 + webidl-conversions: 8.0.1 + whatwg-mimetype: 5.0.0 + whatwg-url: 16.0.1(@noble/hashes@2.0.1) + xml-name-validator: 5.0.0 + transitivePeerDependencies: + - '@noble/hashes' + - supports-color + jsesc@3.1.0: {} json-bigint@1.0.0: @@ -12214,6 +12211,8 @@ snapshots: json-schema-traverse@1.0.0: {} + json-schema-typed@8.0.2: {} + json-schema@0.4.0: {} json-stringify-safe@5.0.1: {} @@ -12287,7 +12286,8 @@ snapshots: klona@2.0.6: {} - koffi@2.15.1: {} + koffi@2.15.1: + optional: true leac@0.6.0: {} @@ -12301,55 +12301,54 @@ snapshots: lifecycle-utils@3.1.1: {} - lightningcss-android-arm64@1.30.2: + lightningcss-android-arm64@1.32.0: optional: true - lightningcss-darwin-arm64@1.30.2: + lightningcss-darwin-arm64@1.32.0: optional: true - lightningcss-darwin-x64@1.30.2: + lightningcss-darwin-x64@1.32.0: optional: true - lightningcss-freebsd-x64@1.30.2: + 
lightningcss-freebsd-x64@1.32.0: optional: true - lightningcss-linux-arm-gnueabihf@1.30.2: + lightningcss-linux-arm-gnueabihf@1.32.0: optional: true - lightningcss-linux-arm64-gnu@1.30.2: + lightningcss-linux-arm64-gnu@1.32.0: optional: true - lightningcss-linux-arm64-musl@1.30.2: + lightningcss-linux-arm64-musl@1.32.0: optional: true - lightningcss-linux-x64-gnu@1.30.2: + lightningcss-linux-x64-gnu@1.32.0: optional: true - lightningcss-linux-x64-musl@1.30.2: + lightningcss-linux-x64-musl@1.32.0: optional: true - lightningcss-win32-arm64-msvc@1.30.2: + lightningcss-win32-arm64-msvc@1.32.0: optional: true - lightningcss-win32-x64-msvc@1.30.2: + lightningcss-win32-x64-msvc@1.32.0: optional: true - lightningcss@1.30.2: + lightningcss@1.32.0: dependencies: detect-libc: 2.1.2 optionalDependencies: - lightningcss-android-arm64: 1.30.2 - lightningcss-darwin-arm64: 1.30.2 - lightningcss-darwin-x64: 1.30.2 - lightningcss-freebsd-x64: 1.30.2 - lightningcss-linux-arm-gnueabihf: 1.30.2 - lightningcss-linux-arm64-gnu: 1.30.2 - lightningcss-linux-arm64-musl: 1.30.2 - lightningcss-linux-x64-gnu: 1.30.2 - lightningcss-linux-x64-musl: 1.30.2 - lightningcss-win32-arm64-msvc: 1.30.2 - lightningcss-win32-x64-msvc: 1.30.2 - optional: true + lightningcss-android-arm64: 1.32.0 + lightningcss-darwin-arm64: 1.32.0 + lightningcss-darwin-x64: 1.32.0 + lightningcss-freebsd-x64: 1.32.0 + lightningcss-linux-arm-gnueabihf: 1.32.0 + lightningcss-linux-arm64-gnu: 1.32.0 + lightningcss-linux-arm64-musl: 1.32.0 + lightningcss-linux-x64-gnu: 1.32.0 + lightningcss-linux-x64-musl: 1.32.0 + lightningcss-win32-arm64-msvc: 1.32.0 + lightningcss-win32-x64-msvc: 1.32.0 limiter@1.1.5: {} @@ -12497,6 +12496,8 @@ snapshots: unist-util-visit: 5.1.0 vfile: 6.0.3 + mdn-data@2.27.1: {} + mdurl@2.0.0: {} media-typer@0.3.0: {} @@ -12592,13 +12593,13 @@ snapshots: ms@2.1.3: {} - music-metadata@11.12.1: + music-metadata@11.12.3: dependencies: - '@borewit/text-codec': 0.2.1 + '@borewit/text-codec': 0.2.2 
'@tokenizer/token': 0.3.0 content-type: 1.0.5 debug: 4.4.3 - file-type: 21.3.0 + file-type: 21.3.2 media-typer: 1.1.0 strtok3: 10.3.4 token-types: 6.1.2 @@ -12806,44 +12807,39 @@ snapshots: regex: 6.1.0 regex-recursion: 6.0.2 - openai@6.10.0(ws@8.19.0)(zod@4.3.6): - optionalDependencies: - ws: 8.19.0 - zod: 4.3.6 - openai@6.26.0(ws@8.19.0)(zod@4.3.6): optionalDependencies: ws: 8.19.0 zod: 4.3.6 - openai@6.27.0(ws@8.19.0)(zod@4.3.6): + openai@6.29.0(ws@8.19.0)(zod@4.3.6): optionalDependencies: ws: 8.19.0 zod: 4.3.6 - openclaw@2026.3.2(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(hono@4.12.5)(node-llama-cpp@3.16.2(typescript@5.9.3)): + openclaw@2026.3.13(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)): dependencies: - '@agentclientprotocol/sdk': 0.14.1(zod@4.3.6) - '@aws-sdk/client-bedrock': 3.1000.0 - '@buape/carbon': 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.5)(opusscript@0.1.1) - '@clack/prompts': 1.0.1 - '@discordjs/voice': 0.19.0(@discordjs/opus@0.10.0)(opusscript@0.1.1) - '@grammyjs/runner': 2.0.3(grammy@1.41.0) - '@grammyjs/transformer-throttler': 1.2.1(grammy@1.41.0) + '@agentclientprotocol/sdk': 0.16.1(zod@4.3.6) + '@aws-sdk/client-bedrock': 3.1009.0 + '@buape/carbon': 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.7)(opusscript@0.1.1) + '@clack/prompts': 1.1.0 + '@discordjs/voice': 0.19.1(@discordjs/opus@0.10.0)(opusscript@0.1.1) + '@grammyjs/runner': 2.0.3(grammy@1.41.1) + '@grammyjs/transformer-throttler': 1.2.1(grammy@1.41.1) '@homebridge/ciao': 1.3.5 '@larksuiteoapi/node-sdk': 1.59.0 '@line/bot-sdk': 10.6.0 '@lydell/node-pty': 1.2.0-beta.3 - '@mariozechner/pi-agent-core': 0.55.3(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-ai': 0.55.3(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-coding-agent': 0.55.3(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-tui': 0.55.3 + '@mariozechner/pi-agent-core': 
0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-coding-agent': 0.58.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-tui': 0.58.0 + '@modelcontextprotocol/sdk': 1.27.1(zod@4.3.6) '@mozilla/readability': 0.6.0 '@napi-rs/canvas': 0.1.95 '@sinclair/typebox': 0.34.48 '@slack/bolt': 4.6.0(@types/express@5.0.6) - '@slack/web-api': 7.14.1 - '@snazzah/davey': 0.1.9 + '@slack/web-api': 7.15.0 '@whiskeysockets/baileys': 7.0.0-rc.9(audio-decode@2.2.3)(sharp@0.34.5) ajv: 8.18.0 chalk: 5.6.2 @@ -12851,14 +12847,13 @@ snapshots: cli-highlight: 2.1.11 commander: 14.0.3 croner: 10.0.1 - discord-api-types: 0.38.40 + discord-api-types: 0.38.42 dotenv: 17.3.1 express: 5.2.1 - file-type: 21.3.0 - gaxios: 7.1.3 - google-auth-library: 10.6.1 - grammy: 1.41.0 - https-proxy-agent: 7.0.6 + file-type: 21.3.2 + grammy: 1.41.1 + hono: 4.12.7 + https-proxy-agent: 8.0.0 ipaddr.js: 2.3.0 jiti: 2.6.1 json5: 2.2.3 @@ -12866,9 +12861,7 @@ snapshots: linkedom: 0.18.12 long: 5.3.2 markdown-it: 14.1.1 - node-domexception: '@nolyfill/domexception@1.0.28' node-edge-tts: 1.2.10 - node-llama-cpp: 3.16.2(typescript@5.9.3) opusscript: 0.1.1 osc-progress: 0.3.0 pdfjs-dist: 5.5.207 @@ -12876,17 +12869,17 @@ snapshots: qrcode-terminal: 0.12.0 sharp: 0.34.5 sqlite-vec: 0.1.7-alpha.2 - strip-ansi: 7.2.0 - tar: 7.5.10 + tar: 7.5.11 tslog: 4.10.2 - undici: 7.22.0 + undici: 7.24.1 ws: 8.19.0 yaml: 2.8.2 zod: 4.3.6 optionalDependencies: - '@discordjs/opus': 0.10.0 + node-llama-cpp: 3.16.2(typescript@5.9.3) transitivePeerDependencies: - - '@modelcontextprotocol/sdk' + - '@cfworker/json-schema' + - '@discordjs/opus' - '@types/express' - audio-decode - aws-crt @@ -12895,7 +12888,6 @@ snapshots: - debug - encoding - ffmpeg-static - - hono - jimp - link-preview-js - node-opus @@ -12922,29 +12914,29 @@ snapshots: osc-progress@0.3.0: {} - 
oxfmt@0.36.0: + oxfmt@0.40.0: dependencies: tinypool: 2.1.0 optionalDependencies: - '@oxfmt/binding-android-arm-eabi': 0.36.0 - '@oxfmt/binding-android-arm64': 0.36.0 - '@oxfmt/binding-darwin-arm64': 0.36.0 - '@oxfmt/binding-darwin-x64': 0.36.0 - '@oxfmt/binding-freebsd-x64': 0.36.0 - '@oxfmt/binding-linux-arm-gnueabihf': 0.36.0 - '@oxfmt/binding-linux-arm-musleabihf': 0.36.0 - '@oxfmt/binding-linux-arm64-gnu': 0.36.0 - '@oxfmt/binding-linux-arm64-musl': 0.36.0 - '@oxfmt/binding-linux-ppc64-gnu': 0.36.0 - '@oxfmt/binding-linux-riscv64-gnu': 0.36.0 - '@oxfmt/binding-linux-riscv64-musl': 0.36.0 - '@oxfmt/binding-linux-s390x-gnu': 0.36.0 - '@oxfmt/binding-linux-x64-gnu': 0.36.0 - '@oxfmt/binding-linux-x64-musl': 0.36.0 - '@oxfmt/binding-openharmony-arm64': 0.36.0 - '@oxfmt/binding-win32-arm64-msvc': 0.36.0 - '@oxfmt/binding-win32-ia32-msvc': 0.36.0 - '@oxfmt/binding-win32-x64-msvc': 0.36.0 + '@oxfmt/binding-android-arm-eabi': 0.40.0 + '@oxfmt/binding-android-arm64': 0.40.0 + '@oxfmt/binding-darwin-arm64': 0.40.0 + '@oxfmt/binding-darwin-x64': 0.40.0 + '@oxfmt/binding-freebsd-x64': 0.40.0 + '@oxfmt/binding-linux-arm-gnueabihf': 0.40.0 + '@oxfmt/binding-linux-arm-musleabihf': 0.40.0 + '@oxfmt/binding-linux-arm64-gnu': 0.40.0 + '@oxfmt/binding-linux-arm64-musl': 0.40.0 + '@oxfmt/binding-linux-ppc64-gnu': 0.40.0 + '@oxfmt/binding-linux-riscv64-gnu': 0.40.0 + '@oxfmt/binding-linux-riscv64-musl': 0.40.0 + '@oxfmt/binding-linux-s390x-gnu': 0.40.0 + '@oxfmt/binding-linux-x64-gnu': 0.40.0 + '@oxfmt/binding-linux-x64-musl': 0.40.0 + '@oxfmt/binding-openharmony-arm64': 0.40.0 + '@oxfmt/binding-win32-arm64-msvc': 0.40.0 + '@oxfmt/binding-win32-ia32-msvc': 0.40.0 + '@oxfmt/binding-win32-x64-msvc': 0.40.0 oxlint-tsgolint@0.16.0: optionalDependencies: @@ -12955,27 +12947,27 @@ snapshots: '@oxlint-tsgolint/win32-arm64': 0.16.0 '@oxlint-tsgolint/win32-x64': 0.16.0 - oxlint@1.51.0(oxlint-tsgolint@0.16.0): + oxlint@1.55.0(oxlint-tsgolint@0.16.0): optionalDependencies: - 
'@oxlint/binding-android-arm-eabi': 1.51.0 - '@oxlint/binding-android-arm64': 1.51.0 - '@oxlint/binding-darwin-arm64': 1.51.0 - '@oxlint/binding-darwin-x64': 1.51.0 - '@oxlint/binding-freebsd-x64': 1.51.0 - '@oxlint/binding-linux-arm-gnueabihf': 1.51.0 - '@oxlint/binding-linux-arm-musleabihf': 1.51.0 - '@oxlint/binding-linux-arm64-gnu': 1.51.0 - '@oxlint/binding-linux-arm64-musl': 1.51.0 - '@oxlint/binding-linux-ppc64-gnu': 1.51.0 - '@oxlint/binding-linux-riscv64-gnu': 1.51.0 - '@oxlint/binding-linux-riscv64-musl': 1.51.0 - '@oxlint/binding-linux-s390x-gnu': 1.51.0 - '@oxlint/binding-linux-x64-gnu': 1.51.0 - '@oxlint/binding-linux-x64-musl': 1.51.0 - '@oxlint/binding-openharmony-arm64': 1.51.0 - '@oxlint/binding-win32-arm64-msvc': 1.51.0 - '@oxlint/binding-win32-ia32-msvc': 1.51.0 - '@oxlint/binding-win32-x64-msvc': 1.51.0 + '@oxlint/binding-android-arm-eabi': 1.55.0 + '@oxlint/binding-android-arm64': 1.55.0 + '@oxlint/binding-darwin-arm64': 1.55.0 + '@oxlint/binding-darwin-x64': 1.55.0 + '@oxlint/binding-freebsd-x64': 1.55.0 + '@oxlint/binding-linux-arm-gnueabihf': 1.55.0 + '@oxlint/binding-linux-arm-musleabihf': 1.55.0 + '@oxlint/binding-linux-arm64-gnu': 1.55.0 + '@oxlint/binding-linux-arm64-musl': 1.55.0 + '@oxlint/binding-linux-ppc64-gnu': 1.55.0 + '@oxlint/binding-linux-riscv64-gnu': 1.55.0 + '@oxlint/binding-linux-riscv64-musl': 1.55.0 + '@oxlint/binding-linux-s390x-gnu': 1.55.0 + '@oxlint/binding-linux-x64-gnu': 1.55.0 + '@oxlint/binding-linux-x64-musl': 1.55.0 + '@oxlint/binding-openharmony-arm64': 1.55.0 + '@oxlint/binding-win32-arm64-msvc': 1.55.0 + '@oxlint/binding-win32-ia32-msvc': 1.55.0 + '@oxlint/binding-win32-x64-msvc': 1.55.0 oxlint-tsgolint: 0.16.0 p-finally@1.0.0: {} @@ -13039,6 +13031,10 @@ snapshots: parse5@6.0.1: {} + parse5@8.0.0: + dependencies: + entities: 6.0.1 + parseley@0.12.1: dependencies: leac: 0.6.0 @@ -13110,9 +13106,7 @@ snapshots: sonic-boom: 4.2.1 thread-stream: 3.1.0 - pixelmatch@7.1.0: - dependencies: - pngjs: 7.0.0 + 
pkce-challenge@5.0.1: {} playwright-core@1.58.2: {} @@ -13130,6 +13124,12 @@ snapshots: picocolors: 1.1.1 source-map-js: 1.2.1 + postcss@8.5.8: + dependencies: + nanoid: 3.3.11 + picocolors: 1.1.1 + source-map-js: 1.2.1 + postgres@3.4.8: {} pretty-bytes@6.1.1: {} @@ -13191,7 +13191,7 @@ snapshots: '@protobufjs/path': 1.1.2 '@protobufjs/pool': 1.1.0 '@protobufjs/utf8': 1.1.0 - '@types/node': 25.3.5 + '@types/node': 25.5.0 long: 5.3.2 proxy-addr@2.0.7: @@ -13430,7 +13430,7 @@ snapshots: dependencies: glob: 10.5.0 - rolldown-plugin-dts@0.22.4(@typescript/native-preview@7.0.0-dev.20260308.1)(rolldown@1.0.0-rc.7)(typescript@5.9.3): + rolldown-plugin-dts@0.22.5(@typescript/native-preview@7.0.0-dev.20260313.1)(rolldown@1.0.0-rc.9)(typescript@5.9.3): dependencies: '@babel/generator': 8.0.0-rc.2 '@babel/helper-validator-identifier': 8.0.0-rc.2 @@ -13441,64 +13441,33 @@ snapshots: dts-resolver: 2.1.3 get-tsconfig: 4.13.6 obug: 2.1.1 - rolldown: 1.0.0-rc.7 + rolldown: 1.0.0-rc.9 optionalDependencies: - '@typescript/native-preview': 7.0.0-dev.20260308.1 + '@typescript/native-preview': 7.0.0-dev.20260313.1 typescript: 5.9.3 transitivePeerDependencies: - oxc-resolver - rolldown@1.0.0-rc.7: + rolldown@1.0.0-rc.9: dependencies: '@oxc-project/types': 0.115.0 - '@rolldown/pluginutils': 1.0.0-rc.7 + '@rolldown/pluginutils': 1.0.0-rc.9 optionalDependencies: - '@rolldown/binding-android-arm64': 1.0.0-rc.7 - '@rolldown/binding-darwin-arm64': 1.0.0-rc.7 - '@rolldown/binding-darwin-x64': 1.0.0-rc.7 - '@rolldown/binding-freebsd-x64': 1.0.0-rc.7 - '@rolldown/binding-linux-arm-gnueabihf': 1.0.0-rc.7 - '@rolldown/binding-linux-arm64-gnu': 1.0.0-rc.7 - '@rolldown/binding-linux-arm64-musl': 1.0.0-rc.7 - '@rolldown/binding-linux-ppc64-gnu': 1.0.0-rc.7 - '@rolldown/binding-linux-s390x-gnu': 1.0.0-rc.7 - '@rolldown/binding-linux-x64-gnu': 1.0.0-rc.7 - '@rolldown/binding-linux-x64-musl': 1.0.0-rc.7 - '@rolldown/binding-openharmony-arm64': 1.0.0-rc.7 - '@rolldown/binding-wasm32-wasi': 1.0.0-rc.7 - 
'@rolldown/binding-win32-arm64-msvc': 1.0.0-rc.7 - '@rolldown/binding-win32-x64-msvc': 1.0.0-rc.7 - - rollup@4.59.0: - dependencies: - '@types/estree': 1.0.8 - optionalDependencies: - '@rollup/rollup-android-arm-eabi': 4.59.0 - '@rollup/rollup-android-arm64': 4.59.0 - '@rollup/rollup-darwin-arm64': 4.59.0 - '@rollup/rollup-darwin-x64': 4.59.0 - '@rollup/rollup-freebsd-arm64': 4.59.0 - '@rollup/rollup-freebsd-x64': 4.59.0 - '@rollup/rollup-linux-arm-gnueabihf': 4.59.0 - '@rollup/rollup-linux-arm-musleabihf': 4.59.0 - '@rollup/rollup-linux-arm64-gnu': 4.59.0 - '@rollup/rollup-linux-arm64-musl': 4.59.0 - '@rollup/rollup-linux-loong64-gnu': 4.59.0 - '@rollup/rollup-linux-loong64-musl': 4.59.0 - '@rollup/rollup-linux-ppc64-gnu': 4.59.0 - '@rollup/rollup-linux-ppc64-musl': 4.59.0 - '@rollup/rollup-linux-riscv64-gnu': 4.59.0 - '@rollup/rollup-linux-riscv64-musl': 4.59.0 - '@rollup/rollup-linux-s390x-gnu': 4.59.0 - '@rollup/rollup-linux-x64-gnu': 4.59.0 - '@rollup/rollup-linux-x64-musl': 4.59.0 - '@rollup/rollup-openbsd-x64': 4.59.0 - '@rollup/rollup-openharmony-arm64': 4.59.0 - '@rollup/rollup-win32-arm64-msvc': 4.59.0 - '@rollup/rollup-win32-ia32-msvc': 4.59.0 - '@rollup/rollup-win32-x64-gnu': 4.59.0 - '@rollup/rollup-win32-x64-msvc': 4.59.0 - fsevents: 2.3.3 + '@rolldown/binding-android-arm64': 1.0.0-rc.9 + '@rolldown/binding-darwin-arm64': 1.0.0-rc.9 + '@rolldown/binding-darwin-x64': 1.0.0-rc.9 + '@rolldown/binding-freebsd-x64': 1.0.0-rc.9 + '@rolldown/binding-linux-arm-gnueabihf': 1.0.0-rc.9 + '@rolldown/binding-linux-arm64-gnu': 1.0.0-rc.9 + '@rolldown/binding-linux-arm64-musl': 1.0.0-rc.9 + '@rolldown/binding-linux-ppc64-gnu': 1.0.0-rc.9 + '@rolldown/binding-linux-s390x-gnu': 1.0.0-rc.9 + '@rolldown/binding-linux-x64-gnu': 1.0.0-rc.9 + '@rolldown/binding-linux-x64-musl': 1.0.0-rc.9 + '@rolldown/binding-openharmony-arm64': 1.0.0-rc.9 + '@rolldown/binding-wasm32-wasi': 1.0.0-rc.9 + '@rolldown/binding-win32-arm64-msvc': 1.0.0-rc.9 + '@rolldown/binding-win32-x64-msvc': 
1.0.0-rc.9 router@2.2.0: dependencies: @@ -13531,6 +13500,10 @@ snapshots: parse-srcset: 1.0.2 postcss: 8.5.6 + saxes@6.0.0: + dependencies: + xmlchars: 2.2.0 + scheduler@0.27.0: {} selderee@0.11.0: @@ -13711,9 +13684,10 @@ snapshots: skillflag@0.1.4: dependencies: '@clack/prompts': 1.1.0 - tar-stream: 3.1.7 + tar-stream: 3.1.8 transitivePeerDependencies: - bare-abort-controller + - bare-buffer - react-native-b4a sleep-promise@9.1.0: {} @@ -13805,6 +13779,8 @@ snapshots: std-env@3.10.0: {} + std-env@4.0.0: {} + stdin-discarder@0.3.1: {} stdout-update@4.0.1: @@ -13892,21 +13868,25 @@ snapshots: supports-preserve-symlinks-flag@1.0.0: {} + symbol-tree@3.2.4: {} + table-layout@4.1.1: dependencies: array-back: 6.2.2 wordwrapjs: 5.1.1 - tar-stream@3.1.7: + tar-stream@3.1.8: dependencies: b4a: 1.8.0 + bare-fs: 4.5.5 fast-fifo: 1.3.2 streamx: 2.23.0 transitivePeerDependencies: - bare-abort-controller + - bare-buffer - react-native-b4a - tar@7.5.10: + tar@7.5.11: dependencies: '@isaacs/fs-minipass': 4.0.1 chownr: 3.0.0 @@ -13914,6 +13894,13 @@ snapshots: minizlib: 3.1.0 yallist: 5.0.0 + teex@1.0.1: + dependencies: + streamx: 2.23.0 + transitivePeerDependencies: + - bare-abort-controller + - react-native-b4a + text-decoder@1.2.7: dependencies: b4a: 1.8.0 @@ -13943,7 +13930,7 @@ snapshots: tinypool@2.1.0: {} - tinyrainbow@3.0.3: {} + tinyrainbow@3.1.0: {} to-regex-range@5.0.1: dependencies: @@ -13957,7 +13944,7 @@ snapshots: token-types@6.1.2: dependencies: - '@borewit/text-codec': 0.2.1 + '@borewit/text-codec': 0.2.2 '@tokenizer/token': 0.3.0 ieee754: 1.2.1 @@ -13972,13 +13959,17 @@ snapshots: tr46@0.0.3: {} + tr46@6.0.0: + dependencies: + punycode: 2.3.1 + tree-kill@1.2.2: {} trim-lines@3.0.1: {} ts-algebra@2.0.0: {} - tsdown@0.21.0(@typescript/native-preview@7.0.0-dev.20260308.1)(typescript@5.9.3): + tsdown@0.21.2(@typescript/native-preview@7.0.0-dev.20260313.1)(typescript@5.9.3): dependencies: ansis: 4.2.0 cac: 7.0.0 @@ -13988,14 +13979,14 @@ snapshots: 
import-without-cache: 0.2.5 obug: 2.1.1 picomatch: 4.0.3 - rolldown: 1.0.0-rc.7 - rolldown-plugin-dts: 0.22.4(@typescript/native-preview@7.0.0-dev.20260308.1)(rolldown@1.0.0-rc.7)(typescript@5.9.3) + rolldown: 1.0.0-rc.9 + rolldown-plugin-dts: 0.22.5(@typescript/native-preview@7.0.0-dev.20260313.1)(rolldown@1.0.0-rc.9)(typescript@5.9.3) semver: 7.7.4 tinyexec: 1.0.2 tinyglobby: 0.2.15 tree-kill: 1.2.2 unconfig-core: 7.5.0 - unrun: 0.2.30 + unrun: 0.2.32 optionalDependencies: typescript: 5.9.3 transitivePeerDependencies: @@ -14058,7 +14049,7 @@ snapshots: undici-types@7.18.2: {} - undici@7.22.0: {} + undici@7.24.1: {} unist-util-is@6.0.1: dependencies: @@ -14093,9 +14084,9 @@ snapshots: unpipe@1.0.0: {} - unrun@0.2.30: + unrun@0.2.32: dependencies: - rolldown: 1.0.0-rc.7 + rolldown: 1.0.0-rc.9 url-join@4.0.1: {} @@ -14134,67 +14125,74 @@ snapshots: '@types/unist': 3.0.3 vfile-message: 4.0.3 - vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2): + vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2): dependencies: - esbuild: 0.27.3 - fdir: 6.5.0(picomatch@4.0.3) + '@oxc-project/runtime': 0.115.0 + lightningcss: 1.32.0 picomatch: 4.0.3 - postcss: 8.5.6 - rollup: 4.59.0 + postcss: 8.5.8 + rolldown: 1.0.0-rc.9 tinyglobby: 0.2.15 optionalDependencies: - '@types/node': 25.3.5 + '@types/node': 25.5.0 + esbuild: 0.27.3 fsevents: 2.3.3 jiti: 2.6.1 - lightningcss: 1.30.2 tsx: 4.21.0 yaml: 2.8.2 - vitest@4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.3.5)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2): + vitest@4.1.0(@opentelemetry/api@1.9.0)(@types/node@25.5.0)(@vitest/browser-playwright@4.1.0)(jsdom@28.1.0(@noble/hashes@2.0.1))(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)): dependencies: - '@vitest/expect': 4.0.18 - '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) 
- '@vitest/pretty-format': 4.0.18 - '@vitest/runner': 4.0.18 - '@vitest/snapshot': 4.0.18 - '@vitest/spy': 4.0.18 - '@vitest/utils': 4.0.18 - es-module-lexer: 1.7.0 + '@vitest/expect': 4.1.0 + '@vitest/mocker': 4.1.0(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2)) + '@vitest/pretty-format': 4.1.0 + '@vitest/runner': 4.1.0 + '@vitest/snapshot': 4.1.0 + '@vitest/spy': 4.1.0 + '@vitest/utils': 4.1.0 + es-module-lexer: 2.0.0 expect-type: 1.3.0 magic-string: 0.30.21 obug: 2.1.1 pathe: 2.0.3 picomatch: 4.0.3 - std-env: 3.10.0 + std-env: 4.0.0 tinybench: 2.9.0 tinyexec: 1.0.2 tinyglobby: 0.2.15 - tinyrainbow: 3.0.3 - vite: 7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) + tinyrainbow: 3.1.0 + vite: 8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2) why-is-node-running: 2.3.0 optionalDependencies: '@opentelemetry/api': 1.9.0 - '@types/node': 25.3.5 - '@vitest/browser-playwright': 4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.3.5)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + '@types/node': 25.5.0 + '@vitest/browser-playwright': 4.1.0(playwright@1.58.2)(vite@8.0.0(@types/node@25.5.0)(esbuild@0.27.3)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.1.0) + jsdom: 28.1.0(@noble/hashes@2.0.1) transitivePeerDependencies: - - jiti - - less - - lightningcss - msw - - sass - - sass-embedded - - stylus - - sugarss - - terser - - tsx - - yaml void-elements@3.1.0: {} + w3c-xmlserializer@5.0.0: + dependencies: + xml-name-validator: 5.0.0 + web-streams-polyfill@3.3.3: {} webidl-conversions@3.0.1: {} + webidl-conversions@8.0.1: {} + + whatwg-mimetype@5.0.0: {} + + whatwg-url@16.0.1(@noble/hashes@2.0.1): + dependencies: + '@exodus/bytes': 1.15.0(@noble/hashes@2.0.1) + tr46: 6.0.0 + webidl-conversions: 8.0.1 + transitivePeerDependencies: + - '@noble/hashes' + whatwg-url@5.0.0: dependencies: tr46: 0.0.3 @@ -14245,6 +14243,10 @@ snapshots: ws@8.19.0: {} + 
xml-name-validator@5.0.0: {} + + xmlchars@2.2.0: {} + y18n@5.0.8: {} yallist@4.0.0: {} @@ -14298,18 +14300,12 @@ snapshots: - bufferutil - utf-8-validate - zod-to-json-schema@3.25.1(zod@3.25.76): - dependencies: - zod: 3.25.76 - zod-to-json-schema@3.25.1(zod@4.3.6): dependencies: zod: 4.3.6 zod@3.25.75: {} - zod@3.25.76: {} - zod@4.3.6: {} zwitch@2.0.4: {} diff --git a/scripts/bundle-a2ui.sh b/scripts/bundle-a2ui.sh index 3278e1d35a3..3888e4cf5cb 100755 --- a/scripts/bundle-a2ui.sh +++ b/scripts/bundle-a2ui.sh @@ -32,13 +32,13 @@ INPUT_PATHS=( ) compute_hash() { - ROOT_DIR="$ROOT_DIR" node --input-type=module - "${INPUT_PATHS[@]}" <<'NODE' + ROOT_DIR="$ROOT_DIR" node --input-type=module --eval ' import { createHash } from "node:crypto"; import { promises as fs } from "node:fs"; import path from "node:path"; const rootDir = process.env.ROOT_DIR ?? process.cwd(); -const inputs = process.argv.slice(2); +const inputs = process.argv.slice(1); const files = []; async function walk(entryPath) { @@ -73,7 +73,7 @@ for (const filePath of files) { } process.stdout.write(hash.digest("hex")); -NODE +' "${INPUT_PATHS[@]}" } current_hash="$(compute_hash)" @@ -86,7 +86,7 @@ if [[ -f "$HASH_FILE" ]]; then fi pnpm -s exec tsc -p "$A2UI_RENDERER_DIR/tsconfig.json" -if command -v rolldown >/dev/null 2>&1; then +if command -v rolldown >/dev/null 2>&1 && rolldown --version >/dev/null 2>&1; then rolldown -c "$A2UI_APP_DIR/rolldown.config.mjs" else pnpm -s dlx rolldown -c "$A2UI_APP_DIR/rolldown.config.mjs" diff --git a/scripts/ci-changed-scope.mjs b/scripts/ci-changed-scope.mjs index a4018b30a2c..c5ed28319b1 100644 --- a/scripts/ci-changed-scope.mjs +++ b/scripts/ci-changed-scope.mjs @@ -5,6 +5,7 @@ import { appendFileSync } from "node:fs"; const DOCS_PATH_RE = /^(docs\/|.*\.mdx?$)/; const SKILLS_PYTHON_SCOPE_RE = /^skills\//; +const CI_WORKFLOW_SCOPE_RE = /^\.github\/workflows\/ci\.yml$/; const MACOS_PROTOCOL_GEN_RE = 
/^(apps\/macos\/Sources\/OpenClawProtocol\/|apps\/shared\/OpenClawKit\/Sources\/OpenClawProtocol\/)/; const MACOS_NATIVE_RE = /^(apps\/macos\/|apps\/ios\/|apps\/shared\/|Swabble\/)/; @@ -55,6 +56,12 @@ export function detectChangedScope(changedPaths) { runSkillsPython = true; } + if (CI_WORKFLOW_SCOPE_RE.test(path)) { + runMacos = true; + runAndroid = true; + runSkillsPython = true; + } + if (!MACOS_PROTOCOL_GEN_RE.test(path) && MACOS_NATIVE_RE.test(path)) { runMacos = true; } diff --git a/scripts/docker/cleanup-smoke/Dockerfile b/scripts/docker/cleanup-smoke/Dockerfile index e67a4b1fe87..07a2334aa41 100644 --- a/scripts/docker/cleanup-smoke/Dockerfile +++ b/scripts/docker/cleanup-smoke/Dockerfile @@ -1,10 +1,11 @@ # syntax=docker/dockerfile:1.7 -FROM node:22-bookworm-slim@sha256:3cfe526ec8dd62013b8843e8e5d4877e297b886e5aace4a59fec25dc20736e45 +FROM node:24-bookworm-slim@sha256:b4687aef2571c632a1953695ce4d61d6462a7eda471fe6e272eebf0418f276ba RUN --mount=type=cache,id=openclaw-cleanup-smoke-apt-cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,id=openclaw-cleanup-smoke-apt-lists,target=/var/lib/apt,sharing=locked \ apt-get update \ + && DEBIAN_FRONTEND=noninteractive apt-get upgrade -y --no-install-recommends \ && apt-get install -y --no-install-recommends \ bash \ ca-certificates \ diff --git a/scripts/docker/install-sh-e2e/Dockerfile b/scripts/docker/install-sh-e2e/Dockerfile index 05b77f45197..e8069bf1e77 100644 --- a/scripts/docker/install-sh-e2e/Dockerfile +++ b/scripts/docker/install-sh-e2e/Dockerfile @@ -1,10 +1,11 @@ # syntax=docker/dockerfile:1.7 -FROM node:22-bookworm-slim@sha256:3cfe526ec8dd62013b8843e8e5d4877e297b886e5aace4a59fec25dc20736e45 +FROM node:24-bookworm-slim@sha256:b4687aef2571c632a1953695ce4d61d6462a7eda471fe6e272eebf0418f276ba RUN --mount=type=cache,id=openclaw-install-sh-e2e-apt-cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,id=openclaw-install-sh-e2e-apt-lists,target=/var/lib/apt,sharing=locked \ apt-get 
update \ + && DEBIAN_FRONTEND=noninteractive apt-get upgrade -y --no-install-recommends \ && apt-get install -y --no-install-recommends \ bash \ ca-certificates \ diff --git a/scripts/docker/install-sh-nonroot/Dockerfile b/scripts/docker/install-sh-nonroot/Dockerfile index d0c085d9f69..8e29715dbfb 100644 --- a/scripts/docker/install-sh-nonroot/Dockerfile +++ b/scripts/docker/install-sh-nonroot/Dockerfile @@ -11,6 +11,7 @@ RUN --mount=type=cache,id=openclaw-install-sh-nonroot-apt-cache,target=/var/cach if [ "${attempt}" -eq 3 ]; then exit 1; fi; \ sleep 3; \ done; \ + DEBIAN_FRONTEND=noninteractive apt-get -o Acquire::Retries=3 upgrade -y --no-install-recommends; \ apt-get -o Acquire::Retries=3 install -y --no-install-recommends \ bash \ ca-certificates \ diff --git a/scripts/docker/install-sh-smoke/Dockerfile b/scripts/docker/install-sh-smoke/Dockerfile index 94fdca13a31..ee37a24d6ce 100644 --- a/scripts/docker/install-sh-smoke/Dockerfile +++ b/scripts/docker/install-sh-smoke/Dockerfile @@ -1,6 +1,6 @@ # syntax=docker/dockerfile:1.7 -FROM node:22-bookworm-slim@sha256:3cfe526ec8dd62013b8843e8e5d4877e297b886e5aace4a59fec25dc20736e45 +FROM node:24-bookworm-slim@sha256:b4687aef2571c632a1953695ce4d61d6462a7eda471fe6e272eebf0418f276ba RUN --mount=type=cache,id=openclaw-install-sh-smoke-apt-cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,id=openclaw-install-sh-smoke-apt-lists,target=/var/lib/apt,sharing=locked \ @@ -11,6 +11,7 @@ RUN --mount=type=cache,id=openclaw-install-sh-smoke-apt-cache,target=/var/cache/ if [ "${attempt}" -eq 3 ]; then exit 1; fi; \ sleep 3; \ done; \ + DEBIAN_FRONTEND=noninteractive apt-get -o Acquire::Retries=3 upgrade -y --no-install-recommends; \ apt-get -o Acquire::Retries=3 install -y --no-install-recommends \ bash \ ca-certificates \ diff --git a/scripts/e2e/Dockerfile b/scripts/e2e/Dockerfile index e8bd039155d..fb390c1190b 100644 --- a/scripts/e2e/Dockerfile +++ b/scripts/e2e/Dockerfile @@ -1,6 +1,6 @@ # 
syntax=docker/dockerfile:1.7 -FROM node:22-bookworm@sha256:cd7bcd2e7a1e6f72052feb023c7f6b722205d3fcab7bbcbd2d1bfdab10b1e935 +FROM node:24-bookworm@sha256:9f3b13503acdf9bc1e0213ccb25ebe86ac881cad17636733a1da1be1d44509df RUN corepack enable diff --git a/scripts/e2e/Dockerfile.qr-import b/scripts/e2e/Dockerfile.qr-import index e221e0278a9..a8c611a9516 100644 --- a/scripts/e2e/Dockerfile.qr-import +++ b/scripts/e2e/Dockerfile.qr-import @@ -1,6 +1,6 @@ # syntax=docker/dockerfile:1.7 -FROM node:22-bookworm@sha256:cd7bcd2e7a1e6f72052feb023c7f6b722205d3fcab7bbcbd2d1bfdab10b1e935 +FROM node:24-bookworm@sha256:9f3b13503acdf9bc1e0213ccb25ebe86ac881cad17636733a1da1be1d44509df RUN corepack enable diff --git a/scripts/e2e/parallels-linux-smoke.sh b/scripts/e2e/parallels-linux-smoke.sh new file mode 100644 index 00000000000..a3e3f96bb56 --- /dev/null +++ b/scripts/e2e/parallels-linux-smoke.sh @@ -0,0 +1,673 @@ +#!/usr/bin/env bash +set -euo pipefail + +VM_NAME="Ubuntu 24.04.3 ARM64" +SNAPSHOT_HINT="fresh" +MODE="both" +OPENAI_API_KEY_ENV="OPENAI_API_KEY" +INSTALL_URL="https://openclaw.ai/install.sh" +HOST_PORT="18427" +HOST_PORT_EXPLICIT=0 +HOST_IP="" +LATEST_VERSION="" +INSTALL_VERSION="" +TARGET_PACKAGE_SPEC="" +JSON_OUTPUT=0 +KEEP_SERVER=0 + +MAIN_TGZ_DIR="$(mktemp -d)" +MAIN_TGZ_PATH="" +SERVER_PID="" +RUN_DIR="$(mktemp -d /tmp/openclaw-parallels-linux.XXXXXX)" +BUILD_LOCK_DIR="${TMPDIR:-/tmp}/openclaw-parallels-build.lock" + +TIMEOUT_SNAPSHOT_S=180 +TIMEOUT_BOOTSTRAP_S=600 +TIMEOUT_INSTALL_S=1200 +TIMEOUT_VERIFY_S=90 +TIMEOUT_ONBOARD_S=180 +TIMEOUT_AGENT_S=180 + +FRESH_MAIN_STATUS="skip" +FRESH_MAIN_VERSION="skip" +FRESH_GATEWAY_STATUS="skip" +FRESH_AGENT_STATUS="skip" +UPGRADE_STATUS="skip" +LATEST_INSTALLED_VERSION="skip" +UPGRADE_MAIN_VERSION="skip" +UPGRADE_GATEWAY_STATUS="skip" +UPGRADE_AGENT_STATUS="skip" +DAEMON_STATUS="systemd-user-unavailable" + +say() { + printf '==> %s\n' "$*" +} + +artifact_label() { + if [[ -n "$TARGET_PACKAGE_SPEC" ]]; then + printf 'target 
package tgz' + return + fi + printf 'current main tgz' +} + +warn() { + printf 'warn: %s\n' "$*" >&2 +} + +die() { + printf 'error: %s\n' "$*" >&2 + exit 1 +} + +cleanup() { + if [[ -n "${SERVER_PID:-}" ]]; then + kill "$SERVER_PID" >/dev/null 2>&1 || true + fi + rm -rf "$MAIN_TGZ_DIR" +} + +trap cleanup EXIT + +usage() { + cat <<'EOF' +Usage: bash scripts/e2e/parallels-linux-smoke.sh [options] + +Options: + --vm Parallels VM name. Default: "Ubuntu 24.04.3 ARM64" + --snapshot-hint Snapshot name substring/fuzzy match. Default: "fresh" + --mode + --openai-api-key-env Host env var name for OpenAI API key. Default: OPENAI_API_KEY + --install-url Installer URL for latest release. Default: https://openclaw.ai/install.sh + --host-port Host HTTP port for current-main tgz. Default: 18427 + --host-ip Override Parallels host IP. + --latest-version Override npm latest version lookup. + --install-version Pin site-installer version/dist-tag for the baseline lane. + --target-package-spec + Install this npm package tarball instead of packing current main. + Example: openclaw@2026.3.13-beta.1 + --keep-server Leave temp host HTTP server running. + --json Print machine-readable JSON summary. + -h, --help Show help. 
+EOF +} + +while [[ $# -gt 0 ]]; do + case "$1" in + --vm) + VM_NAME="$2" + shift 2 + ;; + --snapshot-hint) + SNAPSHOT_HINT="$2" + shift 2 + ;; + --mode) + MODE="$2" + shift 2 + ;; + --openai-api-key-env) + OPENAI_API_KEY_ENV="$2" + shift 2 + ;; + --install-url) + INSTALL_URL="$2" + shift 2 + ;; + --host-port) + HOST_PORT="$2" + HOST_PORT_EXPLICIT=1 + shift 2 + ;; + --host-ip) + HOST_IP="$2" + shift 2 + ;; + --latest-version) + LATEST_VERSION="$2" + shift 2 + ;; + --install-version) + INSTALL_VERSION="$2" + shift 2 + ;; + --target-package-spec) + TARGET_PACKAGE_SPEC="$2" + shift 2 + ;; + --keep-server) + KEEP_SERVER=1 + shift + ;; + --json) + JSON_OUTPUT=1 + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + die "unknown arg: $1" + ;; + esac +done + +case "$MODE" in + fresh|upgrade|both) ;; + *) + die "invalid --mode: $MODE" + ;; +esac + +OPENAI_API_KEY_VALUE="${!OPENAI_API_KEY_ENV:-}" +[[ -n "$OPENAI_API_KEY_VALUE" ]] || die "$OPENAI_API_KEY_ENV is required" + +resolve_snapshot_id() { + local json hint + json="$(prlctl snapshot-list "$VM_NAME" --json)" + hint="$SNAPSHOT_HINT" + SNAPSHOT_JSON="$json" SNAPSHOT_HINT="$hint" python3 - <<'PY' +import difflib +import json +import os +import sys + +payload = json.loads(os.environ["SNAPSHOT_JSON"]) +hint = os.environ["SNAPSHOT_HINT"].strip().lower() +best_id = None +best_score = -1.0 +for snapshot_id, meta in payload.items(): + name = str(meta.get("name", "")).strip() + lowered = name.lower() + score = 0.0 + if lowered == hint: + score = 10.0 + elif hint and hint in lowered: + score = 5.0 + len(hint) / max(len(lowered), 1) + else: + score = difflib.SequenceMatcher(None, hint, lowered).ratio() + if score > best_score: + best_score = score + best_id = snapshot_id +if not best_id: + sys.exit("no snapshot matched") +print(best_id) +PY +} + +resolve_host_ip() { + if [[ -n "$HOST_IP" ]]; then + printf '%s\n' "$HOST_IP" + return + fi + local detected + detected="$(ifconfig | awk '/inet 10\.211\./ { print $2; exit }')" + [[ -n 
"$detected" ]] || die "failed to detect Parallels host IP; pass --host-ip" + printf '%s\n' "$detected" +} + +is_host_port_free() { + local port="$1" + python3 - "$port" <<'PY' +import socket +import sys + +sock = socket.socket() +try: + sock.bind(("0.0.0.0", int(sys.argv[1]))) +except OSError: + raise SystemExit(1) +finally: + sock.close() +PY +} + +allocate_host_port() { + python3 - <<'PY' +import socket + +sock = socket.socket() +sock.bind(("0.0.0.0", 0)) +print(sock.getsockname()[1]) +sock.close() +PY +} + +resolve_host_port() { + if is_host_port_free "$HOST_PORT"; then + printf '%s\n' "$HOST_PORT" + return + fi + if [[ "$HOST_PORT_EXPLICIT" -eq 1 ]]; then + die "host port $HOST_PORT already in use" + fi + HOST_PORT="$(allocate_host_port)" + warn "host port 18427 busy; using $HOST_PORT" + printf '%s\n' "$HOST_PORT" +} + +guest_exec() { + prlctl exec "$VM_NAME" "$@" +} + +restore_snapshot() { + local snapshot_id="$1" + say "Restore snapshot $SNAPSHOT_HINT ($snapshot_id)" + prlctl snapshot-switch "$VM_NAME" --id "$snapshot_id" >/dev/null +} + +bootstrap_guest() { + guest_exec apt-get -o Acquire::Check-Date=false update + guest_exec apt-get install -y curl ca-certificates +} + +resolve_latest_version() { + if [[ -n "$LATEST_VERSION" ]]; then + printf '%s\n' "$LATEST_VERSION" + return + fi + npm view openclaw version --userconfig "$(mktemp)" +} + +current_build_commit() { + python3 - <<'PY' +import json +import pathlib + +path = pathlib.Path("dist/build-info.json") +if not path.exists(): + print("") +else: + print(json.loads(path.read_text()).get("commit", "")) +PY +} + +acquire_build_lock() { + local owner_pid="" + while ! mkdir "$BUILD_LOCK_DIR" 2>/dev/null; do + if [[ -f "$BUILD_LOCK_DIR/pid" ]]; then + owner_pid="$(cat "$BUILD_LOCK_DIR/pid" 2>/dev/null || true)" + if [[ -n "$owner_pid" ]] && ! 
kill -0 "$owner_pid" >/dev/null 2>&1; then + warn "Removing stale Parallels build lock" + rm -rf "$BUILD_LOCK_DIR" + continue + fi + fi + sleep 1 + done + printf '%s\n' "$$" >"$BUILD_LOCK_DIR/pid" +} + +release_build_lock() { + if [[ -d "$BUILD_LOCK_DIR" ]]; then + rm -rf "$BUILD_LOCK_DIR" + fi +} + +ensure_current_build() { + local head build_commit + acquire_build_lock + head="$(git rev-parse HEAD)" + build_commit="$(current_build_commit)" + if [[ "$build_commit" == "$head" ]]; then + release_build_lock + return + fi + say "Build dist for current head" + pnpm build + build_commit="$(current_build_commit)" + release_build_lock + [[ "$build_commit" == "$head" ]] || die "dist/build-info.json still does not match HEAD after build" +} + +extract_package_version_from_tgz() { + tar -xOf "$1" package/package.json | python3 -c 'import json, sys; print(json.load(sys.stdin)["version"])' +} + +pack_main_tgz() { + local short_head pkg + if [[ -n "$TARGET_PACKAGE_SPEC" ]]; then + say "Pack target package tgz: $TARGET_PACKAGE_SPEC" + pkg="$( + npm pack "$TARGET_PACKAGE_SPEC" --ignore-scripts --json --pack-destination "$MAIN_TGZ_DIR" \ + | python3 -c 'import json, sys; data = json.load(sys.stdin); print(data[-1]["filename"])' + )" + MAIN_TGZ_PATH="$MAIN_TGZ_DIR/$(basename "$pkg")" + TARGET_EXPECT_VERSION="$(extract_package_version_from_tgz "$MAIN_TGZ_PATH")" + say "Packed $MAIN_TGZ_PATH" + say "Target package version: $TARGET_EXPECT_VERSION" + return + fi + say "Pack current main tgz" + ensure_current_build + short_head="$(git rev-parse --short HEAD)" + pkg="$( + npm pack --ignore-scripts --json --pack-destination "$MAIN_TGZ_DIR" \ + | python3 -c 'import json, sys; data = json.load(sys.stdin); print(data[-1]["filename"])' + )" + MAIN_TGZ_PATH="$MAIN_TGZ_DIR/openclaw-main-$short_head.tgz" + cp "$MAIN_TGZ_DIR/$pkg" "$MAIN_TGZ_PATH" + say "Packed $MAIN_TGZ_PATH" + tar -xOf "$MAIN_TGZ_PATH" package/dist/build-info.json +} + +verify_target_version() { + if [[ -n 
"$TARGET_PACKAGE_SPEC" ]]; then + verify_version_contains "$TARGET_EXPECT_VERSION" + return + fi + verify_version_contains "$(git rev-parse --short=7 HEAD)" +} + +start_server() { + local host_ip="$1" + local artifact probe_url attempt + artifact="$(basename "$MAIN_TGZ_PATH")" + attempt=0 + while :; do + attempt=$((attempt + 1)) + say "Serve $(artifact_label) on $host_ip:$HOST_PORT" + ( + cd "$MAIN_TGZ_DIR" + exec python3 -m http.server "$HOST_PORT" --bind 0.0.0.0 + ) >/tmp/openclaw-parallels-linux-http.log 2>&1 & + SERVER_PID=$! + sleep 1 + probe_url="http://127.0.0.1:$HOST_PORT/$artifact" + if kill -0 "$SERVER_PID" >/dev/null 2>&1 && curl -fsSI "$probe_url" >/dev/null 2>&1; then + return 0 + fi + kill "$SERVER_PID" >/dev/null 2>&1 || true + wait "$SERVER_PID" >/dev/null 2>&1 || true + SERVER_PID="" + if [[ "$HOST_PORT_EXPLICIT" -eq 1 || $attempt -ge 3 ]]; then + die "failed to start reachable host HTTP server on port $HOST_PORT" + fi + HOST_PORT="$(allocate_host_port)" + warn "retrying host HTTP server on port $HOST_PORT" + done +} + +install_latest_release() { + local version_args=() + if [[ -n "$INSTALL_VERSION" ]]; then + version_args=(--version "$INSTALL_VERSION") + fi + guest_exec curl -fsSL "$INSTALL_URL" -o /tmp/openclaw-install.sh + guest_exec /usr/bin/env OPENCLAW_NO_ONBOARD=1 bash /tmp/openclaw-install.sh "${version_args[@]}" --no-onboard + guest_exec openclaw --version +} + +install_main_tgz() { + local host_ip="$1" + local temp_name="$2" + local tgz_url="http://$host_ip:$HOST_PORT/$(basename "$MAIN_TGZ_PATH")" + guest_exec curl -fsSL "$tgz_url" -o "/tmp/$temp_name" + guest_exec npm install -g "/tmp/$temp_name" --no-fund --no-audit + guest_exec openclaw --version +} + +verify_version_contains() { + local needle="$1" + local version + version="$(guest_exec openclaw --version)" + printf '%s\n' "$version" + case "$version" in + *"$needle"*) ;; + *) + echo "version mismatch: expected substring $needle" >&2 + return 1 + ;; + esac +} + +run_ref_onboard() { + 
guest_exec /usr/bin/env "OPENAI_API_KEY=$OPENAI_API_KEY_VALUE" openclaw onboard \ + --non-interactive \ + --mode local \ + --auth-choice openai-api-key \ + --secret-input-mode ref \ + --gateway-port 18789 \ + --gateway-bind loopback \ + --skip-skills \ + --skip-health \ + --accept-risk \ + --json +} + +verify_local_turn() { + guest_exec /usr/bin/env "OPENAI_API_KEY=$OPENAI_API_KEY_VALUE" openclaw agent \ + --local \ + --agent main \ + --message ping \ + --json +} + +phase_log_path() { + printf '%s/%s.log\n' "$RUN_DIR" "$1" +} + +extract_last_version() { + local log_path="$1" + python3 - "$log_path" <<'PY' +import pathlib +import re +import sys + +text = pathlib.Path(sys.argv[1]).read_text(errors="replace") +matches = re.findall(r"OpenClaw [^\r\n]+ \([0-9a-f]{7,}\)", text) +print(matches[-1] if matches else "") +PY +} + +show_log_excerpt() { + local log_path="$1" + warn "log tail: $log_path" + tail -n 80 "$log_path" >&2 || true +} + +phase_run() { + local phase_id="$1" + local timeout_s="$2" + shift 2 + + local log_path pid start rc timed_out + log_path="$(phase_log_path "$phase_id")" + say "$phase_id" + start=$SECONDS + timed_out=0 + + ( + "$@" + ) >"$log_path" 2>&1 & + pid=$! + + while kill -0 "$pid" >/dev/null 2>&1; do + if (( SECONDS - start >= timeout_s )); then + timed_out=1 + kill "$pid" >/dev/null 2>&1 || true + sleep 2 + kill -9 "$pid" >/dev/null 2>&1 || true + break + fi + sleep 1 + done + + set +e + wait "$pid" + rc=$? 
+ set -e + + if (( timed_out )); then + warn "$phase_id timed out after ${timeout_s}s" + printf 'timeout after %ss\n' "$timeout_s" >>"$log_path" + show_log_excerpt "$log_path" + return 124 + fi + + if [[ $rc -ne 0 ]]; then + warn "$phase_id failed (rc=$rc)" + show_log_excerpt "$log_path" + return "$rc" + fi + + return 0 +} + +write_summary_json() { + local summary_path="$RUN_DIR/summary.json" + python3 - "$summary_path" <<'PY' +import json +import os +import sys + +summary = { + "vm": os.environ["SUMMARY_VM"], + "snapshotHint": os.environ["SUMMARY_SNAPSHOT_HINT"], + "snapshotId": os.environ["SUMMARY_SNAPSHOT_ID"], + "mode": os.environ["SUMMARY_MODE"], + "latestVersion": os.environ["SUMMARY_LATEST_VERSION"], + "installVersion": os.environ["SUMMARY_INSTALL_VERSION"], + "targetPackageSpec": os.environ["SUMMARY_TARGET_PACKAGE_SPEC"], + "currentHead": os.environ["SUMMARY_CURRENT_HEAD"], + "runDir": os.environ["SUMMARY_RUN_DIR"], + "daemon": os.environ["SUMMARY_DAEMON_STATUS"], + "freshMain": { + "status": os.environ["SUMMARY_FRESH_MAIN_STATUS"], + "version": os.environ["SUMMARY_FRESH_MAIN_VERSION"], + "gateway": os.environ["SUMMARY_FRESH_GATEWAY_STATUS"], + "agent": os.environ["SUMMARY_FRESH_AGENT_STATUS"], + }, + "upgrade": { + "status": os.environ["SUMMARY_UPGRADE_STATUS"], + "latestVersionInstalled": os.environ["SUMMARY_LATEST_INSTALLED_VERSION"], + "mainVersion": os.environ["SUMMARY_UPGRADE_MAIN_VERSION"], + "gateway": os.environ["SUMMARY_UPGRADE_GATEWAY_STATUS"], + "agent": os.environ["SUMMARY_UPGRADE_AGENT_STATUS"], + }, +} +with open(sys.argv[1], "w", encoding="utf-8") as handle: + json.dump(summary, handle, indent=2, sort_keys=True) +print(sys.argv[1]) +PY +} + +run_fresh_main_lane() { + local snapshot_id="$1" + local host_ip="$2" + phase_run "fresh.restore-snapshot" "$TIMEOUT_SNAPSHOT_S" restore_snapshot "$snapshot_id" + phase_run "fresh.bootstrap-guest" "$TIMEOUT_BOOTSTRAP_S" bootstrap_guest + phase_run "fresh.install-latest-bootstrap" "$TIMEOUT_INSTALL_S" 
install_latest_release + phase_run "fresh.install-main" "$TIMEOUT_INSTALL_S" install_main_tgz "$host_ip" "openclaw-main-fresh.tgz" + FRESH_MAIN_VERSION="$(extract_last_version "$(phase_log_path fresh.install-main)")" + phase_run "fresh.verify-main-version" "$TIMEOUT_VERIFY_S" verify_target_version + phase_run "fresh.onboard-ref" "$TIMEOUT_ONBOARD_S" run_ref_onboard + FRESH_GATEWAY_STATUS="skipped-no-detached-linux-gateway" + phase_run "fresh.first-local-agent-turn" "$TIMEOUT_AGENT_S" verify_local_turn + FRESH_AGENT_STATUS="pass" +} + +run_upgrade_lane() { + local snapshot_id="$1" + local host_ip="$2" + phase_run "upgrade.restore-snapshot" "$TIMEOUT_SNAPSHOT_S" restore_snapshot "$snapshot_id" + phase_run "upgrade.bootstrap-guest" "$TIMEOUT_BOOTSTRAP_S" bootstrap_guest + phase_run "upgrade.install-latest" "$TIMEOUT_INSTALL_S" install_latest_release + LATEST_INSTALLED_VERSION="$(extract_last_version "$(phase_log_path upgrade.install-latest)")" + phase_run "upgrade.verify-latest-version" "$TIMEOUT_VERIFY_S" verify_version_contains "$LATEST_VERSION" + phase_run "upgrade.install-main" "$TIMEOUT_INSTALL_S" install_main_tgz "$host_ip" "openclaw-main-upgrade.tgz" + UPGRADE_MAIN_VERSION="$(extract_last_version "$(phase_log_path upgrade.install-main)")" + phase_run "upgrade.verify-main-version" "$TIMEOUT_VERIFY_S" verify_target_version + phase_run "upgrade.onboard-ref" "$TIMEOUT_ONBOARD_S" run_ref_onboard + UPGRADE_GATEWAY_STATUS="skipped-no-detached-linux-gateway" + phase_run "upgrade.first-local-agent-turn" "$TIMEOUT_AGENT_S" verify_local_turn + UPGRADE_AGENT_STATUS="pass" +} + +SNAPSHOT_ID="$(resolve_snapshot_id)" +LATEST_VERSION="$(resolve_latest_version)" +HOST_IP="$(resolve_host_ip)" +HOST_PORT="$(resolve_host_port)" + +say "VM: $VM_NAME" +say "Snapshot hint: $SNAPSHOT_HINT" +say "Latest npm version: $LATEST_VERSION" +say "Current head: $(git rev-parse --short HEAD)" +say "Run logs: $RUN_DIR" + +pack_main_tgz +start_server "$HOST_IP" + +if [[ "$MODE" == "fresh" || 
"$MODE" == "both" ]]; then + set +e + run_fresh_main_lane "$SNAPSHOT_ID" "$HOST_IP" + fresh_rc=$? + set -e + if [[ $fresh_rc -eq 0 ]]; then + FRESH_MAIN_STATUS="pass" + else + FRESH_MAIN_STATUS="fail" + fi +fi + +if [[ "$MODE" == "upgrade" || "$MODE" == "both" ]]; then + set +e + run_upgrade_lane "$SNAPSHOT_ID" "$HOST_IP" + upgrade_rc=$? + set -e + if [[ $upgrade_rc -eq 0 ]]; then + UPGRADE_STATUS="pass" + else + UPGRADE_STATUS="fail" + fi +fi + +if [[ "$KEEP_SERVER" -eq 0 && -n "${SERVER_PID:-}" ]]; then + kill "$SERVER_PID" >/dev/null 2>&1 || true + SERVER_PID="" +fi + +SUMMARY_JSON_PATH="$( + SUMMARY_VM="$VM_NAME" \ + SUMMARY_SNAPSHOT_HINT="$SNAPSHOT_HINT" \ + SUMMARY_SNAPSHOT_ID="$SNAPSHOT_ID" \ + SUMMARY_MODE="$MODE" \ + SUMMARY_LATEST_VERSION="$LATEST_VERSION" \ + SUMMARY_INSTALL_VERSION="$INSTALL_VERSION" \ + SUMMARY_TARGET_PACKAGE_SPEC="$TARGET_PACKAGE_SPEC" \ + SUMMARY_CURRENT_HEAD="$(git rev-parse --short HEAD)" \ + SUMMARY_RUN_DIR="$RUN_DIR" \ + SUMMARY_DAEMON_STATUS="$DAEMON_STATUS" \ + SUMMARY_FRESH_MAIN_STATUS="$FRESH_MAIN_STATUS" \ + SUMMARY_FRESH_MAIN_VERSION="$FRESH_MAIN_VERSION" \ + SUMMARY_FRESH_GATEWAY_STATUS="$FRESH_GATEWAY_STATUS" \ + SUMMARY_FRESH_AGENT_STATUS="$FRESH_AGENT_STATUS" \ + SUMMARY_UPGRADE_STATUS="$UPGRADE_STATUS" \ + SUMMARY_LATEST_INSTALLED_VERSION="$LATEST_INSTALLED_VERSION" \ + SUMMARY_UPGRADE_MAIN_VERSION="$UPGRADE_MAIN_VERSION" \ + SUMMARY_UPGRADE_GATEWAY_STATUS="$UPGRADE_GATEWAY_STATUS" \ + SUMMARY_UPGRADE_AGENT_STATUS="$UPGRADE_AGENT_STATUS" \ + write_summary_json +)" + +if [[ "$JSON_OUTPUT" -eq 1 ]]; then + cat "$SUMMARY_JSON_PATH" +else + printf '\nSummary:\n' + if [[ -n "$TARGET_PACKAGE_SPEC" ]]; then + printf ' target-package: %s\n' "$TARGET_PACKAGE_SPEC" + fi + if [[ -n "$INSTALL_VERSION" ]]; then + printf ' baseline-install-version: %s\n' "$INSTALL_VERSION" + fi + printf ' daemon: %s\n' "$DAEMON_STATUS" + printf ' fresh-main: %s (%s)\n' "$FRESH_MAIN_STATUS" "$FRESH_MAIN_VERSION" + printf ' latest->main: %s (%s)\n' 
"$UPGRADE_STATUS" "$UPGRADE_MAIN_VERSION" + printf ' logs: %s\n' "$RUN_DIR" + printf ' summary: %s\n' "$SUMMARY_JSON_PATH" +fi + +if [[ "$FRESH_MAIN_STATUS" == "fail" || "$UPGRADE_STATUS" == "fail" ]]; then + exit 1 +fi diff --git a/scripts/e2e/parallels-macos-smoke.sh b/scripts/e2e/parallels-macos-smoke.sh new file mode 100644 index 00000000000..0b790346358 --- /dev/null +++ b/scripts/e2e/parallels-macos-smoke.sh @@ -0,0 +1,812 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" + +VM_NAME="macOS Tahoe" +SNAPSHOT_HINT="macOS 26.3.1 fresh" +MODE="both" +OPENAI_API_KEY_ENV="OPENAI_API_KEY" +INSTALL_URL="https://openclaw.ai/install.sh" +HOST_PORT="18425" +HOST_PORT_EXPLICIT=0 +HOST_IP="" +LATEST_VERSION="" +INSTALL_VERSION="" +TARGET_PACKAGE_SPEC="" +KEEP_SERVER=0 +CHECK_LATEST_REF=1 +JSON_OUTPUT=0 +GUEST_OPENCLAW_BIN="/opt/homebrew/bin/openclaw" +GUEST_OPENCLAW_ENTRY="/opt/homebrew/lib/node_modules/openclaw/openclaw.mjs" +GUEST_NODE_BIN="/opt/homebrew/bin/node" +GUEST_NPM_BIN="/opt/homebrew/bin/npm" + +MAIN_TGZ_DIR="$(mktemp -d)" +MAIN_TGZ_PATH="" +SERVER_PID="" +RUN_DIR="$(mktemp -d /tmp/openclaw-parallels-smoke.XXXXXX)" +BUILD_LOCK_DIR="${TMPDIR:-/tmp}/openclaw-parallels-build.lock" + +TIMEOUT_INSTALL_S=900 +TIMEOUT_VERIFY_S=60 +TIMEOUT_ONBOARD_S=180 +TIMEOUT_GATEWAY_S=60 +TIMEOUT_AGENT_S=120 +TIMEOUT_PERMISSION_S=60 +TIMEOUT_SNAPSHOT_S=180 + +FRESH_MAIN_VERSION="skip" +LATEST_INSTALLED_VERSION="skip" +UPGRADE_MAIN_VERSION="skip" +FRESH_GATEWAY_STATUS="skip" +UPGRADE_GATEWAY_STATUS="skip" +FRESH_AGENT_STATUS="skip" +UPGRADE_AGENT_STATUS="skip" + +say() { + printf '==> %s\n' "$*" +} + +artifact_label() { + if [[ -n "$TARGET_PACKAGE_SPEC" ]]; then + printf 'target package tgz' + return + fi + printf 'current main tgz' +} + +warn() { + printf 'warn: %s\n' "$*" >&2 +} + +die() { + printf 'error: %s\n' "$*" >&2 + exit 1 +} + +cleanup() { + if [[ -n "${SERVER_PID:-}" ]]; then + kill "$SERVER_PID" >/dev/null 2>&1 
|| true + fi + rm -rf "$MAIN_TGZ_DIR" + if [[ "${KEEP_SERVER:-0}" -eq 0 ]]; then + : + fi +} + +trap cleanup EXIT + +shell_quote() { + local value="$1" + printf "'%s'" "$(printf '%s' "$value" | sed "s/'/'\"'\"'/g")" +} + +usage() { + cat <<'EOF' +Usage: bash scripts/e2e/parallels-macos-smoke.sh [options] + +Options: + --vm Parallels VM name. Default: "macOS Tahoe" + --snapshot-hint Snapshot name substring/fuzzy match. + Default: "macOS 26.3.1 fresh" + --mode + fresh = fresh snapshot -> target package/current main tgz -> onboard smoke + upgrade = fresh snapshot -> latest release -> target package/current main tgz -> onboard smoke + both = run both lanes + --openai-api-key-env Host env var name for OpenAI API key. + Default: OPENAI_API_KEY + --install-url Installer URL for latest release. Default: https://openclaw.ai/install.sh + --host-port Host HTTP port for current-main tgz. Default: 18425 + --host-ip Override Parallels host IP. + --latest-version Override npm latest version lookup. + --install-version Pin site-installer version/dist-tag for the baseline lane. + --target-package-spec + Install this npm package tarball instead of packing current main. + Example: openclaw@2026.3.13-beta.1 + --skip-latest-ref-check Skip the known latest-release ref-mode precheck in upgrade lane. + --keep-server Leave temp host HTTP server running. + --json Print machine-readable JSON summary. + -h, --help Show help. 
+EOF +} + +while [[ $# -gt 0 ]]; do + case "$1" in + --vm) + VM_NAME="$2" + shift 2 + ;; + --snapshot-hint) + SNAPSHOT_HINT="$2" + shift 2 + ;; + --mode) + MODE="$2" + shift 2 + ;; + --openai-api-key-env) + OPENAI_API_KEY_ENV="$2" + shift 2 + ;; + --install-url) + INSTALL_URL="$2" + shift 2 + ;; + --host-port) + HOST_PORT="$2" + HOST_PORT_EXPLICIT=1 + shift 2 + ;; + --host-ip) + HOST_IP="$2" + shift 2 + ;; + --latest-version) + LATEST_VERSION="$2" + shift 2 + ;; + --install-version) + INSTALL_VERSION="$2" + shift 2 + ;; + --target-package-spec) + TARGET_PACKAGE_SPEC="$2" + shift 2 + ;; + --skip-latest-ref-check) + CHECK_LATEST_REF=0 + shift + ;; + --keep-server) + KEEP_SERVER=1 + shift + ;; + --json) + JSON_OUTPUT=1 + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + die "unknown arg: $1" + ;; + esac +done + +case "$MODE" in + fresh|upgrade|both) ;; + *) + die "invalid --mode: $MODE" + ;; +esac + +OPENAI_API_KEY_VALUE="${!OPENAI_API_KEY_ENV:-}" +[[ -n "$OPENAI_API_KEY_VALUE" ]] || die "$OPENAI_API_KEY_ENV is required" + +resolve_snapshot_id() { + local json hint + json="$(prlctl snapshot-list "$VM_NAME" --json)" + hint="$SNAPSHOT_HINT" + SNAPSHOT_JSON="$json" SNAPSHOT_HINT="$hint" python3 - <<'PY' +import difflib +import json +import os +import sys + +payload = json.loads(os.environ["SNAPSHOT_JSON"]) +hint = os.environ["SNAPSHOT_HINT"].strip().lower() +best_id = None +best_score = -1.0 +for snapshot_id, meta in payload.items(): + name = str(meta.get("name", "")).strip() + lowered = name.lower() + score = 0.0 + if lowered == hint: + score = 10.0 + elif hint and hint in lowered: + score = 5.0 + len(hint) / max(len(lowered), 1) + else: + score = difflib.SequenceMatcher(None, hint, lowered).ratio() + if score > best_score: + best_score = score + best_id = snapshot_id +if not best_id: + sys.exit("no snapshot matched") +print(best_id) +PY +} + +resolve_host_ip() { + if [[ -n "$HOST_IP" ]]; then + printf '%s\n' "$HOST_IP" + return + fi + + local detected + 
detected="$(ifconfig | awk '/inet 10\.211\./ { print $2; exit }')" + [[ -n "$detected" ]] || die "failed to detect Parallels host IP; pass --host-ip" + printf '%s\n' "$detected" +} + +is_host_port_free() { + local port="$1" + python3 - "$port" <<'PY' +import socket +import sys + +port = int(sys.argv[1]) +sock = socket.socket() +try: + sock.bind(("0.0.0.0", port)) +except OSError: + raise SystemExit(1) +finally: + sock.close() +PY +} + +allocate_host_port() { + python3 - <<'PY' +import socket + +sock = socket.socket() +sock.bind(("0.0.0.0", 0)) +print(sock.getsockname()[1]) +sock.close() +PY +} + +resolve_host_port() { + if is_host_port_free "$HOST_PORT"; then + printf '%s\n' "$HOST_PORT" + return + fi + if [[ "$HOST_PORT_EXPLICIT" -eq 1 ]]; then + die "host port $HOST_PORT already in use" + fi + HOST_PORT="$(allocate_host_port)" + warn "host port 18425 busy; using $HOST_PORT" + printf '%s\n' "$HOST_PORT" +} + +wait_for_current_user() { + local deadline + deadline=$((SECONDS + TIMEOUT_SNAPSHOT_S)) + while (( SECONDS < deadline )); do + if prlctl exec "$VM_NAME" --current-user whoami >/dev/null 2>&1; then + return 0 + fi + sleep 2 + done + return 1 +} + +guest_current_user_exec() { + prlctl exec "$VM_NAME" --current-user /usr/bin/env \ + PATH=/opt/homebrew/bin:/opt/homebrew/sbin:/usr/bin:/bin:/usr/sbin:/sbin \ + "$@" +} + +guest_script() { + local mode script + mode="$1" + script="$2" + PRL_GUEST_VM_NAME="$VM_NAME" PRL_GUEST_MODE="$mode" PRL_GUEST_SCRIPT="$script" /opt/homebrew/bin/expect <<'EOF' +log_user 1 +set timeout -1 +match_max 1048576 + +set vm $env(PRL_GUEST_VM_NAME) +set mode $env(PRL_GUEST_MODE) +set script $env(PRL_GUEST_SCRIPT) +set cmd [list prlctl enter $vm] +if {$mode eq "current-user"} { + lappend cmd --current-user +} + +spawn {*}$cmd +send -- "printf '__OPENCLAW_READY__\\n'\r" +expect "__OPENCLAW_READY__" +log_user 0 +send -- "export PS1='' PROMPT='' PROMPT2='' RPROMPT=''\r" +send -- "stty -echo\r" + +send -- "cat >/tmp/openclaw-prl.sh 
<<'__OPENCLAW_SCRIPT__'\r" +send -- $script +if {![string match "*\n" $script]} { + send -- "\r" +} +send -- "__OPENCLAW_SCRIPT__\r" +send -- "/bin/bash /tmp/openclaw-prl.sh; rc=\$?; rm -f /tmp/openclaw-prl.sh; printf '__OPENCLAW_RC__:%s\\n' \"\$rc\"; exit \"\$rc\"\r" +log_user 1 + +set rc 1 +expect { + -re {__OPENCLAW_RC__:(-?[0-9]+)} { + set rc $expect_out(1,string) + exp_continue + } + eof {} +} +catch wait result +exit $rc +EOF +} + +guest_current_user_sh() { + local script + script=$'set -eu\n' + script+=$'set -o pipefail\n' + script+=$'trap "" PIPE\n' + script+=$'umask 022\n' + script+=$'export PATH="/opt/homebrew/bin:/opt/homebrew/sbin:/usr/bin:/bin:/usr/sbin:/sbin:${PATH:-}"\n' + script+=$'if [ -z "${HOME:-}" ]; then export HOME="/Users/$(id -un)"; fi\n' + script+=$'cd "$HOME"\n' + script+="$1" + guest_script current-user "$script" +} + +restore_snapshot() { + local snapshot_id="$1" + say "Restore snapshot $SNAPSHOT_HINT ($snapshot_id)" + prlctl snapshot-switch "$VM_NAME" --id "$snapshot_id" >/dev/null + wait_for_current_user || die "desktop user did not become ready in $VM_NAME" +} + +resolve_latest_version() { + if [[ -n "$LATEST_VERSION" ]]; then + printf '%s\n' "$LATEST_VERSION" + return + fi + npm view openclaw version --userconfig "$(mktemp)" +} + +install_latest_release() { + local install_url_q version_arg_q + install_url_q="$(shell_quote "$INSTALL_URL")" + version_arg_q="" + if [[ -n "$INSTALL_VERSION" ]]; then + version_arg_q=" --version $(shell_quote "$INSTALL_VERSION")" + fi + guest_current_user_sh "$(cat <&2 + return 1 + ;; + esac +} + +extract_package_version_from_tgz() { + tar -xOf "$1" package/package.json | python3 -c 'import json, sys; print(json.load(sys.stdin)["version"])' +} + +pack_main_tgz() { + local short_head pkg + if [[ -n "$TARGET_PACKAGE_SPEC" ]]; then + say "Pack target package tgz: $TARGET_PACKAGE_SPEC" + pkg="$( + npm pack "$TARGET_PACKAGE_SPEC" --ignore-scripts --json --pack-destination "$MAIN_TGZ_DIR" \ + | python3 -c 
'import json, sys; data = json.load(sys.stdin); print(data[-1]["filename"])' + )" + MAIN_TGZ_PATH="$MAIN_TGZ_DIR/$(basename "$pkg")" + TARGET_EXPECT_VERSION="$(extract_package_version_from_tgz "$MAIN_TGZ_PATH")" + say "Packed $MAIN_TGZ_PATH" + say "Target package version: $TARGET_EXPECT_VERSION" + return + fi + say "Pack current main tgz" + ensure_current_build + short_head="$(git rev-parse --short HEAD)" + pkg="$( + npm pack --ignore-scripts --json --pack-destination "$MAIN_TGZ_DIR" \ + | python3 -c 'import json, sys; data = json.load(sys.stdin); print(data[-1]["filename"])' + )" + MAIN_TGZ_PATH="$MAIN_TGZ_DIR/openclaw-main-$short_head.tgz" + cp "$MAIN_TGZ_DIR/$pkg" "$MAIN_TGZ_PATH" + say "Packed $MAIN_TGZ_PATH" + tar -xOf "$MAIN_TGZ_PATH" package/dist/build-info.json +} + +verify_target_version() { + if [[ -n "$TARGET_PACKAGE_SPEC" ]]; then + verify_version_contains "$TARGET_EXPECT_VERSION" + return + fi + verify_version_contains "$(git rev-parse --short=7 HEAD)" +} + +current_build_commit() { + python3 - <<'PY' +import json +import pathlib + +path = pathlib.Path("dist/build-info.json") +if not path.exists(): + print("") +else: + print(json.loads(path.read_text()).get("commit", "")) +PY +} + +acquire_build_lock() { + local owner_pid="" + while ! mkdir "$BUILD_LOCK_DIR" 2>/dev/null; do + if [[ -f "$BUILD_LOCK_DIR/pid" ]]; then + owner_pid="$(cat "$BUILD_LOCK_DIR/pid" 2>/dev/null || true)" + if [[ -n "$owner_pid" ]] && ! 
kill -0 "$owner_pid" >/dev/null 2>&1; then + warn "Removing stale Parallels build lock" + rm -rf "$BUILD_LOCK_DIR" + continue + fi + fi + sleep 1 + done + printf '%s\n' "$$" >"$BUILD_LOCK_DIR/pid" +} + +release_build_lock() { + if [[ -d "$BUILD_LOCK_DIR" ]]; then + rm -rf "$BUILD_LOCK_DIR" + fi +} + +ensure_current_build() { + local head build_commit + acquire_build_lock + head="$(git rev-parse HEAD)" + build_commit="$(current_build_commit)" + if [[ "$build_commit" == "$head" ]]; then + release_build_lock + return + fi + say "Build dist for current head" + pnpm build + build_commit="$(current_build_commit)" + release_build_lock + [[ "$build_commit" == "$head" ]] || die "dist/build-info.json still does not match HEAD after build" +} + +start_server() { + local host_ip="$1" + say "Serve $(artifact_label) on $host_ip:$HOST_PORT" + ( + cd "$MAIN_TGZ_DIR" + exec python3 -m http.server "$HOST_PORT" --bind 0.0.0.0 + ) >/tmp/openclaw-parallels-http.log 2>&1 & + SERVER_PID=$! + sleep 1 + kill -0 "$SERVER_PID" >/dev/null 2>&1 || die "failed to start host HTTP server" +} + +install_main_tgz() { + local host_ip="$1" + local temp_name="$2" + local tgz_url_q + tgz_url_q="$(shell_quote "http://$host_ip:$HOST_PORT/$(basename "$MAIN_TGZ_PATH")")" + guest_current_user_sh "$(cat <&2; exit 1; fi; }; check_path "\$root/openclaw"; check_path "\$root/openclaw/extensions"; if [ -d "\$root/openclaw/extensions" ]; then while IFS= read -r -d '' extension_dir; do check_path "\$extension_dir"; done < <(/usr/bin/find "\$root/openclaw/extensions" -mindepth 1 -maxdepth 1 -type d -print0); fi +EOF +)" + guest_current_user_exec /bin/bash -lc "$cmd" +} + +run_ref_onboard() { + guest_current_user_exec \ + /usr/bin/env "OPENAI_API_KEY=$OPENAI_API_KEY_VALUE" \ + "$GUEST_NODE_BIN" "$GUEST_OPENCLAW_ENTRY" onboard \ + --non-interactive \ + --mode local \ + --auth-choice openai-api-key \ + --secret-input-mode ref \ + --gateway-port 18789 \ + --gateway-bind loopback \ + --install-daemon \ + --skip-skills \ 
+ --accept-risk \ + --json +} + +verify_gateway() { + guest_current_user_exec "$GUEST_NODE_BIN" "$GUEST_OPENCLAW_ENTRY" gateway status --deep --require-rpc +} + +show_gateway_status_compat() { + if guest_current_user_exec "$GUEST_NODE_BIN" "$GUEST_OPENCLAW_ENTRY" gateway status --help | grep -Fq -- "--require-rpc"; then + guest_current_user_exec "$GUEST_NODE_BIN" "$GUEST_OPENCLAW_ENTRY" gateway status --deep --require-rpc + return + fi + guest_current_user_exec "$GUEST_NODE_BIN" "$GUEST_OPENCLAW_ENTRY" gateway status --deep +} + +verify_turn() { + guest_current_user_exec "$GUEST_NODE_BIN" "$GUEST_OPENCLAW_ENTRY" agent --agent main --message ping --json +} + +phase_log_path() { + printf '%s/%s.log\n' "$RUN_DIR" "$1" +} + +extract_last_version() { + local log_path="$1" + python3 - "$log_path" <<'PY' +import pathlib +import re +import sys + +text = pathlib.Path(sys.argv[1]).read_text(errors="replace") +matches = re.findall(r"OpenClaw [^\r\n]+ \([0-9a-f]{7,}\)", text) +print(matches[-1] if matches else "") +PY +} + +show_log_excerpt() { + local log_path="$1" + warn "log tail: $log_path" + tail -n 80 "$log_path" >&2 || true +} + +phase_run() { + local phase_id="$1" + local timeout_s="$2" + shift 2 + + local log_path pid start rc timed_out + log_path="$(phase_log_path "$phase_id")" + say "$phase_id" + start=$SECONDS + timed_out=0 + + ( + "$@" + ) >"$log_path" 2>&1 & + pid=$! + + while kill -0 "$pid" >/dev/null 2>&1; do + if (( SECONDS - start >= timeout_s )); then + timed_out=1 + kill "$pid" >/dev/null 2>&1 || true + sleep 2 + kill -9 "$pid" >/dev/null 2>&1 || true + break + fi + sleep 1 + done + + set +e + wait "$pid" + rc=$? 
+ set -e + + if (( timed_out )); then + warn "$phase_id timed out after ${timeout_s}s" + printf 'timeout after %ss\n' "$timeout_s" >>"$log_path" + show_log_excerpt "$log_path" + return 124 + fi + + if [[ $rc -ne 0 ]]; then + warn "$phase_id failed (rc=$rc)" + show_log_excerpt "$log_path" + return "$rc" + fi + + return 0 +} + +write_summary_json() { + local summary_path="$RUN_DIR/summary.json" + python3 - "$summary_path" <<'PY' +import json +import os +import sys + +summary = { + "vm": os.environ["SUMMARY_VM"], + "snapshotHint": os.environ["SUMMARY_SNAPSHOT_HINT"], + "snapshotId": os.environ["SUMMARY_SNAPSHOT_ID"], + "mode": os.environ["SUMMARY_MODE"], + "latestVersion": os.environ["SUMMARY_LATEST_VERSION"], + "installVersion": os.environ["SUMMARY_INSTALL_VERSION"], + "targetPackageSpec": os.environ["SUMMARY_TARGET_PACKAGE_SPEC"], + "currentHead": os.environ["SUMMARY_CURRENT_HEAD"], + "runDir": os.environ["SUMMARY_RUN_DIR"], + "freshMain": { + "status": os.environ["SUMMARY_FRESH_MAIN_STATUS"], + "version": os.environ["SUMMARY_FRESH_MAIN_VERSION"], + "gateway": os.environ["SUMMARY_FRESH_GATEWAY_STATUS"], + "agent": os.environ["SUMMARY_FRESH_AGENT_STATUS"], + }, + "upgrade": { + "precheck": os.environ["SUMMARY_UPGRADE_PRECHECK_STATUS"], + "status": os.environ["SUMMARY_UPGRADE_STATUS"], + "latestVersionInstalled": os.environ["SUMMARY_LATEST_INSTALLED_VERSION"], + "mainVersion": os.environ["SUMMARY_UPGRADE_MAIN_VERSION"], + "gateway": os.environ["SUMMARY_UPGRADE_GATEWAY_STATUS"], + "agent": os.environ["SUMMARY_UPGRADE_AGENT_STATUS"], + }, +} +with open(sys.argv[1], "w", encoding="utf-8") as handle: + json.dump(summary, handle, indent=2, sort_keys=True) +print(sys.argv[1]) +PY +} + +capture_latest_ref_failure() { + set +e + run_ref_onboard + local rc=$? 
+  set -e
+  if [[ $rc -eq 0 ]]; then
+    say "Latest release ref-mode onboard passed"
+    return 0
+  fi
+  warn "Latest release ref-mode onboard failed pre-upgrade"
+  set +e
+  show_gateway_status_compat || true
+  set -e
+  return 1
+}
+
+run_fresh_main_lane() {
+  local snapshot_id="$1"
+  local host_ip="$2"
+  phase_run "fresh.restore-snapshot" "$TIMEOUT_SNAPSHOT_S" restore_snapshot "$snapshot_id" || return $?
+  phase_run "fresh.install-main" "$TIMEOUT_INSTALL_S" install_main_tgz "$host_ip" "openclaw-main-fresh.tgz" || return $?
+  FRESH_MAIN_VERSION="$(extract_last_version "$(phase_log_path fresh.install-main)")"
+  phase_run "fresh.verify-main-version" "$TIMEOUT_VERIFY_S" verify_target_version || return $?
+  phase_run "fresh.verify-bundle-permissions" "$TIMEOUT_PERMISSION_S" verify_bundle_permissions || return $?
+  phase_run "fresh.onboard-ref" "$TIMEOUT_ONBOARD_S" run_ref_onboard || return $?
+  phase_run "fresh.gateway-status" "$TIMEOUT_GATEWAY_S" verify_gateway || return $?
+  FRESH_GATEWAY_STATUS="pass"
+  phase_run "fresh.first-agent-turn" "$TIMEOUT_AGENT_S" verify_turn || return $?
+  FRESH_AGENT_STATUS="pass"
+}
+
+run_upgrade_lane() {
+  local snapshot_id="$1"
+  local host_ip="$2"
+  phase_run "upgrade.restore-snapshot" "$TIMEOUT_SNAPSHOT_S" restore_snapshot "$snapshot_id" || return $?
+  phase_run "upgrade.install-latest" "$TIMEOUT_INSTALL_S" install_latest_release || return $?
+  LATEST_INSTALLED_VERSION="$(extract_last_version "$(phase_log_path upgrade.install-latest)")"
+  phase_run "upgrade.verify-latest-version" "$TIMEOUT_VERIFY_S" verify_version_contains "$LATEST_VERSION" || return $?
+  if [[ "$CHECK_LATEST_REF" -eq 1 ]]; then
+    if phase_run "upgrade.latest-ref-precheck" "$TIMEOUT_ONBOARD_S" capture_latest_ref_failure; then
+      UPGRADE_PRECHECK_STATUS="latest-ref-pass"
+    else
+      UPGRADE_PRECHECK_STATUS="latest-ref-fail"
+    fi
+  else
+    UPGRADE_PRECHECK_STATUS="skipped"
+  fi
+  phase_run "upgrade.install-main" "$TIMEOUT_INSTALL_S" install_main_tgz "$host_ip" "openclaw-main-upgrade.tgz" || return $?
+  UPGRADE_MAIN_VERSION="$(extract_last_version "$(phase_log_path upgrade.install-main)")"
+  phase_run "upgrade.verify-main-version" "$TIMEOUT_VERIFY_S" verify_target_version || return $?
+  phase_run "upgrade.verify-bundle-permissions" "$TIMEOUT_PERMISSION_S" verify_bundle_permissions || return $?
+  phase_run "upgrade.onboard-ref" "$TIMEOUT_ONBOARD_S" run_ref_onboard || return $?
+  phase_run "upgrade.gateway-status" "$TIMEOUT_GATEWAY_S" verify_gateway || return $?
+  UPGRADE_GATEWAY_STATUS="pass"
+  phase_run "upgrade.first-agent-turn" "$TIMEOUT_AGENT_S" verify_turn || return $?
+  UPGRADE_AGENT_STATUS="pass"
+}
+
+FRESH_MAIN_STATUS="skip"
+UPGRADE_STATUS="skip"
+UPGRADE_PRECHECK_STATUS="skip"
+
+SNAPSHOT_ID="$(resolve_snapshot_id)"
+LATEST_VERSION="$(resolve_latest_version)"
+HOST_IP="$(resolve_host_ip)"
+HOST_PORT="$(resolve_host_port)"
+
+say "VM: $VM_NAME"
+say "Snapshot hint: $SNAPSHOT_HINT"
+say "Latest npm version: $LATEST_VERSION"
+say "Current head: $(git rev-parse --short HEAD)"
+say "Run logs: $RUN_DIR"
+
+pack_main_tgz
+start_server "$HOST_IP"
+
+if [[ "$MODE" == "fresh" || "$MODE" == "both" ]]; then
+  set +e
+  run_fresh_main_lane "$SNAPSHOT_ID" "$HOST_IP"
+  fresh_rc=$?
+  set -e
+  if [[ $fresh_rc -eq 0 ]]; then
+    FRESH_MAIN_STATUS="pass"
+  else
+    FRESH_MAIN_STATUS="fail"
+  fi
+fi
+
+if [[ "$MODE" == "upgrade" || "$MODE" == "both" ]]; then
+  set +e
+  run_upgrade_lane "$SNAPSHOT_ID" "$HOST_IP"
+  upgrade_rc=$?
+ set -e + if [[ $upgrade_rc -eq 0 ]]; then + UPGRADE_STATUS="pass" + else + UPGRADE_STATUS="fail" + fi +fi + +if [[ "$KEEP_SERVER" -eq 0 && -n "${SERVER_PID:-}" ]]; then + kill "$SERVER_PID" >/dev/null 2>&1 || true + SERVER_PID="" +fi + +SUMMARY_JSON_PATH="$( + SUMMARY_VM="$VM_NAME" \ + SUMMARY_SNAPSHOT_HINT="$SNAPSHOT_HINT" \ + SUMMARY_SNAPSHOT_ID="$SNAPSHOT_ID" \ + SUMMARY_MODE="$MODE" \ + SUMMARY_LATEST_VERSION="$LATEST_VERSION" \ + SUMMARY_INSTALL_VERSION="$INSTALL_VERSION" \ + SUMMARY_TARGET_PACKAGE_SPEC="$TARGET_PACKAGE_SPEC" \ + SUMMARY_CURRENT_HEAD="$(git rev-parse --short HEAD)" \ + SUMMARY_RUN_DIR="$RUN_DIR" \ + SUMMARY_FRESH_MAIN_STATUS="$FRESH_MAIN_STATUS" \ + SUMMARY_FRESH_MAIN_VERSION="$FRESH_MAIN_VERSION" \ + SUMMARY_FRESH_GATEWAY_STATUS="$FRESH_GATEWAY_STATUS" \ + SUMMARY_FRESH_AGENT_STATUS="$FRESH_AGENT_STATUS" \ + SUMMARY_UPGRADE_PRECHECK_STATUS="$UPGRADE_PRECHECK_STATUS" \ + SUMMARY_UPGRADE_STATUS="$UPGRADE_STATUS" \ + SUMMARY_LATEST_INSTALLED_VERSION="$LATEST_INSTALLED_VERSION" \ + SUMMARY_UPGRADE_MAIN_VERSION="$UPGRADE_MAIN_VERSION" \ + SUMMARY_UPGRADE_GATEWAY_STATUS="$UPGRADE_GATEWAY_STATUS" \ + SUMMARY_UPGRADE_AGENT_STATUS="$UPGRADE_AGENT_STATUS" \ + write_summary_json +)" + +if [[ "$JSON_OUTPUT" -eq 1 ]]; then + cat "$SUMMARY_JSON_PATH" +else + printf '\nSummary:\n' + if [[ -n "$TARGET_PACKAGE_SPEC" ]]; then + printf ' target-package: %s\n' "$TARGET_PACKAGE_SPEC" + fi + if [[ -n "$INSTALL_VERSION" ]]; then + printf ' baseline-install-version: %s\n' "$INSTALL_VERSION" + fi + printf ' fresh-main: %s (%s)\n' "$FRESH_MAIN_STATUS" "$FRESH_MAIN_VERSION" + printf ' latest->main precheck: %s (%s)\n' "$UPGRADE_PRECHECK_STATUS" "$LATEST_INSTALLED_VERSION" + printf ' latest->main: %s (%s)\n' "$UPGRADE_STATUS" "$UPGRADE_MAIN_VERSION" + printf ' logs: %s\n' "$RUN_DIR" + printf ' summary: %s\n' "$SUMMARY_JSON_PATH" +fi + +if [[ "$FRESH_MAIN_STATUS" == "fail" || "$UPGRADE_STATUS" == "fail" ]]; then + exit 1 +fi diff --git 
a/scripts/e2e/parallels-windows-smoke.sh b/scripts/e2e/parallels-windows-smoke.sh new file mode 100644 index 00000000000..cd144511f49 --- /dev/null +++ b/scripts/e2e/parallels-windows-smoke.sh @@ -0,0 +1,921 @@ +#!/usr/bin/env bash +set -euo pipefail + +VM_NAME="Windows 11" +SNAPSHOT_HINT="pre-openclaw-native-e2e-2026-03-12" +MODE="both" +OPENAI_API_KEY_ENV="OPENAI_API_KEY" +INSTALL_URL="https://openclaw.ai/install.ps1" +HOST_PORT="18426" +HOST_PORT_EXPLICIT=0 +HOST_IP="" +LATEST_VERSION="" +INSTALL_VERSION="" +TARGET_PACKAGE_SPEC="" +JSON_OUTPUT=0 +KEEP_SERVER=0 +CHECK_LATEST_REF=1 + +MAIN_TGZ_DIR="$(mktemp -d)" +MAIN_TGZ_PATH="" +MINGIT_ZIP_PATH="" +MINGIT_ZIP_NAME="" +SERVER_PID="" +RUN_DIR="$(mktemp -d /tmp/openclaw-parallels-windows.XXXXXX)" +BUILD_LOCK_DIR="${TMPDIR:-/tmp}/openclaw-parallels-build.lock" + +TIMEOUT_SNAPSHOT_S=240 +TIMEOUT_INSTALL_S=1200 +TIMEOUT_VERIFY_S=120 +TIMEOUT_ONBOARD_S=240 +TIMEOUT_GATEWAY_S=120 +TIMEOUT_AGENT_S=180 + +FRESH_MAIN_STATUS="skip" +FRESH_MAIN_VERSION="skip" +FRESH_GATEWAY_STATUS="skip" +FRESH_AGENT_STATUS="skip" +UPGRADE_STATUS="skip" +UPGRADE_PRECHECK_STATUS="skip" +LATEST_INSTALLED_VERSION="skip" +UPGRADE_MAIN_VERSION="skip" +UPGRADE_GATEWAY_STATUS="skip" +UPGRADE_AGENT_STATUS="skip" + +say() { + printf '==> %s\n' "$*" +} + +artifact_label() { + if [[ -n "$TARGET_PACKAGE_SPEC" ]]; then + printf 'target package tgz' + return + fi + printf 'current main tgz' +} + +warn() { + printf 'warn: %s\n' "$*" >&2 +} + +die() { + printf 'error: %s\n' "$*" >&2 + exit 1 +} + +cleanup() { + if [[ -n "${SERVER_PID:-}" ]]; then + kill "$SERVER_PID" >/dev/null 2>&1 || true + fi + rm -rf "$MAIN_TGZ_DIR" +} + +trap cleanup EXIT + +usage() { + cat <<'EOF' +Usage: bash scripts/e2e/parallels-windows-smoke.sh [options] + +Options: + --vm Parallels VM name. Default: "Windows 11" + --snapshot-hint Snapshot name substring/fuzzy match. 
+ Default: "pre-openclaw-native-e2e-2026-03-12" + --mode + --openai-api-key-env Host env var name for OpenAI API key. + Default: OPENAI_API_KEY + --install-url Installer URL for latest release. Default: https://openclaw.ai/install.ps1 + --host-port Host HTTP port for current-main tgz. Default: 18426 + --host-ip Override Parallels host IP. + --latest-version Override npm latest version lookup. + --install-version Pin site-installer version/dist-tag for the baseline lane. + --target-package-spec + Install this npm package tarball instead of packing current main. + Example: openclaw@2026.3.13-beta.1 + --skip-latest-ref-check Skip latest-release ref-mode precheck. + --keep-server Leave temp host HTTP server running. + --json Print machine-readable JSON summary. + -h, --help Show help. +EOF +} + +while [[ $# -gt 0 ]]; do + case "$1" in + --vm) + VM_NAME="$2" + shift 2 + ;; + --snapshot-hint) + SNAPSHOT_HINT="$2" + shift 2 + ;; + --mode) + MODE="$2" + shift 2 + ;; + --openai-api-key-env) + OPENAI_API_KEY_ENV="$2" + shift 2 + ;; + --install-url) + INSTALL_URL="$2" + shift 2 + ;; + --host-port) + HOST_PORT="$2" + HOST_PORT_EXPLICIT=1 + shift 2 + ;; + --host-ip) + HOST_IP="$2" + shift 2 + ;; + --latest-version) + LATEST_VERSION="$2" + shift 2 + ;; + --install-version) + INSTALL_VERSION="$2" + shift 2 + ;; + --target-package-spec) + TARGET_PACKAGE_SPEC="$2" + shift 2 + ;; + --skip-latest-ref-check) + CHECK_LATEST_REF=0 + shift + ;; + --keep-server) + KEEP_SERVER=1 + shift + ;; + --json) + JSON_OUTPUT=1 + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + die "unknown arg: $1" + ;; + esac +done + +case "$MODE" in + fresh|upgrade|both) ;; + *) + die "invalid --mode: $MODE" + ;; +esac + +OPENAI_API_KEY_VALUE="${!OPENAI_API_KEY_ENV:-}" +[[ -n "$OPENAI_API_KEY_VALUE" ]] || die "$OPENAI_API_KEY_ENV is required" + +ps_single_quote() { + printf "%s" "$1" | sed "s/'/''/g" +} + +ps_array_literal() { + local arg quoted parts=() + for arg in "$@"; do + quoted="$(ps_single_quote 
"$arg")" + parts+=("'$quoted'") + done + local joined="" + local part + for part in "${parts[@]}"; do + if [[ -n "$joined" ]]; then + joined+=", " + fi + joined+="$part" + done + printf '@(%s)' "$joined" +} + +resolve_snapshot_id() { + local json hint + json="$(prlctl snapshot-list "$VM_NAME" --json)" + hint="$SNAPSHOT_HINT" + SNAPSHOT_JSON="$json" SNAPSHOT_HINT="$hint" python3 - <<'PY' +import difflib +import json +import os +import sys + +payload = json.loads(os.environ["SNAPSHOT_JSON"]) +hint = os.environ["SNAPSHOT_HINT"].strip().lower() +best_id = None +best_score = -1.0 +for snapshot_id, meta in payload.items(): + name = str(meta.get("name", "")).strip() + lowered = name.lower() + score = 0.0 + if lowered == hint: + score = 10.0 + elif hint and hint in lowered: + score = 5.0 + len(hint) / max(len(lowered), 1) + else: + score = difflib.SequenceMatcher(None, hint, lowered).ratio() + if score > best_score: + best_score = score + best_id = snapshot_id +if not best_id: + sys.exit("no snapshot matched") +print(best_id) +PY +} + +resolve_host_ip() { + if [[ -n "$HOST_IP" ]]; then + printf '%s\n' "$HOST_IP" + return + fi + local detected + detected="$(ifconfig | awk '/inet 10\.211\./ { print $2; exit }')" + [[ -n "$detected" ]] || die "failed to detect Parallels host IP; pass --host-ip" + printf '%s\n' "$detected" +} + +is_host_port_free() { + local port="$1" + python3 - "$port" <<'PY' +import socket +import sys + +port = int(sys.argv[1]) +sock = socket.socket() +try: + sock.bind(("0.0.0.0", port)) +except OSError: + raise SystemExit(1) +finally: + sock.close() +PY +} + +allocate_host_port() { + python3 - <<'PY' +import socket + +sock = socket.socket() +sock.bind(("0.0.0.0", 0)) +print(sock.getsockname()[1]) +sock.close() +PY +} + +resolve_host_port() { + if is_host_port_free "$HOST_PORT"; then + printf '%s\n' "$HOST_PORT" + return + fi + if [[ "$HOST_PORT_EXPLICIT" -eq 1 ]]; then + die "host port $HOST_PORT already in use" + fi + HOST_PORT="$(allocate_host_port)" + 
warn "host port 18426 busy; using $HOST_PORT" + printf '%s\n' "$HOST_PORT" +} + +guest_exec() { + prlctl exec "$VM_NAME" --current-user "$@" +} + +guest_powershell() { + local script="$1" + local encoded + encoded="$( + SCRIPT_CONTENT="$script" python3 - <<'PY' +import base64 +import os + +script = "$ProgressPreference = 'SilentlyContinue'\n" + os.environ["SCRIPT_CONTENT"] +payload = script.encode("utf-16le") +print(base64.b64encode(payload).decode("ascii")) +PY + )" + guest_exec powershell.exe -NoProfile -ExecutionPolicy Bypass -EncodedCommand "$encoded" +} + +guest_run_openclaw() { + local env_name="${1:-}" + local env_value="${2:-}" + shift 2 + + local args_literal stdout_name stderr_name env_name_q env_value_q + args_literal="$(ps_array_literal "$@")" + stdout_name="openclaw-stdout-$RANDOM-$RANDOM.log" + stderr_name="openclaw-stderr-$RANDOM-$RANDOM.log" + env_name_q="$(ps_single_quote "$env_name")" + env_value_q="$(ps_single_quote "$env_value")" + + guest_powershell "$(cat </dev/null +} + +verify_windows_user_ready() { + guest_exec cmd.exe /d /s /c "echo ready" +} + +wait_for_guest_ready() { + local deadline + deadline=$((SECONDS + TIMEOUT_SNAPSHOT_S)) + while (( SECONDS < deadline )); do + if verify_windows_user_ready >/dev/null 2>&1; then + return 0 + fi + sleep 3 + done + return 1 +} + +phase_log_path() { + printf '%s/%s.log\n' "$RUN_DIR" "$1" +} + +show_log_excerpt() { + local log_path="$1" + warn "log tail: $log_path" + tail -n 80 "$log_path" >&2 || true +} + +phase_run() { + local phase_id="$1" + local timeout_s="$2" + shift 2 + + local log_path pid rc timed_out + log_path="$(phase_log_path "$phase_id")" + say "$phase_id" + timed_out=0 + + ( + "$@" + ) >"$log_path" 2>&1 & + pid=$! + + ( + sleep "$timeout_s" + kill "$pid" >/dev/null 2>&1 || true + sleep 2 + kill -9 "$pid" >/dev/null 2>&1 || true + ) & + local killer_pid=$! + + set +e + wait "$pid" + rc=$? 
+ set -e + + if kill -0 "$killer_pid" >/dev/null 2>&1; then + kill "$killer_pid" >/dev/null 2>&1 || true + wait "$killer_pid" >/dev/null 2>&1 || true + else + timed_out=1 + fi + + if (( timed_out )); then + warn "$phase_id timed out after ${timeout_s}s" + printf 'timeout after %ss\n' "$timeout_s" >>"$log_path" + show_log_excerpt "$log_path" + return 124 + fi + + if [[ $rc -ne 0 ]]; then + warn "$phase_id failed (rc=$rc)" + show_log_excerpt "$log_path" + return "$rc" + fi + + return 0 +} + +extract_last_version() { + local log_path="$1" + python3 - "$log_path" <<'PY' +import pathlib +import re +import sys + +text = pathlib.Path(sys.argv[1]).read_text(errors="replace") +matches = re.findall(r"OpenClaw [^\r\n]+ \([0-9a-f]{7,}\)", text) +print(matches[-1] if matches else "") +PY +} + +write_summary_json() { + local summary_path="$RUN_DIR/summary.json" + python3 - "$summary_path" <<'PY' +import json +import os +import sys + +summary = { + "vm": os.environ["SUMMARY_VM"], + "snapshotHint": os.environ["SUMMARY_SNAPSHOT_HINT"], + "snapshotId": os.environ["SUMMARY_SNAPSHOT_ID"], + "mode": os.environ["SUMMARY_MODE"], + "latestVersion": os.environ["SUMMARY_LATEST_VERSION"], + "installVersion": os.environ["SUMMARY_INSTALL_VERSION"], + "targetPackageSpec": os.environ["SUMMARY_TARGET_PACKAGE_SPEC"], + "currentHead": os.environ["SUMMARY_CURRENT_HEAD"], + "runDir": os.environ["SUMMARY_RUN_DIR"], + "freshMain": { + "status": os.environ["SUMMARY_FRESH_MAIN_STATUS"], + "version": os.environ["SUMMARY_FRESH_MAIN_VERSION"], + "gateway": os.environ["SUMMARY_FRESH_GATEWAY_STATUS"], + "agent": os.environ["SUMMARY_FRESH_AGENT_STATUS"], + }, + "upgrade": { + "precheck": os.environ["SUMMARY_UPGRADE_PRECHECK_STATUS"], + "status": os.environ["SUMMARY_UPGRADE_STATUS"], + "latestVersionInstalled": os.environ["SUMMARY_LATEST_INSTALLED_VERSION"], + "mainVersion": os.environ["SUMMARY_UPGRADE_MAIN_VERSION"], + "gateway": os.environ["SUMMARY_UPGRADE_GATEWAY_STATUS"], + "agent": 
os.environ["SUMMARY_UPGRADE_AGENT_STATUS"], + }, +} +with open(sys.argv[1], "w", encoding="utf-8") as handle: + json.dump(summary, handle, indent=2, sort_keys=True) +print(sys.argv[1]) +PY +} + +resolve_latest_version() { + if [[ -n "$LATEST_VERSION" ]]; then + printf '%s\n' "$LATEST_VERSION" + return + fi + npm view openclaw version --userconfig "$(mktemp)" +} + +resolve_mingit_download() { + python3 - <<'PY' +import json +import urllib.request + +req = urllib.request.Request( + "https://api.github.com/repos/git-for-windows/git/releases/latest", + headers={ + "User-Agent": "openclaw-parallels-smoke", + "Accept": "application/vnd.github+json", + }, +) +with urllib.request.urlopen(req, timeout=30) as response: + data = json.load(response) + +assets = data.get("assets", []) +preferred_names = [ + "MinGit-2.53.0.2-arm64.zip", + "MinGit-2.53.0.2-64-bit.zip", +] + +best = None +for wanted in preferred_names: + for asset in assets: + if asset.get("name") == wanted: + best = asset + break + if best: + break + +if best is None: + for asset in assets: + name = asset.get("name", "") + if name.startswith("MinGit-") and name.endswith(".zip") and "busybox" not in name: + best = asset + break + +if best is None: + raise SystemExit("no MinGit asset found") + +print(best["name"]) +print(best["browser_download_url"]) +PY +} + +current_build_commit() { + python3 - <<'PY' +import json +import pathlib + +path = pathlib.Path("dist/build-info.json") +if not path.exists(): + print("") +else: + print(json.loads(path.read_text()).get("commit", "")) +PY +} + +acquire_build_lock() { + local owner_pid="" + while ! mkdir "$BUILD_LOCK_DIR" 2>/dev/null; do + if [[ -f "$BUILD_LOCK_DIR/pid" ]]; then + owner_pid="$(cat "$BUILD_LOCK_DIR/pid" 2>/dev/null || true)" + if [[ -n "$owner_pid" ]] && ! 
kill -0 "$owner_pid" >/dev/null 2>&1; then + warn "Removing stale Parallels build lock" + rm -rf "$BUILD_LOCK_DIR" + continue + fi + fi + sleep 1 + done + printf '%s\n' "$$" >"$BUILD_LOCK_DIR/pid" +} + +release_build_lock() { + if [[ -d "$BUILD_LOCK_DIR" ]]; then + rm -rf "$BUILD_LOCK_DIR" + fi +} + +ensure_current_build() { + local head build_commit + acquire_build_lock + head="$(git rev-parse HEAD)" + build_commit="$(current_build_commit)" + if [[ "$build_commit" == "$head" ]]; then + release_build_lock + return + fi + say "Build dist for current head" + pnpm build + build_commit="$(current_build_commit)" + release_build_lock + [[ "$build_commit" == "$head" ]] || die "dist/build-info.json still does not match HEAD after build" +} + +ensure_guest_git() { + local host_ip="$1" + local mingit_url + mingit_url="http://$host_ip:$HOST_PORT/$MINGIT_ZIP_NAME" + if guest_exec cmd.exe /d /s /c "where git.exe >nul 2>nul && git.exe --version"; then + return + fi + guest_exec cmd.exe /d /s /c "if exist \"%LOCALAPPDATA%\\OpenClaw\\deps\\portable-git\" rmdir /s /q \"%LOCALAPPDATA%\\OpenClaw\\deps\\portable-git\"" + guest_exec cmd.exe /d /s /c "if not exist \"%LOCALAPPDATA%\\OpenClaw\\deps\" mkdir \"%LOCALAPPDATA%\\OpenClaw\\deps\"" + guest_exec cmd.exe /d /s /c "mkdir \"%LOCALAPPDATA%\\OpenClaw\\deps\\portable-git\"" + guest_exec cmd.exe /d /s /c "curl.exe -fsSL \"$mingit_url\" -o \"%TEMP%\\$MINGIT_ZIP_NAME\"" + guest_exec cmd.exe /d /s /c "tar.exe -xf \"%TEMP%\\$MINGIT_ZIP_NAME\" -C \"%LOCALAPPDATA%\\OpenClaw\\deps\\portable-git\"" + guest_exec cmd.exe /d /s /c "del /q \"%TEMP%\\$MINGIT_ZIP_NAME\" & set \"PATH=%LOCALAPPDATA%\\OpenClaw\\deps\\portable-git\\cmd;%LOCALAPPDATA%\\OpenClaw\\deps\\portable-git\\mingw64\\bin;%LOCALAPPDATA%\\OpenClaw\\deps\\portable-git\\usr\\bin;%PATH%\" && git.exe --version" +} + +pack_main_tgz() { + local mingit_name mingit_url short_head pkg + if [[ -n "$TARGET_PACKAGE_SPEC" ]]; then + say "Pack target package tgz: $TARGET_PACKAGE_SPEC" + mapfile -t 
mingit_meta < <(resolve_mingit_download) + mingit_name="${mingit_meta[0]}" + mingit_url="${mingit_meta[1]}" + MINGIT_ZIP_NAME="$mingit_name" + MINGIT_ZIP_PATH="$MAIN_TGZ_DIR/$mingit_name" + if [[ ! -f "$MINGIT_ZIP_PATH" ]]; then + say "Download $MINGIT_ZIP_NAME" + curl -fsSL "$mingit_url" -o "$MINGIT_ZIP_PATH" + fi + pkg="$( + npm pack "$TARGET_PACKAGE_SPEC" --ignore-scripts --json --pack-destination "$MAIN_TGZ_DIR" \ + | python3 -c 'import json, sys; data = json.load(sys.stdin); print(data[-1]["filename"])' + )" + MAIN_TGZ_PATH="$MAIN_TGZ_DIR/$(basename "$pkg")" + TARGET_EXPECT_VERSION="$(tar -xOf "$MAIN_TGZ_PATH" package/package.json | python3 -c "import json, sys; print(json.load(sys.stdin)['version'])")" + say "Packed $MAIN_TGZ_PATH" + say "Target package version: $TARGET_EXPECT_VERSION" + return + fi + say "Pack current main tgz" + ensure_current_build + mapfile -t mingit_meta < <(resolve_mingit_download) + mingit_name="${mingit_meta[0]}" + mingit_url="${mingit_meta[1]}" + MINGIT_ZIP_NAME="$mingit_name" + MINGIT_ZIP_PATH="$MAIN_TGZ_DIR/$mingit_name" + if [[ ! 
-f "$MINGIT_ZIP_PATH" ]]; then + say "Download $MINGIT_ZIP_NAME" + curl -fsSL "$mingit_url" -o "$MINGIT_ZIP_PATH" + fi + short_head="$(git rev-parse --short HEAD)" + pkg="$( + npm pack --ignore-scripts --json --pack-destination "$MAIN_TGZ_DIR" \ + | python3 -c 'import json, sys; data = json.load(sys.stdin); print(data[-1]["filename"])' + )" + MAIN_TGZ_PATH="$MAIN_TGZ_DIR/openclaw-main-$short_head.tgz" + cp "$MAIN_TGZ_DIR/$pkg" "$MAIN_TGZ_PATH" + say "Packed $MAIN_TGZ_PATH" + tar -xOf "$MAIN_TGZ_PATH" package/dist/build-info.json +} + +verify_target_version() { + if [[ -n "$TARGET_PACKAGE_SPEC" ]]; then + verify_version_contains "$TARGET_EXPECT_VERSION" + return + fi + verify_version_contains "$(git rev-parse --short=7 HEAD)" +} + +start_server() { + local host_ip="$1" + local artifact probe_url attempt + artifact="$(basename "$MAIN_TGZ_PATH")" + attempt=0 + while :; do + attempt=$((attempt + 1)) + say "Serve $(artifact_label) on $host_ip:$HOST_PORT" + ( + cd "$MAIN_TGZ_DIR" + exec python3 -m http.server "$HOST_PORT" --bind 0.0.0.0 + ) >/tmp/openclaw-parallels-windows-http.log 2>&1 & + SERVER_PID=$! 
+ sleep 1 + probe_url="http://127.0.0.1:$HOST_PORT/$artifact" + if kill -0 "$SERVER_PID" >/dev/null 2>&1 && curl -fsSI "$probe_url" >/dev/null 2>&1; then + return 0 + fi + kill "$SERVER_PID" >/dev/null 2>&1 || true + wait "$SERVER_PID" >/dev/null 2>&1 || true + SERVER_PID="" + if [[ "$HOST_PORT_EXPLICIT" -eq 1 || $attempt -ge 3 ]]; then + die "failed to start reachable host HTTP server on port $HOST_PORT" + fi + HOST_PORT="$(allocate_host_port)" + warn "retrying host HTTP server on port $HOST_PORT" + done +} + +install_latest_release() { + local install_url_q version_flag_q + install_url_q="$(ps_single_quote "$INSTALL_URL")" + version_flag_q="" + if [[ -n "$INSTALL_VERSION" ]]; then + version_flag_q="-Tag '$(ps_single_quote "$INSTALL_VERSION")' " + fi + guest_powershell "$(cat <&2 + return 1 + ;; + esac +} + +run_ref_onboard() { + local openai_key_q runner_name log_name done_name done_status + openai_key_q="$(ps_single_quote "$OPENAI_API_KEY_VALUE")" + runner_name="openclaw-onboard-$RANDOM-$RANDOM.ps1" + log_name="openclaw-onboard-$RANDOM-$RANDOM.log" + done_name="openclaw-onboard-$RANDOM-$RANDOM.done" + + guest_powershell "$(cat < "{1}" 2>&1' -f \$openclaw, \$log) + & cmd.exe /d /s /c \$cmdLine + Set-Content -Path \$done -Value ([string]\$LASTEXITCODE) +} catch { + if (Test-Path \$log) { + Add-Content -Path \$log -Value (\$_ | Out-String) + } else { + (\$_ | Out-String) | Set-Content -Path \$log + } + Set-Content -Path \$done -Value '1' +} +'@ | Set-Content -Path \$runner +Start-Process powershell.exe -ArgumentList @('-NoProfile', '-ExecutionPolicy', 'Bypass', '-File', \$runner) -WindowStyle Hidden | Out-Null +EOF +)" + + while :; do + done_status="$( + guest_powershell "\$done = Join-Path \$env:TEMP '$done_name'; if (Test-Path \$done) { (Get-Content \$done -Raw).Trim() }" + )" + done_status="${done_status//$'\r'/}" + if [[ -n "$done_status" ]]; then + guest_powershell "\$log = Join-Path \$env:TEMP '$log_name'; if (Test-Path \$log) { Get-Content \$log }" + [[ 
"$done_status" == "0" ]] + return $? + fi + sleep 2 + done +} + +verify_gateway() { + guest_run_openclaw "" "" gateway status --deep --require-rpc +} + +show_gateway_status_compat() { + if guest_run_openclaw "" "" gateway status --help | grep -Fq -- "--require-rpc"; then + guest_run_openclaw "" "" gateway status --deep --require-rpc + return + fi + guest_run_openclaw "" "" gateway status --deep +} + +verify_turn() { + guest_run_openclaw "" "" agent --agent main --message ping --json +} + +capture_latest_ref_failure() { + set +e + run_ref_onboard + local rc=$? + set -e + if [[ $rc -eq 0 ]]; then + say "Latest release ref-mode onboard passed" + return 0 + fi + warn "Latest release ref-mode onboard failed pre-upgrade" + set +e + show_gateway_status_compat || true + set -e + return 1 +} + +run_fresh_main_lane() { + local snapshot_id="$1" + local host_ip="$2" + phase_run "fresh.restore-snapshot" "$TIMEOUT_SNAPSHOT_S" restore_snapshot "$snapshot_id" || return $? + phase_run "fresh.wait-for-user" "$TIMEOUT_SNAPSHOT_S" wait_for_guest_ready || return $? + phase_run "fresh.ensure-git" "$TIMEOUT_INSTALL_S" ensure_guest_git "$host_ip" || return $? + phase_run "fresh.install-main" "$TIMEOUT_INSTALL_S" install_main_tgz "$host_ip" "openclaw-main-fresh.tgz" || return $? + FRESH_MAIN_VERSION="$(extract_last_version "$(phase_log_path fresh.install-main)")" + phase_run "fresh.verify-main-version" "$TIMEOUT_VERIFY_S" verify_target_version || return $? + phase_run "fresh.onboard-ref" "$TIMEOUT_ONBOARD_S" run_ref_onboard || return $? + phase_run "fresh.gateway-status" "$TIMEOUT_GATEWAY_S" verify_gateway || return $? + FRESH_GATEWAY_STATUS="pass" + phase_run "fresh.first-agent-turn" "$TIMEOUT_AGENT_S" verify_turn || return $? + FRESH_AGENT_STATUS="pass" +} + +run_upgrade_lane() { + local snapshot_id="$1" + local host_ip="$2" + phase_run "upgrade.restore-snapshot" "$TIMEOUT_SNAPSHOT_S" restore_snapshot "$snapshot_id" || return $? 
+ phase_run "upgrade.wait-for-user" "$TIMEOUT_SNAPSHOT_S" wait_for_guest_ready || return $? + phase_run "upgrade.install-latest" "$TIMEOUT_INSTALL_S" install_latest_release || return $? + LATEST_INSTALLED_VERSION="$(extract_last_version "$(phase_log_path upgrade.install-latest)")" + phase_run "upgrade.verify-latest-version" "$TIMEOUT_VERIFY_S" verify_version_contains "$LATEST_VERSION" || return $? + if [[ "$CHECK_LATEST_REF" -eq 1 ]]; then + if phase_run "upgrade.latest-ref-precheck" "$TIMEOUT_ONBOARD_S" capture_latest_ref_failure; then + UPGRADE_PRECHECK_STATUS="latest-ref-pass" + else + UPGRADE_PRECHECK_STATUS="latest-ref-fail" + fi + else + UPGRADE_PRECHECK_STATUS="skipped" + fi + phase_run "upgrade.ensure-git" "$TIMEOUT_INSTALL_S" ensure_guest_git "$host_ip" || return $? + phase_run "upgrade.install-main" "$TIMEOUT_INSTALL_S" install_main_tgz "$host_ip" "openclaw-main-upgrade.tgz" || return $? + UPGRADE_MAIN_VERSION="$(extract_last_version "$(phase_log_path upgrade.install-main)")" + phase_run "upgrade.verify-main-version" "$TIMEOUT_VERIFY_S" verify_target_version || return $? + phase_run "upgrade.onboard-ref" "$TIMEOUT_ONBOARD_S" run_ref_onboard || return $? + phase_run "upgrade.gateway-status" "$TIMEOUT_GATEWAY_S" verify_gateway || return $? + UPGRADE_GATEWAY_STATUS="pass" + phase_run "upgrade.first-agent-turn" "$TIMEOUT_AGENT_S" verify_turn || return $? + UPGRADE_AGENT_STATUS="pass" +} + +SNAPSHOT_ID="$(resolve_snapshot_id)" +LATEST_VERSION="$(resolve_latest_version)" +HOST_IP="$(resolve_host_ip)" +HOST_PORT="$(resolve_host_port)" + +say "VM: $VM_NAME" +say "Snapshot hint: $SNAPSHOT_HINT" +say "Latest npm version: $LATEST_VERSION" +say "Current head: $(git rev-parse --short HEAD)" +say "Run logs: $RUN_DIR" + +pack_main_tgz +start_server "$HOST_IP" + +if [[ "$MODE" == "fresh" || "$MODE" == "both" ]]; then + set +e + run_fresh_main_lane "$SNAPSHOT_ID" "$HOST_IP" + fresh_rc=$? 
+ set -e + if [[ $fresh_rc -eq 0 ]]; then + FRESH_MAIN_STATUS="pass" + else + FRESH_MAIN_STATUS="fail" + fi +fi + +if [[ "$MODE" == "upgrade" || "$MODE" == "both" ]]; then + set +e + run_upgrade_lane "$SNAPSHOT_ID" "$HOST_IP" + upgrade_rc=$? + set -e + if [[ $upgrade_rc -eq 0 ]]; then + UPGRADE_STATUS="pass" + else + UPGRADE_STATUS="fail" + fi +fi + +if [[ "$KEEP_SERVER" -eq 0 && -n "${SERVER_PID:-}" ]]; then + kill "$SERVER_PID" >/dev/null 2>&1 || true + SERVER_PID="" +fi + +SUMMARY_JSON_PATH="$( + SUMMARY_VM="$VM_NAME" \ + SUMMARY_SNAPSHOT_HINT="$SNAPSHOT_HINT" \ + SUMMARY_SNAPSHOT_ID="$SNAPSHOT_ID" \ + SUMMARY_MODE="$MODE" \ + SUMMARY_LATEST_VERSION="$LATEST_VERSION" \ + SUMMARY_INSTALL_VERSION="$INSTALL_VERSION" \ + SUMMARY_TARGET_PACKAGE_SPEC="$TARGET_PACKAGE_SPEC" \ + SUMMARY_CURRENT_HEAD="$(git rev-parse --short HEAD)" \ + SUMMARY_RUN_DIR="$RUN_DIR" \ + SUMMARY_FRESH_MAIN_STATUS="$FRESH_MAIN_STATUS" \ + SUMMARY_FRESH_MAIN_VERSION="$FRESH_MAIN_VERSION" \ + SUMMARY_FRESH_GATEWAY_STATUS="$FRESH_GATEWAY_STATUS" \ + SUMMARY_FRESH_AGENT_STATUS="$FRESH_AGENT_STATUS" \ + SUMMARY_UPGRADE_PRECHECK_STATUS="$UPGRADE_PRECHECK_STATUS" \ + SUMMARY_UPGRADE_STATUS="$UPGRADE_STATUS" \ + SUMMARY_LATEST_INSTALLED_VERSION="$LATEST_INSTALLED_VERSION" \ + SUMMARY_UPGRADE_MAIN_VERSION="$UPGRADE_MAIN_VERSION" \ + SUMMARY_UPGRADE_GATEWAY_STATUS="$UPGRADE_GATEWAY_STATUS" \ + SUMMARY_UPGRADE_AGENT_STATUS="$UPGRADE_AGENT_STATUS" \ + write_summary_json +)" + +if [[ "$JSON_OUTPUT" -eq 1 ]]; then + cat "$SUMMARY_JSON_PATH" +else + printf '\nSummary:\n' + if [[ -n "$TARGET_PACKAGE_SPEC" ]]; then + printf ' target-package: %s\n' "$TARGET_PACKAGE_SPEC" + fi + if [[ -n "$INSTALL_VERSION" ]]; then + printf ' baseline-install-version: %s\n' "$INSTALL_VERSION" + fi + printf ' fresh-main: %s (%s)\n' "$FRESH_MAIN_STATUS" "$FRESH_MAIN_VERSION" + printf ' latest->main precheck: %s (%s)\n' "$UPGRADE_PRECHECK_STATUS" "$LATEST_INSTALLED_VERSION" + printf ' latest->main: %s (%s)\n' "$UPGRADE_STATUS" 
"$UPGRADE_MAIN_VERSION" + printf ' logs: %s\n' "$RUN_DIR" + printf ' summary: %s\n' "$SUMMARY_JSON_PATH" +fi + +if [[ "$FRESH_MAIN_STATUS" == "fail" || "$UPGRADE_STATUS" == "fail" ]]; then + exit 1 +fi diff --git a/scripts/install.sh b/scripts/install.sh index f7f13490796..2abfbad9935 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -16,8 +16,9 @@ MUTED='\033[38;2;90;100;128m' # text-muted #5a6480 NC='\033[0m' # No Color DEFAULT_TAGLINE="All your chats, one OpenClaw." +NODE_DEFAULT_MAJOR=24 NODE_MIN_MAJOR=22 -NODE_MIN_MINOR=12 +NODE_MIN_MINOR=16 NODE_MIN_VERSION="${NODE_MIN_MAJOR}.${NODE_MIN_MINOR}" ORIGINAL_PATH="${PATH:-}" @@ -994,6 +995,7 @@ SHARP_IGNORE_GLOBAL_LIBVIPS="${SHARP_IGNORE_GLOBAL_LIBVIPS:-1}" NPM_LOGLEVEL="${OPENCLAW_NPM_LOGLEVEL:-error}" NPM_SILENT_FLAG="--silent" VERBOSE="${OPENCLAW_VERBOSE:-0}" +VERIFY_INSTALL="${OPENCLAW_VERIFY_INSTALL:-0}" OPENCLAW_BIN="" PNPM_CMD=() HELP=0 @@ -1015,6 +1017,7 @@ Options: --no-git-update Skip git pull for existing checkout --no-onboard Skip onboarding (non-interactive) --no-prompt Disable prompts (required in CI/automation) + --verify Run a post-install smoke verify --dry-run Print what would happen (no changes) --verbose Print debug output (set -x, npm verbose) --help, -h Show this help @@ -1026,6 +1029,7 @@ Environment variables: OPENCLAW_GIT_DIR=... 
OPENCLAW_GIT_UPDATE=0|1 OPENCLAW_NO_PROMPT=1 + OPENCLAW_VERIFY_INSTALL=1 OPENCLAW_DRY_RUN=1 OPENCLAW_NO_ONBOARD=1 OPENCLAW_VERBOSE=1 @@ -1035,6 +1039,7 @@ Environment variables: Examples: curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash -s -- --no-onboard + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash -s -- --no-onboard --verify curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash -s -- --install-method git --no-onboard EOF } @@ -1058,6 +1063,10 @@ parse_args() { VERBOSE=1 shift ;; + --verify) + VERIFY_INSTALL=1 + shift + ;; --no-prompt) NO_PROMPT=1 shift @@ -1316,14 +1325,14 @@ print_active_node_paths() { return 0 } -ensure_macos_node22_active() { +ensure_macos_default_node_active() { if [[ "$OS" != "macos" ]]; then return 0 fi local brew_node_prefix="" if command -v brew &> /dev/null; then - brew_node_prefix="$(brew --prefix node@22 2>/dev/null || true)" + brew_node_prefix="$(brew --prefix "node@${NODE_DEFAULT_MAJOR}" 2>/dev/null || true)" if [[ -n "$brew_node_prefix" && -x "${brew_node_prefix}/bin/node" ]]; then export PATH="${brew_node_prefix}/bin:$PATH" refresh_shell_command_cache @@ -1340,17 +1349,17 @@ ensure_macos_node22_active() { active_path="$(command -v node 2>/dev/null || echo "not found")" active_version="$(node -v 2>/dev/null || echo "missing")" - ui_error "Node.js v22 was installed but this shell is using ${active_version} (${active_path})" + ui_error "Node.js v${NODE_DEFAULT_MAJOR} was installed but this shell is using ${active_version} (${active_path})" if [[ -n "$brew_node_prefix" ]]; then echo "Add this to your shell profile and restart shell:" echo " export PATH=\"${brew_node_prefix}/bin:\$PATH\"" else - echo "Ensure Homebrew node@22 is first on PATH, then rerun installer." + echo "Ensure Homebrew node@${NODE_DEFAULT_MAJOR} is first on PATH, then rerun installer." 
fi return 1 } -ensure_node22_active_shell() { +ensure_default_node_active_shell() { if node_is_at_least_required; then return 0 fi @@ -1373,13 +1382,13 @@ ensure_node22_active_shell() { if [[ "$nvm_detected" -eq 1 ]]; then echo "nvm appears to be managing Node for this shell." echo "Run:" - echo " nvm install 22" - echo " nvm use 22" - echo " nvm alias default 22" + echo " nvm install ${NODE_DEFAULT_MAJOR}" + echo " nvm use ${NODE_DEFAULT_MAJOR}" + echo " nvm alias default ${NODE_DEFAULT_MAJOR}" echo "Then open a new shell and rerun:" echo " curl -fsSL https://openclaw.ai/install.sh | bash" else - echo "Install/select Node.js 22+ and ensure it is first on PATH, then rerun installer." + echo "Install/select Node.js ${NODE_DEFAULT_MAJOR} (or Node ${NODE_MIN_VERSION}+ minimum) and ensure it is first on PATH, then rerun installer." fi return 1 @@ -1410,9 +1419,9 @@ check_node() { install_node() { if [[ "$OS" == "macos" ]]; then ui_info "Installing Node.js via Homebrew" - run_quiet_step "Installing node@22" brew install node@22 - brew link node@22 --overwrite --force 2>/dev/null || true - if ! ensure_macos_node22_active; then + run_quiet_step "Installing node@${NODE_DEFAULT_MAJOR}" brew install "node@${NODE_DEFAULT_MAJOR}" + brew link "node@${NODE_DEFAULT_MAJOR}" --overwrite --force 2>/dev/null || true + if ! 
ensure_macos_default_node_active; then exit 1 fi ui_success "Node.js installed" @@ -1435,7 +1444,7 @@ install_node() { else run_quiet_step "Installing Node.js" sudo pacman -Sy --noconfirm nodejs npm fi - ui_success "Node.js v22 installed" + ui_success "Node.js v${NODE_DEFAULT_MAJOR} installed" print_active_node_paths || true return 0 fi @@ -1444,7 +1453,7 @@ install_node() { if command -v apt-get &> /dev/null; then local tmp tmp="$(mktempfile)" - download_file "https://deb.nodesource.com/setup_22.x" "$tmp" + download_file "https://deb.nodesource.com/setup_${NODE_DEFAULT_MAJOR}.x" "$tmp" if is_root; then run_quiet_step "Configuring NodeSource repository" bash "$tmp" run_quiet_step "Installing Node.js" apt-get install -y -qq nodejs @@ -1455,7 +1464,7 @@ install_node() { elif command -v dnf &> /dev/null; then local tmp tmp="$(mktempfile)" - download_file "https://rpm.nodesource.com/setup_22.x" "$tmp" + download_file "https://rpm.nodesource.com/setup_${NODE_DEFAULT_MAJOR}.x" "$tmp" if is_root; then run_quiet_step "Configuring NodeSource repository" bash "$tmp" run_quiet_step "Installing Node.js" dnf install -y -q nodejs @@ -1466,7 +1475,7 @@ install_node() { elif command -v yum &> /dev/null; then local tmp tmp="$(mktempfile)" - download_file "https://rpm.nodesource.com/setup_22.x" "$tmp" + download_file "https://rpm.nodesource.com/setup_${NODE_DEFAULT_MAJOR}.x" "$tmp" if is_root; then run_quiet_step "Configuring NodeSource repository" bash "$tmp" run_quiet_step "Installing Node.js" yum install -y -q nodejs @@ -1476,11 +1485,11 @@ install_node() { fi else ui_error "Could not detect package manager" - echo "Please install Node.js 22+ manually: https://nodejs.org" + echo "Please install Node.js ${NODE_DEFAULT_MAJOR} manually (or Node ${NODE_MIN_VERSION}+ minimum): https://nodejs.org" exit 1 fi - ui_success "Node.js v22 installed" + ui_success "Node.js v${NODE_DEFAULT_MAJOR} installed" print_active_node_paths || true fi } @@ -2195,7 +2204,38 @@ 
refresh_gateway_service_if_loaded() { return 0 fi - run_quiet_step "Probing gateway service" "$claw" gateway status --probe --deep || true + run_quiet_step "Probing gateway service" "$claw" gateway status --deep || true +} + +verify_installation() { + if [[ "${VERIFY_INSTALL}" != "1" ]]; then + return 0 + fi + + ui_stage "Verifying installation" + local claw="${OPENCLAW_BIN:-}" + if [[ -z "$claw" ]]; then + claw="$(resolve_openclaw_bin || true)" + fi + if [[ -z "$claw" ]]; then + ui_error "Install verify failed: openclaw not on PATH yet" + warn_openclaw_not_found + return 1 + fi + + run_quiet_step "Checking OpenClaw version" "$claw" --version || return 1 + + if is_gateway_daemon_loaded "$claw"; then + run_quiet_step "Checking gateway service" "$claw" gateway status --deep || { + ui_error "Install verify failed: gateway service unhealthy" + ui_info "Run: openclaw gateway status --deep" + return 1 + } + else + ui_info "Gateway service not loaded; skipping gateway deep probe" + fi + + ui_success "Install verify complete" } # Main installation flow @@ -2267,7 +2307,7 @@ main() { if ! check_node; then install_node fi - if ! ensure_node22_active_shell; then + if ! ensure_default_node_active_shell; then exit 1 fi @@ -2484,6 +2524,10 @@ main() { fi fi + if ! verify_installation; then + exit 1 + fi + if [[ "$should_open_dashboard" == "true" ]]; then maybe_open_dashboard fi diff --git a/scripts/ios-beta-archive.sh b/scripts/ios-beta-archive.sh new file mode 100755 index 00000000000..c65e9991389 --- /dev/null +++ b/scripts/ios-beta-archive.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +set -euo pipefail + +usage() { + cat <<'EOF' +Usage: + scripts/ios-beta-archive.sh [--build-number 7] + +Archives and exports a beta-release IPA locally without uploading. +EOF +} + +BUILD_NUMBER="${IOS_BETA_BUILD_NUMBER:-}" +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" + +while [[ $# -gt 0 ]]; do + case "$1" in + --) + shift + ;; + --build-number) + BUILD_NUMBER="${2:-}" + shift 2 + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "Unknown argument: $1" >&2 + usage + exit 1 + ;; + esac +done + +( + cd "${ROOT_DIR}/apps/ios" + IOS_BETA_BUILD_NUMBER="${BUILD_NUMBER}" fastlane ios beta_archive +) diff --git a/scripts/ios-beta-prepare.sh b/scripts/ios-beta-prepare.sh new file mode 100755 index 00000000000..9dd0d891c9e --- /dev/null +++ b/scripts/ios-beta-prepare.sh @@ -0,0 +1,165 @@ +#!/usr/bin/env bash +set -euo pipefail + +usage() { + cat <<'EOF' +Usage: + OPENCLAW_PUSH_RELAY_BASE_URL=https://relay.example.com \ + scripts/ios-beta-prepare.sh --build-number 7 [--team-id TEAMID] + +Prepares local beta-release inputs without touching local signing overrides: +- reads package.json.version and writes apps/ios/build/Version.xcconfig +- writes apps/ios/build/BetaRelease.xcconfig with canonical bundle IDs +- configures the beta build for relay-backed APNs registration +- regenerates apps/ios/OpenClaw.xcodeproj via xcodegen +EOF +} + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" +IOS_DIR="${ROOT_DIR}/apps/ios" +BUILD_DIR="${IOS_DIR}/build" +BETA_XCCONFIG="${IOS_DIR}/build/BetaRelease.xcconfig" +TEAM_HELPER="${ROOT_DIR}/scripts/ios-team-id.sh" +VERSION_HELPER="${ROOT_DIR}/scripts/ios-write-version-xcconfig.sh" + +BUILD_NUMBER="" +TEAM_ID="${IOS_DEVELOPMENT_TEAM:-}" +PUSH_RELAY_BASE_URL="${OPENCLAW_PUSH_RELAY_BASE_URL:-${IOS_PUSH_RELAY_BASE_URL:-}}" +PUSH_RELAY_BASE_URL_XCCONFIG="" +PACKAGE_VERSION="$(cd "${ROOT_DIR}" && node -p "require('./package.json').version" 2>/dev/null || true)" + +prepare_build_dir() { + if [[ -L "${BUILD_DIR}" ]]; then + echo "Refusing to use symlinked build directory: ${BUILD_DIR}" >&2 + exit 1 + fi + + mkdir -p "${BUILD_DIR}" +} + +write_generated_file() { + local output_path="$1" + local tmp_file="" + + if [[ -e "${output_path}" && -L "${output_path}" ]]; then + echo "Refusing to overwrite symlinked file: ${output_path}" >&2 + exit 1 + fi + + tmp_file="$(mktemp "${output_path}.XXXXXX")" + cat >"${tmp_file}" + mv -f "${tmp_file}" "${output_path}" +} + +validate_push_relay_base_url() { + local value="$1" + + if [[ "${value}" =~ [[:space:]] ]]; then + echo "Invalid OPENCLAW_PUSH_RELAY_BASE_URL: whitespace is not allowed." >&2 + exit 1 + fi + + if [[ "${value}" == *'$'* || "${value}" == *'('* || "${value}" == *')'* || "${value}" == *'='* ]]; then + echo "Invalid OPENCLAW_PUSH_RELAY_BASE_URL: contains forbidden xcconfig characters." >&2 + exit 1 + fi + + if [[ ! "${value}" =~ ^https://[A-Za-z0-9.-]+(:([0-9]{1,5}))?(/[A-Za-z0-9._~!&*+,;:@%/-]*)?$ ]]; then + echo "Invalid OPENCLAW_PUSH_RELAY_BASE_URL: expected https://host[:port][/path]." >&2 + exit 1 + fi + + local port="${BASH_REMATCH[2]:-}" + if [[ -n "${port}" ]] && (( 10#${port} > 65535 )); then + echo "Invalid OPENCLAW_PUSH_RELAY_BASE_URL: port must be between 1 and 65535." 
>&2 + exit 1 + fi +} + +while [[ $# -gt 0 ]]; do + case "$1" in + --) + shift + ;; + --build-number) + BUILD_NUMBER="${2:-}" + shift 2 + ;; + --team-id) + TEAM_ID="${2:-}" + shift 2 + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "Unknown argument: $1" >&2 + usage + exit 1 + ;; + esac +done + +if [[ -z "${BUILD_NUMBER}" ]]; then + echo "Missing required --build-number." >&2 + usage + exit 1 +fi + +if [[ -z "${TEAM_ID}" ]]; then + TEAM_ID="$(IOS_ALLOW_KEYCHAIN_TEAM_FALLBACK=1 bash "${TEAM_HELPER}")" +fi + +if [[ -z "${TEAM_ID}" ]]; then + echo "Could not resolve Apple Team ID. Set IOS_DEVELOPMENT_TEAM or sign into Xcode." >&2 + exit 1 +fi + +if [[ -z "${PUSH_RELAY_BASE_URL}" ]]; then + echo "Missing OPENCLAW_PUSH_RELAY_BASE_URL (or IOS_PUSH_RELAY_BASE_URL) for beta relay registration." >&2 + exit 1 +fi + +validate_push_relay_base_url "${PUSH_RELAY_BASE_URL}" + +# `.xcconfig` treats `//` as a comment opener. Break the URL with a helper setting +# so Xcode still resolves it back to `https://...` at build time. 
+PUSH_RELAY_BASE_URL_XCCONFIG="$( + printf '%s' "${PUSH_RELAY_BASE_URL}" \ + | sed 's#//#$(OPENCLAW_URL_SLASH)$(OPENCLAW_URL_SLASH)#g' +)" + +prepare_build_dir + +( + bash "${VERSION_HELPER}" --build-number "${BUILD_NUMBER}" +) + +write_generated_file "${BETA_XCCONFIG}" <&2 + usage + exit 1 + ;; + esac +done + +( + cd "${ROOT_DIR}/apps/ios" + IOS_BETA_BUILD_NUMBER="${BUILD_NUMBER}" fastlane ios beta +) diff --git a/scripts/ios-write-version-xcconfig.sh b/scripts/ios-write-version-xcconfig.sh new file mode 100755 index 00000000000..e38044814bf --- /dev/null +++ b/scripts/ios-write-version-xcconfig.sh @@ -0,0 +1,99 @@ +#!/usr/bin/env bash +set -euo pipefail + +usage() { + cat <<'EOF' +Usage: + scripts/ios-write-version-xcconfig.sh [--build-number 7] + +Writes apps/ios/build/Version.xcconfig from root package.json.version: +- OPENCLAW_GATEWAY_VERSION = exact package.json version +- OPENCLAW_MARKETING_VERSION = short iOS/App Store version +- OPENCLAW_BUILD_VERSION = explicit build number or local numeric fallback +EOF +} + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" +IOS_DIR="${ROOT_DIR}/apps/ios" +BUILD_DIR="${IOS_DIR}/build" +VERSION_XCCONFIG="${IOS_DIR}/build/Version.xcconfig" +PACKAGE_VERSION="$(cd "${ROOT_DIR}" && node -p "require('./package.json').version" 2>/dev/null || true)" +BUILD_NUMBER="" + +prepare_build_dir() { + if [[ -L "${BUILD_DIR}" ]]; then + echo "Refusing to use symlinked build directory: ${BUILD_DIR}" >&2 + exit 1 + fi + + mkdir -p "${BUILD_DIR}" +} + +write_generated_file() { + local output_path="$1" + local tmp_file="" + + if [[ -e "${output_path}" && -L "${output_path}" ]]; then + echo "Refusing to overwrite symlinked file: ${output_path}" >&2 + exit 1 + fi + + tmp_file="$(mktemp "${output_path}.XXXXXX")" + cat >"${tmp_file}" + mv -f "${tmp_file}" "${output_path}" +} + +while [[ $# -gt 0 ]]; do + case "$1" in + --) + shift + ;; + --build-number) + BUILD_NUMBER="${2:-}" + shift 2 + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "Unknown argument: $1" >&2 + usage + exit 1 + ;; + esac +done + +PACKAGE_VERSION="$(printf '%s' "${PACKAGE_VERSION}" | tr -d '\n' | xargs)" +if [[ -z "${PACKAGE_VERSION}" ]]; then + echo "Unable to read package.json.version from ${ROOT_DIR}/package.json." >&2 + exit 1 +fi + +if [[ "${PACKAGE_VERSION}" =~ ^([0-9]{4}\.[0-9]{1,2}\.[0-9]{1,2})([.-]?beta[.-][0-9]+)?$ ]]; then + MARKETING_VERSION="${BASH_REMATCH[1]}" +else + echo "Unsupported package.json.version '${PACKAGE_VERSION}'. Expected 2026.3.13 or 2026.3.13-beta.1." >&2 + exit 1 +fi + +if [[ -z "${BUILD_NUMBER}" ]]; then + BUILD_NUMBER="$(cd "${ROOT_DIR}" && git rev-list --count HEAD 2>/dev/null || printf '0')" +fi + +if [[ ! "${BUILD_NUMBER}" =~ ^[0-9]+$ ]]; then + echo "Invalid build number '${BUILD_NUMBER}'. Expected digits only." >&2 + exit 1 +fi + +prepare_build_dir + +write_generated_file "${VERSION_XCCONFIG}" <_API_KEY="..." 
&& ./scripts/k8s/deploy.sh +# ============================================================================ + +set -euo pipefail + +# Defaults +CLUSTER_NAME="openclaw" +CONTAINER_CMD="" +DELETE=false + +# Colors +GREEN='\033[0;32m' +BLUE='\033[0;34m' +YELLOW='\033[0;33m' +RED='\033[0;31m' +NC='\033[0m' + +info() { echo -e "${BLUE}[INFO]${NC} $1"; } +success() { echo -e "${GREEN}[OK]${NC} $1"; } +warn() { echo -e "${YELLOW}[WARN]${NC} $1"; } +fail() { echo -e "${RED}[ERROR]${NC} $1" >&2; exit 1; } + +usage() { + cat </dev/null +} + +provider_responsive() { + case "$1" in + docker) + docker info &>/dev/null + ;; + podman) + podman info &>/dev/null + ;; + *) + return 1 + ;; + esac +} + +detect_provider() { + local candidate + + for candidate in podman docker; do + if provider_installed "$candidate" && provider_responsive "$candidate"; then + echo "$candidate" + return 0 + fi + done + + for candidate in podman docker; do + if provider_installed "$candidate"; then + case "$candidate" in + podman) + fail "Podman is installed but not responding, and no responsive Docker daemon was found. Ensure the podman machine is running (podman machine start) or start Docker." + ;; + docker) + fail "Docker is installed but not running, and no responsive Podman machine was found. Start Docker or start Podman." + ;; + esac + fi + done + + fail "Neither podman nor docker found. Install one to use Kind." +} + +CONTAINER_CMD=$(detect_provider) +info "Auto-detected container engine: $CONTAINER_CMD" + +# --------------------------------------------------------------------------- +# Prerequisites +# --------------------------------------------------------------------------- +if ! command -v kind &>/dev/null; then + fail "kind is not installed. Install it from https://kind.sigs.k8s.io/" +fi + +if ! command -v kubectl &>/dev/null; then + fail "kubectl is not installed. Install it before creating or managing a Kind cluster." +fi + +# Verify the container engine is responsive +if ! 
provider_responsive "$CONTAINER_CMD"; then + if [[ "$CONTAINER_CMD" == "docker" ]]; then + fail "Docker daemon is not running. Start it and try again." + elif [[ "$CONTAINER_CMD" == "podman" ]]; then + fail "Podman is not responding. Ensure the podman machine is running (podman machine start)." + fi +fi + +# --------------------------------------------------------------------------- +# Delete mode +# --------------------------------------------------------------------------- +if $DELETE; then + info "Deleting Kind cluster '$CLUSTER_NAME'..." + if KIND_EXPERIMENTAL_PROVIDER="$CONTAINER_CMD" kind get clusters 2>/dev/null | grep -qx "$CLUSTER_NAME"; then + KIND_EXPERIMENTAL_PROVIDER="$CONTAINER_CMD" kind delete cluster --name "$CLUSTER_NAME" + success "Cluster '$CLUSTER_NAME' deleted." + else + warn "Cluster '$CLUSTER_NAME' does not exist." + fi + exit 0 +fi + +# --------------------------------------------------------------------------- +# Check if cluster already exists +# --------------------------------------------------------------------------- +if KIND_EXPERIMENTAL_PROVIDER="$CONTAINER_CMD" kind get clusters 2>/dev/null | grep -qx "$CLUSTER_NAME"; then + warn "Cluster '$CLUSTER_NAME' already exists." + info "To recreate it, run: $0 --name \"$CLUSTER_NAME\" --delete && $0 --name \"$CLUSTER_NAME\"" + info "Switching kubectl context to kind-$CLUSTER_NAME..." + kubectl config use-context "kind-$CLUSTER_NAME" &>/dev/null && success "Context set." || warn "Could not switch context." + exit 0 +fi + +# --------------------------------------------------------------------------- +# Create cluster +# --------------------------------------------------------------------------- +info "Creating Kind cluster '$CLUSTER_NAME' (provider: $CONTAINER_CMD)..." 
+ +KIND_EXPERIMENTAL_PROVIDER="$CONTAINER_CMD" kind create cluster \ + --name "$CLUSTER_NAME" \ + --config - <<'KINDCFG' +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + labels: + openclaw.dev/role: control-plane + # Uncomment to expose services on host ports: + # extraPortMappings: + # - containerPort: 30080 + # hostPort: 8080 + # protocol: TCP + # - containerPort: 30443 + # hostPort: 8443 + # protocol: TCP +KINDCFG + +success "Kind cluster '$CLUSTER_NAME' created." + +# --------------------------------------------------------------------------- +# Wait for readiness +# --------------------------------------------------------------------------- +info "Waiting for cluster to be ready..." +kubectl --context "kind-$CLUSTER_NAME" wait --for=condition=Ready nodes --all --timeout=120s >/dev/null +success "All nodes are Ready." + +# --------------------------------------------------------------------------- +# Summary +# --------------------------------------------------------------------------- +echo "" +echo "---------------------------------------------------------------" +echo " Kind cluster '$CLUSTER_NAME' is ready" +echo "---------------------------------------------------------------" +echo "" +echo " kubectl cluster-info --context kind-$CLUSTER_NAME" +echo "" +echo "" +echo " export _API_KEY=\"...\" && ./scripts/k8s/deploy.sh" +echo "" diff --git a/scripts/k8s/deploy.sh b/scripts/k8s/deploy.sh new file mode 100755 index 00000000000..abd62dedf58 --- /dev/null +++ b/scripts/k8s/deploy.sh @@ -0,0 +1,231 @@ +#!/usr/bin/env bash +# Deploy OpenClaw to Kubernetes. +# +# Secrets are generated in a temp directory and applied server-side. +# No secret material is ever written to the repo checkout. 
+# +# Usage: +# ./scripts/k8s/deploy.sh # Deploy (requires API key in env or secret already in cluster) +# ./scripts/k8s/deploy.sh --create-secret # Create or update the K8s Secret from env vars +# ./scripts/k8s/deploy.sh --show-token # Print the gateway token after deploy +# ./scripts/k8s/deploy.sh --delete # Tear down +# +# Environment: +# OPENCLAW_NAMESPACE Kubernetes namespace (default: openclaw) +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +MANIFESTS="$SCRIPT_DIR/manifests" +NS="${OPENCLAW_NAMESPACE:-openclaw}" + +# Check prerequisites +for cmd in kubectl openssl; do + command -v "$cmd" &>/dev/null || { echo "Missing: $cmd" >&2; exit 1; } +done +kubectl cluster-info &>/dev/null || { echo "Cannot connect to cluster. Check kubeconfig." >&2; exit 1; } + +# --------------------------------------------------------------------------- +# -h / --help +# --------------------------------------------------------------------------- +if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + cat <<'HELP' +Usage: ./scripts/k8s/deploy.sh [OPTION] + + (no args) Deploy OpenClaw (creates secret from env if needed) + --create-secret Create or update the K8s Secret from env vars without deploying + --show-token Print the gateway token after deploy or secret creation + --delete Delete the namespace and all resources + -h, --help Show this help + +Environment: + Export at least one provider API key: + ANTHROPIC_API_KEY, GEMINI_API_KEY, OPENAI_API_KEY, OPENROUTER_API_KEY + + OPENCLAW_NAMESPACE Kubernetes namespace (default: openclaw) +HELP + exit 0 +fi + +SHOW_TOKEN=false +MODE="deploy" + +while [[ $# -gt 0 ]]; do + case "$1" in + --create-secret) + MODE="create-secret" + ;; + --delete) + MODE="delete" + ;; + --show-token) + SHOW_TOKEN=true + ;; + *) + echo "Unknown option: $1" >&2 + echo "Run ./scripts/k8s/deploy.sh --help for usage." 
>&2 + exit 1 + ;; + esac + shift +done + +# --------------------------------------------------------------------------- +# --delete +# --------------------------------------------------------------------------- +if [[ "$MODE" == "delete" ]]; then + echo "Deleting namespace '$NS' and all resources..." + kubectl delete namespace "$NS" --ignore-not-found + echo "Done." + exit 0 +fi + +# --------------------------------------------------------------------------- +# Create and apply Secret to the cluster +# --------------------------------------------------------------------------- +_apply_secret() { + local TMP_DIR + local EXISTING_SECRET=false + local EXISTING_TOKEN="" + local ANTHROPIC_VALUE="" + local OPENAI_VALUE="" + local GEMINI_VALUE="" + local OPENROUTER_VALUE="" + local TOKEN + local SECRET_MANIFEST + TMP_DIR="$(mktemp -d)" + chmod 700 "$TMP_DIR" + trap 'rm -rf "$TMP_DIR"' EXIT + + if kubectl get secret openclaw-secrets -n "$NS" &>/dev/null; then + EXISTING_SECRET=true + EXISTING_TOKEN="$(kubectl get secret openclaw-secrets -n "$NS" -o jsonpath='{.data.OPENCLAW_GATEWAY_TOKEN}' | base64 -d)" + ANTHROPIC_VALUE="$(kubectl get secret openclaw-secrets -n "$NS" -o jsonpath='{.data.ANTHROPIC_API_KEY}' 2>/dev/null | base64 -d)" + OPENAI_VALUE="$(kubectl get secret openclaw-secrets -n "$NS" -o jsonpath='{.data.OPENAI_API_KEY}' 2>/dev/null | base64 -d)" + GEMINI_VALUE="$(kubectl get secret openclaw-secrets -n "$NS" -o jsonpath='{.data.GEMINI_API_KEY}' 2>/dev/null | base64 -d)" + OPENROUTER_VALUE="$(kubectl get secret openclaw-secrets -n "$NS" -o jsonpath='{.data.OPENROUTER_API_KEY}' 2>/dev/null | base64 -d)" + fi + + TOKEN="${EXISTING_TOKEN:-$(openssl rand -hex 32)}" + ANTHROPIC_VALUE="${ANTHROPIC_API_KEY:-$ANTHROPIC_VALUE}" + OPENAI_VALUE="${OPENAI_API_KEY:-$OPENAI_VALUE}" + GEMINI_VALUE="${GEMINI_API_KEY:-$GEMINI_VALUE}" + OPENROUTER_VALUE="${OPENROUTER_API_KEY:-$OPENROUTER_VALUE}" + SECRET_MANIFEST="$TMP_DIR/secrets.yaml" + + # Write secret material to temp files so 
kubectl handles encoding safely. + printf '%s' "$TOKEN" > "$TMP_DIR/OPENCLAW_GATEWAY_TOKEN" + printf '%s' "$ANTHROPIC_VALUE" > "$TMP_DIR/ANTHROPIC_API_KEY" + printf '%s' "$OPENAI_VALUE" > "$TMP_DIR/OPENAI_API_KEY" + printf '%s' "$GEMINI_VALUE" > "$TMP_DIR/GEMINI_API_KEY" + printf '%s' "$OPENROUTER_VALUE" > "$TMP_DIR/OPENROUTER_API_KEY" + chmod 600 \ + "$TMP_DIR/OPENCLAW_GATEWAY_TOKEN" \ + "$TMP_DIR/ANTHROPIC_API_KEY" \ + "$TMP_DIR/OPENAI_API_KEY" \ + "$TMP_DIR/GEMINI_API_KEY" \ + "$TMP_DIR/OPENROUTER_API_KEY" + + kubectl create secret generic openclaw-secrets \ + -n "$NS" \ + --from-file=OPENCLAW_GATEWAY_TOKEN="$TMP_DIR/OPENCLAW_GATEWAY_TOKEN" \ + --from-file=ANTHROPIC_API_KEY="$TMP_DIR/ANTHROPIC_API_KEY" \ + --from-file=OPENAI_API_KEY="$TMP_DIR/OPENAI_API_KEY" \ + --from-file=GEMINI_API_KEY="$TMP_DIR/GEMINI_API_KEY" \ + --from-file=OPENROUTER_API_KEY="$TMP_DIR/OPENROUTER_API_KEY" \ + --dry-run=client \ + -o yaml > "$SECRET_MANIFEST" + chmod 600 "$SECRET_MANIFEST" + + kubectl create namespace "$NS" --dry-run=client -o yaml | kubectl apply -f - >/dev/null + kubectl apply --server-side --field-manager=openclaw -f "$SECRET_MANIFEST" >/dev/null + # Clean up any annotation left by older client-side apply runs. + kubectl annotate secret openclaw-secrets -n "$NS" kubectl.kubernetes.io/last-applied-configuration- >/dev/null 2>&1 || true + rm -rf "$TMP_DIR" + trap - EXIT + + if $EXISTING_SECRET; then + echo "Secret updated in namespace '$NS'. Existing gateway token preserved." + else + echo "Secret created in namespace '$NS'." + fi + + if $SHOW_TOKEN; then + echo "Gateway token: $TOKEN" + else + echo "Gateway token stored in Secret only." 
+ echo "Retrieve it with:" + echo " kubectl get secret openclaw-secrets -n $NS -o jsonpath='{.data.OPENCLAW_GATEWAY_TOKEN}' | base64 -d && echo" + fi +} + +# --------------------------------------------------------------------------- +# --create-secret +# --------------------------------------------------------------------------- +if [[ "$MODE" == "create-secret" ]]; then + HAS_KEY=false + for key in ANTHROPIC_API_KEY OPENAI_API_KEY GEMINI_API_KEY OPENROUTER_API_KEY; do + if [[ -n "${!key:-}" ]]; then + HAS_KEY=true + echo " Found $key in environment" + fi + done + + if ! $HAS_KEY; then + echo "No API keys found in environment. Export at least one and re-run:" + echo " export _API_KEY=\"...\" (ANTHROPIC, GEMINI, OPENAI, or OPENROUTER)" + echo " ./scripts/k8s/deploy.sh --create-secret" + exit 1 + fi + + _apply_secret + echo "" + echo "Now run:" + echo " ./scripts/k8s/deploy.sh" + exit 0 +fi + +# --------------------------------------------------------------------------- +# Check that the secret exists in the cluster +# --------------------------------------------------------------------------- +if ! kubectl get secret openclaw-secrets -n "$NS" &>/dev/null; then + HAS_KEY=false + for key in ANTHROPIC_API_KEY OPENAI_API_KEY GEMINI_API_KEY OPENROUTER_API_KEY; do + [[ -n "${!key:-}" ]] && HAS_KEY=true + done + + if $HAS_KEY; then + echo "Creating secret from environment..." + _apply_secret + echo "" + else + echo "No secret found and no API keys in environment." + echo "" + echo "Export at least one provider API key and re-run:" + echo " export _API_KEY=\"...\" (ANTHROPIC, GEMINI, OPENAI, or OPENROUTER)" + echo " ./scripts/k8s/deploy.sh" + exit 1 + fi +fi + +# --------------------------------------------------------------------------- +# Deploy +# --------------------------------------------------------------------------- +echo "Deploying to namespace '$NS'..." 
+kubectl create namespace "$NS" --dry-run=client -o yaml | kubectl apply -f - >/dev/null +kubectl apply -k "$MANIFESTS" -n "$NS" +kubectl rollout restart deployment/openclaw -n "$NS" 2>/dev/null || true +echo "" +echo "Waiting for rollout..." +kubectl rollout status deployment/openclaw -n "$NS" --timeout=300s +echo "" +echo "Done. Access the gateway:" +echo " kubectl port-forward svc/openclaw 18789:18789 -n $NS" +echo " open http://localhost:18789" +echo "" +if $SHOW_TOKEN; then + echo "Gateway token (paste into Control UI):" + echo " $(kubectl get secret openclaw-secrets -n "$NS" -o jsonpath='{.data.OPENCLAW_GATEWAY_TOKEN}' | base64 -d)" +echo "" +fi +echo "Retrieve the gateway token with:" +echo " kubectl get secret openclaw-secrets -n $NS -o jsonpath='{.data.OPENCLAW_GATEWAY_TOKEN}' | base64 -d && echo" diff --git a/scripts/k8s/manifests/configmap.yaml b/scripts/k8s/manifests/configmap.yaml new file mode 100644 index 00000000000..2334b0370c8 --- /dev/null +++ b/scripts/k8s/manifests/configmap.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: openclaw-config + labels: + app: openclaw +data: + openclaw.json: | + { + "gateway": { + "mode": "local", + "bind": "loopback", + "port": 18789, + "auth": { + "mode": "token" + }, + "controlUi": { + "enabled": true + } + }, + "agents": { + "defaults": { + "workspace": "~/.openclaw/workspace" + }, + "list": [ + { + "id": "default", + "name": "OpenClaw Assistant", + "workspace": "~/.openclaw/workspace" + } + ] + }, + "cron": { "enabled": false } + } + AGENTS.md: | + # OpenClaw Assistant + + You are a helpful AI assistant running in Kubernetes. 
diff --git a/scripts/k8s/manifests/deployment.yaml b/scripts/k8s/manifests/deployment.yaml new file mode 100644 index 00000000000..f87c266930b --- /dev/null +++ b/scripts/k8s/manifests/deployment.yaml @@ -0,0 +1,146 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: openclaw + labels: + app: openclaw +spec: + replicas: 1 + selector: + matchLabels: + app: openclaw + strategy: + type: Recreate + template: + metadata: + labels: + app: openclaw + spec: + automountServiceAccountToken: false + securityContext: + fsGroup: 1000 + seccompProfile: + type: RuntimeDefault + initContainers: + - name: init-config + image: busybox:1.37 + imagePullPolicy: IfNotPresent + command: + - sh + - -c + - | + cp /config/openclaw.json /home/node/.openclaw/openclaw.json + mkdir -p /home/node/.openclaw/workspace + cp /config/AGENTS.md /home/node/.openclaw/workspace/AGENTS.md + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + resources: + requests: + memory: 32Mi + cpu: 50m + limits: + memory: 64Mi + cpu: 100m + volumeMounts: + - name: openclaw-home + mountPath: /home/node/.openclaw + - name: config + mountPath: /config + containers: + - name: gateway + image: ghcr.io/openclaw/openclaw:slim + imagePullPolicy: IfNotPresent + command: + - node + - /app/dist/index.js + - gateway + - run + ports: + - name: gateway + containerPort: 18789 + protocol: TCP + env: + - name: HOME + value: /home/node + - name: OPENCLAW_CONFIG_DIR + value: /home/node/.openclaw + - name: NODE_ENV + value: production + - name: OPENCLAW_GATEWAY_TOKEN + valueFrom: + secretKeyRef: + name: openclaw-secrets + key: OPENCLAW_GATEWAY_TOKEN + - name: ANTHROPIC_API_KEY + valueFrom: + secretKeyRef: + name: openclaw-secrets + key: ANTHROPIC_API_KEY + optional: true + - name: OPENAI_API_KEY + valueFrom: + secretKeyRef: + name: openclaw-secrets + key: OPENAI_API_KEY + optional: true + - name: GEMINI_API_KEY + valueFrom: + secretKeyRef: + name: openclaw-secrets + key: GEMINI_API_KEY + optional: true + - name: 
OPENROUTER_API_KEY + valueFrom: + secretKeyRef: + name: openclaw-secrets + key: OPENROUTER_API_KEY + optional: true + resources: + requests: + memory: 512Mi + cpu: 250m + limits: + memory: 2Gi + cpu: "1" + livenessProbe: + exec: + command: + - node + - -e + - "require('http').get('http://127.0.0.1:18789/healthz', r => process.exit(r.statusCode < 400 ? 0 : 1)).on('error', () => process.exit(1))" + initialDelaySeconds: 60 + periodSeconds: 30 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - node + - -e + - "require('http').get('http://127.0.0.1:18789/readyz', r => process.exit(r.statusCode < 400 ? 0 : 1)).on('error', () => process.exit(1))" + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 5 + volumeMounts: + - name: openclaw-home + mountPath: /home/node/.openclaw + - name: tmp-volume + mountPath: /tmp + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + volumes: + - name: openclaw-home + persistentVolumeClaim: + claimName: openclaw-home-pvc + - name: config + configMap: + name: openclaw-config + - name: tmp-volume + emptyDir: {} diff --git a/scripts/k8s/manifests/kustomization.yaml b/scripts/k8s/manifests/kustomization.yaml new file mode 100644 index 00000000000..7d1fa13e10c --- /dev/null +++ b/scripts/k8s/manifests/kustomization.yaml @@ -0,0 +1,7 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - pvc.yaml + - configmap.yaml + - deployment.yaml + - service.yaml diff --git a/scripts/k8s/manifests/pvc.yaml b/scripts/k8s/manifests/pvc.yaml new file mode 100644 index 00000000000..e834e788a0e --- /dev/null +++ b/scripts/k8s/manifests/pvc.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: openclaw-home-pvc + labels: + app: openclaw +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi diff --git a/scripts/k8s/manifests/service.yaml 
b/scripts/k8s/manifests/service.yaml new file mode 100644 index 00000000000..41df6219782 --- /dev/null +++ b/scripts/k8s/manifests/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: openclaw + labels: + app: openclaw +spec: + type: ClusterIP + selector: + app: openclaw + ports: + - name: gateway + port: 18789 + targetPort: 18789 + protocol: TCP diff --git a/scripts/openclaw-npm-release-check.ts b/scripts/openclaw-npm-release-check.ts new file mode 100644 index 00000000000..fcd2dc8e7e1 --- /dev/null +++ b/scripts/openclaw-npm-release-check.ts @@ -0,0 +1,263 @@ +#!/usr/bin/env -S node --import tsx + +import { execFileSync } from "node:child_process"; +import { readFileSync } from "node:fs"; +import { pathToFileURL } from "node:url"; + +type PackageJson = { + name?: string; + version?: string; + description?: string; + license?: string; + repository?: { url?: string } | string; + bin?: Record; + peerDependencies?: Record; + peerDependenciesMeta?: Record; +}; + +export type ParsedReleaseVersion = { + version: string; + channel: "stable" | "beta"; + year: number; + month: number; + day: number; + betaNumber?: number; + date: Date; +}; + +const STABLE_VERSION_REGEX = /^(?\d{4})\.(?[1-9]\d?)\.(?[1-9]\d?)$/; +const BETA_VERSION_REGEX = + /^(?\d{4})\.(?[1-9]\d?)\.(?[1-9]\d?)-beta\.(?[1-9]\d*)$/; +const EXPECTED_REPOSITORY_URL = "https://github.com/openclaw/openclaw"; +const MAX_CALVER_DISTANCE_DAYS = 2; + +function normalizeRepoUrl(value: unknown): string { + if (typeof value !== "string") { + return ""; + } + + return value + .trim() + .replace(/^git\+/, "") + .replace(/\.git$/i, "") + .replace(/\/+$/, ""); +} + +function parseDateParts( + version: string, + groups: Record, + channel: "stable" | "beta", +): ParsedReleaseVersion | null { + const year = Number.parseInt(groups.year ?? "", 10); + const month = Number.parseInt(groups.month ?? "", 10); + const day = Number.parseInt(groups.day ?? "", 10); + const betaNumber = channel === "beta" ? 
Number.parseInt(groups.beta ?? "", 10) : undefined; + + if ( + !Number.isInteger(year) || + !Number.isInteger(month) || + !Number.isInteger(day) || + month < 1 || + month > 12 || + day < 1 || + day > 31 + ) { + return null; + } + if (channel === "beta" && (!Number.isInteger(betaNumber) || (betaNumber ?? 0) < 1)) { + return null; + } + + const date = new Date(Date.UTC(year, month - 1, day)); + if ( + date.getUTCFullYear() !== year || + date.getUTCMonth() !== month - 1 || + date.getUTCDate() !== day + ) { + return null; + } + + return { + version, + channel, + year, + month, + day, + betaNumber, + date, + }; +} + +export function parseReleaseVersion(version: string): ParsedReleaseVersion | null { + const trimmed = version.trim(); + if (!trimmed) { + return null; + } + + const stableMatch = STABLE_VERSION_REGEX.exec(trimmed); + if (stableMatch?.groups) { + return parseDateParts(trimmed, stableMatch.groups, "stable"); + } + + const betaMatch = BETA_VERSION_REGEX.exec(trimmed); + if (betaMatch?.groups) { + return parseDateParts(trimmed, betaMatch.groups, "beta"); + } + + return null; +} + +function startOfUtcDay(date: Date): number { + return Date.UTC(date.getUTCFullYear(), date.getUTCMonth(), date.getUTCDate()); +} + +export function utcCalendarDayDistance(left: Date, right: Date): number { + return Math.round(Math.abs(startOfUtcDay(left) - startOfUtcDay(right)) / 86_400_000); +} + +export function collectReleasePackageMetadataErrors(pkg: PackageJson): string[] { + const actualRepositoryUrl = normalizeRepoUrl( + typeof pkg.repository === "string" ? pkg.repository : pkg.repository?.url, + ); + const errors: string[] = []; + + if (pkg.name !== "openclaw") { + errors.push(`package.json name must be "openclaw"; found "${pkg.name ?? ""}".`); + } + if (!pkg.description?.trim()) { + errors.push("package.json description must be non-empty."); + } + if (pkg.license !== "MIT") { + errors.push(`package.json license must be "MIT"; found "${pkg.license ?? 
""}".`); + } + if (actualRepositoryUrl !== EXPECTED_REPOSITORY_URL) { + errors.push( + `package.json repository.url must resolve to ${EXPECTED_REPOSITORY_URL}; found ${ + actualRepositoryUrl || "" + }.`, + ); + } + if (pkg.bin?.openclaw !== "openclaw.mjs") { + errors.push( + `package.json bin.openclaw must be "openclaw.mjs"; found "${pkg.bin?.openclaw ?? ""}".`, + ); + } + if (pkg.peerDependencies?.["node-llama-cpp"] !== "3.16.2") { + errors.push( + `package.json peerDependencies["node-llama-cpp"] must be "3.16.2"; found "${ + pkg.peerDependencies?.["node-llama-cpp"] ?? "" + }".`, + ); + } + if (pkg.peerDependenciesMeta?.["node-llama-cpp"]?.optional !== true) { + errors.push('package.json peerDependenciesMeta["node-llama-cpp"].optional must be true.'); + } + + return errors; +} + +export function collectReleaseTagErrors(params: { + packageVersion: string; + releaseTag: string; + releaseSha?: string; + releaseMainRef?: string; + now?: Date; +}): string[] { + const errors: string[] = []; + const releaseTag = params.releaseTag.trim(); + const packageVersion = params.packageVersion.trim(); + const now = params.now ?? new Date(); + + const parsedVersion = parseReleaseVersion(packageVersion); + if (parsedVersion === null) { + errors.push( + `package.json version must match YYYY.M.D or YYYY.M.D-beta.N; found "${packageVersion || ""}".`, + ); + } + + if (!releaseTag.startsWith("v")) { + errors.push(`Release tag must start with "v"; found "${releaseTag || ""}".`); + } + + const tagVersion = releaseTag.startsWith("v") ? releaseTag.slice(1) : releaseTag; + const parsedTag = parseReleaseVersion(tagVersion); + if (parsedTag === null) { + errors.push( + `Release tag must match vYYYY.M.D or vYYYY.M.D-beta.N; found "${releaseTag || ""}".`, + ); + } + + const expectedTag = packageVersion ? 
`v${packageVersion}` : ""; + if (releaseTag !== expectedTag) { + errors.push( + `Release tag ${releaseTag || ""} does not match package.json version ${ + packageVersion || "" + }; expected ${expectedTag || ""}.`, + ); + } + + if (parsedVersion !== null) { + const dayDistance = utcCalendarDayDistance(parsedVersion.date, now); + if (dayDistance > MAX_CALVER_DISTANCE_DAYS) { + const nowLabel = now.toISOString().slice(0, 10); + const versionDate = parsedVersion.date.toISOString().slice(0, 10); + errors.push( + `Release version ${packageVersion} is ${dayDistance} days away from current UTC date ${nowLabel}; release CalVer date ${versionDate} must be within ${MAX_CALVER_DISTANCE_DAYS} days.`, + ); + } + } + + if (params.releaseSha?.trim() && params.releaseMainRef?.trim()) { + try { + execFileSync( + "git", + ["merge-base", "--is-ancestor", params.releaseSha, params.releaseMainRef], + { stdio: "ignore" }, + ); + } catch { + errors.push( + `Tagged commit ${params.releaseSha} is not contained in ${params.releaseMainRef}.`, + ); + } + } + + return errors; +} + +function loadPackageJson(): PackageJson { + return JSON.parse(readFileSync("package.json", "utf8")) as PackageJson; +} + +function main(): number { + const pkg = loadPackageJson(); + const metadataErrors = collectReleasePackageMetadataErrors(pkg); + const tagErrors = collectReleaseTagErrors({ + packageVersion: pkg.version ?? "", + releaseTag: process.env.RELEASE_TAG ?? "", + releaseSha: process.env.RELEASE_SHA, + releaseMainRef: process.env.RELEASE_MAIN_REF, + }); + const errors = [...metadataErrors, ...tagErrors]; + + if (errors.length > 0) { + for (const error of errors) { + console.error(`openclaw-npm-release-check: ${error}`); + } + return 1; + } + + const parsedVersion = parseReleaseVersion(pkg.version ?? ""); + const channel = parsedVersion?.channel ?? "unknown"; + const dayDistance = + parsedVersion === null + ? 
"unknown" + : String(utcCalendarDayDistance(parsedVersion.date, new Date())); + console.log( + `openclaw-npm-release-check: validated ${channel} release ${pkg.version} (${dayDistance} day UTC delta).`, + ); + return 0; +} + +if (import.meta.url === pathToFileURL(process.argv[1] ?? "").href) { + process.exit(main()); +} diff --git a/scripts/release-check.ts b/scripts/release-check.ts index fe2a9a1ea9c..6f621cef2d5 100755 --- a/scripts/release-check.ts +++ b/scripts/release-check.ts @@ -218,6 +218,16 @@ function runPackDry(): PackResult[] { return JSON.parse(raw) as PackResult[]; } +export function collectForbiddenPackPaths(paths: Iterable): string[] { + return [...paths] + .filter( + (path) => + forbiddenPrefixes.some((prefix) => path.startsWith(prefix)) || + /(^|\/)node_modules\//.test(path), + ) + .toSorted(); +} + function checkPluginVersions() { const rootPackagePath = resolve("package.json"); const rootPackage = JSON.parse(readFileSync(rootPackagePath, "utf8")) as PackageJson; @@ -422,9 +432,7 @@ function main() { return paths.has(group) ? [] : [group]; }) .toSorted(); - const forbidden = [...paths].filter((path) => - forbiddenPrefixes.some((prefix) => path.startsWith(prefix)), - ); + const forbidden = collectForbiddenPackPaths(paths); if (missing.length > 0 || forbidden.length > 0) { if (missing.length > 0) { diff --git a/scripts/test-live-gateway-models-docker.sh b/scripts/test-live-gateway-models-docker.sh index 92ddb905ed5..3998110efa6 100755 --- a/scripts/test-live-gateway-models-docker.sh +++ b/scripts/test-live-gateway-models-docker.sh @@ -3,6 +3,7 @@ set -euo pipefail ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" IMAGE_NAME="${OPENCLAW_IMAGE:-${CLAWDBOT_IMAGE:-openclaw:local}}" +LIVE_IMAGE_NAME="${OPENCLAW_LIVE_IMAGE:-${CLAWDBOT_LIVE_IMAGE:-${IMAGE_NAME}-live}}" CONFIG_DIR="${OPENCLAW_CONFIG_DIR:-${CLAWDBOT_CONFIG_DIR:-$HOME/.openclaw}}" WORKSPACE_DIR="${OPENCLAW_WORKSPACE_DIR:-${CLAWDBOT_WORKSPACE_DIR:-$HOME/.openclaw/workspace}}" PROFILE_FILE="${OPENCLAW_PROFILE_FILE:-${CLAWDBOT_PROFILE_FILE:-$HOME/.profile}}" @@ -33,8 +34,8 @@ cd "$tmp_dir" pnpm test:live EOF -echo "==> Build image: $IMAGE_NAME" -docker build -t "$IMAGE_NAME" -f "$ROOT_DIR/Dockerfile" "$ROOT_DIR" +echo "==> Build live-test image: $LIVE_IMAGE_NAME (target=build)" +docker build --target build -t "$LIVE_IMAGE_NAME" -f "$ROOT_DIR/Dockerfile" "$ROOT_DIR" echo "==> Run gateway live model tests (profile keys)" docker run --rm -t \ @@ -51,5 +52,5 @@ docker run --rm -t \ -v "$CONFIG_DIR":/home/node/.openclaw \ -v "$WORKSPACE_DIR":/home/node/.openclaw/workspace \ "${PROFILE_MOUNT[@]}" \ - "$IMAGE_NAME" \ + "$LIVE_IMAGE_NAME" \ -lc "$LIVE_TEST_CMD" diff --git a/scripts/test-live-models-docker.sh b/scripts/test-live-models-docker.sh index 5e3e1d0a311..cca4202710d 100755 --- a/scripts/test-live-models-docker.sh +++ b/scripts/test-live-models-docker.sh @@ -3,6 +3,7 @@ set -euo pipefail ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" IMAGE_NAME="${OPENCLAW_IMAGE:-${CLAWDBOT_IMAGE:-openclaw:local}}" +LIVE_IMAGE_NAME="${OPENCLAW_LIVE_IMAGE:-${CLAWDBOT_LIVE_IMAGE:-${IMAGE_NAME}-live}}" CONFIG_DIR="${OPENCLAW_CONFIG_DIR:-${CLAWDBOT_CONFIG_DIR:-$HOME/.openclaw}}" WORKSPACE_DIR="${OPENCLAW_WORKSPACE_DIR:-${CLAWDBOT_WORKSPACE_DIR:-$HOME/.openclaw/workspace}}" PROFILE_FILE="${OPENCLAW_PROFILE_FILE:-${CLAWDBOT_PROFILE_FILE:-$HOME/.profile}}" @@ -33,8 +34,8 @@ cd "$tmp_dir" pnpm test:live EOF -echo "==> Build image: $IMAGE_NAME" -docker build -t "$IMAGE_NAME" -f "$ROOT_DIR/Dockerfile" "$ROOT_DIR" +echo "==> Build live-test image: $LIVE_IMAGE_NAME (target=build)" +docker build --target build -t "$LIVE_IMAGE_NAME" -f "$ROOT_DIR/Dockerfile" "$ROOT_DIR" echo "==> Run live model tests (profile keys)" docker run --rm -t \ @@ -52,5 +53,5 @@ docker run --rm -t \ -v "$CONFIG_DIR":/home/node/.openclaw \ -v "$WORKSPACE_DIR":/home/node/.openclaw/workspace \ "${PROFILE_MOUNT[@]}" \ - "$IMAGE_NAME" \ + "$LIVE_IMAGE_NAME" \ -lc "$LIVE_TEST_CMD" diff --git a/scripts/test-parallel.mjs b/scripts/test-parallel.mjs index ca7636394bb..021ff1f905e 100644 --- a/scripts/test-parallel.mjs +++ b/scripts/test-parallel.mjs @@ -1,6 +1,7 @@ import { spawn } from "node:child_process"; import fs from "node:fs"; import os from "node:os"; +import path from "node:path"; // On Windows, `.cmd` launchers can fail with `spawn EINVAL` when invoked without a shell // (especially under GitHub Actions + Git Bash). Use `shell: true` and let the shell resolve pnpm. @@ -104,11 +105,11 @@ const hostMemoryGiB = Math.floor(os.totalmem() / 1024 ** 3); const highMemLocalHost = !isCI && hostMemoryGiB >= 96; const lowMemLocalHost = !isCI && hostMemoryGiB < 64; const nodeMajor = Number.parseInt(process.versions.node.split(".")[0] ?? "", 10); -// vmForks is a big win for transform/import heavy suites, but Node 24+ -// regressed with Vitest's vm runtime in this repo, and low-memory local hosts -// are more likely to hit per-worker V8 heap ceilings. 
Keep it opt-out via -// OPENCLAW_TEST_VM_FORKS=0, and let users force-enable with =1. -const supportsVmForks = Number.isFinite(nodeMajor) ? nodeMajor < 24 : true; +// vmForks is a big win for transform/import heavy suites. Node 24 is stable again +// for the default unit-fast lane after moving the known flaky files to fork-only +// isolation, but Node 25+ still falls back to process forks until re-validated. +// Keep it opt-out via OPENCLAW_TEST_VM_FORKS=0, and let users force-enable with =1. +const supportsVmForks = Number.isFinite(nodeMajor) ? nodeMajor <= 24 : true; const useVmForks = process.env.OPENCLAW_TEST_VM_FORKS === "1" || (process.env.OPENCLAW_TEST_VM_FORKS !== "0" && !isWindows && supportsVmForks && !lowMemLocalHost); @@ -205,6 +206,45 @@ const shardIndexOverride = (() => { const parsed = Number.parseInt(process.env.OPENCLAW_TEST_SHARD_INDEX ?? "", 10); return Number.isFinite(parsed) && parsed > 0 ? parsed : null; })(); +const OPTION_TAKES_VALUE = new Set([ + "-t", + "-c", + "-r", + "--testNamePattern", + "--config", + "--root", + "--dir", + "--reporter", + "--outputFile", + "--pool", + "--execArgv", + "--vmMemoryLimit", + "--maxWorkers", + "--environment", + "--shard", + "--changed", + "--sequence", + "--inspect", + "--inspectBrk", + "--testTimeout", + "--hookTimeout", + "--bail", + "--retry", + "--diff", + "--exclude", + "--project", + "--slowTestThreshold", + "--teardownTimeout", + "--attachmentsDir", + "--mode", + "--api", + "--browser", + "--maxConcurrency", + "--mergeReports", + "--configLoader", + "--experimental", +]); +const SINGLE_RUN_ONLY_FLAGS = new Set(["--coverage", "--outputFile", "--mergeReports"]); if (shardIndexOverride !== null && shardCount <= 1) { console.error( @@ -229,6 +269,219 @@ const silentArgs = const rawPassthroughArgs = process.argv.slice(2); const passthroughArgs = rawPassthroughArgs[0] === "--" ? 
rawPassthroughArgs.slice(1) : rawPassthroughArgs; +const parsePassthroughArgs = (args) => { + const fileFilters = []; + const optionArgs = []; + let consumeNextAsOptionValue = false; + + for (const arg of args) { + if (consumeNextAsOptionValue) { + optionArgs.push(arg); + consumeNextAsOptionValue = false; + continue; + } + if (arg === "--") { + optionArgs.push(arg); + continue; + } + if (arg.startsWith("-")) { + optionArgs.push(arg); + consumeNextAsOptionValue = !arg.includes("=") && OPTION_TAKES_VALUE.has(arg); + continue; + } + fileFilters.push(arg); + } + + return { fileFilters, optionArgs }; +}; +const { fileFilters: passthroughFileFilters, optionArgs: passthroughOptionArgs } = + parsePassthroughArgs(passthroughArgs); +const passthroughRequiresSingleRun = passthroughOptionArgs.some((arg) => { + if (!arg.startsWith("-")) { + return false; + } + const [flag] = arg.split("=", 1); + return SINGLE_RUN_ONLY_FLAGS.has(flag); +}); +const channelPrefixes = ["src/telegram/", "src/discord/", "src/web/", "src/browser/", "src/line/"]; +const baseConfigPrefixes = ["src/agents/", "src/auto-reply/", "src/commands/", "test/", "ui/"]; +const normalizeRepoPath = (value) => value.split(path.sep).join("/"); +const walkTestFiles = (rootDir) => { + if (!fs.existsSync(rootDir)) { + return []; + } + const entries = fs.readdirSync(rootDir, { withFileTypes: true }); + const files = []; + for (const entry of entries) { + const fullPath = path.join(rootDir, entry.name); + if (entry.isDirectory()) { + files.push(...walkTestFiles(fullPath)); + continue; + } + if (!entry.isFile()) { + continue; + } + if ( + fullPath.endsWith(".test.ts") || + fullPath.endsWith(".live.test.ts") || + fullPath.endsWith(".e2e.test.ts") + ) { + files.push(normalizeRepoPath(fullPath)); + } + } + return files; +}; +const allKnownTestFiles = [ + ...new Set([ + ...walkTestFiles("src"), + ...walkTestFiles("extensions"), + ...walkTestFiles("test"), + ...walkTestFiles(path.join("ui", "src", "ui")), + ]), +]; +const 
inferTarget = (fileFilter) => { + const isolated = unitIsolatedFiles.includes(fileFilter); + if (fileFilter.endsWith(".live.test.ts")) { + return { owner: "live", isolated }; + } + if (fileFilter.endsWith(".e2e.test.ts")) { + return { owner: "e2e", isolated }; + } + if (fileFilter.startsWith("extensions/")) { + return { owner: "extensions", isolated }; + } + if (fileFilter.startsWith("src/gateway/")) { + return { owner: "gateway", isolated }; + } + if (channelPrefixes.some((prefix) => fileFilter.startsWith(prefix))) { + return { owner: "channels", isolated }; + } + if (baseConfigPrefixes.some((prefix) => fileFilter.startsWith(prefix))) { + return { owner: "base", isolated }; + } + if (fileFilter.startsWith("src/")) { + return { owner: "unit", isolated }; + } + return { owner: "base", isolated }; +}; +const resolveFilterMatches = (fileFilter) => { + const normalizedFilter = normalizeRepoPath(fileFilter); + if (fs.existsSync(fileFilter)) { + const stats = fs.statSync(fileFilter); + if (stats.isFile()) { + return [normalizedFilter]; + } + if (stats.isDirectory()) { + const prefix = normalizedFilter.endsWith("/") ? normalizedFilter : `${normalizedFilter}/`; + return allKnownTestFiles.filter((file) => file.startsWith(prefix)); + } + } + if (/[*?[\]{}]/.test(normalizedFilter)) { + return allKnownTestFiles.filter((file) => path.matchesGlob(file, normalizedFilter)); + } + return allKnownTestFiles.filter((file) => file.includes(normalizedFilter)); +}; +const createTargetedEntry = (owner, isolated, filters) => { + const name = isolated ? `${owner}-isolated` : owner; + const forceForks = isolated; + if (owner === "unit") { + return { + name, + args: [ + "vitest", + "run", + "--config", + "vitest.unit.config.ts", + `--pool=${forceForks ? "forks" : useVmForks ? "vmForks" : "forks"}`, + ...(disableIsolation ? 
["--isolate=false"] : []), + ...filters, + ], + }; + } + if (owner === "extensions") { + return { + name, + args: [ + "vitest", + "run", + "--config", + "vitest.extensions.config.ts", + ...(forceForks ? ["--pool=forks"] : useVmForks ? ["--pool=vmForks"] : []), + ...filters, + ], + }; + } + if (owner === "gateway") { + return { + name, + args: ["vitest", "run", "--config", "vitest.gateway.config.ts", "--pool=forks", ...filters], + }; + } + if (owner === "channels") { + return { + name, + args: [ + "vitest", + "run", + "--config", + "vitest.channels.config.ts", + ...(forceForks ? ["--pool=forks"] : []), + ...filters, + ], + }; + } + if (owner === "live") { + return { + name, + args: ["vitest", "run", "--config", "vitest.live.config.ts", ...filters], + }; + } + if (owner === "e2e") { + return { + name, + args: ["vitest", "run", "--config", "vitest.e2e.config.ts", ...filters], + }; + } + return { + name, + args: [ + "vitest", + "run", + "--config", + "vitest.config.ts", + ...(forceForks ? ["--pool=forks"] : []), + ...filters, + ], + }; +}; +const targetedEntries = (() => { + if (passthroughFileFilters.length === 0) { + return []; + } + const groups = passthroughFileFilters.reduce((acc, fileFilter) => { + const matchedFiles = resolveFilterMatches(fileFilter); + if (matchedFiles.length === 0) { + const target = inferTarget(normalizeRepoPath(fileFilter)); + const key = `${target.owner}:${target.isolated ? "isolated" : "default"}`; + const files = acc.get(key) ?? []; + files.push(normalizeRepoPath(fileFilter)); + acc.set(key, files); + return acc; + } + for (const matchedFile of matchedFiles) { + const target = inferTarget(matchedFile); + const key = `${target.owner}:${target.isolated ? "isolated" : "default"}`; + const files = acc.get(key) ?? 
[]; + files.push(matchedFile); + acc.set(key, files); + } + return acc; + }, new Map()); + return Array.from(groups, ([key, filters]) => { + const [owner, mode] = key.split(":"); + return createTargetedEntry(owner, mode === "isolated", [...new Set(filters)]); + }); +})(); const topLevelParallelEnabled = testProfile !== "low" && testProfile !== "serial"; const overrideWorkers = Number.parseInt(process.env.OPENCLAW_TEST_WORKERS ?? "", 10); const resolvedOverride = @@ -311,7 +564,7 @@ const maxWorkersForRun = (name) => { if (isCI && isMacOS) { return 1; } - if (name === "unit-isolated") { + if (name === "unit-isolated" || name.endsWith("-isolated")) { return defaultWorkerBudget.unitIsolated; } if (name === "extensions") { @@ -397,16 +650,16 @@ const runOnce = (entry, extraArgs = []) => }); }); -const run = async (entry) => { +const run = async (entry, extraArgs = []) => { if (shardCount <= 1) { - return runOnce(entry); + return runOnce(entry, extraArgs); } if (shardIndexOverride !== null) { - return runOnce(entry, ["--shard", `${shardIndexOverride}/${shardCount}`]); + return runOnce(entry, ["--shard", `${shardIndexOverride}/${shardCount}`, ...extraArgs]); } for (let shardIndex = 1; shardIndex <= shardCount; shardIndex += 1) { // eslint-disable-next-line no-await-in-loop - const code = await runOnce(entry, ["--shard", `${shardIndex}/${shardCount}`]); + const code = await runOnce(entry, ["--shard", `${shardIndex}/${shardCount}`, ...extraArgs]); if (code !== 0) { return code; } @@ -414,15 +667,15 @@ const run = async (entry) => { return 0; }; -const runEntries = async (entries) => { +const runEntries = async (entries, extraArgs = []) => { if (topLevelParallelEnabled) { - const codes = await Promise.all(entries.map(run)); + const codes = await Promise.all(entries.map((entry) => run(entry, extraArgs))); return codes.find((code) => code !== 0); } for (const entry of entries) { // eslint-disable-next-line no-await-in-loop - const code = await run(entry); + const code = await 
run(entry, extraArgs); if (code !== 0) { return code; } @@ -440,57 +693,48 @@ const shutdown = (signal) => { process.on("SIGINT", () => shutdown("SIGINT")); process.on("SIGTERM", () => shutdown("SIGTERM")); -if (passthroughArgs.length > 0) { - const maxWorkers = maxWorkersForRun("unit"); - const args = maxWorkers - ? [ - "vitest", - "run", - "--maxWorkers", - String(maxWorkers), - ...silentArgs, - ...windowsCiArgs, - ...passthroughArgs, - ] - : ["vitest", "run", ...silentArgs, ...windowsCiArgs, ...passthroughArgs]; - const nodeOptions = process.env.NODE_OPTIONS ?? ""; - const nextNodeOptions = WARNING_SUPPRESSION_FLAGS.reduce( - (acc, flag) => (acc.includes(flag) ? acc : `${acc} ${flag}`.trim()), - nodeOptions, - ); - const code = await new Promise((resolve) => { - let child; - try { - child = spawn(pnpm, args, { - stdio: "inherit", - env: { ...process.env, NODE_OPTIONS: nextNodeOptions }, - shell: isWindows, - }); - } catch (err) { - console.error(`[test-parallel] spawn failed: ${String(err)}`); - resolve(1); - return; +if (targetedEntries.length > 0) { + if (passthroughRequiresSingleRun && targetedEntries.length > 1) { + console.error( + "[test-parallel] The provided Vitest args require a single run, but the selected test filters span multiple wrapper configs. Run one target/config at a time.", + ); + process.exit(2); + } + const targetedParallelRuns = keepGatewaySerial + ? targetedEntries.filter((entry) => entry.name !== "gateway") + : targetedEntries; + const targetedSerialRuns = keepGatewaySerial + ? 
targetedEntries.filter((entry) => entry.name === "gateway") + : []; + const failedTargetedParallel = await runEntries(targetedParallelRuns, passthroughOptionArgs); + if (failedTargetedParallel !== undefined) { + process.exit(failedTargetedParallel); + } + for (const entry of targetedSerialRuns) { + // eslint-disable-next-line no-await-in-loop + const code = await run(entry, passthroughOptionArgs); + if (code !== 0) { + process.exit(code); } - children.add(child); - child.on("error", (err) => { - console.error(`[test-parallel] child error: ${String(err)}`); - }); - child.on("exit", (exitCode, signal) => { - children.delete(child); - resolve(exitCode ?? (signal ? 1 : 0)); - }); - }); - process.exit(Number(code) || 0); + } + process.exit(0); } -const failedParallel = await runEntries(parallelRuns); +if (passthroughRequiresSingleRun && passthroughOptionArgs.length > 0) { + console.error( + "[test-parallel] The provided Vitest args require a single run. Use the dedicated npm script for that workflow (for example `pnpm test:coverage`) or target a single test file/filter.", + ); + process.exit(2); +} + +const failedParallel = await runEntries(parallelRuns, passthroughOptionArgs); if (failedParallel !== undefined) { process.exit(failedParallel); } for (const entry of serialRuns) { // eslint-disable-next-line no-await-in-loop - const code = await run(entry); + const code = await run(entry, passthroughOptionArgs); if (code !== 0) { process.exit(code); } diff --git a/skills/eightctl/SKILL.md b/skills/eightctl/SKILL.md index c3df81f628c..80a5f1f4bbb 100644 --- a/skills/eightctl/SKILL.md +++ b/skills/eightctl/SKILL.md @@ -6,7 +6,7 @@ metadata: { "openclaw": { - "emoji": "🎛️", + "emoji": "🛌", "requires": { "bins": ["eightctl"] }, "install": [ diff --git a/skills/gemini/SKILL.md b/skills/gemini/SKILL.md index 70850a4c522..f573afd6ba6 100644 --- a/skills/gemini/SKILL.md +++ b/skills/gemini/SKILL.md @@ -6,7 +6,7 @@ metadata: { "openclaw": { - "emoji": "♊️", + "emoji": "✨", 
"requires": { "bins": ["gemini"] }, "install": [ diff --git a/skills/node-connect/SKILL.md b/skills/node-connect/SKILL.md new file mode 100644 index 00000000000..ea468f19096 --- /dev/null +++ b/skills/node-connect/SKILL.md @@ -0,0 +1,142 @@ +--- +name: node-connect +description: Diagnose OpenClaw node connection and pairing failures for Android, iOS, and macOS companion apps. Use when QR/setup code/manual connect fails, local Wi-Fi works but VPS/tailnet does not, or errors mention pairing required, unauthorized, bootstrap token invalid or expired, gateway.bind, gateway.remote.url, Tailscale, or plugins.entries.device-pair.config.publicUrl. +--- + +# Node Connect + +Goal: find the one real route from node -> gateway, verify OpenClaw is advertising that route, then fix pairing/auth. + +## Topology first + +Decide which case you are in before proposing fixes: + +- same machine / emulator / USB tunnel +- same LAN / local Wi-Fi +- same Tailscale tailnet +- public URL / reverse proxy + +Do not mix them. + +- Local Wi-Fi problem: do not switch to Tailscale unless remote access is actually needed. +- VPS / remote gateway problem: do not keep debugging `localhost` or LAN IPs. + +## If ambiguous, ask first + +If the setup is unclear or the failure report is vague, ask short clarifying questions before diagnosing. + +Ask for: + +- which route they intend: same machine, same LAN, Tailscale tailnet, or public URL +- whether they used QR/setup code or manual host/port +- the exact app text/status/error, quoted exactly if possible +- whether `openclaw devices list` shows a pending pairing request + +Do not guess from `can't connect`. + +## Canonical checks + +Prefer `openclaw qr --json`. It uses the same setup-code payload Android scans. 
+ +```bash +openclaw config get gateway.mode +openclaw config get gateway.bind +openclaw config get gateway.tailscale.mode +openclaw config get gateway.remote.url +openclaw config get gateway.auth.mode +openclaw config get gateway.auth.allowTailscale +openclaw config get plugins.entries.device-pair.config.publicUrl +openclaw qr --json +openclaw devices list +openclaw nodes status +``` + +If this OpenClaw instance is pointed at a remote gateway, also run: + +```bash +openclaw qr --remote --json +``` + +If Tailscale is part of the story: + +```bash +tailscale status --json +``` + +## Read the result, not guesses + +`openclaw qr --json` success means: + +- `gatewayUrl`: this is the actual endpoint the app should use. +- `urlSource`: this tells you which config path won. + +Common good sources: + +- `gateway.bind=lan`: same Wi-Fi / LAN only +- `gateway.bind=tailnet`: direct tailnet access +- `gateway.tailscale.mode=serve` or `gateway.tailscale.mode=funnel`: Tailscale route +- `plugins.entries.device-pair.config.publicUrl`: explicit public/reverse-proxy route +- `gateway.remote.url`: remote gateway route + +## Root-cause map + +If `openclaw qr --json` says `Gateway is only bound to loopback`: + +- remote node cannot connect yet +- fix the route, then generate a fresh setup code +- `gateway.bind=auto` is not enough if the effective QR route is still loopback +- same LAN: use `gateway.bind=lan` +- same tailnet: prefer `gateway.tailscale.mode=serve` or use `gateway.bind=tailnet` +- public internet: set a real `plugins.entries.device-pair.config.publicUrl` or `gateway.remote.url` + +If `gateway.bind=tailnet set, but no tailnet IP was found`: + +- gateway host is not actually on Tailscale + +If `qr --remote requires gateway.remote.url`: + +- remote-mode config is incomplete + +If the app says `pairing required`: + +- network route and auth worked +- approve the pending device + +```bash +openclaw devices list +openclaw devices approve --latest +``` + +If the app says 
`bootstrap token invalid or expired`: + +- old setup code +- generate a fresh one and rescan +- do this after any URL/auth fix too + +If the app says `unauthorized`: + +- wrong token/password, or wrong Tailscale expectation +- for Tailscale Serve, `gateway.auth.allowTailscale` must match the intended flow +- otherwise use explicit token/password + +## Fast heuristics + +- Same Wi-Fi setup + gateway advertises `127.0.0.1`, `localhost`, or loopback-only config: wrong. +- Remote setup + setup/manual uses private LAN IP: wrong. +- Tailnet setup + gateway advertises LAN IP instead of MagicDNS / tailnet route: wrong. +- Public URL set but QR still advertises something else: inspect `urlSource`; config is not what you think. +- `openclaw devices list` shows pending requests: stop changing network config and approve first. + +## Fix style + +Reply with one concrete diagnosis and one route. + +If there is not enough signal yet, ask for setup + exact app text instead of guessing. + +Good: + +- `The gateway is still loopback-only, so a node on another network can never reach it. 
Enable Tailscale Serve, restart the gateway, run openclaw qr again, rescan, then approve the pending device pairing.` + +Bad: + +- `Maybe LAN, maybe Tailscale, maybe port forwarding, maybe public URL.` diff --git a/skills/openai-image-gen/SKILL.md b/skills/openai-image-gen/SKILL.md index 5db45c2c0e5..5b12671b0b0 100644 --- a/skills/openai-image-gen/SKILL.md +++ b/skills/openai-image-gen/SKILL.md @@ -6,7 +6,7 @@ metadata: { "openclaw": { - "emoji": "🖼️", + "emoji": "🎨", "requires": { "bins": ["python3"], "env": ["OPENAI_API_KEY"] }, "primaryEnv": "OPENAI_API_KEY", "install": diff --git a/skills/openai-whisper-api/SKILL.md b/skills/openai-whisper-api/SKILL.md index 798b679e3ea..c961f132f4c 100644 --- a/skills/openai-whisper-api/SKILL.md +++ b/skills/openai-whisper-api/SKILL.md @@ -6,7 +6,7 @@ metadata: { "openclaw": { - "emoji": "☁️", + "emoji": "🌐", "requires": { "bins": ["curl"], "env": ["OPENAI_API_KEY"] }, "primaryEnv": "OPENAI_API_KEY", }, diff --git a/skills/openai-whisper/SKILL.md b/skills/openai-whisper/SKILL.md index 1c9411a3ff6..c22e0d62252 100644 --- a/skills/openai-whisper/SKILL.md +++ b/skills/openai-whisper/SKILL.md @@ -6,7 +6,7 @@ metadata: { "openclaw": { - "emoji": "🎙️", + "emoji": "🎤", "requires": { "bins": ["whisper"] }, "install": [ diff --git a/skills/sag/SKILL.md b/skills/sag/SKILL.md index a12e8a6d628..f0f7047651c 100644 --- a/skills/sag/SKILL.md +++ b/skills/sag/SKILL.md @@ -6,7 +6,7 @@ metadata: { "openclaw": { - "emoji": "🗣️", + "emoji": "🔊", "requires": { "bins": ["sag"], "env": ["ELEVENLABS_API_KEY"] }, "primaryEnv": "ELEVENLABS_API_KEY", "install": diff --git a/skills/sherpa-onnx-tts/SKILL.md b/skills/sherpa-onnx-tts/SKILL.md index 1628660637b..46f7ead58da 100644 --- a/skills/sherpa-onnx-tts/SKILL.md +++ b/skills/sherpa-onnx-tts/SKILL.md @@ -5,7 +5,7 @@ metadata: { "openclaw": { - "emoji": "🗣️", + "emoji": "🔉", "os": ["darwin", "linux", "win32"], "requires": { "env": ["SHERPA_ONNX_RUNTIME_DIR", "SHERPA_ONNX_MODEL_DIR"] }, "install": diff 
--git a/skills/video-frames/SKILL.md b/skills/video-frames/SKILL.md index 0aca9fbd199..93a550a6fc9 100644 --- a/skills/video-frames/SKILL.md +++ b/skills/video-frames/SKILL.md @@ -6,7 +6,7 @@ metadata: { "openclaw": { - "emoji": "🎞️", + "emoji": "🎬", "requires": { "bins": ["ffmpeg"] }, "install": [ diff --git a/skills/weather/SKILL.md b/skills/weather/SKILL.md index 3daedf90f25..8d463be0b6a 100644 --- a/skills/weather/SKILL.md +++ b/skills/weather/SKILL.md @@ -2,7 +2,7 @@ name: weather description: "Get current weather and forecasts via wttr.in or Open-Meteo. Use when: user asks about weather, temperature, or forecasts for any location. NOT for: historical weather data, severe weather alerts, or detailed meteorological analysis. No API key needed." homepage: https://wttr.in/:help -metadata: { "openclaw": { "emoji": "🌤️", "requires": { "bins": ["curl"] } } } +metadata: { "openclaw": { "emoji": "☔", "requires": { "bins": ["curl"] } } } --- # Weather Skill diff --git a/skills/xurl/SKILL.md b/skills/xurl/SKILL.md index cf76bf158ad..1d74d6de3ee 100644 --- a/skills/xurl/SKILL.md +++ b/skills/xurl/SKILL.md @@ -5,7 +5,7 @@ metadata: { "openclaw": { - "emoji": "𝕏", + "emoji": "🐦", "requires": { "bins": ["xurl"] }, "install": [ diff --git a/src/acp/client.test.ts b/src/acp/client.test.ts index cbb52bd73cc..0cbc376720c 100644 --- a/src/acp/client.test.ts +++ b/src/acp/client.test.ts @@ -4,9 +4,11 @@ import type { RequestPermissionRequest } from "@agentclientprotocol/sdk"; import { afterEach, describe, expect, it, vi } from "vitest"; import { createTrackedTempDirs } from "../test-utils/tracked-temp-dirs.js"; import { + buildAcpClientStripKeys, resolveAcpClientSpawnEnv, resolveAcpClientSpawnInvocation, resolvePermissionRequest, + shouldStripProviderAuthEnvVarsForAcpServer, } from "./client.js"; import { extractAttachmentsFromPrompt, extractTextFromPrompt } from "./event-mapper.js"; @@ -110,6 +112,120 @@ describe("resolveAcpClientSpawnEnv", () => { 
expect(env.OPENCLAW_SHELL).toBe("acp-client"); expect(env.OPENAI_API_KEY).toBeUndefined(); }); + + it("strips provider auth env vars for the default OpenClaw bridge", () => { + const stripKeys = new Set(["OPENAI_API_KEY", "GITHUB_TOKEN", "HF_TOKEN"]); + const env = resolveAcpClientSpawnEnv( + { + OPENAI_API_KEY: "openai-secret", // pragma: allowlist secret + GITHUB_TOKEN: "gh-secret", // pragma: allowlist secret + HF_TOKEN: "hf-secret", // pragma: allowlist secret + OPENCLAW_API_KEY: "keep-me", + PATH: "/usr/bin", + }, + { stripKeys }, + ); + + expect(env.OPENAI_API_KEY).toBeUndefined(); + expect(env.GITHUB_TOKEN).toBeUndefined(); + expect(env.HF_TOKEN).toBeUndefined(); + expect(env.OPENCLAW_API_KEY).toBe("keep-me"); + expect(env.PATH).toBe("/usr/bin"); + expect(env.OPENCLAW_SHELL).toBe("acp-client"); + }); + + it("strips provider auth env vars case-insensitively", () => { + const env = resolveAcpClientSpawnEnv( + { + OpenAI_Api_Key: "openai-secret", // pragma: allowlist secret + Github_Token: "gh-secret", // pragma: allowlist secret + OPENCLAW_API_KEY: "keep-me", + }, + { stripKeys: new Set(["OPENAI_API_KEY", "GITHUB_TOKEN"]) }, + ); + + expect(env.OpenAI_Api_Key).toBeUndefined(); + expect(env.Github_Token).toBeUndefined(); + expect(env.OPENCLAW_API_KEY).toBe("keep-me"); + expect(env.OPENCLAW_SHELL).toBe("acp-client"); + }); + + it("preserves provider auth env vars for explicit custom ACP servers", () => { + const env = resolveAcpClientSpawnEnv({ + OPENAI_API_KEY: "openai-secret", // pragma: allowlist secret + GITHUB_TOKEN: "gh-secret", // pragma: allowlist secret + HF_TOKEN: "hf-secret", // pragma: allowlist secret + OPENCLAW_API_KEY: "keep-me", + }); + + expect(env.OPENAI_API_KEY).toBe("openai-secret"); + expect(env.GITHUB_TOKEN).toBe("gh-secret"); + expect(env.HF_TOKEN).toBe("hf-secret"); + expect(env.OPENCLAW_API_KEY).toBe("keep-me"); + expect(env.OPENCLAW_SHELL).toBe("acp-client"); + }); +}); + +describe("shouldStripProviderAuthEnvVarsForAcpServer", () => { + 
it("strips provider auth env vars for the default bridge", () => { + expect(shouldStripProviderAuthEnvVarsForAcpServer()).toBe(true); + expect( + shouldStripProviderAuthEnvVarsForAcpServer({ + serverCommand: "openclaw", + serverArgs: ["acp"], + defaultServerCommand: "openclaw", + defaultServerArgs: ["acp"], + }), + ).toBe(true); + }); + + it("preserves provider auth env vars for explicit custom ACP servers", () => { + expect( + shouldStripProviderAuthEnvVarsForAcpServer({ + serverCommand: "custom-acp-server", + serverArgs: ["serve"], + defaultServerCommand: "openclaw", + defaultServerArgs: ["acp"], + }), + ).toBe(false); + }); + + it("preserves provider auth env vars when an explicit override uses the default executable with different args", () => { + expect( + shouldStripProviderAuthEnvVarsForAcpServer({ + serverCommand: process.execPath, + serverArgs: ["custom-entry.js"], + defaultServerCommand: process.execPath, + defaultServerArgs: ["dist/entry.js", "acp"], + }), + ).toBe(false); + }); +}); + +describe("buildAcpClientStripKeys", () => { + it("always includes active skill env keys", () => { + const stripKeys = buildAcpClientStripKeys({ + stripProviderAuthEnvVars: false, + activeSkillEnvKeys: ["SKILL_SECRET", "OPENAI_API_KEY"], + }); + + expect(stripKeys.has("SKILL_SECRET")).toBe(true); + expect(stripKeys.has("OPENAI_API_KEY")).toBe(true); + expect(stripKeys.has("GITHUB_TOKEN")).toBe(false); + }); + + it("adds provider auth env vars for the default bridge", () => { + const stripKeys = buildAcpClientStripKeys({ + stripProviderAuthEnvVars: true, + activeSkillEnvKeys: ["SKILL_SECRET"], + }); + + expect(stripKeys.has("SKILL_SECRET")).toBe(true); + expect(stripKeys.has("OPENAI_API_KEY")).toBe(true); + expect(stripKeys.has("GITHUB_TOKEN")).toBe(true); + expect(stripKeys.has("HF_TOKEN")).toBe(true); + expect(stripKeys.has("OPENCLAW_API_KEY")).toBe(false); + }); }); describe("resolveAcpClientSpawnInvocation", () => { diff --git a/src/acp/client.ts b/src/acp/client.ts 
index 54be5ffc455..2f3ac28641a 100644 --- a/src/acp/client.ts +++ b/src/acp/client.ts @@ -19,6 +19,10 @@ import { materializeWindowsSpawnProgram, resolveWindowsSpawnProgram, } from "../plugin-sdk/windows-spawn.js"; +import { + listKnownProviderAuthEnvVarNames, + omitEnvKeysCaseInsensitive, +} from "../secrets/provider-env-vars.js"; import { DANGEROUS_ACP_TOOLS } from "../security/dangerous-tools.js"; const SAFE_AUTO_APPROVE_TOOL_IDS = new Set(["read", "search", "web_search", "memory_search"]); @@ -346,20 +350,56 @@ function buildServerArgs(opts: AcpClientOptions): string[] { return args; } +type AcpClientSpawnEnvOptions = { + stripKeys?: Iterable; +}; + export function resolveAcpClientSpawnEnv( baseEnv: NodeJS.ProcessEnv = process.env, - options?: { stripKeys?: ReadonlySet }, + options: AcpClientSpawnEnvOptions = {}, ): NodeJS.ProcessEnv { - const env: NodeJS.ProcessEnv = { ...baseEnv }; - if (options?.stripKeys) { - for (const key of options.stripKeys) { - delete env[key]; - } - } + const env = omitEnvKeysCaseInsensitive(baseEnv, options.stripKeys ?? []); env.OPENCLAW_SHELL = "acp-client"; return env; } +export function shouldStripProviderAuthEnvVarsForAcpServer( + params: { + serverCommand?: string; + serverArgs?: string[]; + defaultServerCommand?: string; + defaultServerArgs?: string[]; + } = {}, +): boolean { + const serverCommand = params.serverCommand?.trim(); + if (!serverCommand) { + return true; + } + const defaultServerCommand = params.defaultServerCommand?.trim(); + if (!defaultServerCommand || serverCommand !== defaultServerCommand) { + return false; + } + const serverArgs = params.serverArgs ?? []; + const defaultServerArgs = params.defaultServerArgs ?? 
[]; + return ( + serverArgs.length === defaultServerArgs.length && + serverArgs.every((arg, index) => arg === defaultServerArgs[index]) + ); +} + +export function buildAcpClientStripKeys(params: { + stripProviderAuthEnvVars?: boolean; + activeSkillEnvKeys?: Iterable; +}): Set { + const stripKeys = new Set(params.activeSkillEnvKeys ?? []); + if (params.stripProviderAuthEnvVars) { + for (const key of listKnownProviderAuthEnvVarNames()) { + stripKeys.add(key); + } + } + return stripKeys; +} + type AcpSpawnRuntime = { platform: NodeJS.Platform; env: NodeJS.ProcessEnv; @@ -456,12 +496,22 @@ export async function createAcpClient(opts: AcpClientOptions = {}): Promise { - const sessionKey = normalizeSessionKey(input.sessionKey); + const sessionKey = canonicalizeAcpSessionKey({ + cfg: input.cfg, + sessionKey: input.sessionKey, + }); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -234,6 +237,7 @@ export class AcpSessionManager { sessionKey, agent, mode: input.mode, + resumeSessionId: input.resumeSessionId, cwd: requestedCwd, }), fallbackCode: "ACP_SESSION_INIT_FAILED", @@ -320,7 +324,7 @@ export class AcpSessionManager { sessionKey: string; signal?: AbortSignal; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -396,7 +400,7 @@ export class AcpSessionManager { sessionKey: string; runtimeMode: string; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -451,7 +455,7 @@ export class AcpSessionManager { key: string; value: string; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = 
canonicalizeAcpSessionKey(params); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -524,7 +528,7 @@ export class AcpSessionManager { sessionKey: string; patch: Partial; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); const validatedPatch = validateRuntimeOptionPatch(params.patch); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); @@ -554,7 +558,7 @@ export class AcpSessionManager { cfg: OpenClawConfig; sessionKey: string; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -590,7 +594,10 @@ export class AcpSessionManager { } async runTurn(input: AcpRunTurnInput): Promise { - const sessionKey = normalizeSessionKey(input.sessionKey); + const sessionKey = canonicalizeAcpSessionKey({ + cfg: input.cfg, + sessionKey: input.sessionKey, + }); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -655,6 +662,7 @@ export class AcpSessionManager { for await (const event of runtime.runTurn({ handle, text: input.text, + attachments: input.attachments, mode: input.mode, requestId: input.requestId, signal: combinedSignal, @@ -736,7 +744,7 @@ export class AcpSessionManager { sessionKey: string; reason?: string; }): Promise { - const sessionKey = normalizeSessionKey(params.sessionKey); + const sessionKey = canonicalizeAcpSessionKey(params); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } @@ -804,7 +812,10 @@ export class AcpSessionManager { } async closeSession(input: AcpCloseSessionInput): Promise { - const sessionKey = normalizeSessionKey(input.sessionKey); + const sessionKey = 
canonicalizeAcpSessionKey({ + cfg: input.cfg, + sessionKey: input.sessionKey, + }); if (!sessionKey) { throw new AcpRuntimeError("ACP_SESSION_INIT_FAILED", "ACP session key is required."); } diff --git a/src/acp/control-plane/manager.test.ts b/src/acp/control-plane/manager.test.ts index ebdf356ca9f..8152944834c 100644 --- a/src/acp/control-plane/manager.test.ts +++ b/src/acp/control-plane/manager.test.ts @@ -170,6 +170,57 @@ describe("AcpSessionManager", () => { expect(resolved.error.message).toContain("ACP metadata is missing"); }); + it("canonicalizes the main alias before ACP rehydrate after restart", async () => { + const runtimeState = createRuntime(); + hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ + id: "acpx", + runtime: runtimeState.runtime, + }); + hoisted.readAcpSessionEntryMock.mockImplementation((paramsUnknown: unknown) => { + const sessionKey = (paramsUnknown as { sessionKey?: string }).sessionKey; + if (sessionKey !== "agent:main:main") { + return null; + } + return { + sessionKey, + storeSessionKey: sessionKey, + acp: { + ...readySessionMeta(), + agent: "main", + runtimeSessionName: sessionKey, + }, + }; + }); + + const manager = new AcpSessionManager(); + const cfg = { + ...baseCfg, + session: { mainKey: "main" }, + agents: { list: [{ id: "main", default: true }] }, + } as OpenClawConfig; + + await manager.runTurn({ + cfg, + sessionKey: "main", + text: "after restart", + mode: "prompt", + requestId: "r-main", + }); + + expect(hoisted.readAcpSessionEntryMock).toHaveBeenCalledWith( + expect.objectContaining({ + cfg, + sessionKey: "agent:main:main", + }), + ); + expect(runtimeState.ensureSession).toHaveBeenCalledWith( + expect.objectContaining({ + agent: "main", + sessionKey: "agent:main:main", + }), + ); + }); + it("serializes concurrent turns for the same ACP session", async () => { const runtimeState = createRuntime(); hoisted.requireAcpRuntimeBackendMock.mockReturnValue({ diff --git a/src/acp/control-plane/manager.types.ts 
b/src/acp/control-plane/manager.types.ts index 7337e8063f9..a2989c0d0f2 100644 --- a/src/acp/control-plane/manager.types.ts +++ b/src/acp/control-plane/manager.types.ts @@ -43,14 +43,21 @@ export type AcpInitializeSessionInput = { sessionKey: string; agent: string; mode: AcpRuntimeSessionMode; + resumeSessionId?: string; cwd?: string; backendId?: string; }; +export type AcpTurnAttachment = { + mediaType: string; + data: string; +}; + export type AcpRunTurnInput = { cfg: OpenClawConfig; sessionKey: string; text: string; + attachments?: AcpTurnAttachment[]; mode: AcpRuntimePromptMode; requestId: string; signal?: AbortSignal; diff --git a/src/acp/control-plane/manager.utils.ts b/src/acp/control-plane/manager.utils.ts index 17729c6c2fc..90f7c516538 100644 --- a/src/acp/control-plane/manager.utils.ts +++ b/src/acp/control-plane/manager.utils.ts @@ -1,6 +1,14 @@ import type { OpenClawConfig } from "../../config/config.js"; +import { + canonicalizeMainSessionAlias, + resolveMainSessionKey, +} from "../../config/sessions/main-session.js"; import type { SessionAcpMeta } from "../../config/sessions/types.js"; -import { normalizeAgentId, parseAgentSessionKey } from "../../routing/session-key.js"; +import { + normalizeAgentId, + normalizeMainKey, + parseAgentSessionKey, +} from "../../routing/session-key.js"; import { ACP_ERROR_CODES, AcpRuntimeError } from "../runtime/errors.js"; import type { AcpSessionResolution } from "./manager.types.js"; @@ -42,6 +50,33 @@ export function normalizeSessionKey(sessionKey: string): string { return sessionKey.trim(); } +export function canonicalizeAcpSessionKey(params: { + cfg: OpenClawConfig; + sessionKey: string; +}): string { + const normalized = normalizeSessionKey(params.sessionKey); + if (!normalized) { + return ""; + } + const lowered = normalized.toLowerCase(); + if (lowered === "global" || lowered === "unknown") { + return lowered; + } + const parsed = parseAgentSessionKey(lowered); + if (parsed) { + return 
canonicalizeMainSessionAlias({ + cfg: params.cfg, + agentId: parsed.agentId, + sessionKey: lowered, + }); + } + const mainKey = normalizeMainKey(params.cfg.session?.mainKey); + if (lowered === "main" || lowered === mainKey) { + return resolveMainSessionKey(params.cfg); + } + return lowered; +} + export function normalizeActorKey(sessionKey: string): string { return sessionKey.trim().toLowerCase(); } diff --git a/src/acp/conversation-id.ts b/src/acp/conversation-id.ts index 7281fef4924..9cf17c9a579 100644 --- a/src/acp/conversation-id.ts +++ b/src/acp/conversation-id.ts @@ -4,7 +4,7 @@ export type ParsedTelegramTopicConversation = { canonicalConversationId: string; }; -function normalizeText(value: unknown): string { +export function normalizeConversationText(value: unknown): string { if (typeof value === "string") { return value.trim(); } @@ -15,7 +15,7 @@ function normalizeText(value: unknown): string { } export function parseTelegramChatIdFromTarget(raw: unknown): string | undefined { - const text = normalizeText(raw); + const text = normalizeConversationText(raw); if (!text) { return undefined; } diff --git a/src/acp/event-mapper.test.ts b/src/acp/event-mapper.test.ts new file mode 100644 index 00000000000..2aca401d483 --- /dev/null +++ b/src/acp/event-mapper.test.ts @@ -0,0 +1,18 @@ +import { describe, expect, it } from "vitest"; +import { extractToolCallLocations } from "./event-mapper.js"; + +describe("extractToolCallLocations", () => { + it("enforces the global node visit cap across nested structures", () => { + const nested = Array.from({ length: 20 }, (_, outer) => + Array.from({ length: 20 }, (_, inner) => + inner === 19 ? 
{ path: `/tmp/file-${outer}.txt` } : { note: `${outer}-${inner}` }, + ), + ); + + const locations = extractToolCallLocations(nested); + + expect(locations).toBeDefined(); + expect(locations?.length).toBeLessThan(20); + expect(locations).not.toContainEqual({ path: "/tmp/file-19.txt" }); + }); +}); diff --git a/src/acp/event-mapper.ts b/src/acp/event-mapper.ts index 83b91524a7f..c164f356307 100644 --- a/src/acp/event-mapper.ts +++ b/src/acp/event-mapper.ts @@ -1,4 +1,10 @@ -import type { ContentBlock, ImageContent, ToolKind } from "@agentclientprotocol/sdk"; +import type { + ContentBlock, + ImageContent, + ToolCallContent, + ToolCallLocation, + ToolKind, +} from "@agentclientprotocol/sdk"; export type GatewayAttachment = { type: string; @@ -6,6 +12,39 @@ export type GatewayAttachment = { content: string; }; +const TOOL_LOCATION_PATH_KEYS = [ + "path", + "filePath", + "file_path", + "targetPath", + "target_path", + "targetFile", + "target_file", + "sourcePath", + "source_path", + "destinationPath", + "destination_path", + "oldPath", + "old_path", + "newPath", + "new_path", + "outputPath", + "output_path", + "inputPath", + "input_path", +] as const; + +const TOOL_LOCATION_LINE_KEYS = [ + "line", + "lineNumber", + "line_number", + "startLine", + "start_line", +] as const; +const TOOL_RESULT_PATH_MARKER_RE = /^(?:FILE|MEDIA):(.+)$/gm; +const TOOL_LOCATION_MAX_DEPTH = 4; +const TOOL_LOCATION_MAX_NODES = 100; + const INLINE_CONTROL_ESCAPE_MAP: Readonly> = { "\0": "\\0", "\r": "\\r", @@ -56,6 +95,152 @@ function escapeResourceTitle(value: string): string { return escapeInlineControlChars(value).replace(/[()[\]]/g, (char) => `\\${char}`); } +function asRecord(value: unknown): Record | undefined { + return value && typeof value === "object" && !Array.isArray(value) + ? 
(value as Record) + : undefined; +} + +function normalizeToolLocationPath(value: string): string | undefined { + const trimmed = value.trim(); + if ( + !trimmed || + trimmed.length > 4096 || + trimmed.includes("\u0000") || + trimmed.includes("\r") || + trimmed.includes("\n") + ) { + return undefined; + } + if (/^https?:\/\//i.test(trimmed)) { + return undefined; + } + if (/^file:\/\//i.test(trimmed)) { + try { + const parsed = new URL(trimmed); + return decodeURIComponent(parsed.pathname || "") || undefined; + } catch { + return undefined; + } + } + return trimmed; +} + +function normalizeToolLocationLine(value: unknown): number | undefined { + if (typeof value !== "number" || !Number.isFinite(value)) { + return undefined; + } + const line = Math.floor(value); + return line > 0 ? line : undefined; +} + +function extractToolLocationLine(record: Record): number | undefined { + for (const key of TOOL_LOCATION_LINE_KEYS) { + const line = normalizeToolLocationLine(record[key]); + if (line !== undefined) { + return line; + } + } + return undefined; +} + +function addToolLocation( + locations: Map, + rawPath: string, + line?: number, +): void { + const path = normalizeToolLocationPath(rawPath); + if (!path) { + return; + } + for (const [existingKey, existing] of locations.entries()) { + if (existing.path !== path) { + continue; + } + if (line === undefined || existing.line === line) { + return; + } + if (existing.line === undefined) { + locations.delete(existingKey); + } + } + const locationKey = `${path}:${line ?? ""}`; + if (locations.has(locationKey)) { + return; + } + locations.set(locationKey, line ? 
{ path, line } : { path }); +} + +function collectLocationsFromTextMarkers( + text: string, + locations: Map, +): void { + for (const match of text.matchAll(TOOL_RESULT_PATH_MARKER_RE)) { + const candidate = match[1]?.trim(); + if (candidate) { + addToolLocation(locations, candidate); + } + } +} + +function collectToolLocations( + value: unknown, + locations: Map, + state: { visited: number }, + depth: number, +): void { + if (state.visited >= TOOL_LOCATION_MAX_NODES || depth > TOOL_LOCATION_MAX_DEPTH) { + return; + } + state.visited += 1; + + if (typeof value === "string") { + collectLocationsFromTextMarkers(value, locations); + return; + } + if (!value || typeof value !== "object") { + return; + } + if (Array.isArray(value)) { + for (const item of value) { + collectToolLocations(item, locations, state, depth + 1); + if (state.visited >= TOOL_LOCATION_MAX_NODES) { + return; + } + } + return; + } + + const record = value as Record; + const line = extractToolLocationLine(record); + for (const key of TOOL_LOCATION_PATH_KEYS) { + const rawPath = record[key]; + if (typeof rawPath === "string") { + addToolLocation(locations, rawPath, line); + } + } + + const content = Array.isArray(record.content) ? 
record.content : undefined; + if (content) { + for (const block of content) { + const entry = asRecord(block); + if (entry?.type === "text" && typeof entry.text === "string") { + collectLocationsFromTextMarkers(entry.text, locations); + } + } + } + + for (const [key, nested] of Object.entries(record)) { + if (key === "content") { + continue; + } + collectToolLocations(nested, locations, state, depth + 1); + if (state.visited >= TOOL_LOCATION_MAX_NODES) { + return; + } + } +} + export function extractTextFromPrompt(prompt: ContentBlock[], maxBytes?: number): string { const parts: string[] = []; // Track accumulated byte count per block to catch oversized prompts before full concatenation @@ -152,3 +337,74 @@ export function inferToolKind(name?: string): ToolKind { } return "other"; } + +export function extractToolCallContent(value: unknown): ToolCallContent[] | undefined { + if (typeof value === "string") { + return value.trim() + ? [ + { + type: "content", + content: { + type: "text", + text: value, + }, + }, + ] + : undefined; + } + + const record = asRecord(value); + if (!record) { + return undefined; + } + + const contents: ToolCallContent[] = []; + const blocks = Array.isArray(record.content) ? record.content : []; + for (const block of blocks) { + const entry = asRecord(block); + if (entry?.type === "text" && typeof entry.text === "string" && entry.text.trim()) { + contents.push({ + type: "content", + content: { + type: "text", + text: entry.text, + }, + }); + } + } + + if (contents.length > 0) { + return contents; + } + + const fallbackText = + typeof record.text === "string" + ? record.text + : typeof record.message === "string" + ? record.message + : typeof record.error === "string" + ? 
record.error + : undefined; + + if (!fallbackText?.trim()) { + return undefined; + } + + return [ + { + type: "content", + content: { + type: "text", + text: fallbackText, + }, + }, + ]; +} + +export function extractToolCallLocations(...values: unknown[]): ToolCallLocation[] | undefined { + const locations = new Map(); + for (const value of values) { + collectToolLocations(value, locations, { visited: 0 }, 0); + } + return locations.size > 0 ? [...locations.values()] : undefined; +} diff --git a/src/acp/persistent-bindings.resolve.ts b/src/acp/persistent-bindings.resolve.ts index c69f1afe5af..84f052797ad 100644 --- a/src/acp/persistent-bindings.resolve.ts +++ b/src/acp/persistent-bindings.resolve.ts @@ -117,6 +117,70 @@ function toConfiguredBindingSpec(params: { }; } +function resolveConfiguredBindingRecord(params: { + cfg: OpenClawConfig; + bindings: AgentAcpBinding[]; + channel: ConfiguredAcpBindingChannel; + accountId: string; + selectConversation: ( + binding: AgentAcpBinding, + ) => { conversationId: string; parentConversationId?: string } | null; +}): ResolvedConfiguredAcpBinding | null { + let wildcardMatch: { + binding: AgentAcpBinding; + conversationId: string; + parentConversationId?: string; + } | null = null; + for (const binding of params.bindings) { + if (normalizeBindingChannel(binding.match.channel) !== params.channel) { + continue; + } + const accountMatchPriority = resolveAccountMatchPriority( + binding.match.accountId, + params.accountId, + ); + if (accountMatchPriority === 0) { + continue; + } + const conversation = params.selectConversation(binding); + if (!conversation) { + continue; + } + const spec = toConfiguredBindingSpec({ + cfg: params.cfg, + channel: params.channel, + accountId: params.accountId, + conversationId: conversation.conversationId, + parentConversationId: conversation.parentConversationId, + binding, + }); + if (accountMatchPriority === 2) { + return { + spec, + record: toConfiguredAcpBindingRecord(spec), + }; + } + if 
(!wildcardMatch) { + wildcardMatch = { binding, ...conversation }; + } + } + if (!wildcardMatch) { + return null; + } + const spec = toConfiguredBindingSpec({ + cfg: params.cfg, + channel: params.channel, + accountId: params.accountId, + conversationId: wildcardMatch.conversationId, + parentConversationId: wildcardMatch.parentConversationId, + binding: wildcardMatch.binding, + }); + return { + spec, + record: toConfiguredAcpBindingRecord(spec), + }; +} + export function resolveConfiguredAcpBindingSpecBySessionKey(params: { cfg: OpenClawConfig; sessionKey: string; @@ -207,57 +271,20 @@ export function resolveConfiguredAcpBindingRecord(params: { if (channel === "discord") { const bindings = listAcpBindings(params.cfg); - const resolveDiscordBindingForConversation = ( - targetConversationId: string, - ): ResolvedConfiguredAcpBinding | null => { - let wildcardMatch: AgentAcpBinding | null = null; - for (const binding of bindings) { - if (normalizeBindingChannel(binding.match.channel) !== "discord") { - continue; - } - const accountMatchPriority = resolveAccountMatchPriority( - binding.match.accountId, - accountId, - ); - if (accountMatchPriority === 0) { - continue; - } - const bindingConversationId = resolveBindingConversationId(binding); - if (!bindingConversationId || bindingConversationId !== targetConversationId) { - continue; - } - if (accountMatchPriority === 2) { - const spec = toConfiguredBindingSpec({ - cfg: params.cfg, - channel: "discord", - accountId, - conversationId: targetConversationId, - binding, - }); - return { - spec, - record: toConfiguredAcpBindingRecord(spec), - }; - } - if (!wildcardMatch) { - wildcardMatch = binding; - } - } - if (wildcardMatch) { - const spec = toConfiguredBindingSpec({ - cfg: params.cfg, - channel: "discord", - accountId, - conversationId: targetConversationId, - binding: wildcardMatch, - }); - return { - spec, - record: toConfiguredAcpBindingRecord(spec), - }; - } - return null; - }; + const 
resolveDiscordBindingForConversation = (targetConversationId: string) => + resolveConfiguredBindingRecord({ + cfg: params.cfg, + bindings, + channel: "discord", + accountId, + selectConversation: (binding) => { + const bindingConversationId = resolveBindingConversationId(binding); + if (!bindingConversationId || bindingConversationId !== targetConversationId) { + return null; + } + return { conversationId: targetConversationId }; + }, + }); const directMatch = resolveDiscordBindingForConversation(conversationId); if (directMatch) { @@ -280,61 +307,31 @@ export function resolveConfiguredAcpBindingRecord(params: { if (!parsed || !parsed.chatId.startsWith("-")) { return null; } - let wildcardMatch: AgentAcpBinding | null = null; - for (const binding of listAcpBindings(params.cfg)) { - if (normalizeBindingChannel(binding.match.channel) !== "telegram") { - continue; - } - const accountMatchPriority = resolveAccountMatchPriority(binding.match.accountId, accountId); - if (accountMatchPriority === 0) { - continue; - } - const targetConversationId = resolveBindingConversationId(binding); - if (!targetConversationId) { - continue; - } - const targetParsed = parseTelegramTopicConversation({ - conversationId: targetConversationId, - }); - if (!targetParsed || !targetParsed.chatId.startsWith("-")) { - continue; - } - if (targetParsed.canonicalConversationId !== parsed.canonicalConversationId) { - continue; - } - if (accountMatchPriority === 2) { - const spec = toConfiguredBindingSpec({ - cfg: params.cfg, - channel: "telegram", - accountId, + return resolveConfiguredBindingRecord({ + cfg: params.cfg, + bindings: listAcpBindings(params.cfg), + channel: "telegram", + accountId, + selectConversation: (binding) => { + const targetConversationId = resolveBindingConversationId(binding); + if (!targetConversationId) { + return null; + } + const targetParsed = parseTelegramTopicConversation({ + conversationId: targetConversationId, + }); + if (!targetParsed || 
!targetParsed.chatId.startsWith("-")) { + return null; + } + if (targetParsed.canonicalConversationId !== parsed.canonicalConversationId) { + return null; + } + return { conversationId: parsed.canonicalConversationId, parentConversationId: parsed.chatId, - binding, - }); - return { - spec, - record: toConfiguredAcpBindingRecord(spec), }; - } - if (!wildcardMatch) { - wildcardMatch = binding; - } - } - if (wildcardMatch) { - const spec = toConfiguredBindingSpec({ - cfg: params.cfg, - channel: "telegram", - accountId, - conversationId: parsed.canonicalConversationId, - parentConversationId: parsed.chatId, - binding: wildcardMatch, - }); - return { - spec, - record: toConfiguredAcpBindingRecord(spec), - }; - } - return null; + }, + }); } return null; diff --git a/src/acp/persistent-bindings.test.ts b/src/acp/persistent-bindings.test.ts index deafbc53e15..30e74c05082 100644 --- a/src/acp/persistent-bindings.test.ts +++ b/src/acp/persistent-bindings.test.ts @@ -30,6 +30,10 @@ import { resolveConfiguredAcpBindingSpecBySessionKey, } from "./persistent-bindings.js"; +type ConfiguredBinding = NonNullable[number]; +type BindingRecordInput = Parameters[0]; +type BindingSpec = Parameters[0]["spec"]; + const baseCfg = { session: { mainKey: "main", scope: "per-sender" }, agents: { @@ -37,6 +41,105 @@ const baseCfg = { }, } satisfies OpenClawConfig; +const defaultDiscordConversationId = "1478836151241412759"; +const defaultDiscordAccountId = "default"; + +function createCfgWithBindings( + bindings: ConfiguredBinding[], + overrides?: Partial, +): OpenClawConfig { + return { + ...baseCfg, + ...overrides, + bindings, + } as OpenClawConfig; +} + +function createDiscordBinding(params: { + agentId: string; + conversationId: string; + accountId?: string; + acp?: Record; +}): ConfiguredBinding { + return { + type: "acp", + agentId: params.agentId, + match: { + channel: "discord", + accountId: params.accountId ?? 
defaultDiscordAccountId, + peer: { kind: "channel", id: params.conversationId }, + }, + ...(params.acp ? { acp: params.acp } : {}), + } as ConfiguredBinding; +} + +function createTelegramGroupBinding(params: { + agentId: string; + conversationId: string; + acp?: Record; +}): ConfiguredBinding { + return { + type: "acp", + agentId: params.agentId, + match: { + channel: "telegram", + accountId: defaultDiscordAccountId, + peer: { kind: "group", id: params.conversationId }, + }, + ...(params.acp ? { acp: params.acp } : {}), + } as ConfiguredBinding; +} + +function resolveBindingRecord(cfg: OpenClawConfig, overrides: Partial = {}) { + return resolveConfiguredAcpBindingRecord({ + cfg, + channel: "discord", + accountId: defaultDiscordAccountId, + conversationId: defaultDiscordConversationId, + ...overrides, + }); +} + +function resolveDiscordBindingSpecBySession( + cfg: OpenClawConfig, + conversationId = defaultDiscordConversationId, +) { + const resolved = resolveBindingRecord(cfg, { conversationId }); + return resolveConfiguredAcpBindingSpecBySessionKey({ + cfg, + sessionKey: resolved?.record.targetSessionKey ?? "", + }); +} + +function createDiscordPersistentSpec(overrides: Partial = {}): BindingSpec { + return { + channel: "discord", + accountId: defaultDiscordAccountId, + conversationId: defaultDiscordConversationId, + agentId: "codex", + mode: "persistent", + ...overrides, + } as BindingSpec; +} + +function mockReadySession(params: { spec: BindingSpec; cwd: string }) { + const sessionKey = buildConfiguredAcpSessionKey(params.spec); + managerMocks.resolveSession.mockReturnValue({ + kind: "ready", + sessionKey, + meta: { + backend: "acpx", + agent: params.spec.acpAgentId ?? 
params.spec.agentId, + runtimeSessionName: "existing", + mode: params.spec.mode, + runtimeOptions: { cwd: params.cwd }, + state: "idle", + lastActivityAt: Date.now(), + }, + }); + return sessionKey; +} + beforeEach(() => { managerMocks.resolveSession.mockReset(); managerMocks.closeSession.mockReset().mockResolvedValue({ @@ -50,58 +153,30 @@ beforeEach(() => { describe("resolveConfiguredAcpBindingRecord", () => { it("resolves discord channel ACP binding from top-level typed bindings", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "1478836151241412759" }, - }, - acp: { - cwd: "/repo/openclaw", - }, - }, - ], - } satisfies OpenClawConfig; - - const resolved = resolveConfiguredAcpBindingRecord({ - cfg, - channel: "discord", - accountId: "default", - conversationId: "1478836151241412759", - }); + const cfg = createCfgWithBindings([ + createDiscordBinding({ + agentId: "codex", + conversationId: defaultDiscordConversationId, + acp: { cwd: "/repo/openclaw" }, + }), + ]); + const resolved = resolveBindingRecord(cfg); expect(resolved?.spec.channel).toBe("discord"); - expect(resolved?.spec.conversationId).toBe("1478836151241412759"); + expect(resolved?.spec.conversationId).toBe(defaultDiscordConversationId); expect(resolved?.spec.agentId).toBe("codex"); expect(resolved?.record.targetSessionKey).toContain("agent:codex:acp:binding:discord:default:"); expect(resolved?.record.metadata?.source).toBe("config"); }); it("falls back to parent discord channel when conversation is a thread id", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "channel-parent-1" }, - }, - }, - ], - } satisfies OpenClawConfig; - - const resolved = resolveConfiguredAcpBindingRecord({ - cfg, - channel: "discord", - accountId: "default", + const cfg 
= createCfgWithBindings([ + createDiscordBinding({ + agentId: "codex", + conversationId: "channel-parent-1", + }), + ]); + const resolved = resolveBindingRecord(cfg, { conversationId: "thread-123", parentConversationId: "channel-parent-1", }); @@ -111,34 +186,17 @@ describe("resolveConfiguredAcpBindingRecord", () => { }); it("prefers direct discord thread binding over parent channel fallback", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "channel-parent-1" }, - }, - }, - { - type: "acp", - agentId: "claude", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "thread-123" }, - }, - }, - ], - } satisfies OpenClawConfig; - - const resolved = resolveConfiguredAcpBindingRecord({ - cfg, - channel: "discord", - accountId: "default", + const cfg = createCfgWithBindings([ + createDiscordBinding({ + agentId: "codex", + conversationId: "channel-parent-1", + }), + createDiscordBinding({ + agentId: "claude", + conversationId: "thread-123", + }), + ]); + const resolved = resolveBindingRecord(cfg, { conversationId: "thread-123", parentConversationId: "channel-parent-1", }); @@ -148,60 +206,30 @@ describe("resolveConfiguredAcpBindingRecord", () => { }); it("prefers exact account binding over wildcard for the same discord conversation", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "*", - peer: { kind: "channel", id: "1478836151241412759" }, - }, - }, - { - type: "acp", - agentId: "claude", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "1478836151241412759" }, - }, - }, - ], - } satisfies OpenClawConfig; - - const resolved = resolveConfiguredAcpBindingRecord({ - cfg, - channel: "discord", - accountId: "default", - conversationId: "1478836151241412759", - }); + const cfg = 
createCfgWithBindings([ + createDiscordBinding({ + agentId: "codex", + conversationId: defaultDiscordConversationId, + accountId: "*", + }), + createDiscordBinding({ + agentId: "claude", + conversationId: defaultDiscordConversationId, + }), + ]); + const resolved = resolveBindingRecord(cfg); expect(resolved?.spec.agentId).toBe("claude"); }); it("returns null when no top-level ACP binding matches the conversation", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "different-channel" }, - }, - }, - ], - } satisfies OpenClawConfig; - - const resolved = resolveConfiguredAcpBindingRecord({ - cfg, - channel: "discord", - accountId: "default", + const cfg = createCfgWithBindings([ + createDiscordBinding({ + agentId: "codex", + conversationId: "different-channel", + }), + ]); + const resolved = resolveBindingRecord(cfg, { conversationId: "thread-123", parentConversationId: "channel-parent-1", }); @@ -210,23 +238,13 @@ describe("resolveConfiguredAcpBindingRecord", () => { }); it("resolves telegram forum topic bindings using canonical conversation ids", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "claude", - match: { - channel: "telegram", - accountId: "default", - peer: { kind: "group", id: "-1001234567890:topic:42" }, - }, - acp: { - backend: "acpx", - }, - }, - ], - } satisfies OpenClawConfig; + const cfg = createCfgWithBindings([ + createTelegramGroupBinding({ + agentId: "claude", + conversationId: "-1001234567890:topic:42", + acp: { backend: "acpx" }, + }), + ]); const canonical = resolveConfiguredAcpBindingRecord({ cfg, @@ -250,20 +268,12 @@ describe("resolveConfiguredAcpBindingRecord", () => { }); it("skips telegram non-group topic configs", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "claude", - match: { - channel: "telegram", - accountId: "default", - peer: { 
kind: "group", id: "123456789:topic:42" }, - }, - }, - ], - } satisfies OpenClawConfig; + const cfg = createCfgWithBindings([ + createTelegramGroupBinding({ + agentId: "claude", + conversationId: "123456789:topic:42", + }), + ]); const resolved = resolveConfiguredAcpBindingRecord({ cfg, @@ -275,44 +285,34 @@ describe("resolveConfiguredAcpBindingRecord", () => { }); it("applies agent runtime ACP defaults for bound conversations", () => { - const cfg = { - ...baseCfg, - agents: { - list: [ - { id: "main" }, - { - id: "coding", - runtime: { - type: "acp", - acp: { - agent: "codex", - backend: "acpx", - mode: "oneshot", - cwd: "/workspace/repo-a", + const cfg = createCfgWithBindings( + [ + createDiscordBinding({ + agentId: "coding", + conversationId: defaultDiscordConversationId, + }), + ], + { + agents: { + list: [ + { id: "main" }, + { + id: "coding", + runtime: { + type: "acp", + acp: { + agent: "codex", + backend: "acpx", + mode: "oneshot", + cwd: "/workspace/repo-a", + }, }, }, - }, - ], - }, - bindings: [ - { - type: "acp", - agentId: "coding", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "1478836151241412759" }, - }, + ], }, - ], - } satisfies OpenClawConfig; - - const resolved = resolveConfiguredAcpBindingRecord({ - cfg, - channel: "discord", - accountId: "default", - conversationId: "1478836151241412759", - }); + }, + ); + const resolved = resolveBindingRecord(cfg); expect(resolved?.spec.agentId).toBe("coding"); expect(resolved?.spec.acpAgentId).toBe("codex"); @@ -324,37 +324,17 @@ describe("resolveConfiguredAcpBindingRecord", () => { describe("resolveConfiguredAcpBindingSpecBySessionKey", () => { it("maps a configured discord binding session key back to its spec", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "1478836151241412759" }, - }, - acp: { - backend: "acpx", - }, - }, - ], - } 
satisfies OpenClawConfig; - - const resolved = resolveConfiguredAcpBindingRecord({ - cfg, - channel: "discord", - accountId: "default", - conversationId: "1478836151241412759", - }); - const spec = resolveConfiguredAcpBindingSpecBySessionKey({ - cfg, - sessionKey: resolved?.record.targetSessionKey ?? "", - }); + const cfg = createCfgWithBindings([ + createDiscordBinding({ + agentId: "codex", + conversationId: defaultDiscordConversationId, + acp: { backend: "acpx" }, + }), + ]); + const spec = resolveDiscordBindingSpecBySession(cfg); expect(spec?.channel).toBe("discord"); - expect(spec?.conversationId).toBe("1478836151241412759"); + expect(spec?.conversationId).toBe(defaultDiscordConversationId); expect(spec?.agentId).toBe("codex"); expect(spec?.backend).toBe("acpx"); }); @@ -368,46 +348,20 @@ describe("resolveConfiguredAcpBindingSpecBySessionKey", () => { }); it("prefers exact account ACP settings over wildcard when session keys collide", () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "*", - peer: { kind: "channel", id: "1478836151241412759" }, - }, - acp: { - backend: "wild", - }, - }, - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "1478836151241412759" }, - }, - acp: { - backend: "exact", - }, - }, - ], - } satisfies OpenClawConfig; - - const resolved = resolveConfiguredAcpBindingRecord({ - cfg, - channel: "discord", - accountId: "default", - conversationId: "1478836151241412759", - }); - const spec = resolveConfiguredAcpBindingSpecBySessionKey({ - cfg, - sessionKey: resolved?.record.targetSessionKey ?? 
"", - }); + const cfg = createCfgWithBindings([ + createDiscordBinding({ + agentId: "codex", + conversationId: defaultDiscordConversationId, + accountId: "*", + acp: { backend: "wild" }, + }), + createDiscordBinding({ + agentId: "codex", + conversationId: defaultDiscordConversationId, + acp: { backend: "exact" }, + }), + ]); + const spec = resolveDiscordBindingSpecBySession(cfg); expect(spec?.backend).toBe("exact"); }); @@ -435,26 +389,10 @@ describe("buildConfiguredAcpSessionKey", () => { describe("ensureConfiguredAcpBindingSession", () => { it("keeps an existing ready session when configured binding omits cwd", async () => { - const spec = { - channel: "discord" as const, - accountId: "default", - conversationId: "1478836151241412759", - agentId: "codex", - mode: "persistent" as const, - }; - const sessionKey = buildConfiguredAcpSessionKey(spec); - managerMocks.resolveSession.mockReturnValue({ - kind: "ready", - sessionKey, - meta: { - backend: "acpx", - agent: "codex", - runtimeSessionName: "existing", - mode: "persistent", - runtimeOptions: { cwd: "/workspace/openclaw" }, - state: "idle", - lastActivityAt: Date.now(), - }, + const spec = createDiscordPersistentSpec(); + const sessionKey = mockReadySession({ + spec, + cwd: "/workspace/openclaw", }); const ensured = await ensureConfiguredAcpBindingSession({ @@ -468,27 +406,12 @@ describe("ensureConfiguredAcpBindingSession", () => { }); it("reinitializes a ready session when binding config explicitly sets mismatched cwd", async () => { - const spec = { - channel: "discord" as const, - accountId: "default", - conversationId: "1478836151241412759", - agentId: "codex", - mode: "persistent" as const, + const spec = createDiscordPersistentSpec({ cwd: "/workspace/repo-a", - }; - const sessionKey = buildConfiguredAcpSessionKey(spec); - managerMocks.resolveSession.mockReturnValue({ - kind: "ready", - sessionKey, - meta: { - backend: "acpx", - agent: "codex", - runtimeSessionName: "existing", - mode: "persistent", - 
runtimeOptions: { cwd: "/workspace/other-repo" }, - state: "idle", - lastActivityAt: Date.now(), - }, + }); + const sessionKey = mockReadySession({ + spec, + cwd: "/workspace/other-repo", }); const ensured = await ensureConfiguredAcpBindingSession({ @@ -508,14 +431,10 @@ describe("ensureConfiguredAcpBindingSession", () => { }); it("initializes ACP session with runtime agent override when provided", async () => { - const spec = { - channel: "discord" as const, - accountId: "default", - conversationId: "1478836151241412759", + const spec = createDiscordPersistentSpec({ agentId: "coding", acpAgentId: "codex", - mode: "persistent" as const, - }; + }); managerMocks.resolveSession.mockReturnValue({ kind: "none" }); const ensured = await ensureConfiguredAcpBindingSession({ @@ -534,24 +453,16 @@ describe("ensureConfiguredAcpBindingSession", () => { describe("resetAcpSessionInPlace", () => { it("reinitializes from configured binding when ACP metadata is missing", async () => { - const cfg = { - ...baseCfg, - bindings: [ - { - type: "acp", - agentId: "claude", - match: { - channel: "discord", - accountId: "default", - peer: { kind: "channel", id: "1478844424791396446" }, - }, - acp: { - mode: "persistent", - backend: "acpx", - }, + const cfg = createCfgWithBindings([ + createDiscordBinding({ + agentId: "claude", + conversationId: "1478844424791396446", + acp: { + mode: "persistent", + backend: "acpx", }, - ], - } satisfies OpenClawConfig; + }), + ]); const sessionKey = buildConfiguredAcpSessionKey({ channel: "discord", accountId: "default", diff --git a/src/acp/runtime/session-meta.test.ts b/src/acp/runtime/session-meta.test.ts new file mode 100644 index 00000000000..f9a0f399f81 --- /dev/null +++ b/src/acp/runtime/session-meta.test.ts @@ -0,0 +1,69 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import type { OpenClawConfig } from "../../config/config.js"; + +const hoisted = vi.hoisted(() => { + const resolveAllAgentSessionStoreTargetsMock = vi.fn(); + 
const loadSessionStoreMock = vi.fn(); + return { + resolveAllAgentSessionStoreTargetsMock, + loadSessionStoreMock, + }; +}); + +vi.mock("../../config/sessions.js", async () => { + const actual = await vi.importActual( + "../../config/sessions.js", + ); + return { + ...actual, + resolveAllAgentSessionStoreTargets: (cfg: OpenClawConfig, opts: unknown) => + hoisted.resolveAllAgentSessionStoreTargetsMock(cfg, opts), + loadSessionStore: (storePath: string) => hoisted.loadSessionStoreMock(storePath), + }; +}); + +const { listAcpSessionEntries } = await import("./session-meta.js"); + +describe("listAcpSessionEntries", () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it("reads ACP sessions from resolved configured store targets", async () => { + const cfg = { + session: { + store: "/custom/sessions/{agentId}.json", + }, + } as OpenClawConfig; + hoisted.resolveAllAgentSessionStoreTargetsMock.mockResolvedValue([ + { + agentId: "ops", + storePath: "/custom/sessions/ops.json", + }, + ]); + hoisted.loadSessionStoreMock.mockReturnValue({ + "agent:ops:acp:s1": { + updatedAt: 123, + acp: { + backend: "acpx", + agent: "ops", + mode: "persistent", + state: "idle", + }, + }, + }); + + const entries = await listAcpSessionEntries({ cfg }); + + expect(hoisted.resolveAllAgentSessionStoreTargetsMock).toHaveBeenCalledWith(cfg, undefined); + expect(hoisted.loadSessionStoreMock).toHaveBeenCalledWith("/custom/sessions/ops.json"); + expect(entries).toEqual([ + expect.objectContaining({ + cfg, + storePath: "/custom/sessions/ops.json", + sessionKey: "agent:ops:acp:s1", + storeSessionKey: "agent:ops:acp:s1", + }), + ]); + }); +}); diff --git a/src/acp/runtime/session-meta.ts b/src/acp/runtime/session-meta.ts index fd4a5813f9b..ff48d1e1ce6 100644 --- a/src/acp/runtime/session-meta.ts +++ b/src/acp/runtime/session-meta.ts @@ -1,9 +1,11 @@ -import path from "node:path"; -import { resolveAgentSessionDirs } from "../../agents/session-dirs.js"; import type { OpenClawConfig } from 
"../../config/config.js"; import { loadConfig } from "../../config/config.js"; -import { resolveStateDir } from "../../config/paths.js"; -import { loadSessionStore, resolveStorePath, updateSessionStore } from "../../config/sessions.js"; +import { + loadSessionStore, + resolveAllAgentSessionStoreTargets, + resolveStorePath, + updateSessionStore, +} from "../../config/sessions.js"; import { mergeSessionEntry, type SessionAcpMeta, @@ -88,14 +90,17 @@ export function readAcpSessionEntry(params: { export async function listAcpSessionEntries(params: { cfg?: OpenClawConfig; + env?: NodeJS.ProcessEnv; }): Promise { const cfg = params.cfg ?? loadConfig(); - const stateDir = resolveStateDir(process.env); - const sessionDirs = await resolveAgentSessionDirs(stateDir); + const storeTargets = await resolveAllAgentSessionStoreTargets( + cfg, + params.env ? { env: params.env } : undefined, + ); const entries: AcpSessionStoreEntry[] = []; - for (const sessionsDir of sessionDirs) { - const storePath = path.join(sessionsDir, "sessions.json"); + for (const target of storeTargets) { + const storePath = target.storePath; let store: Record; try { store = loadSessionStore(storePath); diff --git a/src/acp/runtime/types.ts b/src/acp/runtime/types.ts index 6a3d3bb3f8e..b46f264b92d 100644 --- a/src/acp/runtime/types.ts +++ b/src/acp/runtime/types.ts @@ -35,13 +35,20 @@ export type AcpRuntimeEnsureInput = { sessionKey: string; agent: string; mode: AcpRuntimeSessionMode; + resumeSessionId?: string; cwd?: string; env?: Record; }; +export type AcpRuntimeTurnAttachment = { + mediaType: string; + data: string; +}; + export type AcpRuntimeTurnInput = { handle: AcpRuntimeHandle; text: string; + attachments?: AcpRuntimeTurnAttachment[]; mode: AcpRuntimePromptMode; requestId: string; signal?: AbortSignal; diff --git a/src/acp/secret-file.test.ts b/src/acp/secret-file.test.ts index 4db2d265d7f..bef3cf3ed02 100644 --- a/src/acp/secret-file.test.ts +++ b/src/acp/secret-file.test.ts @@ -1,54 +1,12 @@ 
-import { mkdir, symlink, writeFile } from "node:fs/promises"; -import path from "node:path"; -import { afterEach, describe, expect, it } from "vitest"; -import { createTrackedTempDirs } from "../test-utils/tracked-temp-dirs.js"; +import { describe, expect, it } from "vitest"; import { MAX_SECRET_FILE_BYTES, readSecretFromFile } from "./secret-file.js"; -const tempDirs = createTrackedTempDirs(); -const createTempDir = () => tempDirs.make("openclaw-secret-file-test-"); - -afterEach(async () => { - await tempDirs.cleanup(); -}); - describe("readSecretFromFile", () => { - it("reads and trims a regular secret file", async () => { - const dir = await createTempDir(); - const file = path.join(dir, "secret.txt"); - await writeFile(file, " top-secret \n", "utf8"); - - expect(readSecretFromFile(file, "Gateway password")).toBe("top-secret"); + it("keeps the shared secret-file limit", () => { + expect(MAX_SECRET_FILE_BYTES).toBe(16 * 1024); }); - it("rejects files larger than the secret-file limit", async () => { - const dir = await createTempDir(); - const file = path.join(dir, "secret.txt"); - await writeFile(file, "x".repeat(MAX_SECRET_FILE_BYTES + 1), "utf8"); - - expect(() => readSecretFromFile(file, "Gateway password")).toThrow( - `Gateway password file at ${file} exceeds ${MAX_SECRET_FILE_BYTES} bytes.`, - ); - }); - - it("rejects non-regular files", async () => { - const dir = await createTempDir(); - const nestedDir = path.join(dir, "secret-dir"); - await mkdir(nestedDir); - - expect(() => readSecretFromFile(nestedDir, "Gateway password")).toThrow( - `Gateway password file at ${nestedDir} must be a regular file.`, - ); - }); - - it("rejects symlinks", async () => { - const dir = await createTempDir(); - const target = path.join(dir, "target.txt"); - const link = path.join(dir, "secret-link.txt"); - await writeFile(target, "top-secret\n", "utf8"); - await symlink(target, link); - - expect(() => readSecretFromFile(link, "Gateway password")).toThrow( - `Gateway password 
file at ${link} must not be a symlink.`, - ); + it("exposes the hardened secret reader", () => { + expect(typeof readSecretFromFile).toBe("function"); }); }); diff --git a/src/acp/secret-file.ts b/src/acp/secret-file.ts index 45ec36d28cb..902e0fc0627 100644 --- a/src/acp/secret-file.ts +++ b/src/acp/secret-file.ts @@ -1,43 +1,10 @@ -import fs from "node:fs"; -import { resolveUserPath } from "../utils.js"; +import { DEFAULT_SECRET_FILE_MAX_BYTES, readSecretFileSync } from "../infra/secret-file.js"; -export const MAX_SECRET_FILE_BYTES = 16 * 1024; +export const MAX_SECRET_FILE_BYTES = DEFAULT_SECRET_FILE_MAX_BYTES; export function readSecretFromFile(filePath: string, label: string): string { - const resolvedPath = resolveUserPath(filePath.trim()); - if (!resolvedPath) { - throw new Error(`${label} file path is empty.`); - } - - let stat: fs.Stats; - try { - stat = fs.lstatSync(resolvedPath); - } catch (err) { - throw new Error(`Failed to inspect ${label} file at ${resolvedPath}: ${String(err)}`, { - cause: err, - }); - } - if (stat.isSymbolicLink()) { - throw new Error(`${label} file at ${resolvedPath} must not be a symlink.`); - } - if (!stat.isFile()) { - throw new Error(`${label} file at ${resolvedPath} must be a regular file.`); - } - if (stat.size > MAX_SECRET_FILE_BYTES) { - throw new Error(`${label} file at ${resolvedPath} exceeds ${MAX_SECRET_FILE_BYTES} bytes.`); - } - - let raw = ""; - try { - raw = fs.readFileSync(resolvedPath, "utf8"); - } catch (err) { - throw new Error(`Failed to read ${label} file at ${resolvedPath}: ${String(err)}`, { - cause: err, - }); - } - const secret = raw.trim(); - if (!secret) { - throw new Error(`${label} file at ${resolvedPath} is empty.`); - } - return secret; + return readSecretFileSync(filePath, label, { + maxBytes: MAX_SECRET_FILE_BYTES, + rejectSymlink: true, + }); } diff --git a/src/acp/server.startup.test.ts b/src/acp/server.startup.test.ts index 2f9b96d8511..35c43478ec9 100644 --- a/src/acp/server.startup.test.ts +++ 
b/src/acp/server.startup.test.ts @@ -129,6 +129,22 @@ describe("serveAcpGateway startup", () => { return { signalHandlers, onceSpy }; } + async function emitHelloAndWaitForAgentSideConnection() { + const gateway = getMockGateway(); + gateway.emitHello(); + await vi.waitFor(() => { + expect(mockState.agentSideConnectionCtor).toHaveBeenCalledTimes(1); + }); + } + + async function stopServeWithSigint( + signalHandlers: Map void>, + servePromise: Promise, + ) { + signalHandlers.get("SIGINT")?.(); + await servePromise; + } + beforeAll(async () => { ({ serveAcpGateway } = await import("./server.js")); }); @@ -153,14 +169,8 @@ describe("serveAcpGateway startup", () => { await Promise.resolve(); expect(mockState.agentSideConnectionCtor).not.toHaveBeenCalled(); - const gateway = getMockGateway(); - gateway.emitHello(); - await vi.waitFor(() => { - expect(mockState.agentSideConnectionCtor).toHaveBeenCalledTimes(1); - }); - - signalHandlers.get("SIGINT")?.(); - await servePromise; + await emitHelloAndWaitForAgentSideConnection(); + await stopServeWithSigint(signalHandlers, servePromise); } finally { onceSpy.mockRestore(); } @@ -207,13 +217,8 @@ describe("serveAcpGateway startup", () => { password: "resolved-secret-password", // pragma: allowlist secret }); - const gateway = getMockGateway(); - gateway.emitHello(); - await vi.waitFor(() => { - expect(mockState.agentSideConnectionCtor).toHaveBeenCalledTimes(1); - }); - signalHandlers.get("SIGINT")?.(); - await servePromise; + await emitHelloAndWaitForAgentSideConnection(); + await stopServeWithSigint(signalHandlers, servePromise); } finally { onceSpy.mockRestore(); } @@ -236,13 +241,8 @@ describe("serveAcpGateway startup", () => { }), ); - const gateway = getMockGateway(); - gateway.emitHello(); - await vi.waitFor(() => { - expect(mockState.agentSideConnectionCtor).toHaveBeenCalledTimes(1); - }); - signalHandlers.get("SIGINT")?.(); - await servePromise; + await emitHelloAndWaitForAgentSideConnection(); + await 
stopServeWithSigint(signalHandlers, servePromise); } finally { onceSpy.mockRestore(); } diff --git a/src/acp/translator.cancel-scoping.test.ts b/src/acp/translator.cancel-scoping.test.ts new file mode 100644 index 00000000000..e862222f7a0 --- /dev/null +++ b/src/acp/translator.cancel-scoping.test.ts @@ -0,0 +1,278 @@ +import type { CancelNotification, PromptRequest, PromptResponse } from "@agentclientprotocol/sdk"; +import { describe, expect, it, vi } from "vitest"; +import type { GatewayClient } from "../gateway/client.js"; +import type { EventFrame } from "../gateway/protocol/index.js"; +import { createInMemorySessionStore } from "./session.js"; +import { AcpGatewayAgent } from "./translator.js"; +import { createAcpConnection, createAcpGateway } from "./translator.test-helpers.js"; + +type Harness = { + agent: AcpGatewayAgent; + requestSpy: ReturnType; + sessionUpdateSpy: ReturnType; + sessionStore: ReturnType; + sentRunIds: string[]; +}; + +function createPromptRequest(sessionId: string): PromptRequest { + return { + sessionId, + prompt: [{ type: "text", text: "hello" }], + _meta: {}, + } as unknown as PromptRequest; +} + +function createChatEvent(payload: Record): EventFrame { + return { + type: "event", + event: "chat", + payload, + } as EventFrame; +} + +function createToolEvent(payload: Record): EventFrame { + return { + type: "event", + event: "agent", + payload, + } as EventFrame; +} + +function createHarness(sessions: Array<{ sessionId: string; sessionKey: string }>): Harness { + const sentRunIds: string[] = []; + const requestSpy = vi.fn(async (method: string, params?: Record) => { + if (method === "chat.send") { + const runId = params?.idempotencyKey; + if (typeof runId === "string") { + sentRunIds.push(runId); + } + return new Promise(() => {}); + } + return {}; + }); + const connection = createAcpConnection(); + const sessionStore = createInMemorySessionStore(); + for (const session of sessions) { + sessionStore.createSession({ + sessionId: 
session.sessionId, + sessionKey: session.sessionKey, + cwd: "/tmp", + }); + } + + const agent = new AcpGatewayAgent( + connection, + createAcpGateway(requestSpy as unknown as GatewayClient["request"]), + { sessionStore }, + ); + + return { + agent, + requestSpy, + // eslint-disable-next-line @typescript-eslint/unbound-method + sessionUpdateSpy: connection.sessionUpdate as unknown as ReturnType, + sessionStore, + sentRunIds, + }; +} + +async function startPendingPrompt( + harness: Harness, + sessionId: string, +): Promise<{ promptPromise: Promise; runId: string }> { + const before = harness.sentRunIds.length; + const promptPromise = harness.agent.prompt(createPromptRequest(sessionId)); + await vi.waitFor(() => { + expect(harness.sentRunIds.length).toBe(before + 1); + }); + return { + promptPromise, + runId: harness.sentRunIds[before], + }; +} + +async function cancelAndExpectAbortForPendingRun( + harness: Harness, + sessionId: string, + sessionKey: string, + pending: { promptPromise: Promise; runId: string }, +) { + await harness.agent.cancel({ sessionId } as CancelNotification); + + expect(harness.requestSpy).toHaveBeenCalledWith("chat.abort", { + sessionKey, + runId: pending.runId, + }); + await expect(pending.promptPromise).resolves.toEqual({ stopReason: "cancelled" }); +} + +async function deliverFinalChatEventAndExpectEndTurn( + harness: Harness, + sessionKey: string, + pending: { promptPromise: Promise; runId: string }, + seq: number, +) { + await harness.agent.handleGatewayEvent( + createChatEvent({ + runId: pending.runId, + sessionKey, + seq, + state: "final", + }), + ); + await expect(pending.promptPromise).resolves.toEqual({ stopReason: "end_turn" }); +} + +describe("acp translator cancel and run scoping", () => { + it("cancel passes active runId to chat.abort", async () => { + const sessionKey = "agent:main:shared"; + const harness = createHarness([{ sessionId: "session-1", sessionKey }]); + const pending = await startPendingPrompt(harness, "session-1"); 
+ + await cancelAndExpectAbortForPendingRun(harness, "session-1", sessionKey, pending); + }); + + it("cancel uses pending runId when there is no active run", async () => { + const sessionKey = "agent:main:shared"; + const harness = createHarness([{ sessionId: "session-1", sessionKey }]); + const pending = await startPendingPrompt(harness, "session-1"); + harness.sessionStore.clearActiveRun("session-1"); + + await cancelAndExpectAbortForPendingRun(harness, "session-1", sessionKey, pending); + }); + + it("cancel skips chat.abort when there is no active run and no pending prompt", async () => { + const sessionKey = "agent:main:shared"; + const harness = createHarness([{ sessionId: "session-1", sessionKey }]); + + await harness.agent.cancel({ sessionId: "session-1" } as CancelNotification); + + const abortCalls = harness.requestSpy.mock.calls.filter(([method]) => method === "chat.abort"); + expect(abortCalls).toHaveLength(0); + }); + + it("cancel from a session without active run does not abort another session sharing the same key", async () => { + const sessionKey = "agent:main:shared"; + const harness = createHarness([ + { sessionId: "session-1", sessionKey }, + { sessionId: "session-2", sessionKey }, + ]); + const pending2 = await startPendingPrompt(harness, "session-2"); + + await harness.agent.cancel({ sessionId: "session-1" } as CancelNotification); + + const abortCalls = harness.requestSpy.mock.calls.filter(([method]) => method === "chat.abort"); + expect(abortCalls).toHaveLength(0); + expect(harness.sessionStore.getSession("session-2")?.activeRunId).toBe(pending2.runId); + + await deliverFinalChatEventAndExpectEndTurn(harness, sessionKey, pending2, 1); + }); + + it("drops chat events when runId does not match the active prompt", async () => { + const sessionKey = "agent:main:shared"; + const harness = createHarness([{ sessionId: "session-1", sessionKey }]); + const pending = await startPendingPrompt(harness, "session-1"); + + await 
harness.agent.handleGatewayEvent( + createChatEvent({ + runId: "run-other", + sessionKey, + seq: 1, + state: "final", + }), + ); + expect(harness.sessionStore.getSession("session-1")?.activeRunId).toBe(pending.runId); + + await harness.agent.handleGatewayEvent( + createChatEvent({ + runId: pending.runId, + sessionKey, + seq: 2, + state: "final", + }), + ); + await expect(pending.promptPromise).resolves.toEqual({ stopReason: "end_turn" }); + }); + + it("drops tool events when runId does not match the active prompt", async () => { + const sessionKey = "agent:main:shared"; + const harness = createHarness([{ sessionId: "session-1", sessionKey }]); + const pending = await startPendingPrompt(harness, "session-1"); + harness.sessionUpdateSpy.mockClear(); + + await harness.agent.handleGatewayEvent( + createToolEvent({ + runId: "run-other", + sessionKey, + stream: "tool", + data: { + phase: "start", + name: "read_file", + toolCallId: "tool-1", + args: { path: "README.md" }, + }, + }), + ); + + expect(harness.sessionUpdateSpy).not.toHaveBeenCalled(); + + await harness.agent.handleGatewayEvent( + createChatEvent({ + runId: pending.runId, + sessionKey, + seq: 1, + state: "final", + }), + ); + await expect(pending.promptPromise).resolves.toEqual({ stopReason: "end_turn" }); + }); + + it("routes events to the pending prompt that matches runId when session keys are shared", async () => { + const sessionKey = "agent:main:shared"; + const harness = createHarness([ + { sessionId: "session-1", sessionKey }, + { sessionId: "session-2", sessionKey }, + ]); + const pending1 = await startPendingPrompt(harness, "session-1"); + const pending2 = await startPendingPrompt(harness, "session-2"); + harness.sessionUpdateSpy.mockClear(); + + await harness.agent.handleGatewayEvent( + createToolEvent({ + runId: pending2.runId, + sessionKey, + stream: "tool", + data: { + phase: "start", + name: "read_file", + toolCallId: "tool-2", + args: { path: "notes.txt" }, + }, + }), + ); + 
expect(harness.sessionUpdateSpy).toHaveBeenCalledWith( + expect.objectContaining({ + sessionId: "session-2", + update: expect.objectContaining({ + sessionUpdate: "tool_call", + toolCallId: "tool-2", + status: "in_progress", + }), + }), + ); + expect(harness.sessionUpdateSpy).toHaveBeenCalledTimes(1); + + await deliverFinalChatEventAndExpectEndTurn(harness, sessionKey, pending2, 1); + expect(harness.sessionStore.getSession("session-1")?.activeRunId).toBe(pending1.runId); + + await harness.agent.handleGatewayEvent( + createChatEvent({ + runId: pending1.runId, + sessionKey, + seq: 2, + state: "final", + }), + ); + await expect(pending1.promptPromise).resolves.toEqual({ stopReason: "end_turn" }); + }); +}); diff --git a/src/acp/translator.prompt-prefix.test.ts b/src/acp/translator.prompt-prefix.test.ts index 38c186519c0..9d53e3aa103 100644 --- a/src/acp/translator.prompt-prefix.test.ts +++ b/src/acp/translator.prompt-prefix.test.ts @@ -7,7 +7,52 @@ import { createInMemorySessionStore } from "./session.js"; import { AcpGatewayAgent } from "./translator.js"; import { createAcpConnection, createAcpGateway } from "./translator.test-helpers.js"; +const TEST_SESSION_ID = "session-1"; +const TEST_SESSION_KEY = "agent:main:main"; +const TEST_PROMPT = { + sessionId: TEST_SESSION_ID, + prompt: [{ type: "text", text: "hello" }], + _meta: {}, +} as unknown as PromptRequest; + describe("acp prompt cwd prefix", () => { + const createStopAfterSendSpy = () => + vi.fn(async (method: string) => { + if (method === "chat.send") { + throw new Error("stop-after-send"); + } + return {}; + }); + + async function runPromptAndCaptureRequest( + options: { + cwd?: string; + prefixCwd?: boolean; + provenanceMode?: "meta" | "meta+receipt"; + } = {}, + ) { + const sessionStore = createInMemorySessionStore(); + sessionStore.createSession({ + sessionId: TEST_SESSION_ID, + sessionKey: TEST_SESSION_KEY, + cwd: options.cwd ?? 
path.join(os.homedir(), "openclaw-test"), + }); + + const requestSpy = createStopAfterSendSpy(); + const agent = new AcpGatewayAgent( + createAcpConnection(), + createAcpGateway(requestSpy as unknown as GatewayClient["request"]), + { + sessionStore, + prefixCwd: options.prefixCwd, + provenanceMode: options.provenanceMode, + }, + ); + + await expect(agent.prompt(TEST_PROMPT)).rejects.toThrow("stop-after-send"); + return requestSpy; + } + async function runPromptWithCwd(cwd: string) { const pinnedHome = os.homedir(); const previousOpenClawHome = process.env.OPENCLAW_HOME; @@ -15,37 +60,8 @@ describe("acp prompt cwd prefix", () => { delete process.env.OPENCLAW_HOME; process.env.HOME = pinnedHome; - const sessionStore = createInMemorySessionStore(); - sessionStore.createSession({ - sessionId: "session-1", - sessionKey: "agent:main:main", - cwd, - }); - - const requestSpy = vi.fn(async (method: string) => { - if (method === "chat.send") { - throw new Error("stop-after-send"); - } - return {}; - }); - const agent = new AcpGatewayAgent( - createAcpConnection(), - createAcpGateway(requestSpy as unknown as GatewayClient["request"]), - { - sessionStore, - prefixCwd: true, - }, - ); - try { - await expect( - agent.prompt({ - sessionId: "session-1", - prompt: [{ type: "text", text: "hello" }], - _meta: {}, - } as unknown as PromptRequest), - ).rejects.toThrow("stop-after-send"); - return requestSpy; + return await runPromptAndCaptureRequest({ cwd, prefixCwd: true }); } finally { if (previousOpenClawHome === undefined) { delete process.env.OPENCLAW_HOME; @@ -83,42 +99,13 @@ describe("acp prompt cwd prefix", () => { }); it("injects system provenance metadata when enabled", async () => { - const sessionStore = createInMemorySessionStore(); - sessionStore.createSession({ - sessionId: "session-1", - sessionKey: "agent:main:main", - cwd: path.join(os.homedir(), "openclaw-test"), - }); - - const requestSpy = vi.fn(async (method: string) => { - if (method === "chat.send") { - throw 
new Error("stop-after-send"); - } - return {}; - }); - const agent = new AcpGatewayAgent( - createAcpConnection(), - createAcpGateway(requestSpy as unknown as GatewayClient["request"]), - { - sessionStore, - provenanceMode: "meta", - }, - ); - - await expect( - agent.prompt({ - sessionId: "session-1", - prompt: [{ type: "text", text: "hello" }], - _meta: {}, - } as unknown as PromptRequest), - ).rejects.toThrow("stop-after-send"); - + const requestSpy = await runPromptAndCaptureRequest({ provenanceMode: "meta" }); expect(requestSpy).toHaveBeenCalledWith( "chat.send", expect.objectContaining({ systemInputProvenance: { kind: "external_user", - originSessionId: "session-1", + originSessionId: TEST_SESSION_ID, sourceChannel: "acp", sourceTool: "openclaw_acp", }, @@ -129,42 +116,13 @@ describe("acp prompt cwd prefix", () => { }); it("injects a system provenance receipt when requested", async () => { - const sessionStore = createInMemorySessionStore(); - sessionStore.createSession({ - sessionId: "session-1", - sessionKey: "agent:main:main", - cwd: path.join(os.homedir(), "openclaw-test"), - }); - - const requestSpy = vi.fn(async (method: string) => { - if (method === "chat.send") { - throw new Error("stop-after-send"); - } - return {}; - }); - const agent = new AcpGatewayAgent( - createAcpConnection(), - createAcpGateway(requestSpy as unknown as GatewayClient["request"]), - { - sessionStore, - provenanceMode: "meta+receipt", - }, - ); - - await expect( - agent.prompt({ - sessionId: "session-1", - prompt: [{ type: "text", text: "hello" }], - _meta: {}, - } as unknown as PromptRequest), - ).rejects.toThrow("stop-after-send"); - + const requestSpy = await runPromptAndCaptureRequest({ provenanceMode: "meta+receipt" }); expect(requestSpy).toHaveBeenCalledWith( "chat.send", expect.objectContaining({ systemInputProvenance: { kind: "external_user", - originSessionId: "session-1", + originSessionId: TEST_SESSION_ID, sourceChannel: "acp", sourceTool: "openclaw_acp", }, @@ -182,14 
+140,14 @@ describe("acp prompt cwd prefix", () => { expect(requestSpy).toHaveBeenCalledWith( "chat.send", expect.objectContaining({ - systemProvenanceReceipt: expect.stringContaining("originSessionId=session-1"), + systemProvenanceReceipt: expect.stringContaining(`originSessionId=${TEST_SESSION_ID}`), }), { expectFinal: true }, ); expect(requestSpy).toHaveBeenCalledWith( "chat.send", expect.objectContaining({ - systemProvenanceReceipt: expect.stringContaining("targetSession=agent:main:main"), + systemProvenanceReceipt: expect.stringContaining(`targetSession=${TEST_SESSION_KEY}`), }), { expectFinal: true }, ); diff --git a/src/acp/translator.session-rate-limit.test.ts b/src/acp/translator.session-rate-limit.test.ts index 2e7d03b0f7b..3e3f254d0ee 100644 --- a/src/acp/translator.session-rate-limit.test.ts +++ b/src/acp/translator.session-rate-limit.test.ts @@ -2,9 +2,12 @@ import type { LoadSessionRequest, NewSessionRequest, PromptRequest, + SetSessionConfigOptionRequest, + SetSessionModeRequest, } from "@agentclientprotocol/sdk"; import { describe, expect, it, vi } from "vitest"; import type { GatewayClient } from "../gateway/client.js"; +import type { EventFrame } from "../gateway/protocol/index.js"; import { createInMemorySessionStore } from "./session.js"; import { AcpGatewayAgent } from "./translator.js"; import { createAcpConnection, createAcpGateway } from "./translator.test-helpers.js"; @@ -38,6 +41,65 @@ function createPromptRequest( } as unknown as PromptRequest; } +function createSetSessionModeRequest(sessionId: string, modeId: string): SetSessionModeRequest { + return { + sessionId, + modeId, + _meta: {}, + } as unknown as SetSessionModeRequest; +} + +function createSetSessionConfigOptionRequest( + sessionId: string, + configId: string, + value: string | boolean, +): SetSessionConfigOptionRequest { + return { + sessionId, + configId, + value, + _meta: {}, + } as unknown as SetSessionConfigOptionRequest; +} + +function createToolEvent(params: { + 
sessionKey: string; + phase: "start" | "update" | "result"; + toolCallId: string; + name: string; + args?: Record; + partialResult?: unknown; + result?: unknown; + isError?: boolean; +}): EventFrame { + return { + event: "agent", + payload: { + sessionKey: params.sessionKey, + stream: "tool", + data: { + phase: params.phase, + toolCallId: params.toolCallId, + name: params.name, + args: params.args, + partialResult: params.partialResult, + result: params.result, + isError: params.isError, + }, + }, + } as unknown as EventFrame; +} + +function createChatFinalEvent(sessionKey: string): EventFrame { + return { + event: "chat", + payload: { + sessionKey, + state: "final", + }, + } as unknown as EventFrame; +} + async function expectOversizedPromptRejected(params: { sessionId: string; text: string }) { const request = vi.fn(async () => ({ ok: true })) as GatewayClient["request"]; const sessionStore = createInMemorySessionStore(); @@ -97,6 +159,852 @@ describe("acp session creation rate limit", () => { }); }); +describe("acp unsupported bridge session setup", () => { + it("rejects per-session MCP servers on newSession", async () => { + const sessionStore = createInMemorySessionStore(); + const connection = createAcpConnection(); + const sessionUpdate = connection.__sessionUpdateMock; + const agent = new AcpGatewayAgent(connection, createAcpGateway(), { + sessionStore, + }); + + await expect( + agent.newSession({ + ...createNewSessionRequest(), + mcpServers: [{ name: "docs", command: "mcp-docs" }] as never[], + }), + ).rejects.toThrow(/does not support per-session MCP servers/i); + + expect(sessionStore.hasSession("docs-session")).toBe(false); + expect(sessionUpdate).not.toHaveBeenCalled(); + sessionStore.clearAllSessionsForTest(); + }); + + it("rejects per-session MCP servers on loadSession", async () => { + const sessionStore = createInMemorySessionStore(); + const connection = createAcpConnection(); + const sessionUpdate = connection.__sessionUpdateMock; + const agent = 
new AcpGatewayAgent(connection, createAcpGateway(), { + sessionStore, + }); + + await expect( + agent.loadSession({ + ...createLoadSessionRequest("docs-session"), + mcpServers: [{ name: "docs", command: "mcp-docs" }] as never[], + }), + ).rejects.toThrow(/does not support per-session MCP servers/i); + + expect(sessionStore.hasSession("docs-session")).toBe(false); + expect(sessionUpdate).not.toHaveBeenCalled(); + sessionStore.clearAllSessionsForTest(); + }); +}); + +describe("acp session UX bridge behavior", () => { + it("returns initial modes and thought-level config options for new sessions", async () => { + const sessionStore = createInMemorySessionStore(); + const agent = new AcpGatewayAgent(createAcpConnection(), createAcpGateway(), { + sessionStore, + }); + + const result = await agent.newSession(createNewSessionRequest()); + + expect(result.modes?.currentModeId).toBe("adaptive"); + expect(result.modes?.availableModes.map((mode) => mode.id)).toContain("adaptive"); + expect(result.configOptions).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + id: "thought_level", + currentValue: "adaptive", + category: "thought_level", + }), + expect.objectContaining({ + id: "verbose_level", + currentValue: "off", + }), + expect.objectContaining({ + id: "reasoning_level", + currentValue: "off", + }), + expect.objectContaining({ + id: "response_usage", + currentValue: "off", + }), + expect.objectContaining({ + id: "elevated_level", + currentValue: "off", + }), + ]), + ); + + sessionStore.clearAllSessionsForTest(); + }); + + it("replays user and assistant text history on loadSession and returns initial controls", async () => { + const sessionStore = createInMemorySessionStore(); + const connection = createAcpConnection(); + const sessionUpdate = connection.__sessionUpdateMock; + const request = vi.fn(async (method: string) => { + if (method === "sessions.list") { + return { + ts: Date.now(), + path: "/tmp/sessions.json", + count: 1, + defaults: { + 
modelProvider: null, + model: null, + contextTokens: null, + }, + sessions: [ + { + key: "agent:main:work", + label: "main-work", + displayName: "Main work", + derivedTitle: "Fix ACP bridge", + kind: "direct", + updatedAt: 1_710_000_000_000, + thinkingLevel: "high", + modelProvider: "openai", + model: "gpt-5.4", + verboseLevel: "full", + reasoningLevel: "stream", + responseUsage: "tokens", + elevatedLevel: "ask", + totalTokens: 4096, + totalTokensFresh: true, + contextTokens: 8192, + }, + ], + }; + } + if (method === "sessions.get") { + return { + messages: [ + { role: "user", content: [{ type: "text", text: "Question" }] }, + { role: "assistant", content: [{ type: "text", text: "Answer" }] }, + { role: "system", content: [{ type: "text", text: "ignore me" }] }, + { role: "assistant", content: [{ type: "image", image: "skip" }] }, + ], + }; + } + return { ok: true }; + }) as GatewayClient["request"]; + const agent = new AcpGatewayAgent(connection, createAcpGateway(request), { + sessionStore, + }); + + const result = await agent.loadSession(createLoadSessionRequest("agent:main:work")); + + expect(result.modes?.currentModeId).toBe("high"); + expect(result.modes?.availableModes.map((mode) => mode.id)).toContain("xhigh"); + expect(result.configOptions).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + id: "thought_level", + currentValue: "high", + }), + expect.objectContaining({ + id: "verbose_level", + currentValue: "full", + }), + expect.objectContaining({ + id: "reasoning_level", + currentValue: "stream", + }), + expect.objectContaining({ + id: "response_usage", + currentValue: "tokens", + }), + expect.objectContaining({ + id: "elevated_level", + currentValue: "ask", + }), + ]), + ); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "agent:main:work", + update: { + sessionUpdate: "user_message_chunk", + content: { type: "text", text: "Question" }, + }, + }); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "agent:main:work", + 
update: { + sessionUpdate: "agent_message_chunk", + content: { type: "text", text: "Answer" }, + }, + }); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "agent:main:work", + update: expect.objectContaining({ + sessionUpdate: "available_commands_update", + }), + }); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "agent:main:work", + update: { + sessionUpdate: "session_info_update", + title: "Fix ACP bridge", + updatedAt: "2024-03-09T16:00:00.000Z", + }, + }); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "agent:main:work", + update: { + sessionUpdate: "usage_update", + used: 4096, + size: 8192, + _meta: { + source: "gateway-session-store", + approximate: true, + }, + }, + }); + + sessionStore.clearAllSessionsForTest(); + }); + + it("falls back to an empty transcript when sessions.get fails during loadSession", async () => { + const sessionStore = createInMemorySessionStore(); + const connection = createAcpConnection(); + const sessionUpdate = connection.__sessionUpdateMock; + const request = vi.fn(async (method: string) => { + if (method === "sessions.list") { + return { + ts: Date.now(), + path: "/tmp/sessions.json", + count: 1, + defaults: { + modelProvider: null, + model: null, + contextTokens: null, + }, + sessions: [ + { + key: "agent:main:recover", + label: "recover", + displayName: "Recover session", + kind: "direct", + updatedAt: 1_710_000_000_000, + thinkingLevel: "adaptive", + modelProvider: "openai", + model: "gpt-5.4", + }, + ], + }; + } + if (method === "sessions.get") { + throw new Error("sessions.get unavailable"); + } + return { ok: true }; + }) as GatewayClient["request"]; + const agent = new AcpGatewayAgent(connection, createAcpGateway(request), { + sessionStore, + }); + + const result = await agent.loadSession(createLoadSessionRequest("agent:main:recover")); + + expect(result.modes?.currentModeId).toBe("adaptive"); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "agent:main:recover", + update: 
expect.objectContaining({ + sessionUpdate: "available_commands_update", + }), + }); + expect(sessionUpdate).not.toHaveBeenCalledWith({ + sessionId: "agent:main:recover", + update: expect.objectContaining({ + sessionUpdate: "user_message_chunk", + }), + }); + + sessionStore.clearAllSessionsForTest(); + }); +}); + +describe("acp setSessionMode bridge behavior", () => { + it("surfaces gateway mode patch failures instead of succeeding silently", async () => { + const sessionStore = createInMemorySessionStore(); + const request = vi.fn(async (method: string) => { + if (method === "sessions.patch") { + throw new Error("gateway rejected mode"); + } + return { ok: true }; + }) as GatewayClient["request"]; + const agent = new AcpGatewayAgent(createAcpConnection(), createAcpGateway(request), { + sessionStore, + }); + + await agent.loadSession(createLoadSessionRequest("mode-session")); + + await expect( + agent.setSessionMode(createSetSessionModeRequest("mode-session", "high")), + ).rejects.toThrow(/gateway rejected mode/i); + + sessionStore.clearAllSessionsForTest(); + }); + + it("emits current mode and thought-level config updates after a successful mode change", async () => { + const sessionStore = createInMemorySessionStore(); + const connection = createAcpConnection(); + const sessionUpdate = connection.__sessionUpdateMock; + const request = vi.fn(async (method: string) => { + if (method === "sessions.list") { + return { + ts: Date.now(), + path: "/tmp/sessions.json", + count: 1, + defaults: { + modelProvider: null, + model: null, + contextTokens: null, + }, + sessions: [ + { + key: "mode-session", + kind: "direct", + updatedAt: Date.now(), + thinkingLevel: "high", + modelProvider: "openai", + model: "gpt-5.4", + }, + ], + }; + } + return { ok: true }; + }) as GatewayClient["request"]; + const agent = new AcpGatewayAgent(connection, createAcpGateway(request), { + sessionStore, + }); + + await agent.loadSession(createLoadSessionRequest("mode-session")); + 
sessionUpdate.mockClear(); + + await agent.setSessionMode(createSetSessionModeRequest("mode-session", "high")); + + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "mode-session", + update: { + sessionUpdate: "current_mode_update", + currentModeId: "high", + }, + }); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "mode-session", + update: { + sessionUpdate: "config_option_update", + configOptions: expect.arrayContaining([ + expect.objectContaining({ + id: "thought_level", + currentValue: "high", + }), + ]), + }, + }); + + sessionStore.clearAllSessionsForTest(); + }); +}); + +describe("acp setSessionConfigOption bridge behavior", () => { + it("updates the thought-level config option and returns refreshed options", async () => { + const sessionStore = createInMemorySessionStore(); + const connection = createAcpConnection(); + const sessionUpdate = connection.__sessionUpdateMock; + const request = vi.fn(async (method: string) => { + if (method === "sessions.list") { + return { + ts: Date.now(), + path: "/tmp/sessions.json", + count: 1, + defaults: { + modelProvider: null, + model: null, + contextTokens: null, + }, + sessions: [ + { + key: "config-session", + kind: "direct", + updatedAt: Date.now(), + thinkingLevel: "minimal", + modelProvider: "openai", + model: "gpt-5.4", + }, + ], + }; + } + return { ok: true }; + }) as GatewayClient["request"]; + const agent = new AcpGatewayAgent(connection, createAcpGateway(request), { + sessionStore, + }); + + await agent.loadSession(createLoadSessionRequest("config-session")); + sessionUpdate.mockClear(); + + const result = await agent.setSessionConfigOption( + createSetSessionConfigOptionRequest("config-session", "thought_level", "minimal"), + ); + + expect(result.configOptions).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + id: "thought_level", + currentValue: "minimal", + }), + ]), + ); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "config-session", + update: { + 
sessionUpdate: "current_mode_update", + currentModeId: "minimal", + }, + }); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "config-session", + update: { + sessionUpdate: "config_option_update", + configOptions: expect.arrayContaining([ + expect.objectContaining({ + id: "thought_level", + currentValue: "minimal", + }), + ]), + }, + }); + + sessionStore.clearAllSessionsForTest(); + }); + + it("updates non-mode ACP config options through gateway session patches", async () => { + const sessionStore = createInMemorySessionStore(); + const connection = createAcpConnection(); + const sessionUpdate = connection.__sessionUpdateMock; + const request = vi.fn(async (method: string) => { + if (method === "sessions.list") { + return { + ts: Date.now(), + path: "/tmp/sessions.json", + count: 1, + defaults: { + modelProvider: null, + model: null, + contextTokens: null, + }, + sessions: [ + { + key: "reasoning-session", + kind: "direct", + updatedAt: Date.now(), + thinkingLevel: "minimal", + modelProvider: "openai", + model: "gpt-5.4", + reasoningLevel: "stream", + }, + ], + }; + } + return { ok: true }; + }) as GatewayClient["request"]; + const agent = new AcpGatewayAgent(connection, createAcpGateway(request), { + sessionStore, + }); + + await agent.loadSession(createLoadSessionRequest("reasoning-session")); + sessionUpdate.mockClear(); + + const result = await agent.setSessionConfigOption( + createSetSessionConfigOptionRequest("reasoning-session", "reasoning_level", "stream"), + ); + + expect(result.configOptions).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + id: "reasoning_level", + currentValue: "stream", + }), + ]), + ); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "reasoning-session", + update: { + sessionUpdate: "config_option_update", + configOptions: expect.arrayContaining([ + expect.objectContaining({ + id: "reasoning_level", + currentValue: "stream", + }), + ]), + }, + }); + + sessionStore.clearAllSessionsForTest(); + }); 
+ + it("updates fast mode ACP config options through gateway session patches", async () => { + const sessionStore = createInMemorySessionStore(); + const connection = createAcpConnection(); + const sessionUpdate = connection.__sessionUpdateMock; + const request = vi.fn(async (method: string, params?: unknown) => { + if (method === "sessions.list") { + return { + ts: Date.now(), + path: "/tmp/sessions.json", + count: 1, + defaults: { + modelProvider: null, + model: null, + contextTokens: null, + }, + sessions: [ + { + key: "fast-session", + kind: "direct", + updatedAt: Date.now(), + thinkingLevel: "minimal", + modelProvider: "openai", + model: "gpt-5.4", + fastMode: true, + }, + ], + }; + } + if (method === "sessions.patch") { + expect(params).toEqual({ + key: "fast-session", + fastMode: true, + }); + } + return { ok: true }; + }) as GatewayClient["request"]; + const agent = new AcpGatewayAgent(connection, createAcpGateway(request), { + sessionStore, + }); + + await agent.loadSession(createLoadSessionRequest("fast-session")); + sessionUpdate.mockClear(); + + const result = await agent.setSessionConfigOption( + createSetSessionConfigOptionRequest("fast-session", "fast_mode", "on"), + ); + + expect(result.configOptions).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + id: "fast_mode", + currentValue: "on", + }), + ]), + ); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "fast-session", + update: { + sessionUpdate: "config_option_update", + configOptions: expect.arrayContaining([ + expect.objectContaining({ + id: "fast_mode", + currentValue: "on", + }), + ]), + }, + }); + + sessionStore.clearAllSessionsForTest(); + }); + + it("rejects non-string ACP config option values", async () => { + const sessionStore = createInMemorySessionStore(); + const connection = createAcpConnection(); + const request = vi.fn(async (method: string) => { + if (method === "sessions.list") { + return { + ts: Date.now(), + path: "/tmp/sessions.json", + count: 1, + 
defaults: { + modelProvider: null, + model: null, + contextTokens: null, + }, + sessions: [ + { + key: "bool-config-session", + kind: "direct", + updatedAt: Date.now(), + thinkingLevel: "minimal", + modelProvider: "openai", + model: "gpt-5.4", + }, + ], + }; + } + return { ok: true }; + }) as GatewayClient["request"]; + const agent = new AcpGatewayAgent(connection, createAcpGateway(request), { + sessionStore, + }); + + await agent.loadSession(createLoadSessionRequest("bool-config-session")); + + await expect( + agent.setSessionConfigOption( + createSetSessionConfigOptionRequest("bool-config-session", "thought_level", false), + ), + ).rejects.toThrow( + 'ACP bridge does not support non-string session config option values for "thought_level".', + ); + expect(request).not.toHaveBeenCalledWith( + "sessions.patch", + expect.objectContaining({ key: "bool-config-session" }), + ); + + sessionStore.clearAllSessionsForTest(); + }); +}); + +describe("acp tool streaming bridge behavior", () => { + it("maps Gateway tool partial output and file locations into ACP tool updates", async () => { + const sessionStore = createInMemorySessionStore(); + const connection = createAcpConnection(); + const sessionUpdate = connection.__sessionUpdateMock; + const request = vi.fn(async (method: string) => { + if (method === "chat.send") { + return new Promise(() => {}); + } + return { ok: true }; + }) as GatewayClient["request"]; + const agent = new AcpGatewayAgent(connection, createAcpGateway(request), { + sessionStore, + }); + + await agent.loadSession(createLoadSessionRequest("tool-session")); + sessionUpdate.mockClear(); + + const promptPromise = agent.prompt(createPromptRequest("tool-session", "Inspect app.ts")); + + await agent.handleGatewayEvent( + createToolEvent({ + sessionKey: "tool-session", + phase: "start", + toolCallId: "tool-1", + name: "read", + args: { path: "src/app.ts", line: 12 }, + }), + ); + await agent.handleGatewayEvent( + createToolEvent({ + sessionKey: "tool-session", 
+ phase: "update", + toolCallId: "tool-1", + name: "read", + partialResult: { + content: [{ type: "text", text: "partial output" }], + details: { path: "src/app.ts" }, + }, + }), + ); + await agent.handleGatewayEvent( + createToolEvent({ + sessionKey: "tool-session", + phase: "result", + toolCallId: "tool-1", + name: "read", + result: { + content: [{ type: "text", text: "FILE:src/app.ts" }], + details: { path: "src/app.ts" }, + }, + }), + ); + await agent.handleGatewayEvent(createChatFinalEvent("tool-session")); + await promptPromise; + + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "tool-session", + update: { + sessionUpdate: "tool_call", + toolCallId: "tool-1", + title: "read: path: src/app.ts, line: 12", + status: "in_progress", + rawInput: { path: "src/app.ts", line: 12 }, + kind: "read", + locations: [{ path: "src/app.ts", line: 12 }], + }, + }); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "tool-session", + update: { + sessionUpdate: "tool_call_update", + toolCallId: "tool-1", + status: "in_progress", + rawOutput: { + content: [{ type: "text", text: "partial output" }], + details: { path: "src/app.ts" }, + }, + content: [ + { + type: "content", + content: { type: "text", text: "partial output" }, + }, + ], + locations: [{ path: "src/app.ts", line: 12 }], + }, + }); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "tool-session", + update: { + sessionUpdate: "tool_call_update", + toolCallId: "tool-1", + status: "completed", + rawOutput: { + content: [{ type: "text", text: "FILE:src/app.ts" }], + details: { path: "src/app.ts" }, + }, + content: [ + { + type: "content", + content: { type: "text", text: "FILE:src/app.ts" }, + }, + ], + locations: [{ path: "src/app.ts", line: 12 }], + }, + }); + + sessionStore.clearAllSessionsForTest(); + }); +}); + +describe("acp session metadata and usage updates", () => { + it("emits a fresh usage snapshot after prompt completion when gateway totals are available", async () => { + const 
sessionStore = createInMemorySessionStore(); + const connection = createAcpConnection(); + const sessionUpdate = connection.__sessionUpdateMock; + const request = vi.fn(async (method: string) => { + if (method === "sessions.list") { + return { + ts: Date.now(), + path: "/tmp/sessions.json", + count: 1, + defaults: { + modelProvider: null, + model: null, + contextTokens: null, + }, + sessions: [ + { + key: "usage-session", + displayName: "Usage session", + kind: "direct", + updatedAt: 1_710_000_123_000, + thinkingLevel: "adaptive", + modelProvider: "openai", + model: "gpt-5.4", + totalTokens: 1200, + totalTokensFresh: true, + contextTokens: 4000, + }, + ], + }; + } + if (method === "chat.send") { + return new Promise(() => {}); + } + return { ok: true }; + }) as GatewayClient["request"]; + const agent = new AcpGatewayAgent(connection, createAcpGateway(request), { + sessionStore, + }); + + await agent.loadSession(createLoadSessionRequest("usage-session")); + sessionUpdate.mockClear(); + + const promptPromise = agent.prompt(createPromptRequest("usage-session", "hello")); + await agent.handleGatewayEvent(createChatFinalEvent("usage-session")); + await promptPromise; + + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "usage-session", + update: { + sessionUpdate: "session_info_update", + title: "Usage session", + updatedAt: "2024-03-09T16:02:03.000Z", + }, + }); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "usage-session", + update: { + sessionUpdate: "usage_update", + used: 1200, + size: 4000, + _meta: { + source: "gateway-session-store", + approximate: true, + }, + }, + }); + + sessionStore.clearAllSessionsForTest(); + }); + + it("still resolves prompts when snapshot updates fail after completion", async () => { + const sessionStore = createInMemorySessionStore(); + const connection = createAcpConnection(); + const sessionUpdate = connection.__sessionUpdateMock; + const request = vi.fn(async (method: string) => { + if (method === 
"sessions.list") { + return { + ts: Date.now(), + path: "/tmp/sessions.json", + count: 1, + defaults: { + modelProvider: null, + model: null, + contextTokens: null, + }, + sessions: [ + { + key: "usage-session", + displayName: "Usage session", + kind: "direct", + updatedAt: 1_710_000_123_000, + thinkingLevel: "adaptive", + modelProvider: "openai", + model: "gpt-5.4", + totalTokens: 1200, + totalTokensFresh: true, + contextTokens: 4000, + }, + ], + }; + } + if (method === "chat.send") { + return new Promise(() => {}); + } + return { ok: true }; + }) as GatewayClient["request"]; + const agent = new AcpGatewayAgent(connection, createAcpGateway(request), { + sessionStore, + }); + + await agent.loadSession(createLoadSessionRequest("usage-session")); + sessionUpdate.mockClear(); + sessionUpdate.mockRejectedValueOnce(new Error("session update transport failed")); + + const promptPromise = agent.prompt(createPromptRequest("usage-session", "hello")); + await agent.handleGatewayEvent(createChatFinalEvent("usage-session")); + + await expect(promptPromise).resolves.toEqual({ stopReason: "end_turn" }); + const session = sessionStore.getSession("usage-session"); + expect(session?.activeRunId).toBeNull(); + expect(session?.abortController).toBeNull(); + + sessionStore.clearAllSessionsForTest(); + }); +}); + describe("acp prompt size hardening", () => { it("rejects oversized prompt blocks without leaking active runs", async () => { await expectOversizedPromptRejected({ @@ -112,3 +1020,144 @@ describe("acp prompt size hardening", () => { }); }); }); + +describe("acp final chat snapshots", () => { + async function createSnapshotHarness() { + const sessionStore = createInMemorySessionStore(); + const connection = createAcpConnection(); + const sessionUpdate = connection.__sessionUpdateMock; + const request = vi.fn(async (method: string) => { + if (method === "chat.send") { + return new Promise(() => {}); + } + return { ok: true }; + }) as GatewayClient["request"]; + const agent = new 
AcpGatewayAgent(connection, createAcpGateway(request), { + sessionStore, + }); + await agent.loadSession(createLoadSessionRequest("snapshot-session")); + sessionUpdate.mockClear(); + const promptPromise = agent.prompt(createPromptRequest("snapshot-session", "hello")); + const runId = sessionStore.getSession("snapshot-session")?.activeRunId; + if (!runId) { + throw new Error("Expected ACP prompt run to be active"); + } + return { agent, sessionUpdate, promptPromise, runId, sessionStore }; + } + + it("emits final snapshot text before resolving end_turn", async () => { + const { agent, sessionUpdate, promptPromise, runId, sessionStore } = + await createSnapshotHarness(); + + await agent.handleGatewayEvent({ + event: "chat", + payload: { + sessionKey: "snapshot-session", + runId, + state: "final", + stopReason: "end_turn", + message: { + content: [{ type: "text", text: "FINAL TEXT SHOULD BE EMITTED" }], + }, + }, + } as unknown as EventFrame); + + await expect(promptPromise).resolves.toEqual({ stopReason: "end_turn" }); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "snapshot-session", + update: { + sessionUpdate: "agent_message_chunk", + content: { type: "text", text: "FINAL TEXT SHOULD BE EMITTED" }, + }, + }); + expect(sessionStore.getSession("snapshot-session")?.activeRunId).toBeNull(); + sessionStore.clearAllSessionsForTest(); + }); + + it("does not duplicate text when final repeats the last delta snapshot", async () => { + const { agent, sessionUpdate, promptPromise, runId, sessionStore } = + await createSnapshotHarness(); + + await agent.handleGatewayEvent({ + event: "chat", + payload: { + sessionKey: "snapshot-session", + runId, + state: "delta", + message: { + content: [{ type: "text", text: "Hello world" }], + }, + }, + } as unknown as EventFrame); + + await agent.handleGatewayEvent({ + event: "chat", + payload: { + sessionKey: "snapshot-session", + runId, + state: "final", + stopReason: "end_turn", + message: { + content: [{ type: "text", text: 
"Hello world" }], + }, + }, + } as unknown as EventFrame); + + await expect(promptPromise).resolves.toEqual({ stopReason: "end_turn" }); + const chunks = sessionUpdate.mock.calls.filter( + (call: unknown[]) => + (call[0] as Record)?.update && + (call[0] as Record>).update?.sessionUpdate === + "agent_message_chunk", + ); + expect(chunks).toHaveLength(1); + sessionStore.clearAllSessionsForTest(); + }); + + it("emits only the missing tail when the final snapshot extends prior deltas", async () => { + const { agent, sessionUpdate, promptPromise, runId, sessionStore } = + await createSnapshotHarness(); + + await agent.handleGatewayEvent({ + event: "chat", + payload: { + sessionKey: "snapshot-session", + runId, + state: "delta", + message: { + content: [{ type: "text", text: "Hello" }], + }, + }, + } as unknown as EventFrame); + + await agent.handleGatewayEvent({ + event: "chat", + payload: { + sessionKey: "snapshot-session", + runId, + state: "final", + stopReason: "max_tokens", + message: { + content: [{ type: "text", text: "Hello world" }], + }, + }, + } as unknown as EventFrame); + + await expect(promptPromise).resolves.toEqual({ stopReason: "max_tokens" }); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "snapshot-session", + update: { + sessionUpdate: "agent_message_chunk", + content: { type: "text", text: "Hello" }, + }, + }); + expect(sessionUpdate).toHaveBeenCalledWith({ + sessionId: "snapshot-session", + update: { + sessionUpdate: "agent_message_chunk", + content: { type: "text", text: " world" }, + }, + }); + sessionStore.clearAllSessionsForTest(); + }); +}); diff --git a/src/acp/translator.test-helpers.ts b/src/acp/translator.test-helpers.ts index c80918ba2cc..2bd7fd2747f 100644 --- a/src/acp/translator.test-helpers.ts +++ b/src/acp/translator.test-helpers.ts @@ -2,10 +2,16 @@ import type { AgentSideConnection } from "@agentclientprotocol/sdk"; import { vi } from "vitest"; import type { GatewayClient } from "../gateway/client.js"; -export function 
createAcpConnection(): AgentSideConnection { +export type TestAcpConnection = AgentSideConnection & { + __sessionUpdateMock: ReturnType; +}; + +export function createAcpConnection(): TestAcpConnection { + const sessionUpdate = vi.fn(async () => {}); return { - sessionUpdate: vi.fn(async () => {}), - } as unknown as AgentSideConnection; + sessionUpdate, + __sessionUpdateMock: sessionUpdate, + } as unknown as TestAcpConnection; } export function createAcpGateway( diff --git a/src/acp/translator.ts b/src/acp/translator.ts index d399228afa6..8ab1f821fc8 100644 --- a/src/acp/translator.ts +++ b/src/acp/translator.ts @@ -16,14 +16,21 @@ import type { NewSessionResponse, PromptRequest, PromptResponse, + SessionConfigOption, + SessionModeState, + SetSessionConfigOptionRequest, + SetSessionConfigOptionResponse, SetSessionModeRequest, SetSessionModeResponse, StopReason, + ToolCallLocation, + ToolKind, } from "@agentclientprotocol/sdk"; import { PROTOCOL_VERSION } from "@agentclientprotocol/sdk"; +import { listThinkingLevels } from "../auto-reply/thinking.js"; import type { GatewayClient } from "../gateway/client.js"; import type { EventFrame } from "../gateway/protocol/index.js"; -import type { SessionsListResult } from "../gateway/session-utils.js"; +import type { GatewaySessionRow, SessionsListResult } from "../gateway/session-utils.js"; import { createFixedWindowRateLimiter, type FixedWindowRateLimiter, @@ -32,6 +39,8 @@ import { shortenHomePath } from "../utils.js"; import { getAvailableCommands } from "./commands.js"; import { extractAttachmentsFromPrompt, + extractToolCallContent, + extractToolCallLocations, extractTextFromPrompt, formatToolTitle, inferToolKind, @@ -43,6 +52,13 @@ import { ACP_AGENT_INFO, type AcpServerOptions } from "./types.js"; // Maximum allowed prompt size (2MB) to prevent DoS via memory exhaustion (CWE-400, GHSA-cxpw-2g23-2vgw) const MAX_PROMPT_BYTES = 2 * 1024 * 1024; +const ACP_THOUGHT_LEVEL_CONFIG_ID = "thought_level"; +const 
ACP_FAST_MODE_CONFIG_ID = "fast_mode";
+const ACP_VERBOSE_LEVEL_CONFIG_ID = "verbose_level";
+const ACP_REASONING_LEVEL_CONFIG_ID = "reasoning_level";
+const ACP_RESPONSE_USAGE_CONFIG_ID = "response_usage";
+const ACP_ELEVATED_LEVEL_CONFIG_ID = "elevated_level";
+const ACP_LOAD_SESSION_REPLAY_LIMIT = 1_000_000;
 
 type PendingPrompt = {
   sessionId: string;
@@ -52,16 +68,248 @@ type PendingPrompt = {
   reject: (err: Error) => void;
   sentTextLength?: number;
   sentText?: string;
-  toolCalls?: Set<string>;
+  toolCalls?: Map<string, PendingToolCall>;
+};
+
+type PendingToolCall = {
+  kind: ToolKind;
+  locations?: ToolCallLocation[];
+  rawInput?: Record<string, unknown>;
+  title: string;
 };
 
 type AcpGatewayAgentOptions = AcpServerOptions & {
   sessionStore?: AcpSessionStore;
 };
 
+type GatewaySessionPresentationRow = Pick<
+  GatewaySessionRow,
+  | "displayName"
+  | "label"
+  | "derivedTitle"
+  | "updatedAt"
+  | "thinkingLevel"
+  | "fastMode"
+  | "modelProvider"
+  | "model"
+  | "verboseLevel"
+  | "reasoningLevel"
+  | "responseUsage"
+  | "elevatedLevel"
+  | "totalTokens"
+  | "totalTokensFresh"
+  | "contextTokens"
+>;
+
+type SessionPresentation = {
+  configOptions: SessionConfigOption[];
+  modes: SessionModeState;
+};
+
+type SessionMetadata = {
+  title?: string | null;
+  updatedAt?: string | null;
+};
+
+type SessionUsageSnapshot = {
+  size: number;
+  used: number;
+};
+
+type SessionSnapshot = SessionPresentation & {
+  metadata?: SessionMetadata;
+  usage?: SessionUsageSnapshot;
+};
+
+type GatewayTranscriptMessage = {
+  role?: unknown;
+  content?: unknown;
+};
+
 const SESSION_CREATE_RATE_LIMIT_DEFAULT_MAX_REQUESTS = 120;
 const SESSION_CREATE_RATE_LIMIT_DEFAULT_WINDOW_MS = 10_000;
 
+function formatThinkingLevelName(level: string): string {
+  switch (level) {
+    case "xhigh":
+      return "Extra High";
+    case "adaptive":
+      return "Adaptive";
+    default:
+      return level.length > 0 ? 
`${level[0].toUpperCase()}${level.slice(1)}` : "Unknown"; + } +} + +function buildThinkingModeDescription(level: string): string | undefined { + if (level === "adaptive") { + return "Use the Gateway session default thought level."; + } + return undefined; +} + +function formatConfigValueName(value: string): string { + switch (value) { + case "xhigh": + return "Extra High"; + default: + return value.length > 0 ? `${value[0].toUpperCase()}${value.slice(1)}` : "Unknown"; + } +} + +function buildSelectConfigOption(params: { + id: string; + name: string; + description: string; + currentValue: string; + values: readonly string[]; + category?: string; +}): SessionConfigOption { + return { + type: "select", + id: params.id, + name: params.name, + category: params.category, + description: params.description, + currentValue: params.currentValue, + options: params.values.map((value) => ({ + value, + name: formatConfigValueName(value), + })), + }; +} + +function buildSessionPresentation(params: { + row?: GatewaySessionPresentationRow; + overrides?: Partial; +}): SessionPresentation { + const row = { + ...params.row, + ...params.overrides, + }; + const availableLevelIds: string[] = [...listThinkingLevels(row.modelProvider, row.model)]; + const currentModeId = row.thinkingLevel?.trim() || "adaptive"; + if (!availableLevelIds.includes(currentModeId)) { + availableLevelIds.push(currentModeId); + } + + const modes: SessionModeState = { + currentModeId, + availableModes: availableLevelIds.map((level) => ({ + id: level, + name: formatThinkingLevelName(level), + description: buildThinkingModeDescription(level), + })), + }; + + const configOptions: SessionConfigOption[] = [ + buildSelectConfigOption({ + id: ACP_THOUGHT_LEVEL_CONFIG_ID, + name: "Thought level", + category: "thought_level", + description: + "Controls how much deliberate reasoning OpenClaw requests from the Gateway model.", + currentValue: currentModeId, + values: availableLevelIds, + }), + buildSelectConfigOption({ + id: 
ACP_FAST_MODE_CONFIG_ID, + name: "Fast mode", + description: "Controls whether OpenAI sessions use the Gateway fast-mode profile.", + currentValue: row.fastMode ? "on" : "off", + values: ["off", "on"], + }), + buildSelectConfigOption({ + id: ACP_VERBOSE_LEVEL_CONFIG_ID, + name: "Tool verbosity", + description: + "Controls how much tool progress and output detail OpenClaw keeps enabled for the session.", + currentValue: row.verboseLevel?.trim() || "off", + values: ["off", "on", "full"], + }), + buildSelectConfigOption({ + id: ACP_REASONING_LEVEL_CONFIG_ID, + name: "Reasoning stream", + description: "Controls whether reasoning-capable models emit reasoning text for the session.", + currentValue: row.reasoningLevel?.trim() || "off", + values: ["off", "on", "stream"], + }), + buildSelectConfigOption({ + id: ACP_RESPONSE_USAGE_CONFIG_ID, + name: "Usage detail", + description: + "Controls how much usage information OpenClaw attaches to responses for the session.", + currentValue: row.responseUsage?.trim() || "off", + values: ["off", "tokens", "full"], + }), + buildSelectConfigOption({ + id: ACP_ELEVATED_LEVEL_CONFIG_ID, + name: "Elevated actions", + description: "Controls how aggressively the session allows elevated execution behavior.", + currentValue: row.elevatedLevel?.trim() || "off", + values: ["off", "on", "ask", "full"], + }), + ]; + + return { configOptions, modes }; +} + +function extractReplayText(content: unknown): string | undefined { + if (typeof content === "string") { + return content.length > 0 ? content : undefined; + } + if (!Array.isArray(content)) { + return undefined; + } + const text = content + .map((block) => { + if (!block || typeof block !== "object" || Array.isArray(block)) { + return ""; + } + const typedBlock = block as { type?: unknown; text?: unknown }; + return typedBlock.type === "text" && typeof typedBlock.text === "string" + ? typedBlock.text + : ""; + }) + .join(""); + return text.length > 0 ? 
text : undefined; +} + +function buildSessionMetadata(params: { + row?: GatewaySessionPresentationRow; + sessionKey: string; +}): SessionMetadata { + const title = + params.row?.derivedTitle?.trim() || + params.row?.displayName?.trim() || + params.row?.label?.trim() || + params.sessionKey; + const updatedAt = + typeof params.row?.updatedAt === "number" && Number.isFinite(params.row.updatedAt) + ? new Date(params.row.updatedAt).toISOString() + : null; + return { title, updatedAt }; +} + +function buildSessionUsageSnapshot( + row?: GatewaySessionPresentationRow, +): SessionUsageSnapshot | undefined { + const totalTokens = row?.totalTokens; + const contextTokens = row?.contextTokens; + if ( + row?.totalTokensFresh !== true || + typeof totalTokens !== "number" || + !Number.isFinite(totalTokens) || + typeof contextTokens !== "number" || + !Number.isFinite(contextTokens) || + contextTokens <= 0 + ) { + return undefined; + } + const size = Math.max(0, Math.floor(contextTokens)); + const used = Math.max(0, Math.min(Math.floor(totalTokens), size)); + return { size, used }; +} + function buildSystemInputProvenance(originSessionId: string) { return { kind: "external_user" as const, @@ -170,9 +418,7 @@ export class AcpGatewayAgent implements Agent { } async newSession(params: NewSessionRequest): Promise { - if (params.mcpServers.length > 0) { - this.log(`ignoring ${params.mcpServers.length} MCP servers`); - } + this.assertSupportedSessionSetup(params.mcpServers); this.enforceSessionCreateRateLimit("newSession"); const sessionId = randomUUID(); @@ -188,14 +434,21 @@ export class AcpGatewayAgent implements Agent { cwd: params.cwd, }); this.log(`newSession: ${session.sessionId} -> ${session.sessionKey}`); + const sessionSnapshot = await this.getSessionSnapshot(session.sessionKey); + await this.sendSessionSnapshotUpdate(session.sessionId, sessionSnapshot, { + includeControls: false, + }); await this.sendAvailableCommands(session.sessionId); - return { sessionId: session.sessionId 
}; + const { configOptions, modes } = sessionSnapshot; + return { + sessionId: session.sessionId, + configOptions, + modes, + }; } async loadSession(params: LoadSessionRequest): Promise { - if (params.mcpServers.length > 0) { - this.log(`ignoring ${params.mcpServers.length} MCP servers`); - } + this.assertSupportedSessionSetup(params.mcpServers); if (!this.sessionStore.hasSession(params.sessionId)) { this.enforceSessionCreateRateLimit("loadSession"); } @@ -212,8 +465,20 @@ export class AcpGatewayAgent implements Agent { cwd: params.cwd, }); this.log(`loadSession: ${session.sessionId} -> ${session.sessionKey}`); + const [sessionSnapshot, transcript] = await Promise.all([ + this.getSessionSnapshot(session.sessionKey), + this.getSessionTranscript(session.sessionKey).catch((err) => { + this.log(`session transcript fallback for ${session.sessionKey}: ${String(err)}`); + return []; + }), + ]); + await this.replaySessionTranscript(session.sessionId, transcript); + await this.sendSessionSnapshotUpdate(session.sessionId, sessionSnapshot, { + includeControls: false, + }); await this.sendAvailableCommands(session.sessionId); - return {}; + const { configOptions, modes } = sessionSnapshot; + return { configOptions, modes }; } async unstable_listSessions(params: ListSessionsRequest): Promise { @@ -254,13 +519,52 @@ export class AcpGatewayAgent implements Agent { thinkingLevel: params.modeId, }); this.log(`setSessionMode: ${session.sessionId} -> ${params.modeId}`); + const sessionSnapshot = await this.getSessionSnapshot(session.sessionKey, { + thinkingLevel: params.modeId, + }); + await this.sendSessionSnapshotUpdate(session.sessionId, sessionSnapshot, { + includeControls: true, + }); } catch (err) { this.log(`setSessionMode error: ${String(err)}`); - throw err; + throw err instanceof Error ? 
err : new Error(String(err)); } return {}; } + async setSessionConfigOption( + params: SetSessionConfigOptionRequest, + ): Promise { + const session = this.sessionStore.getSession(params.sessionId); + if (!session) { + throw new Error(`Session ${params.sessionId} not found`); + } + const sessionPatch = this.resolveSessionConfigPatch(params.configId, params.value); + + try { + await this.gateway.request("sessions.patch", { + key: session.sessionKey, + ...sessionPatch.patch, + }); + this.log( + `setSessionConfigOption: ${session.sessionId} -> ${params.configId}=${params.value}`, + ); + const sessionSnapshot = await this.getSessionSnapshot( + session.sessionKey, + sessionPatch.overrides, + ); + await this.sendSessionSnapshotUpdate(session.sessionId, sessionSnapshot, { + includeControls: true, + }); + return { + configOptions: sessionSnapshot.configOptions, + }; + } catch (err) { + this.log(`setSessionConfigOption error: ${String(err)}`); + throw err instanceof Error ? err : new Error(String(err)); + } + } + async prompt(params: PromptRequest): Promise { const session = this.sessionStore.getSession(params.sessionId); if (!session) { @@ -338,15 +642,25 @@ export class AcpGatewayAgent implements Agent { if (!session) { return; } + // Capture runId before cancelActiveRun clears session.activeRunId. + const activeRunId = session.activeRunId; this.sessionStore.cancelActiveRun(params.sessionId); + const pending = this.pendingPrompts.get(params.sessionId); + const scopedRunId = activeRunId ?? 
pending?.idempotencyKey; + if (!scopedRunId) { + return; + } + try { - await this.gateway.request("chat.abort", { sessionKey: session.sessionKey }); + await this.gateway.request("chat.abort", { + sessionKey: session.sessionKey, + runId: scopedRunId, + }); } catch (err) { this.log(`cancel error: ${String(err)}`); } - const pending = this.pendingPrompts.get(params.sessionId); if (pending) { this.pendingPrompts.delete(params.sessionId); pending.resolve({ stopReason: "cancelled" }); @@ -378,6 +692,7 @@ export class AcpGatewayAgent implements Agent { return; } const stream = payload.stream as string | undefined; + const runId = payload.runId as string | undefined; const data = payload.data as Record | undefined; const sessionKey = payload.sessionKey as string | undefined; if (!stream || !data || !sessionKey) { @@ -394,29 +709,55 @@ export class AcpGatewayAgent implements Agent { return; } - const pending = this.findPendingBySessionKey(sessionKey); + const pending = this.findPendingBySessionKey(sessionKey, runId); if (!pending) { return; } if (phase === "start") { if (!pending.toolCalls) { - pending.toolCalls = new Set(); + pending.toolCalls = new Map(); } if (pending.toolCalls.has(toolCallId)) { return; } - pending.toolCalls.add(toolCallId); const args = data.args as Record | undefined; + const title = formatToolTitle(name, args); + const kind = inferToolKind(name); + const locations = extractToolCallLocations(args); + pending.toolCalls.set(toolCallId, { + title, + kind, + rawInput: args, + locations, + }); await this.connection.sessionUpdate({ sessionId: pending.sessionId, update: { sessionUpdate: "tool_call", toolCallId, - title: formatToolTitle(name, args), + title, status: "in_progress", rawInput: args, - kind: inferToolKind(name), + kind, + locations, + }, + }); + return; + } + + if (phase === "update") { + const toolState = pending.toolCalls?.get(toolCallId); + const partialResult = data.partialResult; + await this.connection.sessionUpdate({ + sessionId: 
pending.sessionId, + update: { + sessionUpdate: "tool_call_update", + toolCallId, + status: "in_progress", + rawOutput: partialResult, + content: extractToolCallContent(partialResult), + locations: extractToolCallLocations(toolState?.locations, partialResult), }, }); return; @@ -424,6 +765,8 @@ export class AcpGatewayAgent implements Agent { if (phase === "result") { const isError = Boolean(data.isError); + const toolState = pending.toolCalls?.get(toolCallId); + pending.toolCalls?.delete(toolCallId); await this.connection.sessionUpdate({ sessionId: pending.sessionId, update: { @@ -431,6 +774,8 @@ export class AcpGatewayAgent implements Agent { toolCallId, status: isError ? "failed" : "completed", rawOutput: data.result, + content: extractToolCallContent(data.result), + locations: extractToolCallLocations(toolState?.locations, data.result), }, }); } @@ -450,27 +795,30 @@ export class AcpGatewayAgent implements Agent { return; } - const pending = this.findPendingBySessionKey(sessionKey); + const pending = this.findPendingBySessionKey(sessionKey, runId); if (!pending) { return; } - if (runId && pending.idempotencyKey !== runId) { - return; - } - if (state === "delta" && messageData) { + const shouldHandleMessageSnapshot = messageData && (state === "delta" || state === "final"); + if (shouldHandleMessageSnapshot) { + // Gateway chat events can carry the latest full assistant snapshot on both + // incremental updates and the terminal final event. Process the snapshot + // first so ACP clients never drop the last visible assistant text. await this.handleDeltaEvent(pending.sessionId, messageData); - return; + if (state === "delta") { + return; + } } if (state === "final") { const rawStopReason = payload.stopReason as string | undefined; const stopReason: StopReason = rawStopReason === "max_tokens" ? 
"max_tokens" : "end_turn"; - this.finishPrompt(pending.sessionId, pending, stopReason); + await this.finishPrompt(pending.sessionId, pending, stopReason); return; } if (state === "aborted") { - this.finishPrompt(pending.sessionId, pending, "cancelled"); + await this.finishPrompt(pending.sessionId, pending, "cancelled"); return; } if (state === "error") { @@ -478,7 +826,7 @@ export class AcpGatewayAgent implements Agent { // do not treat transient backend errors (timeouts, rate-limits) as deliberate // refusals. TODO: when ChatEventSchema gains a structured errorKind field // (e.g. "refusal" | "timeout" | "rate_limit"), use it to distinguish here. - this.finishPrompt(pending.sessionId, pending, "end_turn"); + void this.finishPrompt(pending.sessionId, pending, "end_turn"); } } @@ -511,17 +859,33 @@ export class AcpGatewayAgent implements Agent { }); } - private finishPrompt(sessionId: string, pending: PendingPrompt, stopReason: StopReason): void { + private async finishPrompt( + sessionId: string, + pending: PendingPrompt, + stopReason: StopReason, + ): Promise { this.pendingPrompts.delete(sessionId); this.sessionStore.clearActiveRun(sessionId); + const sessionSnapshot = await this.getSessionSnapshot(pending.sessionKey); + try { + await this.sendSessionSnapshotUpdate(sessionId, sessionSnapshot, { + includeControls: false, + }); + } catch (err) { + this.log(`session snapshot update failed for ${sessionId}: ${String(err)}`); + } pending.resolve({ stopReason }); } - private findPendingBySessionKey(sessionKey: string): PendingPrompt | undefined { + private findPendingBySessionKey(sessionKey: string, runId?: string): PendingPrompt | undefined { for (const pending of this.pendingPrompts.values()) { - if (pending.sessionKey === sessionKey) { - return pending; + if (pending.sessionKey !== sessionKey) { + continue; } + if (runId && pending.idempotencyKey !== runId) { + continue; + } + return pending; } return undefined; } @@ -536,6 +900,194 @@ export class AcpGatewayAgent 
implements Agent {
     });
   }
 
+  private async getSessionSnapshot(
+    sessionKey: string,
+    overrides?: Partial<GatewaySessionPresentationRow>,
+  ): Promise<SessionSnapshot> {
+    try {
+      const row = await this.getGatewaySessionRow(sessionKey);
+      return {
+        ...buildSessionPresentation({ row, overrides }),
+        metadata: buildSessionMetadata({ row, sessionKey }),
+        usage: buildSessionUsageSnapshot(row),
+      };
+    } catch (err) {
+      this.log(`session presentation fallback for ${sessionKey}: ${String(err)}`);
+      return {
+        ...buildSessionPresentation({ overrides }),
+        metadata: buildSessionMetadata({ sessionKey }),
+      };
+    }
+  }
+
+  private async getGatewaySessionRow(
+    sessionKey: string,
+  ): Promise<GatewaySessionPresentationRow | undefined> {
+    const result = await this.gateway.request<SessionsListResult>("sessions.list", {
+      limit: 200,
+      search: sessionKey,
+      includeDerivedTitles: true,
+    });
+    const session = result.sessions.find((entry) => entry.key === sessionKey);
+    if (!session) {
+      return undefined;
+    }
+    return {
+      displayName: session.displayName,
+      label: session.label,
+      derivedTitle: session.derivedTitle,
+      updatedAt: session.updatedAt,
+      thinkingLevel: session.thinkingLevel,
+      modelProvider: session.modelProvider,
+      model: session.model,
+      fastMode: session.fastMode,
+      verboseLevel: session.verboseLevel,
+      reasoningLevel: session.reasoningLevel,
+      responseUsage: session.responseUsage,
+      elevatedLevel: session.elevatedLevel,
+      totalTokens: session.totalTokens,
+      totalTokensFresh: session.totalTokensFresh,
+      contextTokens: session.contextTokens,
+    };
+  }
+
+  private resolveSessionConfigPatch(
+    configId: string,
+    value: string | boolean,
+  ): {
+    overrides: Partial<GatewaySessionPresentationRow>;
+    patch: Record<string, unknown>;
+  } {
+    if (typeof value !== "string") {
+      throw new Error(
+        `ACP bridge does not support non-string session config option values for "${configId}".`,
+      );
+    }
+    switch (configId) {
+      case ACP_THOUGHT_LEVEL_CONFIG_ID:
+        return {
+          patch: { thinkingLevel: value },
+          overrides: { thinkingLevel: value },
+        };
+      case ACP_FAST_MODE_CONFIG_ID:
+        return {
+          patch: { fastMode: value === "on" },
+          
overrides: { fastMode: value === "on" },
+        };
+      case ACP_VERBOSE_LEVEL_CONFIG_ID:
+        return {
+          patch: { verboseLevel: value },
+          overrides: { verboseLevel: value },
+        };
+      case ACP_REASONING_LEVEL_CONFIG_ID:
+        return {
+          patch: { reasoningLevel: value },
+          overrides: { reasoningLevel: value },
+        };
+      case ACP_RESPONSE_USAGE_CONFIG_ID:
+        return {
+          patch: { responseUsage: value },
+          overrides: { responseUsage: value as GatewaySessionPresentationRow["responseUsage"] },
+        };
+      case ACP_ELEVATED_LEVEL_CONFIG_ID:
+        return {
+          patch: { elevatedLevel: value },
+          overrides: { elevatedLevel: value },
+        };
+      default:
+        throw new Error(`ACP bridge mode does not support session config option "${configId}".`);
+    }
+  }
+
+  private async getSessionTranscript(sessionKey: string): Promise<GatewayTranscriptMessage[]> {
+    const result = await this.gateway.request<{ messages?: unknown[] }>("sessions.get", {
+      key: sessionKey,
+      limit: ACP_LOAD_SESSION_REPLAY_LIMIT,
+    });
+    if (!Array.isArray(result.messages)) {
+      return [];
+    }
+    return result.messages as GatewayTranscriptMessage[];
+  }
+
+  private async replaySessionTranscript(
+    sessionId: string,
+    transcript: ReadonlyArray<GatewayTranscriptMessage>,
+  ): Promise<void> {
+    for (const message of transcript) {
+      const role = typeof message.role === "string" ? message.role : "";
+      if (role !== "user" && role !== "assistant") {
+        continue;
+      }
+      const text = extractReplayText(message.content);
+      if (!text) {
+        continue;
+      }
+      await this.connection.sessionUpdate({
+        sessionId,
+        update: {
+          sessionUpdate: role === "user" ? 
"user_message_chunk" : "agent_message_chunk",
+          content: { type: "text", text },
+        },
+      });
+    }
+  }
+
+  private async sendSessionSnapshotUpdate(
+    sessionId: string,
+    sessionSnapshot: SessionSnapshot,
+    options: { includeControls: boolean },
+  ): Promise<void> {
+    if (options.includeControls) {
+      await this.connection.sessionUpdate({
+        sessionId,
+        update: {
+          sessionUpdate: "current_mode_update",
+          currentModeId: sessionSnapshot.modes.currentModeId,
+        },
+      });
+      await this.connection.sessionUpdate({
+        sessionId,
+        update: {
+          sessionUpdate: "config_option_update",
+          configOptions: sessionSnapshot.configOptions,
+        },
+      });
+    }
+    if (sessionSnapshot.metadata) {
+      await this.connection.sessionUpdate({
+        sessionId,
+        update: {
+          sessionUpdate: "session_info_update",
+          ...sessionSnapshot.metadata,
+        },
+      });
+    }
+    if (sessionSnapshot.usage) {
+      await this.connection.sessionUpdate({
+        sessionId,
+        update: {
+          sessionUpdate: "usage_update",
+          used: sessionSnapshot.usage.used,
+          size: sessionSnapshot.usage.size,
+          _meta: {
+            source: "gateway-session-store",
+            approximate: true,
+          },
+        },
+      });
+    }
+  }
+
+  private assertSupportedSessionSetup(mcpServers: ReadonlyArray<unknown>): void {
+    if (mcpServers.length === 0) {
+      return;
+    }
+    throw new Error(
+      "ACP bridge mode does not support per-session MCP servers. 
Configure MCP on the OpenClaw gateway or agent instead.", + ); + } + private enforceSessionCreateRateLimit(method: "newSession" | "loadSession"): void { const budget = this.sessionCreateRateLimiter.consume(); if (budget.allowed) { diff --git a/src/agents/acp-spawn-parent-stream.ts b/src/agents/acp-spawn-parent-stream.ts index 94f04ce3940..36b113386c2 100644 --- a/src/agents/acp-spawn-parent-stream.ts +++ b/src/agents/acp-spawn-parent-stream.ts @@ -180,7 +180,9 @@ export function startAcpSpawnParentStreamRelay(params: { }; const wake = () => { requestHeartbeatNow( - scopedHeartbeatWakeOptions(parentSessionKey, { reason: "acp:spawn:stream" }), + scopedHeartbeatWakeOptions(parentSessionKey, { + reason: "acp:spawn:stream", + }), ); }; const emit = (text: string, contextKey: string) => { diff --git a/src/agents/acp-spawn.test.ts b/src/agents/acp-spawn.test.ts index 0f28b709792..c53584cdf55 100644 --- a/src/agents/acp-spawn.test.ts +++ b/src/agents/acp-spawn.test.ts @@ -38,6 +38,7 @@ const hoisted = vi.hoisted(() => { const loadSessionStoreMock = vi.fn(); const resolveStorePathMock = vi.fn(); const resolveSessionTranscriptFileMock = vi.fn(); + const areHeartbeatsEnabledMock = vi.fn(); const state = { cfg: createDefaultSpawnConfig(), }; @@ -55,6 +56,7 @@ const hoisted = vi.hoisted(() => { loadSessionStoreMock, resolveStorePathMock, resolveSessionTranscriptFileMock, + areHeartbeatsEnabledMock, state, }; }); @@ -128,6 +130,14 @@ vi.mock("../infra/outbound/session-binding-service.js", async (importOriginal) = }; }); +vi.mock("../infra/heartbeat-wake.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + areHeartbeatsEnabled: () => hoisted.areHeartbeatsEnabledMock(), + }; +}); + vi.mock("./acp-spawn-parent-stream.js", () => ({ startAcpSpawnParentStreamRelay: (...args: unknown[]) => hoisted.startAcpSpawnParentStreamRelayMock(...args), @@ -192,6 +202,7 @@ function expectResolvedIntroTextInBindMetadata(): void { 
describe("spawnAcpDirect", () => { beforeEach(() => { hoisted.state.cfg = createDefaultSpawnConfig(); + hoisted.areHeartbeatsEnabledMock.mockReset().mockReturnValue(true); hoisted.callGatewayMock.mockReset().mockImplementation(async (argsUnknown: unknown) => { const args = argsUnknown as { method?: string }; @@ -393,6 +404,8 @@ describe("spawnAcpDirect", () => { expect(result.status).toBe("accepted"); expect(result.mode).toBe("run"); + expect(result.streamLogPath).toBeUndefined(); + expect(hoisted.startAcpSpawnParentStreamRelayMock).not.toHaveBeenCalled(); expect(hoisted.resolveSessionTranscriptFileMock).toHaveBeenCalledWith( expect.objectContaining({ sessionId: "sess-123", @@ -633,6 +646,290 @@ describe("spawnAcpDirect", () => { expect(secondHandle.notifyStarted).toHaveBeenCalledTimes(1); }); + it("implicitly streams mode=run ACP spawns for subagent requester sessions", async () => { + hoisted.state.cfg = { + ...hoisted.state.cfg, + agents: { + defaults: { + heartbeat: { + every: "30m", + target: "last", + }, + }, + }, + }; + const firstHandle = createRelayHandle(); + const secondHandle = createRelayHandle(); + hoisted.startAcpSpawnParentStreamRelayMock + .mockReset() + .mockReturnValueOnce(firstHandle) + .mockReturnValueOnce(secondHandle); + hoisted.loadSessionStoreMock.mockReset().mockImplementation(() => { + const store: Record< + string, + { sessionId: string; updatedAt: number; deliveryContext?: unknown } + > = { + "agent:main:subagent:parent": { + sessionId: "parent-sess-1", + updatedAt: Date.now(), + deliveryContext: { + channel: "discord", + to: "channel:parent-channel", + accountId: "default", + }, + }, + }; + return new Proxy(store, { + get(target, prop) { + if (typeof prop === "string" && prop.startsWith("agent:codex:acp:")) { + return { sessionId: "sess-123", updatedAt: Date.now() }; + } + return target[prop as keyof typeof target]; + }, + }); + }); + + const result = await spawnAcpDirect( + { + task: "Investigate flaky tests", + agentId: "codex", + }, 
+ { + agentSessionKey: "agent:main:subagent:parent", + agentChannel: "discord", + agentAccountId: "default", + agentTo: "channel:parent-channel", + }, + ); + + expect(result.status).toBe("accepted"); + expect(result.mode).toBe("run"); + expect(result.streamLogPath).toBe("/tmp/sess-main.acp-stream.jsonl"); + const agentCall = hoisted.callGatewayMock.mock.calls + .map((call: unknown[]) => call[0] as { method?: string; params?: Record }) + .find((request) => request.method === "agent"); + expect(agentCall?.params?.deliver).toBe(false); + expect(agentCall?.params?.channel).toBeUndefined(); + expect(agentCall?.params?.to).toBeUndefined(); + expect(agentCall?.params?.threadId).toBeUndefined(); + expect(hoisted.startAcpSpawnParentStreamRelayMock).toHaveBeenCalledWith( + expect.objectContaining({ + parentSessionKey: "agent:main:subagent:parent", + agentId: "codex", + logPath: "/tmp/sess-main.acp-stream.jsonl", + emitStartNotice: false, + }), + ); + expect(firstHandle.dispose).toHaveBeenCalledTimes(1); + expect(secondHandle.notifyStarted).toHaveBeenCalledTimes(1); + }); + + it("does not implicitly stream when heartbeat target is not session-local", async () => { + hoisted.state.cfg = { + ...hoisted.state.cfg, + agents: { + defaults: { + heartbeat: { + every: "30m", + target: "discord", + to: "channel:ops-room", + }, + }, + }, + }; + + const result = await spawnAcpDirect( + { + task: "Investigate flaky tests", + agentId: "codex", + }, + { + agentSessionKey: "agent:main:subagent:fixed-target", + }, + ); + + expect(result.status).toBe("accepted"); + expect(result.mode).toBe("run"); + expect(result.streamLogPath).toBeUndefined(); + expect(hoisted.startAcpSpawnParentStreamRelayMock).not.toHaveBeenCalled(); + }); + + it("does not implicitly stream when session scope is global", async () => { + hoisted.state.cfg = { + ...hoisted.state.cfg, + session: { + ...hoisted.state.cfg.session, + scope: "global", + }, + agents: { + defaults: { + heartbeat: { + every: "30m", + target: "last", 
+ }, + }, + }, + }; + + const result = await spawnAcpDirect( + { + task: "Investigate flaky tests", + agentId: "codex", + }, + { + agentSessionKey: "agent:main:subagent:global-scope", + }, + ); + + expect(result.status).toBe("accepted"); + expect(result.mode).toBe("run"); + expect(result.streamLogPath).toBeUndefined(); + expect(hoisted.startAcpSpawnParentStreamRelayMock).not.toHaveBeenCalled(); + }); + + it("does not implicitly stream for subagent requester sessions when heartbeat is disabled", async () => { + hoisted.state.cfg = { + ...hoisted.state.cfg, + agents: { + list: [{ id: "main", heartbeat: { every: "30m" } }, { id: "research" }], + }, + }; + + const result = await spawnAcpDirect( + { + task: "Investigate flaky tests", + agentId: "codex", + }, + { + agentSessionKey: "agent:research:subagent:orchestrator", + }, + ); + + expect(result.status).toBe("accepted"); + expect(result.mode).toBe("run"); + expect(result.streamLogPath).toBeUndefined(); + expect(hoisted.startAcpSpawnParentStreamRelayMock).not.toHaveBeenCalled(); + }); + + it("does not implicitly stream for subagent requester sessions when heartbeat cadence is invalid", async () => { + hoisted.state.cfg = { + ...hoisted.state.cfg, + agents: { + list: [ + { + id: "research", + heartbeat: { every: "0m" }, + }, + ], + }, + }; + + const result = await spawnAcpDirect( + { + task: "Investigate flaky tests", + agentId: "codex", + }, + { + agentSessionKey: "agent:research:subagent:invalid-heartbeat", + }, + ); + + expect(result.status).toBe("accepted"); + expect(result.mode).toBe("run"); + expect(result.streamLogPath).toBeUndefined(); + expect(hoisted.startAcpSpawnParentStreamRelayMock).not.toHaveBeenCalled(); + }); + + it("does not implicitly stream when heartbeats are runtime-disabled", async () => { + hoisted.areHeartbeatsEnabledMock.mockReturnValue(false); + + const result = await spawnAcpDirect( + { + task: "Investigate flaky tests", + agentId: "codex", + }, + { + agentSessionKey: 
"agent:main:subagent:runtime-disabled", + }, + ); + + expect(result.status).toBe("accepted"); + expect(result.mode).toBe("run"); + expect(result.streamLogPath).toBeUndefined(); + expect(hoisted.startAcpSpawnParentStreamRelayMock).not.toHaveBeenCalled(); + }); + + it("does not implicitly stream for legacy subagent requester session keys", async () => { + const result = await spawnAcpDirect( + { + task: "Investigate flaky tests", + agentId: "codex", + }, + { + agentSessionKey: "subagent:legacy-worker", + }, + ); + + expect(result.status).toBe("accepted"); + expect(result.mode).toBe("run"); + expect(result.streamLogPath).toBeUndefined(); + expect(hoisted.startAcpSpawnParentStreamRelayMock).not.toHaveBeenCalled(); + }); + + it("does not implicitly stream for subagent requester sessions with thread context", async () => { + const result = await spawnAcpDirect( + { + task: "Investigate flaky tests", + agentId: "codex", + }, + { + agentSessionKey: "agent:main:subagent:thread-context", + agentChannel: "discord", + agentAccountId: "default", + agentTo: "channel:parent-channel", + agentThreadId: "requester-thread", + }, + ); + + expect(result.status).toBe("accepted"); + expect(result.mode).toBe("run"); + expect(result.streamLogPath).toBeUndefined(); + expect(hoisted.startAcpSpawnParentStreamRelayMock).not.toHaveBeenCalled(); + }); + + it("does not implicitly stream for thread-bound subagent requester sessions", async () => { + hoisted.sessionBindingListBySessionMock.mockImplementation((targetSessionKey: string) => { + if (targetSessionKey === "agent:main:subagent:thread-bound") { + return [ + createSessionBinding({ + targetSessionKey, + targetKind: "subagent", + status: "active", + }), + ]; + } + return []; + }); + + const result = await spawnAcpDirect( + { + task: "Investigate flaky tests", + agentId: "codex", + }, + { + agentSessionKey: "agent:main:subagent:thread-bound", + agentChannel: "discord", + agentAccountId: "default", + agentTo: "channel:parent-channel", + }, + ); 
+ + expect(result.status).toBe("accepted"); + expect(result.mode).toBe("run"); + expect(result.streamLogPath).toBeUndefined(); + expect(hoisted.startAcpSpawnParentStreamRelayMock).not.toHaveBeenCalled(); + }); + it("announces parent relay start only after successful child dispatch", async () => { const firstHandle = createRelayHandle(); const secondHandle = createRelayHandle(); diff --git a/src/agents/acp-spawn.ts b/src/agents/acp-spawn.ts index c08cca8fcf8..9d68a234aea 100644 --- a/src/agents/acp-spawn.ts +++ b/src/agents/acp-spawn.ts @@ -10,6 +10,7 @@ import { resolveAcpThreadSessionDetailLines, } from "../acp/runtime/session-identifiers.js"; import type { AcpRuntimeSessionMode } from "../acp/runtime/types.js"; +import { DEFAULT_HEARTBEAT_EVERY } from "../auto-reply/heartbeat.js"; import { resolveThreadBindingIntroText, resolveThreadBindingThreadName, @@ -21,11 +22,13 @@ import { resolveThreadBindingMaxAgeMsForChannel, resolveThreadBindingSpawnPolicy, } from "../channels/thread-bindings-policy.js"; +import { parseDurationMs } from "../cli/parse-duration.js"; import { loadConfig } from "../config/config.js"; import type { OpenClawConfig } from "../config/config.js"; import { loadSessionStore, resolveStorePath, type SessionEntry } from "../config/sessions.js"; import { resolveSessionTranscriptFile } from "../config/sessions/transcript.js"; import { callGateway } from "../gateway/call.js"; +import { areHeartbeatsEnabled } from "../infra/heartbeat-wake.js"; import { resolveConversationIdFromTargets } from "../infra/outbound/conversation-id.js"; import { getSessionBindingService, @@ -33,13 +36,18 @@ import { type SessionBindingRecord, } from "../infra/outbound/session-binding-service.js"; import { createSubsystemLogger } from "../logging/subsystem.js"; -import { normalizeAgentId } from "../routing/session-key.js"; -import { normalizeDeliveryContext } from "../utils/delivery-context.js"; +import { + isSubagentSessionKey, + normalizeAgentId, + parseAgentSessionKey, +} 
from "../routing/session-key.js"; +import { deliveryContextFromSession, normalizeDeliveryContext } from "../utils/delivery-context.js"; import { type AcpSpawnParentRelayHandle, resolveAcpSpawnStreamLogPath, startAcpSpawnParentStreamRelay, } from "./acp-spawn-parent-stream.js"; +import { resolveAgentConfig, resolveDefaultAgentId } from "./agent-scope.js"; import { resolveSandboxRuntimeStatus } from "./sandbox/runtime-status.js"; import { resolveInternalSessionKey, resolveMainSessionAlias } from "./tools/sessions-helpers.js"; @@ -56,6 +64,7 @@ export type SpawnAcpParams = { task: string; label?: string; agentId?: string; + resumeSessionId?: string; cwd?: string; mode?: SpawnAcpMode; thread?: boolean; @@ -129,6 +138,95 @@ function resolveAcpSessionMode(mode: SpawnAcpMode): AcpRuntimeSessionMode { return mode === "session" ? "persistent" : "oneshot"; } +function isHeartbeatEnabledForSessionAgent(params: { + cfg: OpenClawConfig; + sessionKey?: string; +}): boolean { + if (!areHeartbeatsEnabled()) { + return false; + } + const requesterAgentId = parseAgentSessionKey(params.sessionKey)?.agentId; + if (!requesterAgentId) { + return true; + } + + const agentEntries = params.cfg.agents?.list ?? []; + const hasExplicitHeartbeatAgents = agentEntries.some((entry) => Boolean(entry?.heartbeat)); + const enabledByPolicy = hasExplicitHeartbeatAgents + ? agentEntries.some( + (entry) => Boolean(entry?.heartbeat) && normalizeAgentId(entry?.id) === requesterAgentId, + ) + : requesterAgentId === resolveDefaultAgentId(params.cfg); + if (!enabledByPolicy) { + return false; + } + + const heartbeatEvery = + resolveAgentConfig(params.cfg, requesterAgentId)?.heartbeat?.every ?? + params.cfg.agents?.defaults?.heartbeat?.every ?? + DEFAULT_HEARTBEAT_EVERY; + const trimmedEvery = typeof heartbeatEvery === "string" ? 
heartbeatEvery.trim() : ""; + if (!trimmedEvery) { + return false; + } + try { + return parseDurationMs(trimmedEvery, { defaultUnit: "m" }) > 0; + } catch { + return false; + } +} + +function resolveHeartbeatConfigForAgent(params: { + cfg: OpenClawConfig; + agentId: string; +}): NonNullable<NonNullable<OpenClawConfig["agents"]>["defaults"]>["heartbeat"] { + const defaults = params.cfg.agents?.defaults?.heartbeat; + const overrides = resolveAgentConfig(params.cfg, params.agentId)?.heartbeat; + if (!defaults && !overrides) { + return undefined; + } + return { + ...defaults, + ...overrides, + }; +} + +function hasSessionLocalHeartbeatRelayRoute(params: { + cfg: OpenClawConfig; + parentSessionKey: string; + requesterAgentId: string; +}): boolean { + const scope = params.cfg.session?.scope ?? "per-sender"; + if (scope === "global") { + return false; + } + + const heartbeat = resolveHeartbeatConfigForAgent({ + cfg: params.cfg, + agentId: params.requesterAgentId, + }); + if ((heartbeat?.target ?? "none") !== "last") { + return false; + } + + // Explicit delivery overrides are not session-local and can route updates + // to unrelated destinations (for example a pinned ops channel). 
+ if (typeof heartbeat?.to === "string" && heartbeat.to.trim().length > 0) { + return false; + } + if (typeof heartbeat?.accountId === "string" && heartbeat.accountId.trim().length > 0) { + return false; + } + + const storePath = resolveStorePath(params.cfg.session?.store, { + agentId: params.requesterAgentId, + }); + const sessionStore = loadSessionStore(storePath); + const parentEntry = sessionStore[params.parentSessionKey]; + const parentDeliveryContext = deliveryContextFromSession(parentEntry); + return Boolean(parentDeliveryContext?.channel && parentDeliveryContext.to); +} + function resolveTargetAcpAgentId(params: { requestedAgentId?: string; cfg: OpenClawConfig; @@ -325,6 +423,8 @@ export async function spawnAcpDirect( error: 'sessions_spawn streamTo="parent" requires an active requester session context.', }; } + + const requestThreadBinding = params.thread === true; const runtimePolicyError = resolveAcpSpawnRuntimePolicyError({ cfg, requesterSessionKey: ctx.agentSessionKey, @@ -338,7 +438,6 @@ export async function spawnAcpDirect( }; } - const requestThreadBinding = params.thread === true; const spawnMode = resolveSpawnMode({ requestedMode: params.mode, threadRequested: requestThreadBinding, @@ -350,6 +449,52 @@ export async function spawnAcpDirect( }; } + const bindingService = getSessionBindingService(); + const requesterParsedSession = parseAgentSessionKey(parentSessionKey); + const requesterIsSubagentSession = + Boolean(requesterParsedSession) && isSubagentSessionKey(parentSessionKey); + const requesterHasActiveSubagentBinding = + requesterIsSubagentSession && parentSessionKey + ? bindingService + .listBySession(parentSessionKey) + .some((record) => record.targetKind === "subagent" && record.status !== "ended") + : false; + const requesterHasThreadContext = + typeof ctx.agentThreadId === "string" + ? 
ctx.agentThreadId.trim().length > 0 + : ctx.agentThreadId != null; + const requesterHeartbeatEnabled = isHeartbeatEnabledForSessionAgent({ + cfg, + sessionKey: parentSessionKey, + }); + const requesterAgentId = requesterParsedSession?.agentId; + const requesterHeartbeatRelayRouteUsable = + parentSessionKey && requesterAgentId + ? hasSessionLocalHeartbeatRelayRoute({ + cfg, + parentSessionKey, + requesterAgentId, + }) + : false; + + // For mode=run without thread binding, implicitly route output to parent + // only for spawned subagent orchestrator sessions with heartbeat enabled + // AND a session-local heartbeat delivery route (target=last + usable last route). + // Skip requester sessions that are thread-bound (or carrying thread context) + // so user-facing threads do not receive unsolicited ACP progress chatter + // unless streamTo="parent" is explicitly requested. Use resolved spawnMode + // (not params.mode) so default mode selection works. + const implicitStreamToParent = + !streamToParentRequested && + spawnMode === "run" && + !requestThreadBinding && + requesterIsSubagentSession && + !requesterHasActiveSubagentBinding && + !requesterHasThreadContext && + requesterHeartbeatEnabled && + requesterHeartbeatRelayRouteUsable; + const effectiveStreamToParent = streamToParentRequested || implicitStreamToParent; + const targetAgentResult = resolveTargetAcpAgentId({ requestedAgentId: params.agentId, cfg, @@ -391,7 +536,6 @@ export async function spawnAcpDirect( } const acpManager = getAcpSessionManager(); - const bindingService = getSessionBindingService(); let binding: SessionBindingRecord | null = null; let sessionCreated = false; let initializedRuntime: AcpSpawnRuntimeCloseHandle | undefined; @@ -426,6 +570,7 @@ export async function spawnAcpDirect( sessionKey, agent: targetAgentId, mode: runtimeMode, + resumeSessionId: params.resumeSessionId, cwd: params.cwd, backendId: cfg.acp?.backend, }); @@ -528,17 +673,17 @@ export async function spawnAcpDirect( // Fresh 
one-shot ACP runs should bootstrap the worker first, then let higher layers // decide how to relay status. Inline delivery is reserved for thread-bound sessions. const useInlineDelivery = - hasDeliveryTarget && spawnMode === "session" && !streamToParentRequested; + hasDeliveryTarget && spawnMode === "session" && !effectiveStreamToParent; const childIdem = crypto.randomUUID(); let childRunId: string = childIdem; const streamLogPath = - streamToParentRequested && parentSessionKey + effectiveStreamToParent && parentSessionKey ? resolveAcpSpawnStreamLogPath({ childSessionKey: sessionKey, }) : undefined; let parentRelay: AcpSpawnParentRelayHandle | undefined; - if (streamToParentRequested && parentSessionKey) { + if (effectiveStreamToParent && parentSessionKey) { // Register relay before dispatch so fast lifecycle failures are not missed. parentRelay = startAcpSpawnParentStreamRelay({ runId: childIdem, @@ -583,7 +728,7 @@ export async function spawnAcpDirect( }; } - if (streamToParentRequested && parentSessionKey) { + if (effectiveStreamToParent && parentSessionKey) { if (parentRelay && childRunId !== childIdem) { parentRelay.dispose(); // Defensive fallback if gateway returns a runId that differs from idempotency key. 
diff --git a/src/agents/anthropic-payload-log.ts b/src/agents/anthropic-payload-log.ts index 6bfb3d8d374..2eb5d62e770 100644 --- a/src/agents/anthropic-payload-log.ts +++ b/src/agents/anthropic-payload-log.ts @@ -136,7 +136,7 @@ export function createAnthropicPayloadLogger(params: { if (!isAnthropicModel(model)) { return streamFn(model, context, options); } - const nextOnPayload = (payload: unknown, payloadModel: Parameters[0]) => { + const nextOnPayload = (payload: unknown) => { const redactedPayload = redactImageDataForDiagnostics(payload); record({ ...base, @@ -145,7 +145,7 @@ export function createAnthropicPayloadLogger(params: { payload: redactedPayload, payloadDigest: digest(redactedPayload), }); - return options?.onPayload?.(payload, payloadModel); + return options?.onPayload?.(payload, model); }; return streamFn(model, context, { ...options, diff --git a/src/agents/auth-profiles.markauthprofilefailure.test.ts b/src/agents/auth-profiles.markauthprofilefailure.test.ts index e5690f75c6a..5c4d73197b3 100644 --- a/src/agents/auth-profiles.markauthprofilefailure.test.ts +++ b/src/agents/auth-profiles.markauthprofilefailure.test.ts @@ -190,6 +190,58 @@ describe("markAuthProfileFailure", () => { } }); + it("resets error count when previous cooldown has expired to prevent escalation", async () => { + const agentDir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-auth-")); + try { + const authPath = path.join(agentDir, "auth-profiles.json"); + const now = Date.now(); + // Simulate state left on disk after 3 rapid failures within a 1-min cooldown + // window. The cooldown has since expired, but clearExpiredCooldowns() only + // ran in-memory and never persisted — so disk still carries errorCount: 3. 
+ fs.writeFileSync( + authPath, + JSON.stringify({ + version: 1, + profiles: { + "anthropic:default": { + type: "api_key", + provider: "anthropic", + key: "sk-default", + }, + }, + usageStats: { + "anthropic:default": { + errorCount: 3, + failureCounts: { rate_limit: 3 }, + lastFailureAt: now - 120_000, // 2 minutes ago + cooldownUntil: now - 60_000, // expired 1 minute ago + }, + }, + }), + ); + + const store = ensureAuthProfileStore(agentDir); + await markAuthProfileFailure({ + store, + profileId: "anthropic:default", + reason: "rate_limit", + agentDir, + }); + + const stats = store.usageStats?.["anthropic:default"]; + // Error count should reset to 1 (not escalate to 4) because the + // previous cooldown expired. Cooldown should be ~1 min, not ~60 min. + expect(stats?.errorCount).toBe(1); + expect(stats?.failureCounts?.rate_limit).toBe(1); + const cooldownMs = (stats?.cooldownUntil ?? 0) - now; + // calculateAuthProfileCooldownMs(1) = 60_000 (1 minute) + expect(cooldownMs).toBeLessThan(120_000); + expect(cooldownMs).toBeGreaterThan(0); + } finally { + fs.rmSync(agentDir, { recursive: true, force: true }); + } + }); + it("does not persist cooldown windows for OpenRouter profiles", async () => { await withAuthProfileStore(async ({ agentDir, store }) => { await markAuthProfileFailure({ diff --git a/src/agents/auth-profiles.runtime.ts b/src/agents/auth-profiles.runtime.ts new file mode 100644 index 00000000000..5c25bb97c84 --- /dev/null +++ b/src/agents/auth-profiles.runtime.ts @@ -0,0 +1 @@ +export { ensureAuthProfileStore } from "./auth-profiles.js"; diff --git a/src/agents/auth-profiles/oauth.openai-codex-refresh-fallback.test.ts b/src/agents/auth-profiles/oauth.openai-codex-refresh-fallback.test.ts index 9d47be8c79e..23381d89a05 100644 --- a/src/agents/auth-profiles/oauth.openai-codex-refresh-fallback.test.ts +++ b/src/agents/auth-profiles/oauth.openai-codex-refresh-fallback.test.ts @@ -17,17 +17,13 @@ const { getOAuthApiKeyMock } = vi.hoisted(() => ({ }), })); 
-vi.mock("@mariozechner/pi-ai", async () => { - const actual = await vi.importActual("@mariozechner/pi-ai"); - return { - ...actual, - getOAuthApiKey: getOAuthApiKeyMock, - getOAuthProviders: () => [ - { id: "openai-codex", envApiKey: "OPENAI_API_KEY", oauthTokenEnv: "OPENAI_OAUTH_TOKEN" }, // pragma: allowlist secret - { id: "anthropic", envApiKey: "ANTHROPIC_API_KEY", oauthTokenEnv: "ANTHROPIC_OAUTH_TOKEN" }, // pragma: allowlist secret - ], - }; -}); +vi.mock("@mariozechner/pi-ai/oauth", () => ({ + getOAuthApiKey: getOAuthApiKeyMock, + getOAuthProviders: () => [ + { id: "openai-codex", envApiKey: "OPENAI_API_KEY", oauthTokenEnv: "OPENAI_OAUTH_TOKEN" }, // pragma: allowlist secret + { id: "anthropic", envApiKey: "ANTHROPIC_API_KEY", oauthTokenEnv: "ANTHROPIC_OAUTH_TOKEN" }, // pragma: allowlist secret + ], +})); function createExpiredOauthStore(params: { profileId: string; diff --git a/src/agents/auth-profiles/oauth.test.ts b/src/agents/auth-profiles/oauth.test.ts index c38d043c549..d4161b0d8ad 100644 --- a/src/agents/auth-profiles/oauth.test.ts +++ b/src/agents/auth-profiles/oauth.test.ts @@ -32,6 +32,20 @@ function tokenStore(params: { }; } +function githubCopilotTokenStore(profileId: string, includeInlineToken = true): AuthProfileStore { + return { + version: 1, + profiles: { + [profileId]: { + type: "token", + provider: "github-copilot", + ...(includeInlineToken ? 
{ token: "" } : {}), + tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, + }, + }, + }; +} + async function resolveWithConfig(params: { profileId: string; provider: string; @@ -59,6 +73,25 @@ async function withEnvVar(key: string, value: string, run: () => Promise): } } +async function expectResolvedApiKey(params: { + profileId: string; + provider: string; + mode: "api_key" | "token" | "oauth"; + store: AuthProfileStore; + expectedApiKey: string; +}) { + const result = await resolveApiKeyForProfile({ + cfg: cfgFor(params.profileId, params.provider, params.mode), + store: params.store, + profileId: params.profileId, + }); + expect(result).toEqual({ + apiKey: params.expectedApiKey, // pragma: allowlist secret + provider: params.provider, + email: undefined, + }); +} + describe("resolveApiKeyForProfile config compatibility", () => { it("accepts token credentials when config mode is oauth", async () => { const profileId = "anthropic:token"; @@ -278,25 +311,12 @@ describe("resolveApiKeyForProfile secret refs", () => { it("resolves token tokenRef from env", async () => { const profileId = "github-copilot:default"; await withEnvVar("GITHUB_TOKEN", "gh-ref-token", async () => { - const result = await resolveApiKeyForProfile({ - cfg: cfgFor(profileId, "github-copilot", "token"), - store: { - version: 1, - profiles: { - [profileId]: { - type: "token", - provider: "github-copilot", - token: "", - tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, - }, - }, - }, + await expectResolvedApiKey({ profileId, - }); - expect(result).toEqual({ - apiKey: "gh-ref-token", // pragma: allowlist secret provider: "github-copilot", - email: undefined, + mode: "token", + store: githubCopilotTokenStore(profileId), + expectedApiKey: "gh-ref-token", // pragma: allowlist secret }); }); }); @@ -304,24 +324,12 @@ describe("resolveApiKeyForProfile secret refs", () => { it("resolves token tokenRef without inline token when expires is absent", async () => { const 
profileId = "github-copilot:no-inline-token"; await withEnvVar("GITHUB_TOKEN", "gh-ref-token", async () => { - const result = await resolveApiKeyForProfile({ - cfg: cfgFor(profileId, "github-copilot", "token"), - store: { - version: 1, - profiles: { - [profileId]: { - type: "token", - provider: "github-copilot", - tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, - }, - }, - }, + await expectResolvedApiKey({ profileId, - }); - expect(result).toEqual({ - apiKey: "gh-ref-token", // pragma: allowlist secret provider: "github-copilot", - email: undefined, + mode: "token", + store: githubCopilotTokenStore(profileId, false), + expectedApiKey: "gh-ref-token", // pragma: allowlist secret }); }); }); diff --git a/src/agents/auth-profiles/oauth.ts b/src/agents/auth-profiles/oauth.ts index 3604fd47b74..edc1ddfb24e 100644 --- a/src/agents/auth-profiles/oauth.ts +++ b/src/agents/auth-profiles/oauth.ts @@ -1,5 +1,9 @@ -import type { OAuthCredentials, OAuthProvider } from "@mariozechner/pi-ai/oauth"; -import { getOAuthApiKey, getOAuthProviders } from "@mariozechner/pi-ai/oauth"; +import { + getOAuthApiKey, + getOAuthProviders, + type OAuthCredentials, + type OAuthProvider, +} from "@mariozechner/pi-ai/oauth"; import { loadConfig, type OpenClawConfig } from "../../config/config.js"; import { coerceSecretRef } from "../../config/types.secrets.js"; import { withFileLock } from "../../infra/file-lock.js"; diff --git a/src/agents/auth-profiles/state-observation.test.ts b/src/agents/auth-profiles/state-observation.test.ts new file mode 100644 index 00000000000..05f2abfff19 --- /dev/null +++ b/src/agents/auth-profiles/state-observation.test.ts @@ -0,0 +1,38 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { resetLogger, setLoggerOverride } from "../../logging/logger.js"; +import { logAuthProfileFailureStateChange } from "./state-observation.js"; + +afterEach(() => { + setLoggerOverride(null); + resetLogger(); +}); + 
+describe("logAuthProfileFailureStateChange", () => { + it("sanitizes consoleMessage fields before logging", () => { + const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {}); + setLoggerOverride({ level: "silent", consoleLevel: "warn" }); + + logAuthProfileFailureStateChange({ + runId: "run-1\nforged\tentry\rtest", + profileId: "openai:profile-1", + provider: "openai\u001b]8;;https://evil.test\u0007", + reason: "overloaded", + previous: undefined, + next: { + errorCount: 1, + cooldownUntil: 1_700_000_060_000, + failureCounts: { overloaded: 1 }, + }, + now: 1_700_000_000_000, + }); + + const consoleLine = warnSpy.mock.calls[0]?.[0]; + expect(typeof consoleLine).toBe("string"); + expect(consoleLine).toContain("runId=run-1 forged entry test"); + expect(consoleLine).toContain("provider=openai]8;;https://evil.test"); + expect(consoleLine).not.toContain("\n"); + expect(consoleLine).not.toContain("\r"); + expect(consoleLine).not.toContain("\t"); + expect(consoleLine).not.toContain("\u001b"); + }); +}); diff --git a/src/agents/auth-profiles/state-observation.ts b/src/agents/auth-profiles/state-observation.ts new file mode 100644 index 00000000000..633bdc0031b --- /dev/null +++ b/src/agents/auth-profiles/state-observation.ts @@ -0,0 +1,59 @@ +import { redactIdentifier } from "../../logging/redact-identifier.js"; +import { createSubsystemLogger } from "../../logging/subsystem.js"; +import { sanitizeForConsole } from "../pi-embedded-error-observation.js"; +import type { AuthProfileFailureReason, ProfileUsageStats } from "./types.js"; + +const observationLog = createSubsystemLogger("agent/embedded"); + +export function logAuthProfileFailureStateChange(params: { + runId?: string; + profileId: string; + provider: string; + reason: AuthProfileFailureReason; + previous: ProfileUsageStats | undefined; + next: ProfileUsageStats; + now: number; +}): void { + const windowType = + params.reason === "billing" || params.reason === "auth_permanent" ? 
"disabled" : "cooldown"; + const previousCooldownUntil = params.previous?.cooldownUntil; + const previousDisabledUntil = params.previous?.disabledUntil; + // Active cooldown/disable windows are intentionally immutable; log whether this + // update reused the existing window instead of extending it. + const windowReused = + windowType === "disabled" + ? typeof previousDisabledUntil === "number" && + Number.isFinite(previousDisabledUntil) && + previousDisabledUntil > params.now && + previousDisabledUntil === params.next.disabledUntil + : typeof previousCooldownUntil === "number" && + Number.isFinite(previousCooldownUntil) && + previousCooldownUntil > params.now && + previousCooldownUntil === params.next.cooldownUntil; + const safeProfileId = redactIdentifier(params.profileId, { len: 12 }); + const safeRunId = sanitizeForConsole(params.runId) ?? "-"; + const safeProvider = sanitizeForConsole(params.provider) ?? "-"; + + observationLog.warn("auth profile failure state updated", { + event: "auth_profile_failure_state_updated", + tags: ["error_handling", "auth_profiles", windowType], + runId: params.runId, + profileId: safeProfileId, + provider: params.provider, + reason: params.reason, + windowType, + windowReused, + previousErrorCount: params.previous?.errorCount, + errorCount: params.next.errorCount, + previousCooldownUntil, + cooldownUntil: params.next.cooldownUntil, + previousDisabledUntil, + disabledUntil: params.next.disabledUntil, + previousDisabledReason: params.previous?.disabledReason, + disabledReason: params.next.disabledReason, + failureCounts: params.next.failureCounts, + consoleMessage: + `auth profile failure state updated: runId=${safeRunId} profile=${safeProfileId} provider=${safeProvider} ` + + `reason=${params.reason} window=${windowType} reused=${String(windowReused)}`, + }); +} diff --git a/src/agents/auth-profiles/usage.test.ts b/src/agents/auth-profiles/usage.test.ts index 120f75d3665..6dd5697cc99 100644 --- 
a/src/agents/auth-profiles/usage.test.ts +++ b/src/agents/auth-profiles/usage.test.ts @@ -207,7 +207,7 @@ describe("resolveProfilesUnavailableReason", () => { ).toBe("overloaded"); }); - it("falls back to rate_limit when active cooldown has no reason history", () => { + it("falls back to unknown when active cooldown has no reason history", () => { const now = Date.now(); const store = makeStore({ "anthropic:default": { @@ -221,7 +221,7 @@ describe("resolveProfilesUnavailableReason", () => { profileIds: ["anthropic:default"], now, }), - ).toBe("rate_limit"); + ).toBe("unknown"); }); it("ignores expired windows and returns null when no profile is actively unavailable", () => { @@ -608,6 +608,10 @@ describe("markAuthProfileFailure — active windows do not extend on retry", () }); } + // When a cooldown/disabled window expires, the error count resets to prevent + // stale counters from escalating the next cooldown (the root cause of + // infinite cooldown loops — see #40989). The next failure should compute + // backoff from errorCount=1, not from the accumulated stale count. 
const expiredWindowCases = [ { label: "cooldownUntil", @@ -617,7 +621,8 @@ describe("markAuthProfileFailure — active windows do not extend on retry", () errorCount: 3, lastFailureAt: now - 60_000, }), - expectedUntil: (now: number) => now + 60 * 60 * 1000, + // errorCount resets → calculateAuthProfileCooldownMs(1) = 60_000 + expectedUntil: (now: number) => now + 60_000, readUntil: (stats: WindowStats | undefined) => stats?.cooldownUntil, }, { @@ -630,7 +635,9 @@ describe("markAuthProfileFailure — active windows do not extend on retry", () failureCounts: { billing: 2 }, lastFailureAt: now - 60_000, }), - expectedUntil: (now: number) => now + 20 * 60 * 60 * 1000, + // errorCount resets, billing count resets to 1 → + // calculateAuthProfileBillingDisableMsWithConfig(1, 5h, 24h) = 5h + expectedUntil: (now: number) => now + 5 * 60 * 60 * 1000, readUntil: (stats: WindowStats | undefined) => stats?.disabledUntil, }, { @@ -643,7 +650,9 @@ describe("markAuthProfileFailure — active windows do not extend on retry", () failureCounts: { auth_permanent: 2 }, lastFailureAt: now - 60_000, }), - expectedUntil: (now: number) => now + 20 * 60 * 60 * 1000, + // errorCount resets, auth_permanent count resets to 1 → + // calculateAuthProfileBillingDisableMsWithConfig(1, 5h, 24h) = 5h + expectedUntil: (now: number) => now + 5 * 60 * 60 * 1000, readUntil: (stats: WindowStats | undefined) => stats?.disabledUntil, }, ]; diff --git a/src/agents/auth-profiles/usage.ts b/src/agents/auth-profiles/usage.ts index c28b51e3e57..20e1cbaa497 100644 --- a/src/agents/auth-profiles/usage.ts +++ b/src/agents/auth-profiles/usage.ts @@ -1,5 +1,6 @@ import type { OpenClawConfig } from "../../config/config.js"; import { normalizeProviderId } from "../model-selection.js"; +import { logAuthProfileFailureStateChange } from "./state-observation.js"; import { saveAuthProfileStore, updateAuthProfileStoreWithLock } from "./store.js"; import type { AuthProfileFailureReason, AuthProfileStore, ProfileUsageStats } from 
"./types.js"; @@ -109,7 +110,11 @@ export function resolveProfilesUnavailableReason(params: { recordedReason = true; } if (!recordedReason) { - addScore("rate_limit", 1); + // No failure counts recorded for this cooldown window. Previously this + // defaulted to "rate_limit", which caused false "rate limit reached" + // warnings when the actual reason was unknown (e.g. transient network + // blip or server error without a classified failure count). + addScore("unknown", 1); } } @@ -400,9 +405,19 @@ function computeNextProfileUsageStats(params: { params.existing.lastFailureAt > 0 && params.now - params.existing.lastFailureAt > windowMs; - const baseErrorCount = windowExpired ? 0 : (params.existing.errorCount ?? 0); + // If the previous cooldown has already expired, reset error counters so the + // profile gets a fresh backoff window. clearExpiredCooldowns() does this + // in-memory during profile ordering, but the on-disk state may still carry + // the old counters when the lock-based updater reads a fresh store. Without + // this check, stale error counts from an expired cooldown cause the next + // failure to escalate to a much longer cooldown (e.g. 1 min → 25 min). + const unusableUntil = resolveProfileUnusableUntil(params.existing); + const previousCooldownExpired = typeof unusableUntil === "number" && params.now >= unusableUntil; + + const shouldResetCounters = windowExpired || previousCooldownExpired; + const baseErrorCount = shouldResetCounters ? 0 : (params.existing.errorCount ?? 0); const nextErrorCount = baseErrorCount + 1; - const failureCounts = windowExpired ? {} : { ...params.existing.failureCounts }; + const failureCounts = shouldResetCounters ? {} : { ...params.existing.failureCounts }; failureCounts[params.reason] = (failureCounts[params.reason] ?? 
0) + 1; const updatedStats: ProfileUsageStats = { @@ -452,12 +467,16 @@ export async function markAuthProfileFailure(params: { reason: AuthProfileFailureReason; cfg?: OpenClawConfig; agentDir?: string; + runId?: string; }): Promise { - const { store, profileId, reason, agentDir, cfg } = params; + const { store, profileId, reason, agentDir, cfg, runId } = params; const profile = store.profiles[profileId]; if (!profile || isAuthCooldownBypassedForProvider(profile.provider)) { return; } + let nextStats: ProfileUsageStats | undefined; + let previousStats: ProfileUsageStats | undefined; + let updateTime = 0; const updated = await updateAuthProfileStoreWithLock({ agentDir, updater: (freshStore) => { @@ -472,19 +491,32 @@ export async function markAuthProfileFailure(params: { providerId: providerKey, }); - updateUsageStatsEntry(freshStore, profileId, (existing) => - computeNextProfileUsageStats({ - existing: existing ?? {}, - now, - reason, - cfgResolved, - }), - ); + previousStats = freshStore.usageStats?.[profileId]; + updateTime = now; + const computed = computeNextProfileUsageStats({ + existing: previousStats ?? {}, + now, + reason, + cfgResolved, + }); + nextStats = computed; + updateUsageStatsEntry(freshStore, profileId, () => computed); return true; }, }); if (updated) { store.usageStats = updated.usageStats; + if (nextStats) { + logAuthProfileFailureStateChange({ + runId, + profileId, + provider: profile.provider, + reason, + previous: previousStats, + next: nextStats, + now: updateTime, + }); + } return; } if (!store.profiles[profileId]) { @@ -498,15 +530,25 @@ export async function markAuthProfileFailure(params: { providerId: providerKey, }); - updateUsageStatsEntry(store, profileId, (existing) => - computeNextProfileUsageStats({ - existing: existing ?? {}, - now, - reason, - cfgResolved, - }), - ); + previousStats = store.usageStats?.[profileId]; + const computed = computeNextProfileUsageStats({ + existing: previousStats ?? 
{}, + now, + reason, + cfgResolved, + }); + nextStats = computed; + updateUsageStatsEntry(store, profileId, () => computed); saveAuthProfileStore(store, agentDir); + logAuthProfileFailureStateChange({ + runId, + profileId, + provider: store.profiles[profileId]?.provider ?? profile.provider, + reason, + previous: previousStats, + next: nextStats, + now, + }); } /** @@ -518,12 +560,14 @@ export async function markAuthProfileCooldown(params: { store: AuthProfileStore; profileId: string; agentDir?: string; + runId?: string; }): Promise<void> { await markAuthProfileFailure({ store: params.store, profileId: params.profileId, reason: "unknown", agentDir: params.agentDir, + runId: params.runId, }); } diff --git a/src/agents/bash-tools.exec-approval-followup.ts b/src/agents/bash-tools.exec-approval-followup.ts new file mode 100644 index 00000000000..af24f07fb50 --- /dev/null +++ b/src/agents/bash-tools.exec-approval-followup.ts @@ -0,0 +1,61 @@ +import { callGatewayTool } from "./tools/gateway.js"; + +type ExecApprovalFollowupParams = { + approvalId: string; + sessionKey?: string; + turnSourceChannel?: string; + turnSourceTo?: string; + turnSourceAccountId?: string; + turnSourceThreadId?: string | number; + resultText: string; +}; + +export function buildExecApprovalFollowupPrompt(resultText: string): string { + return [ + "An async command the user already approved has completed.", + "Do not run the command again.", + "", + "Exact completion details:", + resultText.trim(), + "", + "Reply to the user in a helpful way.", + "If it succeeded, share the relevant output.", + "If it failed, explain what went wrong.", + ].join("\n"); +} + +export async function sendExecApprovalFollowup( + params: ExecApprovalFollowupParams, +): Promise<boolean> { + const sessionKey = params.sessionKey?.trim(); + const resultText = params.resultText.trim(); + if (!sessionKey || !resultText) { + return false; + } + + const channel = params.turnSourceChannel?.trim(); + const to = params.turnSourceTo?.trim(); 
const threadId = + params.turnSourceThreadId != null && params.turnSourceThreadId !== "" + ? String(params.turnSourceThreadId) + : undefined; + + await callGatewayTool( + "agent", + { timeoutMs: 60_000 }, + { + sessionKey, + message: buildExecApprovalFollowupPrompt(resultText), + deliver: true, + bestEffortDeliver: true, + channel: channel && to ? channel : undefined, + to: channel && to ? to : undefined, + accountId: channel && to ? params.turnSourceAccountId?.trim() || undefined : undefined, + threadId: channel && to ? threadId : undefined, + idempotencyKey: `exec-approval-followup:${params.approvalId}`, + }, + { expectFinal: true }, + ); + + return true; +} diff --git a/src/agents/bash-tools.exec-approval-request.ts b/src/agents/bash-tools.exec-approval-request.ts index 7c28827c051..2b2fd7d9a5b 100644 --- a/src/agents/bash-tools.exec-approval-request.ts +++ b/src/agents/bash-tools.exec-approval-request.ts @@ -7,7 +7,7 @@ import { callGatewayTool } from "./tools/gateway.js"; export type RequestExecApprovalDecisionParams = { id: string; - command: string; + command?: string; commandArgv?: string[]; systemRunPlan?: SystemRunApprovalPlan; env?: Record; @@ -35,8 +35,8 @@ function buildExecApprovalRequestToolParams( ): ExecApprovalRequestToolParams { return { id: params.id, - command: params.command, - commandArgv: params.commandArgv, + ...(params.command ? { command: params.command } : {}), + ...(params.commandArgv ? 
{ commandArgv: params.commandArgv } : {}), systemRunPlan: params.systemRunPlan, env: params.env, cwd: params.cwd, @@ -150,7 +150,7 @@ export async function requestExecApprovalDecision( type HostExecApprovalParams = { approvalId: string; - command: string; + command?: string; commandArgv?: string[]; systemRunPlan?: SystemRunApprovalPlan; env?: Record; diff --git a/src/agents/bash-tools.exec-host-gateway.ts b/src/agents/bash-tools.exec-host-gateway.ts index 49a958c9c5b..149a4785dd5 100644 --- a/src/agents/bash-tools.exec-host-gateway.ts +++ b/src/agents/bash-tools.exec-host-gateway.ts @@ -19,15 +19,18 @@ import { registerExecApprovalRequestForHostOrThrow, } from "./bash-tools.exec-approval-request.js"; import { - createDefaultExecApprovalRequestContext, - resolveBaseExecApprovalDecision, + buildDefaultExecApprovalRequestArgs, + buildExecApprovalFollowupTarget, + buildExecApprovalPendingToolResult, + createExecApprovalDecisionState, + createAndRegisterDefaultExecApprovalRequest, resolveApprovalDecisionOrUndefined, resolveExecHostApprovalContext, + sendExecApprovalFollowupResult, } from "./bash-tools.exec-host-shared.js"; import { DEFAULT_NOTIFY_TAIL_CHARS, createApprovalSlug, - emitExecSystemEvent, normalizeNotifyOutput, runExecProcess, } from "./bash-tools.exec-runtime.js"; @@ -138,67 +141,78 @@ export async function processGatewayAllowlist( } if (requiresAsk) { - const { - approvalId, - approvalSlug, - contextKey, - noticeSeconds, - warningText, - expiresAtMs: defaultExpiresAtMs, - preResolvedDecision: defaultPreResolvedDecision, - } = createDefaultExecApprovalRequestContext({ + const requestArgs = buildDefaultExecApprovalRequestArgs({ warnings: params.warnings, approvalRunningNoticeMs: params.approvalRunningNoticeMs, createApprovalSlug, + turnSourceChannel: params.turnSourceChannel, + turnSourceAccountId: params.turnSourceAccountId, + }); + const registerGatewayApproval = async (approvalId: string) => + await registerExecApprovalRequestForHostOrThrow({ + 
approvalId, + command: params.command, + workdir: params.workdir, + host: "gateway", + security: hostSecurity, + ask: hostAsk, + ...buildExecApprovalRequesterContext({ + agentId: params.agentId, + sessionKey: params.sessionKey, + }), + resolvedPath: allowlistEval.segments[0]?.resolution?.resolvedPath, + ...buildExecApprovalTurnSourceContext(params), + }); + const { + approvalId, + approvalSlug, + warningText, + expiresAtMs, + preResolvedDecision, + initiatingSurface, + sentApproverDms, + unavailableReason, + } = await createAndRegisterDefaultExecApprovalRequest({ + ...requestArgs, + register: registerGatewayApproval, }); const resolvedPath = allowlistEval.segments[0]?.resolution?.resolvedPath; const effectiveTimeout = typeof params.timeoutSec === "number" ? params.timeoutSec : params.defaultTimeoutSec; - let expiresAtMs = defaultExpiresAtMs; - let preResolvedDecision = defaultPreResolvedDecision; - - // Register first so the returned approval ID is actionable immediately. - const registration = await registerExecApprovalRequestForHostOrThrow({ + const followupTarget = buildExecApprovalFollowupTarget({ approvalId, - command: params.command, - workdir: params.workdir, - host: "gateway", - security: hostSecurity, - ask: hostAsk, - ...buildExecApprovalRequesterContext({ - agentId: params.agentId, - sessionKey: params.sessionKey, - }), - resolvedPath, - ...buildExecApprovalTurnSourceContext(params), + sessionKey: params.notifySessionKey, + turnSourceChannel: params.turnSourceChannel, + turnSourceTo: params.turnSourceTo, + turnSourceAccountId: params.turnSourceAccountId, + turnSourceThreadId: params.turnSourceThreadId, }); - expiresAtMs = registration.expiresAtMs; - preResolvedDecision = registration.finalDecision; void (async () => { const decision = await resolveApprovalDecisionOrUndefined({ approvalId, preResolvedDecision, onFailure: () => - emitExecSystemEvent( + void sendExecApprovalFollowupResult( + followupTarget, `Exec denied (gateway id=${approvalId}, 
approval-request-failed): ${params.command}`, - { - sessionKey: params.notifySessionKey, - contextKey, - }, ), }); if (decision === undefined) { return; } - const baseDecision = resolveBaseExecApprovalDecision({ + const { + baseDecision, + approvedByAsk: initialApprovedByAsk, + deniedReason: initialDeniedReason, + } = createExecApprovalDecisionState({ decision, askFallback, obfuscationDetected: obfuscation.detected, }); - let approvedByAsk = baseDecision.approvedByAsk; - let deniedReason = baseDecision.deniedReason; + let approvedByAsk = initialApprovedByAsk; + let deniedReason = initialDeniedReason; if (baseDecision.timedOut && askFallback === "allowlist") { if (!analysisOk || !allowlistSatisfied) { @@ -230,12 +244,9 @@ export async function processGatewayAllowlist( } if (deniedReason) { - emitExecSystemEvent( + await sendExecApprovalFollowupResult( + followupTarget, `Exec denied (gateway id=${approvalId}, ${deniedReason}): ${params.command}`, - { - sessionKey: params.notifySessionKey, - contextKey, - }, ); return; } @@ -262,32 +273,16 @@ export async function processGatewayAllowlist( timeoutSec: effectiveTimeout, }); } catch { - emitExecSystemEvent( + await sendExecApprovalFollowupResult( + followupTarget, `Exec denied (gateway id=${approvalId}, spawn-failed): ${params.command}`, - { - sessionKey: params.notifySessionKey, - contextKey, - }, ); return; } markBackgrounded(run.session); - let runningTimer: NodeJS.Timeout | null = null; - if (params.approvalRunningNoticeMs > 0) { - runningTimer = setTimeout(() => { - emitExecSystemEvent( - `Exec running (gateway id=${approvalId}, session=${run?.session.id}, >${noticeSeconds}s): ${params.command}`, - { sessionKey: params.notifySessionKey, contextKey }, - ); - }, params.approvalRunningNoticeMs); - } - const outcome = await run.promise; - if (runningTimer) { - clearTimeout(runningTimer); - } const output = normalizeNotifyOutput( tail(outcome.aggregated || "", DEFAULT_NOTIFY_TAIL_CHARS), ); @@ -295,29 +290,22 @@ export 
async function processGatewayAllowlist( const summary = output ? `Exec finished (gateway id=${approvalId}, session=${run.session.id}, ${exitLabel})\n${output}` : `Exec finished (gateway id=${approvalId}, session=${run.session.id}, ${exitLabel})`; - emitExecSystemEvent(summary, { sessionKey: params.notifySessionKey, contextKey }); + await sendExecApprovalFollowupResult(followupTarget, summary); })(); return { - pendingResult: { - content: [ - { - type: "text", - text: - `${warningText}Approval required (id ${approvalSlug}). ` + - "Approve to run; updates will arrive after completion.", - }, - ], - details: { - status: "approval-pending", - approvalId, - approvalSlug, - expiresAtMs, - host: "gateway", - command: params.command, - cwd: params.workdir, - }, - }, + pendingResult: buildExecApprovalPendingToolResult({ + host: "gateway", + command: params.command, + cwd: params.workdir, + warningText, + approvalId, + approvalSlug, + expiresAtMs, + initiatingSurface, + sentApproverDms, + unavailableReason, + }), }; } diff --git a/src/agents/bash-tools.exec-host-node.ts b/src/agents/bash-tools.exec-host-node.ts index b66a6ededf1..16af23590b4 100644 --- a/src/agents/bash-tools.exec-host-node.ts +++ b/src/agents/bash-tools.exec-host-node.ts @@ -17,13 +17,12 @@ import { buildExecApprovalTurnSourceContext, registerExecApprovalRequestForHostOrThrow, } from "./bash-tools.exec-approval-request.js"; +import * as execHostShared from "./bash-tools.exec-host-shared.js"; import { - createDefaultExecApprovalRequestContext, - resolveBaseExecApprovalDecision, - resolveApprovalDecisionOrUndefined, - resolveExecHostApprovalContext, -} from "./bash-tools.exec-host-shared.js"; -import { createApprovalSlug, emitExecSystemEvent } from "./bash-tools.exec-runtime.js"; + DEFAULT_NOTIFY_TAIL_CHARS, + createApprovalSlug, + normalizeNotifyOutput, +} from "./bash-tools.exec-runtime.js"; import type { ExecToolDetails } from "./bash-tools.exec-types.js"; import { callGatewayTool } from 
"./tools/gateway.js"; import { listNodes, resolveNodeIdFromList } from "./tools/nodes-utils.js"; @@ -54,7 +53,7 @@ export type ExecuteNodeHostCommandParams = { export async function executeNodeHostCommand( params: ExecuteNodeHostCommandParams, ): Promise> { - const { hostSecurity, hostAsk, askFallback } = resolveExecHostApprovalContext({ + const { hostSecurity, hostAsk, askFallback } = execHostShared.resolveExecHostApprovalContext({ agentId: params.agentId, security: params.security, ask: params.ask, @@ -113,7 +112,7 @@ export async function executeNodeHostCommand( throw new Error("invalid system.run.prepare response"); } const runArgv = prepared.plan.argv; - const runRawCommand = prepared.plan.rawCommand ?? prepared.cmdText; + const runRawCommand = prepared.plan.commandText; const runCwd = prepared.plan.cwd ?? params.workdir; const runAgentId = prepared.plan.agentId ?? params.agentId; const runSessionKey = prepared.plan.sessionKey ?? params.sessionKey; @@ -187,6 +186,7 @@ export async function executeNodeHostCommand( approvedByAsk: boolean, approvalDecision: "allow-once" | "allow-always" | null, runId?: string, + suppressNotifyOnExit?: boolean, ) => ({ nodeId, @@ -202,70 +202,83 @@ export async function executeNodeHostCommand( approved: approvedByAsk, approvalDecision: approvalDecision ?? undefined, runId: runId ?? undefined, + suppressNotifyOnExit: suppressNotifyOnExit === true ? 
true : undefined, }, idempotencyKey: crypto.randomUUID(), }) satisfies Record; if (requiresAsk) { - const { - approvalId, - approvalSlug, - contextKey, - noticeSeconds, - warningText, - expiresAtMs: defaultExpiresAtMs, - preResolvedDecision: defaultPreResolvedDecision, - } = createDefaultExecApprovalRequestContext({ + const requestArgs = execHostShared.buildDefaultExecApprovalRequestArgs({ warnings: params.warnings, approvalRunningNoticeMs: params.approvalRunningNoticeMs, createApprovalSlug, + turnSourceChannel: params.turnSourceChannel, + turnSourceAccountId: params.turnSourceAccountId, }); - let expiresAtMs = defaultExpiresAtMs; - let preResolvedDecision = defaultPreResolvedDecision; - - // Register first so the returned approval ID is actionable immediately. - const registration = await registerExecApprovalRequestForHostOrThrow({ + const registerNodeApproval = async (approvalId: string) => + await registerExecApprovalRequestForHostOrThrow({ + approvalId, + systemRunPlan: prepared.plan, + env: nodeEnv, + workdir: runCwd, + host: "node", + nodeId, + security: hostSecurity, + ask: hostAsk, + ...buildExecApprovalRequesterContext({ + agentId: runAgentId, + sessionKey: runSessionKey, + }), + ...buildExecApprovalTurnSourceContext(params), + }); + const { approvalId, - command: prepared.cmdText, - commandArgv: prepared.plan.argv, - systemRunPlan: prepared.plan, - env: nodeEnv, - workdir: runCwd, - host: "node", - nodeId, - security: hostSecurity, - ask: hostAsk, - ...buildExecApprovalRequesterContext({ - agentId: runAgentId, - sessionKey: runSessionKey, - }), - ...buildExecApprovalTurnSourceContext(params), + approvalSlug, + warningText, + expiresAtMs, + preResolvedDecision, + initiatingSurface, + sentApproverDms, + unavailableReason, + } = await execHostShared.createAndRegisterDefaultExecApprovalRequest({ + ...requestArgs, + register: registerNodeApproval, + }); + const followupTarget = execHostShared.buildExecApprovalFollowupTarget({ + approvalId, + sessionKey: 
params.notifySessionKey, + turnSourceChannel: params.turnSourceChannel, + turnSourceTo: params.turnSourceTo, + turnSourceAccountId: params.turnSourceAccountId, + turnSourceThreadId: params.turnSourceThreadId, }); - expiresAtMs = registration.expiresAtMs; - preResolvedDecision = registration.finalDecision; void (async () => { - const decision = await resolveApprovalDecisionOrUndefined({ + const decision = await execHostShared.resolveApprovalDecisionOrUndefined({ approvalId, preResolvedDecision, onFailure: () => - emitExecSystemEvent( + void execHostShared.sendExecApprovalFollowupResult( + followupTarget, `Exec denied (node=${nodeId} id=${approvalId}, approval-request-failed): ${params.command}`, - { sessionKey: params.notifySessionKey, contextKey }, ), }); if (decision === undefined) { return; } - const baseDecision = resolveBaseExecApprovalDecision({ + const { + baseDecision, + approvedByAsk: initialApprovedByAsk, + deniedReason: initialDeniedReason, + } = execHostShared.createExecApprovalDecisionState({ decision, askFallback, obfuscationDetected: obfuscation.detected, }); - let approvedByAsk = baseDecision.approvedByAsk; + let approvedByAsk = initialApprovedByAsk; let approvalDecision: "allow-once" | "allow-always" | null = null; - let deniedReason = baseDecision.deniedReason; + let deniedReason = initialDeniedReason; if (baseDecision.timedOut && askFallback === "full" && approvedByAsk) { approvalDecision = "allow-once"; @@ -278,67 +291,65 @@ export async function executeNodeHostCommand( } if (deniedReason) { - emitExecSystemEvent( + await execHostShared.sendExecApprovalFollowupResult( + followupTarget, `Exec denied (node=${nodeId} id=${approvalId}, ${deniedReason}): ${params.command}`, - { - sessionKey: params.notifySessionKey, - contextKey, - }, ); return; } - let runningTimer: NodeJS.Timeout | null = null; - if (params.approvalRunningNoticeMs > 0) { - runningTimer = setTimeout(() => { - emitExecSystemEvent( - `Exec running (node=${nodeId} id=${approvalId}, 
>${noticeSeconds}s): ${params.command}`, - { sessionKey: params.notifySessionKey, contextKey }, - ); - }, params.approvalRunningNoticeMs); - } - try { - await callGatewayTool( + const raw = await callGatewayTool<{ + payload?: { + stdout?: string; + stderr?: string; + error?: string | null; + exitCode?: number | null; + timedOut?: boolean; + }; + }>( "node.invoke", { timeoutMs: invokeTimeoutMs }, - buildInvokeParams(approvedByAsk, approvalDecision, approvalId), + buildInvokeParams(approvedByAsk, approvalDecision, approvalId, true), ); + const payload = + raw?.payload && typeof raw.payload === "object" + ? (raw.payload as { + stdout?: string; + stderr?: string; + error?: string | null; + exitCode?: number | null; + timedOut?: boolean; + }) + : {}; + const combined = [payload.stdout, payload.stderr, payload.error].filter(Boolean).join("\n"); + const output = normalizeNotifyOutput(combined.slice(-DEFAULT_NOTIFY_TAIL_CHARS)); + const exitLabel = payload.timedOut ? "timeout" : `code ${payload.exitCode ?? "?"}`; + const summary = output + ? `Exec finished (node=${nodeId} id=${approvalId}, ${exitLabel})\n${output}` + : `Exec finished (node=${nodeId} id=${approvalId}, ${exitLabel})`; + await execHostShared.sendExecApprovalFollowupResult(followupTarget, summary); } catch { - emitExecSystemEvent( + await execHostShared.sendExecApprovalFollowupResult( + followupTarget, `Exec denied (node=${nodeId} id=${approvalId}, invoke-failed): ${params.command}`, - { - sessionKey: params.notifySessionKey, - contextKey, - }, ); - } finally { - if (runningTimer) { - clearTimeout(runningTimer); - } } })(); - return { - content: [ - { - type: "text", - text: - `${warningText}Approval required (id ${approvalSlug}). 
` + - "Approve to run; updates will arrive after completion.", - }, - ], - details: { - status: "approval-pending", - approvalId, - approvalSlug, - expiresAtMs, - host: "node", - command: params.command, - cwd: params.workdir, - nodeId, - }, - }; + return execHostShared.buildExecApprovalPendingToolResult({ + host: "node", + command: params.command, + cwd: params.workdir, + warningText, + approvalId, + approvalSlug, + expiresAtMs, + initiatingSurface, + sentApproverDms, + unavailableReason, + nodeId, + }); } const startedAt = Date.now(); diff --git a/src/agents/bash-tools.exec-host-shared.ts b/src/agents/bash-tools.exec-host-shared.ts index c24e0a2f1fa..a9adaff17ee 100644 --- a/src/agents/bash-tools.exec-host-shared.ts +++ b/src/agents/bash-tools.exec-host-shared.ts @@ -1,4 +1,12 @@ import crypto from "node:crypto"; +import type { AgentToolResult } from "@mariozechner/pi-agent-core"; +import { loadConfig } from "../config/config.js"; +import { buildExecApprovalUnavailableReplyPayload } from "../infra/exec-approval-reply.js"; +import { + hasConfiguredExecApprovalDmRoute, + type ExecApprovalInitiatingSurfaceState, + resolveExecApprovalInitiatingSurfaceState, +} from "../infra/exec-approval-surface.js"; import { maxAsk, minSecurity, @@ -6,8 +14,14 @@ import { type ExecAsk, type ExecSecurity, } from "../infra/exec-approvals.js"; -import { resolveRegisteredExecApprovalDecision } from "./bash-tools.exec-approval-request.js"; +import { sendExecApprovalFollowup } from "./bash-tools.exec-approval-followup.js"; +import { + type ExecApprovalRegistration, + resolveRegisteredExecApprovalDecision, +} from "./bash-tools.exec-approval-request.js"; +import { buildApprovalPendingMessage } from "./bash-tools.exec-runtime.js"; import { DEFAULT_APPROVAL_TIMEOUT_MS } from "./bash-tools.exec-runtime.js"; +import type { ExecToolDetails } from "./bash-tools.exec-types.js"; type ResolvedExecApprovals = ReturnType; @@ -28,6 +42,39 @@ export type ExecApprovalRequestState = 
ExecApprovalPendingState & { noticeSeconds: number; }; +export type ExecApprovalUnavailableReason = + | "no-approval-route" + | "initiating-platform-disabled" + | "initiating-platform-unsupported"; + +export type RegisteredExecApprovalRequestContext = { + approvalId: string; + approvalSlug: string; + warningText: string; + expiresAtMs: number; + preResolvedDecision: string | null | undefined; + initiatingSurface: ExecApprovalInitiatingSurfaceState; + sentApproverDms: boolean; + unavailableReason: ExecApprovalUnavailableReason | null; +}; + +export type ExecApprovalFollowupTarget = { + approvalId: string; + sessionKey?: string; + turnSourceChannel?: string; + turnSourceTo?: string; + turnSourceAccountId?: string; + turnSourceThreadId?: string | number; +}; + +export type DefaultExecApprovalRequestArgs = { + warnings: string[]; + approvalRunningNoticeMs: number; + createApprovalSlug: (approvalId: string) => string; + turnSourceChannel?: string; + turnSourceAccountId?: string; +}; + export function createExecApprovalPendingState(params: { warnings: string[]; timeoutMs: number; @@ -158,3 +205,197 @@ export async function resolveApprovalDecisionOrUndefined(params: { return undefined; } } + +export function resolveExecApprovalUnavailableState(params: { + turnSourceChannel?: string; + turnSourceAccountId?: string; + preResolvedDecision: string | null | undefined; +}): { + initiatingSurface: ExecApprovalInitiatingSurfaceState; + sentApproverDms: boolean; + unavailableReason: ExecApprovalUnavailableReason | null; +} { + const initiatingSurface = resolveExecApprovalInitiatingSurfaceState({ + channel: params.turnSourceChannel, + accountId: params.turnSourceAccountId, + }); + const sentApproverDms = + (initiatingSurface.kind === "disabled" || initiatingSurface.kind === "unsupported") && + hasConfiguredExecApprovalDmRoute(loadConfig()); + const unavailableReason = + params.preResolvedDecision === null + ? "no-approval-route" + : initiatingSurface.kind === "disabled" + ? 
"initiating-platform-disabled" + : initiatingSurface.kind === "unsupported" + ? "initiating-platform-unsupported" + : null; + return { + initiatingSurface, + sentApproverDms, + unavailableReason, + }; +} + +export async function createAndRegisterDefaultExecApprovalRequest(params: { + warnings: string[]; + approvalRunningNoticeMs: number; + createApprovalSlug: (approvalId: string) => string; + turnSourceChannel?: string; + turnSourceAccountId?: string; + register: (approvalId: string) => Promise; +}): Promise { + const { + approvalId, + approvalSlug, + warningText, + expiresAtMs: defaultExpiresAtMs, + preResolvedDecision: defaultPreResolvedDecision, + } = createDefaultExecApprovalRequestContext({ + warnings: params.warnings, + approvalRunningNoticeMs: params.approvalRunningNoticeMs, + createApprovalSlug: params.createApprovalSlug, + }); + const registration = await params.register(approvalId); + const preResolvedDecision = registration.finalDecision; + const { initiatingSurface, sentApproverDms, unavailableReason } = + resolveExecApprovalUnavailableState({ + turnSourceChannel: params.turnSourceChannel, + turnSourceAccountId: params.turnSourceAccountId, + preResolvedDecision, + }); + + return { + approvalId, + approvalSlug, + warningText, + expiresAtMs: registration.expiresAtMs ?? defaultExpiresAtMs, + preResolvedDecision: + registration.finalDecision === undefined + ? 
defaultPreResolvedDecision + : registration.finalDecision, + initiatingSurface, + sentApproverDms, + unavailableReason, + }; +} + +export function buildDefaultExecApprovalRequestArgs( + params: DefaultExecApprovalRequestArgs, +): DefaultExecApprovalRequestArgs { + return { + warnings: params.warnings, + approvalRunningNoticeMs: params.approvalRunningNoticeMs, + createApprovalSlug: params.createApprovalSlug, + turnSourceChannel: params.turnSourceChannel, + turnSourceAccountId: params.turnSourceAccountId, + }; +} + +export function buildExecApprovalFollowupTarget( + params: ExecApprovalFollowupTarget, +): ExecApprovalFollowupTarget { + return { + approvalId: params.approvalId, + sessionKey: params.sessionKey, + turnSourceChannel: params.turnSourceChannel, + turnSourceTo: params.turnSourceTo, + turnSourceAccountId: params.turnSourceAccountId, + turnSourceThreadId: params.turnSourceThreadId, + }; +} + +export function createExecApprovalDecisionState(params: { + decision: string | null | undefined; + askFallback: ResolvedExecApprovals["agent"]["askFallback"]; + obfuscationDetected: boolean; +}) { + const baseDecision = resolveBaseExecApprovalDecision({ + decision: params.decision ?? 
null, + askFallback: params.askFallback, + obfuscationDetected: params.obfuscationDetected, + }); + return { + baseDecision, + approvedByAsk: baseDecision.approvedByAsk, + deniedReason: baseDecision.deniedReason, + }; +} + +export async function sendExecApprovalFollowupResult( + target: ExecApprovalFollowupTarget, + resultText: string, +): Promise { + await sendExecApprovalFollowup({ + approvalId: target.approvalId, + sessionKey: target.sessionKey, + turnSourceChannel: target.turnSourceChannel, + turnSourceTo: target.turnSourceTo, + turnSourceAccountId: target.turnSourceAccountId, + turnSourceThreadId: target.turnSourceThreadId, + resultText, + }).catch(() => {}); +} + +export function buildExecApprovalPendingToolResult(params: { + host: "gateway" | "node"; + command: string; + cwd: string; + warningText: string; + approvalId: string; + approvalSlug: string; + expiresAtMs: number; + initiatingSurface: ExecApprovalInitiatingSurfaceState; + sentApproverDms: boolean; + unavailableReason: ExecApprovalUnavailableReason | null; + nodeId?: string; +}): AgentToolResult { + return { + content: [ + { + type: "text", + text: + params.unavailableReason !== null + ? (buildExecApprovalUnavailableReplyPayload({ + warningText: params.warningText, + reason: params.unavailableReason, + channelLabel: params.initiatingSurface.channelLabel, + sentApproverDms: params.sentApproverDms, + }).text ?? "") + : buildApprovalPendingMessage({ + warningText: params.warningText, + approvalSlug: params.approvalSlug, + approvalId: params.approvalId, + command: params.command, + cwd: params.cwd, + host: params.host, + nodeId: params.nodeId, + }), + }, + ], + details: + params.unavailableReason !== null + ? 
({ + status: "approval-unavailable", + reason: params.unavailableReason, + channelLabel: params.initiatingSurface.channelLabel, + sentApproverDms: params.sentApproverDms, + host: params.host, + command: params.command, + cwd: params.cwd, + nodeId: params.nodeId, + warningText: params.warningText, + } satisfies ExecToolDetails) + : ({ + status: "approval-pending", + approvalId: params.approvalId, + approvalSlug: params.approvalSlug, + expiresAtMs: params.expiresAtMs, + host: params.host, + command: params.command, + cwd: params.cwd, + nodeId: params.nodeId, + warningText: params.warningText, + } satisfies ExecToolDetails), + }; +} diff --git a/src/agents/bash-tools.exec-runtime.ts b/src/agents/bash-tools.exec-runtime.ts index 9714e4255ee..5c3301414b9 100644 --- a/src/agents/bash-tools.exec-runtime.ts +++ b/src/agents/bash-tools.exec-runtime.ts @@ -230,6 +230,40 @@ export function createApprovalSlug(id: string) { return id.slice(0, APPROVAL_SLUG_LENGTH); } +export function buildApprovalPendingMessage(params: { + warningText?: string; + approvalSlug: string; + approvalId: string; + command: string; + cwd: string; + host: "gateway" | "node"; + nodeId?: string; +}) { + let fence = "```"; + while (params.command.includes(fence)) { + fence += "`"; + } + const commandBlock = `${fence}sh\n${params.command}\n${fence}`; + const lines: string[] = []; + const warningText = params.warningText?.trim(); + if (warningText) { + lines.push(warningText, ""); + } + lines.push(`Approval required (id ${params.approvalSlug}, full ${params.approvalId}).`); + lines.push(`Host: ${params.host}`); + if (params.nodeId) { + lines.push(`Node: ${params.nodeId}`); + } + lines.push(`CWD: ${params.cwd}`); + lines.push("Command:"); + lines.push(commandBlock); + lines.push("Mode: foreground (interactive approvals available)."); + lines.push("Background mode requires pre-approved policy (allow-always or ask=off)."); + lines.push(`Reply with: /approve ${params.approvalSlug} 
allow-once|allow-always|deny`); + lines.push("If the short code is ambiguous, use the full id in /approve."); + return lines.join("\n"); +} + export function resolveApprovalRunningNoticeMs(value?: number) { if (typeof value !== "number" || !Number.isFinite(value)) { return DEFAULT_APPROVAL_RUNNING_NOTICE_MS; diff --git a/src/agents/bash-tools.exec-types.ts b/src/agents/bash-tools.exec-types.ts index bef8ea4bff1..7236fdaaf47 100644 --- a/src/agents/bash-tools.exec-types.ts +++ b/src/agents/bash-tools.exec-types.ts @@ -60,4 +60,19 @@ export type ExecToolDetails = command: string; cwd?: string; nodeId?: string; + warningText?: string; + } + | { + status: "approval-unavailable"; + reason: + | "initiating-platform-disabled" + | "initiating-platform-unsupported" + | "no-approval-route"; + channelLabel?: string; + sentApproverDms?: boolean; + host: ExecHost; + command: string; + cwd?: string; + nodeId?: string; + warningText?: string; }; diff --git a/src/agents/bash-tools.exec.approval-id.test.ts b/src/agents/bash-tools.exec.approval-id.test.ts index b7f4729948c..211d8e3dcaa 100644 --- a/src/agents/bash-tools.exec.approval-id.test.ts +++ b/src/agents/bash-tools.exec.approval-id.test.ts @@ -2,6 +2,7 @@ import fs from "node:fs/promises"; import os from "node:os"; import path from "node:path"; import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import { clearConfigCache } from "../config/config.js"; import { buildSystemRunPreparePayload } from "../test-utils/system-run-prepare-payload.js"; vi.mock("./tools/gateway.js", () => ({ @@ -42,6 +43,162 @@ function buildPreparedSystemRunPayload(rawInvokeParams: unknown) { return buildSystemRunPreparePayload(params); } +function getTestConfigPath() { + return path.join(process.env.HOME ?? 
"", ".openclaw", "openclaw.json"); +} + +async function writeOpenClawConfig(config: Record, pretty = false) { + const configPath = getTestConfigPath(); + await fs.mkdir(path.dirname(configPath), { recursive: true }); + await fs.writeFile(configPath, JSON.stringify(config, null, pretty ? 2 : undefined)); +} + +async function writeExecApprovalsConfig(config: Record) { + const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json"); + await fs.mkdir(path.dirname(approvalsPath), { recursive: true }); + await fs.writeFile(approvalsPath, JSON.stringify(config, null, 2)); +} + +function acceptedApprovalResponse(params: unknown) { + return { status: "accepted", id: (params as { id?: string })?.id }; +} + +function getResultText(result: { content: Array<{ type?: string; text?: string }> }) { + return result.content.find((part) => part.type === "text")?.text ?? ""; +} + +function expectPendingApprovalText( + result: { + details: { status?: string }; + content: Array<{ type?: string; text?: string }>; + }, + options: { + command: string; + host: "gateway" | "node"; + nodeId?: string; + interactive?: boolean; + }, +) { + expect(result.details.status).toBe("approval-pending"); + const details = result.details as { approvalId: string; approvalSlug: string }; + const pendingText = getResultText(result); + expect(pendingText).toContain( + `Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`, + ); + expect(pendingText).toContain(`full ${details.approvalId}`); + expect(pendingText).toContain(`Host: ${options.host}`); + if (options.nodeId) { + expect(pendingText).toContain(`Node: ${options.nodeId}`); + } + expect(pendingText).toContain(`CWD: ${process.cwd()}`); + expect(pendingText).toContain("Command:\n```sh\n"); + expect(pendingText).toContain(options.command); + if (options.interactive) { + expect(pendingText).toContain("Mode: foreground (interactive approvals available)."); + expect(pendingText).toContain("Background mode 
requires pre-approved policy"); + } + return details; +} + +function expectPendingCommandText( + result: { + details: { status?: string }; + content: Array<{ type?: string; text?: string }>; + }, + command: string, +) { + expect(result.details.status).toBe("approval-pending"); + const text = getResultText(result); + expect(text).toContain("Command:\n```sh\n"); + expect(text).toContain(command); +} + +function mockGatewayOkCalls(calls: string[]) { + vi.mocked(callGatewayTool).mockImplementation(async (method) => { + calls.push(method); + return { ok: true }; + }); +} + +function createElevatedAllowlistExecTool() { + return createExecTool({ + ask: "on-miss", + security: "allowlist", + approvalRunningNoticeMs: 0, + elevated: { enabled: true, allowed: true, defaultLevel: "ask" }, + }); +} + +async function expectGatewayExecWithoutApproval(options: { + config: Record; + command: string; + ask?: "always" | "on-miss" | "off"; +}) { + await writeExecApprovalsConfig(options.config); + const calls: string[] = []; + mockGatewayOkCalls(calls); + + const tool = createExecTool({ + host: "gateway", + ask: options.ask, + security: "full", + approvalRunningNoticeMs: 0, + }); + + const result = await tool.execute("call-no-approval", { command: options.command }); + expect(result.details.status).toBe("completed"); + expect(calls).not.toContain("exec.approval.request"); + expect(calls).not.toContain("exec.approval.waitDecision"); +} + +function mockAcceptedApprovalFlow(options: { + onAgent?: (params: Record) => void; + onNodeInvoke?: (params: unknown) => unknown; +}) { + vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { + if (method === "exec.approval.request") { + return acceptedApprovalResponse(params); + } + if (method === "exec.approval.waitDecision") { + return { decision: "allow-once" }; + } + if (method === "agent" && options.onAgent) { + options.onAgent(params as Record); + return { status: "ok" }; + } + if (method === "node.invoke" && 
options.onNodeInvoke) { + return await options.onNodeInvoke(params); + } + return { ok: true }; + }); +} + +function mockPendingApprovalRegistration() { + vi.mocked(callGatewayTool).mockImplementation(async (method) => { + if (method === "exec.approval.request") { + return { status: "accepted", id: "approval-id" }; + } + if (method === "exec.approval.waitDecision") { + return { decision: null }; + } + return { ok: true }; + }); +} + +function expectApprovalUnavailableText(result: { + details: { status?: string }; + content: Array<{ type?: string; text?: string }>; +}) { + expect(result.details.status).toBe("approval-unavailable"); + const text = result.content.find((part) => part.type === "text")?.text ?? ""; + expect(text).not.toContain("/approve"); + expect(text).not.toContain("npm view diver name version description"); + expect(text).not.toContain("Pending command:"); + expect(text).not.toContain("Host:"); + expect(text).not.toContain("CWD:"); + return text; +} + describe("exec approvals", () => { let previousHome: string | undefined; let previousUserProfile: string | undefined; @@ -63,6 +220,7 @@ describe("exec approvals", () => { afterEach(() => { vi.resetAllMocks(); + clearConfigCache(); if (previousHome === undefined) { delete process.env.HOME; } else { @@ -77,15 +235,13 @@ describe("exec approvals", () => { it("reuses approval id as the node runId", async () => { let invokeParams: unknown; + let agentParams: unknown; - vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { - if (method === "exec.approval.request") { - return { status: "accepted", id: (params as { id?: string })?.id }; - } - if (method === "exec.approval.waitDecision") { - return { decision: "allow-once" }; - } - if (method === "node.invoke") { + mockAcceptedApprovalFlow({ + onAgent: (params) => { + agentParams = params; + }, + onNodeInvoke: (params) => { const invoke = params as { command?: string }; if (invoke.command === "system.run.prepare") { return 
buildPreparedSystemRunPayload(params); @@ -94,19 +250,24 @@ describe("exec approvals", () => { invokeParams = params; return { payload: { success: true, stdout: "ok" } }; } - } - return { ok: true }; + }, }); const tool = createExecTool({ host: "node", ask: "always", approvalRunningNoticeMs: 0, + sessionKey: "agent:main:main", }); const result = await tool.execute("call1", { command: "ls -la" }); - expect(result.details.status).toBe("approval-pending"); - const approvalId = (result.details as { approvalId: string }).approvalId; + const details = expectPendingApprovalText(result, { + command: "ls -la", + host: "node", + nodeId: "node-1", + interactive: true, + }); + const approvalId = details.approvalId; await expect .poll(() => (invokeParams as { params?: { runId?: string } } | undefined)?.params?.runId, { @@ -114,6 +275,12 @@ describe("exec approvals", () => { interval: 20, }) .toBe(approvalId); + expect( + (invokeParams as { params?: { suppressNotifyOnExit?: boolean } } | undefined)?.params, + ).toMatchObject({ + suppressNotifyOnExit: true, + }); + await expect.poll(() => agentParams, { timeout: 2_000, interval: 20 }).toBeTruthy(); }); it("skips approval when node allowlist is satisfied", async () => { @@ -188,74 +355,28 @@ describe("exec approvals", () => { }); it("uses exec-approvals ask=off to suppress gateway prompts", async () => { - const approvalsPath = path.join(process.env.HOME ?? 
"", ".openclaw", "exec-approvals.json"); - await fs.mkdir(path.dirname(approvalsPath), { recursive: true }); - await fs.writeFile( - approvalsPath, - JSON.stringify( - { - version: 1, - defaults: { security: "full", ask: "off", askFallback: "full" }, - agents: { - main: { security: "full", ask: "off", askFallback: "full" }, - }, + await expectGatewayExecWithoutApproval({ + config: { + version: 1, + defaults: { security: "full", ask: "off", askFallback: "full" }, + agents: { + main: { security: "full", ask: "off", askFallback: "full" }, }, - null, - 2, - ), - ); - - const calls: string[] = []; - vi.mocked(callGatewayTool).mockImplementation(async (method) => { - calls.push(method); - return { ok: true }; - }); - - const tool = createExecTool({ - host: "gateway", + }, + command: "echo ok", ask: "on-miss", - security: "full", - approvalRunningNoticeMs: 0, }); - - const result = await tool.execute("call3b", { command: "echo ok" }); - expect(result.details.status).toBe("completed"); - expect(calls).not.toContain("exec.approval.request"); - expect(calls).not.toContain("exec.approval.waitDecision"); }); it("inherits ask=off from exec-approvals defaults when tool ask is unset", async () => { - const approvalsPath = path.join(process.env.HOME ?? 
"", ".openclaw", "exec-approvals.json"); - await fs.mkdir(path.dirname(approvalsPath), { recursive: true }); - await fs.writeFile( - approvalsPath, - JSON.stringify( - { - version: 1, - defaults: { security: "full", ask: "off", askFallback: "full" }, - agents: {}, - }, - null, - 2, - ), - ); - - const calls: string[] = []; - vi.mocked(callGatewayTool).mockImplementation(async (method) => { - calls.push(method); - return { ok: true }; + await expectGatewayExecWithoutApproval({ + config: { + version: 1, + defaults: { security: "full", ask: "off", askFallback: "full" }, + agents: {}, + }, + command: "echo ok", }); - - const tool = createExecTool({ - host: "gateway", - security: "full", - approvalRunningNoticeMs: 0, - }); - - const result = await tool.execute("call3c", { command: "echo ok" }); - expect(result.details.status).toBe("completed"); - expect(calls).not.toContain("exec.approval.request"); - expect(calls).not.toContain("exec.approval.waitDecision"); }); it("requires approval for elevated ask when allowlist misses", async () => { @@ -270,7 +391,113 @@ describe("exec approvals", () => { if (method === "exec.approval.request") { resolveApproval?.(); // Return registration confirmation - return { status: "accepted", id: (params as { id?: string })?.id }; + return acceptedApprovalResponse(params); + } + if (method === "exec.approval.waitDecision") { + return { decision: "deny" }; + } + return { ok: true }; + }); + + const tool = createElevatedAllowlistExecTool(); + + const result = await tool.execute("call4", { command: "echo ok", elevated: true }); + expectPendingApprovalText(result, { command: "echo ok", host: "gateway" }); + await approvalSeen; + expect(calls).toContain("exec.approval.request"); + expect(calls).toContain("exec.approval.waitDecision"); + }); + + it("starts a direct agent follow-up after approved gateway exec completes", async () => { + const agentCalls: Array> = []; + + mockAcceptedApprovalFlow({ + onAgent: (params) => { + 
agentCalls.push(params); + }, + }); + + const tool = createExecTool({ + host: "gateway", + ask: "always", + approvalRunningNoticeMs: 0, + sessionKey: "agent:main:main", + elevated: { enabled: true, allowed: true, defaultLevel: "ask" }, + }); + + const result = await tool.execute("call-gw-followup", { + command: "echo ok", + workdir: process.cwd(), + gatewayUrl: undefined, + gatewayToken: undefined, + }); + + expect(result.details.status).toBe("approval-pending"); + await expect.poll(() => agentCalls.length, { timeout: 3_000, interval: 20 }).toBe(1); + expect(agentCalls[0]).toEqual( + expect.objectContaining({ + sessionKey: "agent:main:main", + deliver: true, + idempotencyKey: expect.stringContaining("exec-approval-followup:"), + }), + ); + expect(typeof agentCalls[0]?.message).toBe("string"); + expect(agentCalls[0]?.message).toContain( + "An async command the user already approved has completed.", + ); + }); + + it("requires a separate approval for each elevated command after allow-once", async () => { + const requestCommands: string[] = []; + const requestIds: string[] = []; + const waitIds: string[] = []; + + vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { + if (method === "exec.approval.request") { + const request = params as { id?: string; command?: string }; + if (typeof request.command === "string") { + requestCommands.push(request.command); + } + if (typeof request.id === "string") { + requestIds.push(request.id); + } + return acceptedApprovalResponse(request); + } + if (method === "exec.approval.waitDecision") { + const wait = params as { id?: string }; + if (typeof wait.id === "string") { + waitIds.push(wait.id); + } + return { decision: "allow-once" }; + } + return { ok: true }; + }); + + const tool = createElevatedAllowlistExecTool(); + + const first = await tool.execute("call-seq-1", { + command: "npm view diver --json", + elevated: true, + }); + const second = await tool.execute("call-seq-2", { + command: "brew 
outdated", + elevated: true, + }); + + expect(first.details.status).toBe("approval-pending"); + expect(second.details.status).toBe("approval-pending"); + expect(requestCommands).toEqual(["npm view diver --json", "brew outdated"]); + expect(requestIds).toHaveLength(2); + expect(requestIds[0]).not.toBe(requestIds[1]); + expect(waitIds).toEqual(requestIds); + }); + + it("shows full chained gateway commands in approval-pending message", async () => { + const calls: string[] = []; + vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { + calls.push(method); + if (method === "exec.approval.request") { + return acceptedApprovalResponse(params); } if (method === "exec.approval.waitDecision") { return { decision: "deny" }; @@ -279,17 +506,46 @@ describe("exec approvals", () => { }); const tool = createExecTool({ + host: "gateway", ask: "on-miss", security: "allowlist", approvalRunningNoticeMs: 0, - elevated: { enabled: true, allowed: true, defaultLevel: "ask" }, }); - const result = await tool.execute("call4", { command: "echo ok", elevated: true }); - expect(result.details.status).toBe("approval-pending"); - await approvalSeen; + const result = await tool.execute("call-chain-gateway", { + command: "npm view diver --json | jq .name && brew outdated", + }); + + expectPendingCommandText(result, "npm view diver --json | jq .name && brew outdated"); + expect(calls).toContain("exec.approval.request"); + }); + + it("shows full chained node commands in approval-pending message", async () => { + const calls: string[] = []; + vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { + calls.push(method); + if (method === "node.invoke") { + const invoke = params as { command?: string }; + if (invoke.command === "system.run.prepare") { + return buildPreparedSystemRunPayload(params); + } + } + return { ok: true }; + }); + + const tool = createExecTool({ + host: "node", + ask: "always", + security: "full", + approvalRunningNoticeMs: 0, + 
}); + + const result = await tool.execute("call-chain-node", { + command: "npm view diver --json | jq .name && brew outdated", + }); + + expectPendingCommandText(result, "npm view diver --json | jq .name && brew outdated"); expect(calls).toContain("exec.approval.request"); - expect(calls).toContain("exec.approval.waitDecision"); }); it("waits for approval registration before returning approval-pending", async () => { @@ -354,6 +610,72 @@ describe("exec approvals", () => { ); }); + it("returns an unavailable approval message instead of a local /approve prompt when discord exec approvals are disabled", async () => { + await writeOpenClawConfig({ + channels: { + discord: { + enabled: true, + execApprovals: { enabled: false }, + }, + }, + }); + + mockPendingApprovalRegistration(); + + const tool = createExecTool({ + host: "gateway", + ask: "always", + approvalRunningNoticeMs: 0, + messageProvider: "discord", + accountId: "default", + currentChannelId: "1234567890", + }); + + const result = await tool.execute("call-unavailable", { + command: "npm view diver name version description", + }); + + const text = expectApprovalUnavailableText(result); + expect(text).toContain("chat exec approvals are not enabled on Discord"); + expect(text).toContain("Web UI or terminal UI"); + }); + + it("tells Telegram users that allowed approvers were DMed when Telegram approvals are disabled but Discord DM approvals are enabled", async () => { + await writeOpenClawConfig( + { + channels: { + telegram: { + enabled: true, + execApprovals: { enabled: false }, + }, + discord: { + enabled: true, + execApprovals: { enabled: true, approvers: ["123"], target: "dm" }, + }, + }, + }, + true, + ); + + mockPendingApprovalRegistration(); + + const tool = createExecTool({ + host: "gateway", + ask: "always", + approvalRunningNoticeMs: 0, + messageProvider: "telegram", + accountId: "default", + currentChannelId: "-1003841603622", + }); + + const result = await tool.execute("call-tg-unavailable", { + 
command: "npm view diver name version description", + }); + + const text = expectApprovalUnavailableText(result); + expect(text).toContain("Approval required. I sent the allowed approvers DMs."); + }); + it("denies node obfuscated command when approval request times out", async () => { vi.mocked(detectCommandObfuscation).mockReturnValue({ detected: true, diff --git a/src/agents/context.lookup.test.ts b/src/agents/context.lookup.test.ts index 584f9c27cbb..a395f0b3089 100644 --- a/src/agents/context.lookup.test.ts +++ b/src/agents/context.lookup.test.ts @@ -1,8 +1,13 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; -function mockContextModuleDeps(loadConfigImpl: () => unknown) { +type DiscoveredModel = { id: string; contextWindow: number }; + +function mockContextDeps(params: { + loadConfig: () => unknown; + discoveredModels?: DiscoveredModel[]; +}) { vi.doMock("../config/config.js", () => ({ - loadConfig: loadConfigImpl, + loadConfig: params.loadConfig, })); vi.doMock("./models-config.js", () => ({ ensureOpenClawModelsJson: vi.fn(async () => {}), @@ -13,11 +18,44 @@ function mockContextModuleDeps(loadConfigImpl: () => unknown) { vi.doMock("./pi-model-discovery.js", () => ({ discoverAuthStorage: vi.fn(() => ({})), discoverModels: vi.fn(() => ({ - getAll: () => [], + getAll: () => params.discoveredModels ?? [], })), })); } +function mockContextModuleDeps(loadConfigImpl: () => unknown) { + mockContextDeps({ loadConfig: loadConfigImpl }); +} + +// Shared mock setup used by multiple tests. +function mockDiscoveryDeps( + models: DiscoveredModel[], + configModels?: Record }>, +) { + mockContextDeps({ + loadConfig: () => ({ models: configModels ? 
{ providers: configModels } : {} }), + discoveredModels: models, + }); +} + +function createContextOverrideConfig(provider: string, model: string, contextWindow: number) { + return { + models: { + providers: { + [provider]: { + models: [{ id: model, contextWindow }], + }, + }, + }, + }; +} + +async function importResolveContextTokensForModel() { + const { resolveContextTokensForModel } = await import("./context.js"); + await new Promise((r) => setTimeout(r, 0)); + return resolveContextTokensForModel; +} + describe("lookupContextTokens", () => { beforeEach(() => { vi.resetModules(); @@ -87,4 +125,184 @@ describe("lookupContextTokens", () => { vi.useRealTimers(); } }); + + it("returns the smaller window when the same bare model id is discovered under multiple providers", async () => { + mockDiscoveryDeps([ + { id: "gemini-3.1-pro-preview", contextWindow: 1_048_576 }, + { id: "gemini-3.1-pro-preview", contextWindow: 128_000 }, + ]); + + const { lookupContextTokens } = await import("./context.js"); + // Trigger async cache population. + await new Promise((r) => setTimeout(r, 0)); + // Conservative minimum: bare-id cache feeds runtime flush/compaction paths. + expect(lookupContextTokens("gemini-3.1-pro-preview")).toBe(128_000); + }); + + it("resolveContextTokensForModel returns discovery value when provider-qualified entry exists in cache", async () => { + // Registry returns provider-qualified entries (real-world scenario from #35976). + // When no explicit config override exists, the bare cache lookup hits the + // provider-qualified raw discovery entry. + mockDiscoveryDeps([ + { id: "github-copilot/gemini-3.1-pro-preview", contextWindow: 128_000 }, + { id: "google-gemini-cli/gemini-3.1-pro-preview", contextWindow: 1_048_576 }, + ]); + + const { resolveContextTokensForModel } = await import("./context.js"); + await new Promise((r) => setTimeout(r, 0)); + + // With provider specified and no config override, bare lookup finds the + // provider-qualified discovery entry. 
+ const result = resolveContextTokensForModel({ + provider: "google-gemini-cli", + model: "gemini-3.1-pro-preview", + }); + expect(result).toBe(1_048_576); + }); + + it("resolveContextTokensForModel returns configured override via direct config scan (beats discovery)", async () => { + // Config has an explicit contextWindow; resolveContextTokensForModel should + // return it via direct config scan, preventing collisions with raw discovery + // entries. Real callers (status.summary.ts etc.) always pass cfg. + mockDiscoveryDeps([ + { id: "google-gemini-cli/gemini-3.1-pro-preview", contextWindow: 1_048_576 }, + ]); + + const cfg = createContextOverrideConfig("google-gemini-cli", "gemini-3.1-pro-preview", 200_000); + const resolveContextTokensForModel = await importResolveContextTokensForModel(); + + const result = resolveContextTokensForModel({ + cfg: cfg as never, + provider: "google-gemini-cli", + model: "gemini-3.1-pro-preview", + }); + expect(result).toBe(200_000); + }); + + it("resolveContextTokensForModel honors configured overrides when provider keys use mixed case", async () => { + mockDiscoveryDeps([{ id: "openrouter/anthropic/claude-sonnet-4-5", contextWindow: 1_048_576 }]); + + const cfg = createContextOverrideConfig(" OpenRouter ", "anthropic/claude-sonnet-4-5", 200_000); + const resolveContextTokensForModel = await importResolveContextTokensForModel(); + + const result = resolveContextTokensForModel({ + cfg: cfg as never, + provider: "openrouter", + model: "anthropic/claude-sonnet-4-5", + }); + expect(result).toBe(200_000); + }); + + it("resolveContextTokensForModel: config direct scan prevents OpenRouter qualified key collision for Google provider", async () => { + // When provider is explicitly "google" and cfg has a Google contextWindow + // override, the config direct scan returns it before any cache lookup — + // so the OpenRouter raw "google/gemini-2.5-pro" qualified entry is never hit. 
+ // Real callers (status.summary.ts) always pass cfg when provider is explicit. + mockDiscoveryDeps([{ id: "google/gemini-2.5-pro", contextWindow: 999_000 }]); + + const cfg = createContextOverrideConfig("google", "gemini-2.5-pro", 2_000_000); + const resolveContextTokensForModel = await importResolveContextTokensForModel(); + + // Google with explicit cfg: config direct scan wins before any cache lookup. + const googleResult = resolveContextTokensForModel({ + cfg: cfg as never, + provider: "google", + model: "gemini-2.5-pro", + }); + expect(googleResult).toBe(2_000_000); + + // OpenRouter provider with slash model id: bare lookup finds the raw entry. + const openrouterResult = resolveContextTokensForModel({ + provider: "openrouter", + model: "google/gemini-2.5-pro", + }); + expect(openrouterResult).toBe(999_000); + }); + + it("resolveContextTokensForModel prefers exact provider key over alias-normalized match", async () => { + // When both "qwen" and "qwen-portal" exist as config keys (alias pattern), + // resolveConfiguredProviderContextWindow must return the exact-key match first, + // not the first normalized hit — mirroring pi-embedded-runner/model.ts behaviour. + mockDiscoveryDeps([]); + + const cfg = { + models: { + providers: { + "qwen-portal": { models: [{ id: "qwen-max", contextWindow: 32_000 }] }, + qwen: { models: [{ id: "qwen-max", contextWindow: 128_000 }] }, + }, + }, + }; + + const { resolveContextTokensForModel } = await import("./context.js"); + await new Promise((r) => setTimeout(r, 0)); + + // Exact key "qwen" wins over the alias-normalized match "qwen-portal". + const qwenResult = resolveContextTokensForModel({ + cfg: cfg as never, + provider: "qwen", + model: "qwen-max", + }); + expect(qwenResult).toBe(128_000); + + // Exact key "qwen-portal" wins (no alias lookup needed). 
+ const portalResult = resolveContextTokensForModel({ + cfg: cfg as never, + provider: "qwen-portal", + model: "qwen-max", + }); + expect(portalResult).toBe(32_000); + }); + + it("resolveContextTokensForModel(model-only) does not apply config scan for inferred provider", async () => { + // status.ts log-usage fallback calls resolveContextTokensForModel({ model }) + // with no provider. When model = "google/gemini-2.5-pro" (OpenRouter ID), + // resolveProviderModelRef infers provider="google". Without the guard, + // resolveConfiguredProviderContextWindow would return Google's configured + // window and misreport context limits for the OpenRouter session. + mockDiscoveryDeps([{ id: "google/gemini-2.5-pro", contextWindow: 999_000 }]); + + const cfg = createContextOverrideConfig("google", "gemini-2.5-pro", 2_000_000); + const resolveContextTokensForModel = await importResolveContextTokensForModel(); + + // model-only call (no explicit provider) must NOT apply config direct scan. + // Falls through to bare cache lookup: "google/gemini-2.5-pro" → 999k ✓. + const modelOnlyResult = resolveContextTokensForModel({ + cfg: cfg as never, + model: "google/gemini-2.5-pro", + // no provider + }); + expect(modelOnlyResult).toBe(999_000); + + // Explicit provider still uses config scan ✓. + const explicitResult = resolveContextTokensForModel({ + cfg: cfg as never, + provider: "google", + model: "gemini-2.5-pro", + }); + expect(explicitResult).toBe(2_000_000); + }); + + it("resolveContextTokensForModel: qualified key beats bare min when provider is explicit (original #35976 fix)", async () => { + // Regression: when both "gemini-3.1-pro-preview" (bare, min=128k) AND + // "google-gemini-cli/gemini-3.1-pro-preview" (qualified, 1M) are in cache, + // an explicit-provider call must return the provider-specific qualified value, + // not the collided bare minimum. 
+ mockDiscoveryDeps([ + { id: "github-copilot/gemini-3.1-pro-preview", contextWindow: 128_000 }, + { id: "gemini-3.1-pro-preview", contextWindow: 128_000 }, + { id: "google-gemini-cli/gemini-3.1-pro-preview", contextWindow: 1_048_576 }, + ]); + + const { resolveContextTokensForModel } = await import("./context.js"); + await new Promise((r) => setTimeout(r, 0)); + + // Qualified "google-gemini-cli/gemini-3.1-pro-preview" → 1M wins over + // bare "gemini-3.1-pro-preview" → 128k (cross-provider minimum). + const result = resolveContextTokensForModel({ + provider: "google-gemini-cli", + model: "gemini-3.1-pro-preview", + }); + expect(result).toBe(1_048_576); + }); }); diff --git a/src/agents/context.test.ts b/src/agents/context.test.ts index 267755a8849..98eb99d7295 100644 --- a/src/agents/context.test.ts +++ b/src/agents/context.test.ts @@ -8,23 +8,44 @@ import { import { createSessionManagerRuntimeRegistry } from "./pi-extensions/session-manager-runtime-registry.js"; describe("applyDiscoveredContextWindows", () => { - it("keeps the smallest context window when duplicate model ids are discovered", () => { + it("keeps the smallest context window when the same bare model id appears under multiple providers", () => { const cache = new Map(); applyDiscoveredContextWindows({ cache, models: [ - { id: "claude-sonnet-4-5", contextWindow: 1_000_000 }, - { id: "claude-sonnet-4-5", contextWindow: 200_000 }, + { id: "gemini-3.1-pro-preview", contextWindow: 128_000 }, + { id: "gemini-3.1-pro-preview", contextWindow: 1_048_576 }, ], }); - expect(cache.get("claude-sonnet-4-5")).toBe(200_000); + // Keep the conservative (minimum) value: this cache feeds runtime paths such + // as flush thresholds and session persistence, not just /status display. + // Callers with a known provider should use resolveContextTokensForModel which + // tries the provider-qualified key first. 
+ expect(cache.get("gemini-3.1-pro-preview")).toBe(128_000); + }); + + it("stores provider-qualified entries independently", () => { + const cache = new Map(); + applyDiscoveredContextWindows({ + cache, + models: [ + { id: "github-copilot/gemini-3.1-pro-preview", contextWindow: 128_000 }, + { id: "google-gemini-cli/gemini-3.1-pro-preview", contextWindow: 1_048_576 }, + ], + }); + + expect(cache.get("github-copilot/gemini-3.1-pro-preview")).toBe(128_000); + expect(cache.get("google-gemini-cli/gemini-3.1-pro-preview")).toBe(1_048_576); }); }); describe("applyConfiguredContextWindows", () => { - it("overrides discovered cache values with explicit models.providers contextWindow", () => { - const cache = new Map([["anthropic/claude-opus-4-6", 1_000_000]]); + it("writes bare model id to cache; does not touch raw provider-qualified discovery entries", () => { + // Discovery stored a provider-qualified entry; config override goes into the + // bare key only. resolveContextTokensForModel now scans config directly, so + // there is no need (and no benefit) to also write a synthetic qualified key. + const cache = new Map([["openrouter/anthropic/claude-opus-4-6", 1_000_000]]); applyConfiguredContextWindows({ cache, modelsConfig: { @@ -37,6 +58,33 @@ describe("applyConfiguredContextWindows", () => { }); expect(cache.get("anthropic/claude-opus-4-6")).toBe(200_000); + // Discovery entry is untouched — no synthetic write that could corrupt + // an unrelated provider's raw slash-containing model ID. + expect(cache.get("openrouter/anthropic/claude-opus-4-6")).toBe(1_000_000); + }); + + it("does not write synthetic provider-qualified keys; only bare model ids go into cache", () => { + // applyConfiguredContextWindows must NOT write "google-gemini-cli/gemini-3.1-pro-preview" + // into the cache — that keyspace is reserved for raw discovery model IDs and + // a synthetic write would overwrite unrelated entries (e.g. 
OpenRouter's + // "google/gemini-2.5-pro" being clobbered by a Google provider config). + const cache = new Map(); + cache.set("google-gemini-cli/gemini-3.1-pro-preview", 1_048_576); // discovery entry + applyConfiguredContextWindows({ + cache, + modelsConfig: { + providers: { + "google-gemini-cli": { + models: [{ id: "gemini-3.1-pro-preview", contextWindow: 200_000 }], + }, + }, + }, + }); + + // Bare key is written. + expect(cache.get("gemini-3.1-pro-preview")).toBe(200_000); + // Discovery entry is NOT overwritten. + expect(cache.get("google-gemini-cli/gemini-3.1-pro-preview")).toBe(1_048_576); }); it("adds config-only model context windows and ignores invalid entries", () => { diff --git a/src/agents/context.ts b/src/agents/context.ts index bd3aeaf6fc2..c18d9534689 100644 --- a/src/agents/context.ts +++ b/src/agents/context.ts @@ -6,6 +6,7 @@ import type { OpenClawConfig } from "../config/config.js"; import { computeBackoff, type BackoffPolicy } from "../infra/backoff.js"; import { consumeRootOptionToken, FLAG_TERMINATOR } from "../infra/cli-root-options.js"; import { resolveOpenClawAgentDir } from "./agent-paths.js"; +import { normalizeProviderId } from "./model-selection.js"; import { ensureOpenClawModelsJson } from "./models-config.js"; type ModelEntry = { id: string; contextWindow?: number }; @@ -41,8 +42,12 @@ export function applyDiscoveredContextWindows(params: { continue; } const existing = params.cache.get(model.id); - // When multiple providers expose the same model id with different limits, - // prefer the smaller window so token budgeting is fail-safe (no overestimation). + // When the same bare model id appears under multiple providers with different + // limits, keep the smaller window. This cache feeds both display paths and + // runtime paths (flush thresholds, session context-token persistence), so + // overestimating the limit could delay compaction and cause context overflow. 
+ // Callers that know the active provider should use resolveContextTokensForModel, + // which tries the provider-qualified key first and falls back here. if (existing === undefined || contextWindow < existing) { params.cache.set(model.id, contextWindow); } @@ -152,7 +157,8 @@ function ensureContextWindowCacheLoaded(): Promise { } try { - const { discoverAuthStorage, discoverModels } = await import("./pi-model-discovery.js"); + const { discoverAuthStorage, discoverModels } = + await import("./pi-model-discovery-runtime.js"); const agentDir = resolveOpenClawAgentDir(); const authStorage = discoverAuthStorage(agentDir); const modelRegistry = discoverModels(authStorage, agentDir) as unknown as ModelRegistryLike; @@ -222,13 +228,15 @@ function resolveProviderModelRef(params: { } const providerRaw = params.provider?.trim(); if (providerRaw) { + // Keep the exact (lowercased) provider key; callers that need the canonical + // alias (e.g. cache key construction) apply normalizeProviderId explicitly. return { provider: providerRaw.toLowerCase(), model: modelRaw }; } const slash = modelRaw.indexOf("/"); if (slash <= 0) { return undefined; } - const provider = modelRaw.slice(0, slash).trim().toLowerCase(); + const provider = normalizeProviderId(modelRaw.slice(0, slash)); const model = modelRaw.slice(slash + 1).trim(); if (!provider || !model) { return undefined; @@ -236,6 +244,58 @@ function resolveProviderModelRef(params: { return { provider, model }; } +// Look up an explicit contextWindow override for a specific provider+model +// directly from config, without going through the shared discovery cache. +// This avoids the cache keyspace collision where "provider/model" synthetic +// keys overlap with raw slash-containing model IDs (e.g. OpenRouter's +// "google/gemini-2.5-pro" stored as a raw catalog entry). 
+function resolveConfiguredProviderContextWindow( + cfg: OpenClawConfig | undefined, + provider: string, + model: string, +): number | undefined { + const providers = (cfg?.models as ModelsConfig | undefined)?.providers; + if (!providers) { + return undefined; + } + + // Mirror the lookup order in pi-embedded-runner/model.ts: exact key first, + // then normalized fallback. This prevents alias collisions (e.g. when both + // "qwen" and "qwen-portal" exist as config keys) from picking the wrong + // contextWindow based on Object.entries iteration order. + function findContextWindow(matchProviderId: (id: string) => boolean): number | undefined { + for (const [providerId, providerConfig] of Object.entries(providers!)) { + if (!matchProviderId(providerId)) { + continue; + } + if (!Array.isArray(providerConfig?.models)) { + continue; + } + for (const m of providerConfig.models) { + if ( + typeof m?.id === "string" && + m.id === model && + typeof m?.contextWindow === "number" && + m.contextWindow > 0 + ) { + return m.contextWindow; + } + } + } + return undefined; + } + + // 1. Exact match (case-insensitive, no alias expansion). + const exactResult = findContextWindow((id) => id.trim().toLowerCase() === provider.toLowerCase()); + if (exactResult !== undefined) { + return exactResult; + } + + // 2. Normalized fallback: covers alias keys such as "qwen" → "qwen-portal". + const normalizedProvider = normalizeProviderId(provider); + return findContextWindow((id) => normalizeProviderId(id) === normalizedProvider); +} + function isAnthropic1MModel(provider: string, model: string): boolean { if (provider !== "anthropic") { return false; @@ -267,7 +327,64 @@ export function resolveContextTokensForModel(params: { if (modelParams?.context1m === true && isAnthropic1MModel(ref.provider, ref.model)) { return ANTHROPIC_CONTEXT_1M_TOKENS; } + // Only do the config direct scan when the caller explicitly passed a + // provider. 
When provider is inferred from a slash in the model string + // (e.g. "google/gemini-2.5-pro" → ref.provider = "google"), the model ID + // may belong to a DIFFERENT provider (e.g. an OpenRouter session). Scanning + // cfg.models.providers.google in that case would return Google's configured + // window and misreport context limits for the OpenRouter session. + // See status.ts log-usage fallback which calls with only { model } set. + if (params.provider) { + const configuredWindow = resolveConfiguredProviderContextWindow( + params.cfg, + ref.provider, + ref.model, + ); + if (configuredWindow !== undefined) { + return configuredWindow; + } + } } - return lookupContextTokens(params.model) ?? params.fallbackContextTokens; + // When provider is explicitly given and the model ID is bare (no slash), + // try the provider-qualified cache key BEFORE the bare key. Discovery + // entries are stored under qualified IDs (e.g. "google-gemini-cli/ + // gemini-3.1-pro-preview → 1M"), while the bare key may hold a cross- + // provider minimum (128k). Returning the qualified entry gives the correct + // provider-specific window for /status and session context-token persistence. + // + // Guard: only when params.provider is explicit (not inferred from a slash in + // the model string). For model-only callers (e.g. status.ts log-usage + // fallback with model="google/gemini-2.5-pro"), the inferred provider would + // construct "google/gemini-2.5-pro" as the qualified key which accidentally + // matches OpenRouter's raw discovery entry — the bare lookup is correct there. + if (params.provider && ref && !ref.model.includes("/")) { + const qualifiedResult = lookupContextTokens( + `${normalizeProviderId(ref.provider)}/${ref.model}`, + ); + if (qualifiedResult !== undefined) { + return qualifiedResult; + } + } + + // Bare key fallback. For model-only calls with slash-containing IDs + // (e.g. "google/gemini-2.5-pro") this IS the raw discovery cache key. 
+ const bareResult = lookupContextTokens(params.model); + if (bareResult !== undefined) { + return bareResult; + } + + // When provider is implicit, try qualified as a last resort so inferred + // provider/model pairs (e.g. model="google-gemini-cli/gemini-3.1-pro") + // still find discovery entries stored under that qualified ID. + if (!params.provider && ref && !ref.model.includes("/")) { + const qualifiedResult = lookupContextTokens( + `${normalizeProviderId(ref.provider)}/${ref.model}`, + ); + if (qualifiedResult !== undefined) { + return qualifiedResult; + } + } + + return params.fallbackContextTokens; } diff --git a/src/agents/failover-error.test.ts b/src/agents/failover-error.test.ts index a99cfb5c4b2..38e3530f011 100644 --- a/src/agents/failover-error.test.ts +++ b/src/agents/failover-error.test.ts @@ -67,7 +67,9 @@ describe("failover-error", () => { expect(resolveFailoverReasonFromError({ statusCode: "429" })).toBe("rate_limit"); expect(resolveFailoverReasonFromError({ status: 403 })).toBe("auth"); expect(resolveFailoverReasonFromError({ status: 408 })).toBe("timeout"); + expect(resolveFailoverReasonFromError({ status: 499 })).toBe("timeout"); expect(resolveFailoverReasonFromError({ status: 400 })).toBe("format"); + expect(resolveFailoverReasonFromError({ status: 422 })).toBe("format"); // Keep the status-only path behavior-preserving and conservative. 
expect(resolveFailoverReasonFromError({ status: 500 })).toBeNull(); expect(resolveFailoverReasonFromError({ status: 502 })).toBe("timeout"); @@ -93,6 +95,12 @@ describe("failover-error", () => { message: ANTHROPIC_OVERLOADED_PAYLOAD, }), ).toBe("overloaded"); + expect( + resolveFailoverReasonFromError({ + status: 499, + message: ANTHROPIC_OVERLOADED_PAYLOAD, + }), + ).toBe("overloaded"); expect( resolveFailoverReasonFromError({ status: 429, @@ -155,6 +163,44 @@ describe("failover-error", () => { ).toBe("billing"); }); + it("treats HTTP 422 as format error", () => { + expect( + resolveFailoverReasonFromError({ + status: 422, + message: "check open ai req parameter error", + }), + ).toBe("format"); + expect( + resolveFailoverReasonFromError({ + status: 422, + message: "Unprocessable Entity", + }), + ).toBe("format"); + }); + + it("treats 422 with billing message as billing instead of format", () => { + expect( + resolveFailoverReasonFromError({ + status: 422, + message: "insufficient credits", + }), + ).toBe("billing"); + }); + + it("classifies OpenRouter 'requires more credits' text as billing", () => { + expect( + resolveFailoverReasonFromError({ + message: "This model requires more credits to use", + }), + ).toBe("billing"); + expect( + resolveFailoverReasonFromError({ + status: 402, + message: "This model require more credits", + }), + ).toBe("billing"); + }); + it("treats zhipuai weekly/monthly limit exhausted as rate_limit", () => { expect( resolveFailoverReasonFromError({ @@ -197,6 +243,13 @@ describe("failover-error", () => { message: "Workspace spend limit reached. Contact your admin.", }), ).toBe("rate_limit"); + expect( + resolveFailoverReasonFromError({ + status: 402, + message: + "You have reached your subscription quota limit. Please wait for automatic quota refresh in the rolling time window, upgrade to a higher plan, or use a Pay-As-You-Go API Key for unlimited access. 
Learn more: https://zenmux.ai/docs/guide/subscription.html", + }), + ).toBe("rate_limit"); expect( resolveFailoverReasonFromError({ status: 402, @@ -267,6 +320,8 @@ describe("failover-error", () => { it("infers timeout from common node error codes", () => { expect(resolveFailoverReasonFromError({ code: "ETIMEDOUT" })).toBe("timeout"); expect(resolveFailoverReasonFromError({ code: "ECONNRESET" })).toBe("timeout"); + expect(resolveFailoverReasonFromError({ code: "EHOSTDOWN" })).toBe("timeout"); + expect(resolveFailoverReasonFromError({ code: "EPIPE" })).toBe("timeout"); }); it("infers timeout from abort/error stop-reason messages", () => { @@ -280,6 +335,9 @@ describe("failover-error", () => { expect(resolveFailoverReasonFromError({ message: "stop reason: error" })).toBe("timeout"); expect(resolveFailoverReasonFromError({ message: "reason: abort" })).toBe("timeout"); expect(resolveFailoverReasonFromError({ message: "reason: error" })).toBe("timeout"); + expect( + resolveFailoverReasonFromError({ message: "Unhandled stop reason: network_error" }), + ).toBe("timeout"); }); it("infers timeout from connection/network error messages", () => { @@ -306,6 +364,23 @@ describe("failover-error", () => { expect(isTimeoutError(err)).toBe(true); }); + it("classifies abort-wrapped RESOURCE_EXHAUSTED as rate_limit", () => { + const err = Object.assign(new Error("request aborted"), { + name: "AbortError", + cause: { + error: { + code: 429, + message: GEMINI_RESOURCE_EXHAUSTED_MESSAGE, + status: "RESOURCE_EXHAUSTED", + }, + }, + }); + + expect(resolveFailoverReasonFromError(err)).toBe("rate_limit"); + expect(coerceToFailoverError(err)?.reason).toBe("rate_limit"); + expect(coerceToFailoverError(err)?.status).toBe(429); + }); + it("coerces failover-worthy errors into FailoverError with metadata", () => { const err = coerceToFailoverError("credit balance too low", { provider: "anthropic", diff --git a/src/agents/failover-error.ts b/src/agents/failover-error.ts index 
a39685e1b16..dd482310a2b 100644 --- a/src/agents/failover-error.ts +++ b/src/agents/failover-error.ts @@ -68,7 +68,30 @@ export function resolveFailoverStatus(reason: FailoverReason): number | undefine } } -function getStatusCode(err: unknown): number | undefined { +function findErrorProperty( + err: unknown, + reader: (candidate: unknown) => T | undefined, + seen: Set = new Set(), +): T | undefined { + const direct = reader(err); + if (direct !== undefined) { + return direct; + } + if (!err || typeof err !== "object") { + return undefined; + } + if (seen.has(err)) { + return undefined; + } + seen.add(err); + const candidate = err as { error?: unknown; cause?: unknown }; + return ( + findErrorProperty(candidate.error, reader, seen) ?? + findErrorProperty(candidate.cause, reader, seen) + ); +} + +function readDirectStatusCode(err: unknown): number | undefined { if (!err || typeof err !== "object") { return undefined; } @@ -84,38 +107,87 @@ function getStatusCode(err: unknown): number | undefined { return undefined; } -function getErrorCode(err: unknown): string | undefined { +function getStatusCode(err: unknown): number | undefined { + return findErrorProperty(err, readDirectStatusCode); +} + +function readDirectErrorCode(err: unknown): string | undefined { if (!err || typeof err !== "object") { return undefined; } - const candidate = (err as { code?: unknown }).code; - if (typeof candidate !== "string") { + const directCode = (err as { code?: unknown }).code; + if (typeof directCode === "string") { + const trimmed = directCode.trim(); + return trimmed ? trimmed : undefined; + } + const status = (err as { status?: unknown }).status; + if (typeof status !== "string" || /^\d+$/.test(status)) { return undefined; } - const trimmed = candidate.trim(); + const trimmed = status.trim(); return trimmed ? 
trimmed : undefined; } -function getErrorMessage(err: unknown): string { +function getErrorCode(err: unknown): string | undefined { + return findErrorProperty(err, readDirectErrorCode); +} + +function readDirectErrorMessage(err: unknown): string | undefined { if (err instanceof Error) { - return err.message; + return err.message || undefined; } if (typeof err === "string") { - return err; + return err || undefined; } if (typeof err === "number" || typeof err === "boolean" || typeof err === "bigint") { return String(err); } if (typeof err === "symbol") { - return err.description ?? ""; + return err.description ?? undefined; } if (err && typeof err === "object") { const message = (err as { message?: unknown }).message; if (typeof message === "string") { - return message; + return message || undefined; } } - return ""; + return undefined; +} + +function getErrorMessage(err: unknown): string { + return findErrorProperty(err, readDirectErrorMessage) ?? ""; +} + +function getErrorCause(err: unknown): unknown { + if (!err || typeof err !== "object" || !("cause" in err)) { + return undefined; + } + return (err as { cause?: unknown }).cause; +} + +/** Classify rate-limit / overloaded from symbolic error codes like RESOURCE_EXHAUSTED. 
*/ +function classifyFailoverReasonFromSymbolicCode(raw: string | undefined): FailoverReason | null { + const normalized = raw?.trim().toUpperCase(); + if (!normalized) { + return null; + } + switch (normalized) { + case "RESOURCE_EXHAUSTED": + case "RATE_LIMIT": + case "RATE_LIMITED": + case "RATE_LIMIT_EXCEEDED": + case "TOO_MANY_REQUESTS": + case "THROTTLED": + case "THROTTLING": + case "THROTTLINGEXCEPTION": + case "THROTTLING_EXCEPTION": + return "rate_limit"; + case "OVERLOADED": + case "OVERLOADED_ERROR": + return "overloaded"; + default: + return null; + } } function hasTimeoutHint(err: unknown): boolean { @@ -160,6 +232,12 @@ export function resolveFailoverReasonFromError(err: unknown): FailoverReason | n return statusReason; } + // Check symbolic error codes (e.g. RESOURCE_EXHAUSTED from Google APIs) + const symbolicCodeReason = classifyFailoverReasonFromSymbolicCode(getErrorCode(err)); + if (symbolicCodeReason) { + return symbolicCodeReason; + } + const code = (getErrorCode(err) ?? "").toUpperCase(); if ( [ @@ -170,12 +248,24 @@ export function resolveFailoverReasonFromError(err: unknown): FailoverReason | n "ECONNREFUSED", "ENETUNREACH", "EHOSTUNREACH", + "EHOSTDOWN", "ENETRESET", + "EPIPE", "EAI_AGAIN", ].includes(code) ) { return "timeout"; } + // Walk into error cause chain *before* timeout heuristics so that a specific + // cause (e.g. RESOURCE_EXHAUSTED wrapped in AbortError) overrides a parent + // message-based "timeout" guess from isTimeoutError. 
+ const cause = getErrorCause(err); + if (cause && cause !== err) { + const causeReason = resolveFailoverReasonFromError(cause); + if (causeReason) { + return causeReason; + } + } if (isTimeoutError(err)) { return "timeout"; } diff --git a/src/agents/fast-mode.ts b/src/agents/fast-mode.ts new file mode 100644 index 00000000000..3935eeae27b --- /dev/null +++ b/src/agents/fast-mode.ts @@ -0,0 +1,58 @@ +import { normalizeFastMode } from "../auto-reply/thinking.js"; +import type { OpenClawConfig } from "../config/config.js"; +import type { SessionEntry } from "../config/sessions.js"; + +export type FastModeState = { + enabled: boolean; + source: "session" | "config" | "default"; +}; + +export function resolveFastModeParam( + extraParams: Record | undefined, +): boolean | undefined { + return normalizeFastMode( + (extraParams?.fastMode ?? extraParams?.fast_mode) as string | boolean | null | undefined, + ); +} + +function resolveConfiguredFastModeRaw(params: { + cfg: OpenClawConfig | undefined; + provider: string; + model: string; +}): unknown { + const modelKey = `${params.provider}/${params.model}`; + const modelConfig = params.cfg?.agents?.defaults?.models?.[modelKey]; + return modelConfig?.params?.fastMode ?? modelConfig?.params?.fast_mode; +} + +export function resolveConfiguredFastMode(params: { + cfg: OpenClawConfig | undefined; + provider: string; + model: string; +}): boolean { + return ( + normalizeFastMode( + resolveConfiguredFastModeRaw(params) as string | boolean | null | undefined, + ) ?? 
false + ); +} + +export function resolveFastModeState(params: { + cfg: OpenClawConfig | undefined; + provider: string; + model: string; + sessionEntry?: Pick | undefined; +}): FastModeState { + const sessionOverride = normalizeFastMode(params.sessionEntry?.fastMode); + if (sessionOverride !== undefined) { + return { enabled: sessionOverride, source: "session" }; + } + + const configuredRaw = resolveConfiguredFastModeRaw(params); + const configured = normalizeFastMode(configuredRaw as string | boolean | null | undefined); + if (configured !== undefined) { + return { enabled: configured, source: "config" }; + } + + return { enabled: false, source: "default" }; +} diff --git a/src/agents/huggingface-models.ts b/src/agents/huggingface-models.ts index 7d3755adefb..0e7ae4270f7 100644 --- a/src/agents/huggingface-models.ts +++ b/src/agents/huggingface-models.ts @@ -1,5 +1,6 @@ import type { ModelDefinitionConfig } from "../config/types.models.js"; import { createSubsystemLogger } from "../logging/subsystem.js"; +import { isReasoningModelHeuristic } from "./ollama-models.js"; const log = createSubsystemLogger("huggingface-models"); @@ -125,7 +126,7 @@ export function buildHuggingfaceModelDefinition( */ function inferredMetaFromModelId(id: string): { name: string; reasoning: boolean } { const base = id.split("/").pop() ?? 
id; - const reasoning = /r1|reasoning|thinking|reason/i.test(id) || /-\d+[tb]?-thinking/i.test(base); + const reasoning = isReasoningModelHeuristic(id); const name = base.replace(/-/g, " ").replace(/\b(\w)/g, (c) => c.toUpperCase()); return { name, reasoning }; } diff --git a/src/agents/lanes.test.ts b/src/agents/lanes.test.ts new file mode 100644 index 00000000000..9538de70d26 --- /dev/null +++ b/src/agents/lanes.test.ts @@ -0,0 +1,18 @@ +import { describe, expect, it } from "vitest"; +import { AGENT_LANE_NESTED, resolveNestedAgentLane } from "./lanes.js"; + +describe("resolveNestedAgentLane", () => { + it("defaults to the nested lane when no lane is provided", () => { + expect(resolveNestedAgentLane()).toBe(AGENT_LANE_NESTED); + }); + + it("moves cron lane callers onto the nested lane", () => { + expect(resolveNestedAgentLane("cron")).toBe(AGENT_LANE_NESTED); + expect(resolveNestedAgentLane(" cron ")).toBe(AGENT_LANE_NESTED); + }); + + it("preserves non-cron lanes", () => { + expect(resolveNestedAgentLane("subagent")).toBe("subagent"); + expect(resolveNestedAgentLane(" custom-lane ")).toBe("custom-lane"); + }); +}); diff --git a/src/agents/lanes.ts b/src/agents/lanes.ts index 1688a4b8b9a..e9fa2217cf7 100644 --- a/src/agents/lanes.ts +++ b/src/agents/lanes.ts @@ -2,3 +2,13 @@ import { CommandLane } from "../process/lanes.js"; export const AGENT_LANE_NESTED = CommandLane.Nested; export const AGENT_LANE_SUBAGENT = CommandLane.Subagent; + +export function resolveNestedAgentLane(lane?: string): string { + const trimmed = lane?.trim(); + // Nested agent runs should not inherit the cron execution lane. Cron jobs + // already occupy that lane while they dispatch inner work. 
+ if (!trimmed || trimmed === "cron") { + return AGENT_LANE_NESTED; + } + return trimmed; +} diff --git a/src/agents/live-model-filter.ts b/src/agents/live-model-filter.ts index 03de7d772cc..059e12d9711 100644 --- a/src/agents/live-model-filter.ts +++ b/src/agents/live-model-filter.ts @@ -81,7 +81,7 @@ export function isModernModelRef(ref: ModelRef): boolean { return false; } - if (provider === "openrouter" || provider === "opencode") { + if (provider === "openrouter" || provider === "opencode" || provider === "opencode-go") { // OpenRouter/opencode are pass-through proxies; accept any model ID // rather than restricting to a static prefix list. return true; diff --git a/src/agents/memory-search.test.ts b/src/agents/memory-search.test.ts index 9372b4c7696..feb0054b302 100644 --- a/src/agents/memory-search.test.ts +++ b/src/agents/memory-search.test.ts @@ -29,6 +29,56 @@ describe("memory search config", () => { }); } + function expectEmptyMultimodalConfig(resolved: ReturnType) { + expect(resolved?.multimodal).toEqual({ + enabled: true, + modalities: [], + maxFileBytes: 10 * 1024 * 1024, + }); + } + + function configWithRemoteDefaults(remote: Record) { + return asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "openai", + remote, + }, + }, + list: [ + { + id: "main", + default: true, + memorySearch: { + remote: { + baseUrl: "https://agent.example/v1", + }, + }, + }, + ], + }, + }); + } + + function expectMergedRemoteConfig( + resolved: ReturnType, + apiKey: unknown, + ) { + expect(resolved?.remote).toEqual({ + baseUrl: "https://agent.example/v1", + apiKey, + headers: { "X-Default": "on" }, + batch: { + enabled: false, + wait: true, + concurrency: 2, + pollIntervalMs: 2000, + timeoutMinutes: 60, + }, + }); + } + it("returns null when disabled", () => { const cfg = asConfig({ agents: { @@ -131,6 +181,105 @@ describe("memory search config", () => { expect(resolved?.extraPaths).toEqual(["/shared/notes", "docs", "../team-notes"]); }); + it("normalizes 
multimodal settings", () => { + const cfg = asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + multimodal: { + enabled: true, + modalities: ["all"], + maxFileBytes: 8192, + }, + }, + }, + }, + }); + const resolved = resolveMemorySearchConfig(cfg, "main"); + expect(resolved?.multimodal).toEqual({ + enabled: true, + modalities: ["image", "audio"], + maxFileBytes: 8192, + }); + }); + + it("keeps an explicit empty multimodal modalities list empty", () => { + const cfg = asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + multimodal: { + enabled: true, + modalities: [], + }, + }, + }, + }, + }); + const resolved = resolveMemorySearchConfig(cfg, "main"); + expectEmptyMultimodalConfig(resolved); + expect(resolved?.provider).toBe("gemini"); + }); + + it("does not enforce multimodal provider validation when no modalities are active", () => { + const cfg = asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "openai", + model: "text-embedding-3-small", + fallback: "openai", + multimodal: { + enabled: true, + modalities: [], + }, + }, + }, + }, + }); + const resolved = resolveMemorySearchConfig(cfg, "main"); + expectEmptyMultimodalConfig(resolved); + }); + + it("rejects multimodal memory on unsupported providers", () => { + const cfg = asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "openai", + model: "text-embedding-3-small", + multimodal: { enabled: true, modalities: ["image"] }, + }, + }, + }, + }); + expect(() => resolveMemorySearchConfig(cfg, "main")).toThrow( + /memorySearch\.multimodal requires memorySearch\.provider = "gemini"/, + ); + }); + + it("rejects multimodal memory when fallback is configured", () => { + const cfg = asConfig({ + agents: { + defaults: { + memorySearch: { + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "openai", + multimodal: { enabled: true, modalities: 
["image"] }, + }, + }, + }, + }); + expect(() => resolveMemorySearchConfig(cfg, "main")).toThrow( + /memorySearch\.multimodal does not support memorySearch\.fallback/, + ); + }); + it("includes batch defaults for openai without remote overrides", () => { const cfg = configWithDefaultProvider("openai"); const resolved = resolveMemorySearchConfig(cfg, "main"); @@ -177,89 +326,32 @@ describe("memory search config", () => { expect(resolved?.sync.sessions).toEqual({ deltaBytes: 100000, deltaMessages: 50, + postCompactionForce: true, }); }); it("merges remote defaults with agent overrides", () => { - const cfg = asConfig({ - agents: { - defaults: { - memorySearch: { - provider: "openai", - remote: { - baseUrl: "https://default.example/v1", - apiKey: "default-key", // pragma: allowlist secret - headers: { "X-Default": "on" }, - }, - }, - }, - list: [ - { - id: "main", - default: true, - memorySearch: { - remote: { - baseUrl: "https://agent.example/v1", - }, - }, - }, - ], - }, - }); - const resolved = resolveMemorySearchConfig(cfg, "main"); - expect(resolved?.remote).toEqual({ - baseUrl: "https://agent.example/v1", + const cfg = configWithRemoteDefaults({ + baseUrl: "https://default.example/v1", apiKey: "default-key", // pragma: allowlist secret headers: { "X-Default": "on" }, - batch: { - enabled: false, - wait: true, - concurrency: 2, - pollIntervalMs: 2000, - timeoutMinutes: 60, - }, }); + const resolved = resolveMemorySearchConfig(cfg, "main"); + expectMergedRemoteConfig(resolved, "default-key"); // pragma: allowlist secret }); it("preserves SecretRef remote apiKey when merging defaults with agent overrides", () => { - const cfg = asConfig({ - agents: { - defaults: { - memorySearch: { - provider: "openai", - remote: { - apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret - headers: { "X-Default": "on" }, - }, - }, - }, - list: [ - { - id: "main", - default: true, - memorySearch: { - remote: { - baseUrl: 
"https://agent.example/v1", - }, - }, - }, - ], - }, + const cfg = configWithRemoteDefaults({ + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret + headers: { "X-Default": "on" }, }); const resolved = resolveMemorySearchConfig(cfg, "main"); - expect(resolved?.remote).toEqual({ - baseUrl: "https://agent.example/v1", - apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, - headers: { "X-Default": "on" }, - batch: { - enabled: false, - wait: true, - concurrency: 2, - pollIntervalMs: 2000, - timeoutMinutes: 60, - }, + expectMergedRemoteConfig(resolved, { + source: "env", + provider: "default", + id: "OPENAI_API_KEY", }); }); diff --git a/src/agents/memory-search.ts b/src/agents/memory-search.ts index e14fd5a0b3b..1cbc83b7781 100644 --- a/src/agents/memory-search.ts +++ b/src/agents/memory-search.ts @@ -3,6 +3,12 @@ import path from "node:path"; import type { OpenClawConfig, MemorySearchConfig } from "../config/config.js"; import { resolveStateDir } from "../config/paths.js"; import type { SecretInput } from "../config/types.secrets.js"; +import { + isMemoryMultimodalEnabled, + normalizeMemoryMultimodalSettings, + supportsMemoryMultimodalEmbeddings, + type MemoryMultimodalSettings, +} from "../memory/multimodal.js"; import { clampInt, clampNumber, resolveUserPath } from "../utils.js"; import { resolveAgentConfig } from "./agent-scope.js"; @@ -10,6 +16,7 @@ export type ResolvedMemorySearchConfig = { enabled: boolean; sources: Array<"memory" | "sessions">; extraPaths: string[]; + multimodal: MemoryMultimodalSettings; provider: "openai" | "local" | "gemini" | "voyage" | "mistral" | "ollama" | "auto"; remote?: { baseUrl?: string; @@ -28,6 +35,7 @@ export type ResolvedMemorySearchConfig = { }; fallback: "openai" | "gemini" | "local" | "voyage" | "mistral" | "ollama" | "none"; model: string; + outputDimensionality?: number; local: { modelPath?: string; modelCacheDir?: string; @@ -53,6 +61,7 @@ export type 
ResolvedMemorySearchConfig = { sessions: { deltaBytes: number; deltaMessages: number; + postCompactionForce: boolean; }; }; query: { @@ -193,6 +202,7 @@ function mergeConfig( ? DEFAULT_OLLAMA_MODEL : undefined; const model = overrides?.model ?? defaults?.model ?? modelDefault ?? ""; + const outputDimensionality = overrides?.outputDimensionality ?? defaults?.outputDimensionality; const local = { modelPath: overrides?.local?.modelPath ?? defaults?.local?.modelPath, modelCacheDir: overrides?.local?.modelCacheDir ?? defaults?.local?.modelCacheDir, @@ -202,6 +212,11 @@ function mergeConfig( .map((value) => value.trim()) .filter(Boolean); const extraPaths = Array.from(new Set(rawPaths)); + const multimodal = normalizeMemoryMultimodalSettings({ + enabled: overrides?.multimodal?.enabled ?? defaults?.multimodal?.enabled, + modalities: overrides?.multimodal?.modalities ?? defaults?.multimodal?.modalities, + maxFileBytes: overrides?.multimodal?.maxFileBytes ?? defaults?.multimodal?.maxFileBytes, + }); const vector = { enabled: overrides?.store?.vector?.enabled ?? defaults?.store?.vector?.enabled ?? true, extensionPath: @@ -234,6 +249,10 @@ function mergeConfig( overrides?.sync?.sessions?.deltaMessages ?? defaults?.sync?.sessions?.deltaMessages ?? DEFAULT_SESSION_DELTA_MESSAGES, + postCompactionForce: + overrides?.sync?.sessions?.postCompactionForce ?? + defaults?.sync?.sessions?.postCompactionForce ?? 
+ true, }, }; const query = { @@ -301,10 +320,12 @@ function mergeConfig( ); const deltaBytes = clampInt(sync.sessions.deltaBytes, 0, Number.MAX_SAFE_INTEGER); const deltaMessages = clampInt(sync.sessions.deltaMessages, 0, Number.MAX_SAFE_INTEGER); + const postCompactionForce = sync.sessions.postCompactionForce; return { enabled, sources, extraPaths, + multimodal, provider, remote, experimental: { @@ -312,6 +333,7 @@ function mergeConfig( }, fallback, model, + outputDimensionality, local, store, chunking: { tokens: Math.max(1, chunking.tokens), overlap }, @@ -320,6 +342,7 @@ function mergeConfig( sessions: { deltaBytes, deltaMessages, + postCompactionForce, }, }, query: { @@ -362,5 +385,22 @@ export function resolveMemorySearchConfig( if (!resolved.enabled) { return null; } + const multimodalActive = isMemoryMultimodalEnabled(resolved.multimodal); + if ( + multimodalActive && + !supportsMemoryMultimodalEmbeddings({ + provider: resolved.provider, + model: resolved.model, + }) + ) { + throw new Error( + 'agents.*.memorySearch.multimodal requires memorySearch.provider = "gemini" and model = "gemini-embedding-2-preview".', + ); + } + if (multimodalActive && resolved.fallback !== "none") { + throw new Error( + 'agents.*.memorySearch.multimodal does not support memorySearch.fallback. 
Set fallback to "none".', + ); + } return resolved; } diff --git a/src/agents/model-auth-env-vars.ts b/src/agents/model-auth-env-vars.ts index c366138207c..c9cb9159138 100644 --- a/src/agents/model-auth-env-vars.ts +++ b/src/agents/model-auth-env-vars.ts @@ -4,6 +4,7 @@ export const PROVIDER_ENV_API_KEY_CANDIDATES: Record = { chutes: ["CHUTES_OAUTH_TOKEN", "CHUTES_API_KEY"], zai: ["ZAI_API_KEY", "Z_AI_API_KEY"], opencode: ["OPENCODE_API_KEY", "OPENCODE_ZEN_API_KEY"], + "opencode-go": ["OPENCODE_API_KEY", "OPENCODE_ZEN_API_KEY"], "qwen-portal": ["QWEN_OAUTH_TOKEN", "QWEN_PORTAL_API_KEY"], volcengine: ["VOLCANO_ENGINE_API_KEY"], "volcengine-plan": ["VOLCANO_ENGINE_API_KEY"], @@ -32,7 +33,9 @@ export const PROVIDER_ENV_API_KEY_CANDIDATES: Record = { mistral: ["MISTRAL_API_KEY"], together: ["TOGETHER_API_KEY"], qianfan: ["QIANFAN_API_KEY"], + modelstudio: ["MODELSTUDIO_API_KEY"], ollama: ["OLLAMA_API_KEY"], + sglang: ["SGLANG_API_KEY"], vllm: ["VLLM_API_KEY"], kilocode: ["KILOCODE_API_KEY"], }; diff --git a/src/agents/model-auth-label.test.ts b/src/agents/model-auth-label.test.ts index a46eebbbc34..41afd4bb426 100644 --- a/src/agents/model-auth-label.test.ts +++ b/src/agents/model-auth-label.test.ts @@ -12,7 +12,7 @@ vi.mock("./auth-profiles.js", () => ({ })); vi.mock("./model-auth.js", () => ({ - getCustomProviderApiKey: () => undefined, + resolveUsableCustomProviderApiKey: () => null, resolveEnvApiKey: () => null, })); diff --git a/src/agents/model-auth-label.ts b/src/agents/model-auth-label.ts index ca564ab4dec..f28013c9825 100644 --- a/src/agents/model-auth-label.ts +++ b/src/agents/model-auth-label.ts @@ -5,7 +5,7 @@ import { resolveAuthProfileDisplayLabel, resolveAuthProfileOrder, } from "./auth-profiles.js"; -import { getCustomProviderApiKey, resolveEnvApiKey } from "./model-auth.js"; +import { resolveEnvApiKey, resolveUsableCustomProviderApiKey } from "./model-auth.js"; import { normalizeProviderId } from "./model-selection.js"; export function 
resolveModelAuthLabel(params: { @@ -59,7 +59,10 @@ export function resolveModelAuthLabel(params: { return `api-key (${envKey.source})`; } - const customKey = getCustomProviderApiKey(params.cfg, providerKey); + const customKey = resolveUsableCustomProviderApiKey({ + cfg: params.cfg, + provider: providerKey, + }); if (customKey) { return `api-key (models.json)`; } diff --git a/src/agents/model-auth-markers.test.ts b/src/agents/model-auth-markers.test.ts index e2225588df7..b90f1fd9ffa 100644 --- a/src/agents/model-auth-markers.test.ts +++ b/src/agents/model-auth-markers.test.ts @@ -1,6 +1,10 @@ import { describe, expect, it } from "vitest"; import { listKnownProviderEnvApiKeyNames } from "./model-auth-env-vars.js"; -import { isNonSecretApiKeyMarker, NON_ENV_SECRETREF_MARKER } from "./model-auth-markers.js"; +import { + isKnownEnvApiKeyMarker, + isNonSecretApiKeyMarker, + NON_ENV_SECRETREF_MARKER, +} from "./model-auth-markers.js"; describe("model auth markers", () => { it("recognizes explicit non-secret markers", () => { @@ -23,4 +27,9 @@ describe("model auth markers", () => { it("can exclude env marker-name interpretation for display-only paths", () => { expect(isNonSecretApiKeyMarker("OPENAI_API_KEY", { includeEnvVarName: false })).toBe(false); }); + + it("excludes aws-sdk env markers from known api key env marker helper", () => { + expect(isKnownEnvApiKeyMarker("OPENAI_API_KEY")).toBe(true); + expect(isKnownEnvApiKeyMarker("AWS_PROFILE")).toBe(false); + }); }); diff --git a/src/agents/model-auth-markers.ts b/src/agents/model-auth-markers.ts index 0b3b4960eb8..8a890d3a694 100644 --- a/src/agents/model-auth-markers.ts +++ b/src/agents/model-auth-markers.ts @@ -4,6 +4,7 @@ import { listKnownProviderEnvApiKeyNames } from "./model-auth-env-vars.js"; export const MINIMAX_OAUTH_MARKER = "minimax-oauth"; export const QWEN_OAUTH_MARKER = "qwen-oauth"; export const OLLAMA_LOCAL_AUTH_MARKER = "ollama-local"; +export const CUSTOM_LOCAL_AUTH_MARKER = "custom-local"; export 
const NON_ENV_SECRETREF_MARKER = "secretref-managed"; // pragma: allowlist secret export const SECRETREF_ENV_HEADER_MARKER_PREFIX = "secretref-env:"; // pragma: allowlist secret @@ -35,6 +36,11 @@ export function isAwsSdkAuthMarker(value: string): boolean { return AWS_SDK_ENV_MARKERS.has(value.trim()); } +export function isKnownEnvApiKeyMarker(value: string): boolean { + const trimmed = value.trim(); + return KNOWN_ENV_API_KEY_MARKERS.has(trimmed) && !isAwsSdkAuthMarker(trimmed); +} + export function resolveNonEnvSecretRefApiKeyMarker(_source: SecretRefSource): string { return NON_ENV_SECRETREF_MARKER; } @@ -66,6 +72,7 @@ export function isNonSecretApiKeyMarker( trimmed === MINIMAX_OAUTH_MARKER || trimmed === QWEN_OAUTH_MARKER || trimmed === OLLAMA_LOCAL_AUTH_MARKER || + trimmed === CUSTOM_LOCAL_AUTH_MARKER || trimmed === NON_ENV_SECRETREF_MARKER || isAwsSdkAuthMarker(trimmed); if (isKnownMarker) { diff --git a/src/agents/model-auth.profiles.test.ts b/src/agents/model-auth.profiles.test.ts index 5fabcf2dcc6..a1fc511aaf8 100644 --- a/src/agents/model-auth.profiles.test.ts +++ b/src/agents/model-auth.profiles.test.ts @@ -230,6 +230,21 @@ describe("getApiKeyForModel", () => { }); }); + it("resolves Model Studio API key from env", async () => { + await withEnvAsync( + { [envVar("MODELSTUDIO", "API", "KEY")]: "modelstudio-test-key" }, + async () => { + // pragma: allowlist secret + const resolved = await resolveApiKeyForProvider({ + provider: "modelstudio", + store: { version: 1, profiles: {} }, + }); + expect(resolved.apiKey).toBe("modelstudio-test-key"); + expect(resolved.source).toContain("MODELSTUDIO_API_KEY"); + }, + ); + }); + it("resolves synthetic local auth key for configured ollama provider without apiKey", async () => { await withEnvAsync({ OLLAMA_API_KEY: undefined }, async () => { const resolved = await resolveApiKeyForProvider({ @@ -397,4 +412,18 @@ describe("getApiKeyForModel", () => { }, ); }); + + it("resolveEnvApiKey('opencode-go') falls back to 
OPENCODE_ZEN_API_KEY", async () => { + await withEnvAsync( + { + OPENCODE_API_KEY: undefined, + OPENCODE_ZEN_API_KEY: "sk-opencode-zen-fallback", // pragma: allowlist secret + }, + async () => { + const resolved = resolveEnvApiKey("opencode-go"); + expect(resolved?.apiKey).toBe("sk-opencode-zen-fallback"); + expect(resolved?.source).toContain("OPENCODE_ZEN_API_KEY"); + }, + ); + }); }); diff --git a/src/agents/model-auth.test.ts b/src/agents/model-auth.test.ts index 943070960d3..de8f0f1b752 100644 --- a/src/agents/model-auth.test.ts +++ b/src/agents/model-auth.test.ts @@ -1,6 +1,16 @@ -import { describe, expect, it } from "vitest"; +import { streamSimpleOpenAICompletions, type Model } from "@mariozechner/pi-ai"; +import { afterEach, describe, expect, it, vi } from "vitest"; import type { AuthProfileStore } from "./auth-profiles.js"; -import { requireApiKey, resolveAwsSdkEnvVarName, resolveModelAuthMode } from "./model-auth.js"; +import { CUSTOM_LOCAL_AUTH_MARKER, NON_ENV_SECRETREF_MARKER } from "./model-auth-markers.js"; +import { + applyLocalNoAuthHeaderOverride, + hasUsableCustomProviderApiKey, + requireApiKey, + resolveApiKeyForProvider, + resolveAwsSdkEnvVarName, + resolveModelAuthMode, + resolveUsableCustomProviderApiKey, +} from "./model-auth.js"; describe("resolveAwsSdkEnvVarName", () => { it("prefers bearer token over access keys and profile", () => { @@ -117,3 +127,433 @@ describe("requireApiKey", () => { ).toThrow('No API key resolved for provider "openai"'); }); }); + +describe("resolveUsableCustomProviderApiKey", () => { + it("returns literal custom provider keys", () => { + const resolved = resolveUsableCustomProviderApiKey({ + cfg: { + models: { + providers: { + custom: { + baseUrl: "https://example.com/v1", + apiKey: "sk-custom-runtime", // pragma: allowlist secret + models: [], + }, + }, + }, + }, + provider: "custom", + }); + expect(resolved).toEqual({ + apiKey: "sk-custom-runtime", + source: "models.json", + }); + }); + + it("does not treat 
non-env markers as usable credentials", () => { + const resolved = resolveUsableCustomProviderApiKey({ + cfg: { + models: { + providers: { + custom: { + baseUrl: "https://example.com/v1", + apiKey: NON_ENV_SECRETREF_MARKER, + models: [], + }, + }, + }, + }, + provider: "custom", + }); + expect(resolved).toBeNull(); + }); + + it("resolves known env marker names from process env for custom providers", () => { + const previous = process.env.OPENAI_API_KEY; + process.env.OPENAI_API_KEY = "sk-from-env"; // pragma: allowlist secret + try { + const resolved = resolveUsableCustomProviderApiKey({ + cfg: { + models: { + providers: { + custom: { + baseUrl: "https://example.com/v1", + apiKey: "OPENAI_API_KEY", + models: [], + }, + }, + }, + }, + provider: "custom", + }); + expect(resolved?.apiKey).toBe("sk-from-env"); + expect(resolved?.source).toContain("OPENAI_API_KEY"); + } finally { + if (previous === undefined) { + delete process.env.OPENAI_API_KEY; + } else { + process.env.OPENAI_API_KEY = previous; + } + } + }); + + it("does not treat known env marker names as usable when env value is missing", () => { + const previous = process.env.OPENAI_API_KEY; + delete process.env.OPENAI_API_KEY; + try { + expect( + hasUsableCustomProviderApiKey( + { + models: { + providers: { + custom: { + baseUrl: "https://example.com/v1", + apiKey: "OPENAI_API_KEY", + models: [], + }, + }, + }, + }, + "custom", + ), + ).toBe(false); + } finally { + if (previous === undefined) { + delete process.env.OPENAI_API_KEY; + } else { + process.env.OPENAI_API_KEY = previous; + } + } + }); +}); + +describe("resolveApiKeyForProvider – synthetic local auth for custom providers", () => { + it("synthesizes a local auth marker for custom providers with a local baseUrl and no apiKey", async () => { + const auth = await resolveApiKeyForProvider({ + provider: "custom-127-0-0-1-8080", + cfg: { + models: { + providers: { + "custom-127-0-0-1-8080": { + baseUrl: "http://127.0.0.1:8080/v1", + api: "openai-completions", 
+ models: [ + { + id: "qwen-3.5", + name: "Qwen 3.5", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + }, + ], + }, + }, + }, + }, + }); + expect(auth.apiKey).toBe(CUSTOM_LOCAL_AUTH_MARKER); + expect(auth.source).toContain("synthetic local key"); + }); + + it("synthesizes a local auth marker for localhost custom providers", async () => { + const auth = await resolveApiKeyForProvider({ + provider: "my-local", + cfg: { + models: { + providers: { + "my-local": { + baseUrl: "http://localhost:11434/v1", + api: "openai-completions", + models: [ + { + id: "llama3", + name: "Llama 3", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + }, + ], + }, + }, + }, + }, + }); + expect(auth.apiKey).toBe(CUSTOM_LOCAL_AUTH_MARKER); + }); + + it("synthesizes a local auth marker for IPv6 loopback (::1)", async () => { + const auth = await resolveApiKeyForProvider({ + provider: "my-ipv6", + cfg: { + models: { + providers: { + "my-ipv6": { + baseUrl: "http://[::1]:8080/v1", + api: "openai-completions", + models: [ + { + id: "llama3", + name: "Llama 3", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + }, + ], + }, + }, + }, + }, + }); + expect(auth.apiKey).toBe(CUSTOM_LOCAL_AUTH_MARKER); + }); + + it("synthesizes a local auth marker for 0.0.0.0", async () => { + const auth = await resolveApiKeyForProvider({ + provider: "my-wildcard", + cfg: { + models: { + providers: { + "my-wildcard": { + baseUrl: "http://0.0.0.0:11434/v1", + api: "openai-completions", + models: [ + { + id: "qwen", + name: "Qwen", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + }, + ], + }, + }, + }, + }, + }); + 
expect(auth.apiKey).toBe(CUSTOM_LOCAL_AUTH_MARKER); + }); + + it("synthesizes a local auth marker for IPv4-mapped IPv6 (::ffff:127.0.0.1)", async () => { + const auth = await resolveApiKeyForProvider({ + provider: "my-mapped", + cfg: { + models: { + providers: { + "my-mapped": { + baseUrl: "http://[::ffff:127.0.0.1]:8080/v1", + api: "openai-completions", + models: [ + { + id: "llama3", + name: "Llama 3", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + }, + ], + }, + }, + }, + }, + }); + expect(auth.apiKey).toBe(CUSTOM_LOCAL_AUTH_MARKER); + }); + + it("does not synthesize auth for remote custom providers without apiKey", async () => { + await expect( + resolveApiKeyForProvider({ + provider: "my-remote", + cfg: { + models: { + providers: { + "my-remote": { + baseUrl: "https://api.example.com/v1", + api: "openai-completions", + models: [ + { + id: "gpt-5", + name: "GPT-5", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + }, + ], + }, + }, + }, + }, + }), + ).rejects.toThrow("No API key found"); + }); + + it("does not synthesize local auth when apiKey is explicitly configured but unresolved", async () => { + const previous = process.env.OPENAI_API_KEY; + delete process.env.OPENAI_API_KEY; + try { + await expect( + resolveApiKeyForProvider({ + provider: "custom", + cfg: { + models: { + providers: { + custom: { + baseUrl: "http://127.0.0.1:8080/v1", + api: "openai-completions", + apiKey: "OPENAI_API_KEY", + models: [ + { + id: "llama3", + name: "Llama 3", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + }, + ], + }, + }, + }, + }, + }), + ).rejects.toThrow('No API key found for provider "custom"'); + } finally { + if (previous === undefined) { + delete process.env.OPENAI_API_KEY; + } 
else { + process.env.OPENAI_API_KEY = previous; + } + } + }); + + it("does not synthesize local auth when auth mode explicitly requires oauth", async () => { + await expect( + resolveApiKeyForProvider({ + provider: "custom", + cfg: { + models: { + providers: { + custom: { + baseUrl: "http://127.0.0.1:8080/v1", + api: "openai-completions", + auth: "oauth", + models: [ + { + id: "llama3", + name: "Llama 3", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + }, + ], + }, + }, + }, + }, + }), + ).rejects.toThrow('No API key found for provider "custom"'); + }); + + it("keeps built-in aws-sdk fallback for local baseUrl overrides", async () => { + const auth = await resolveApiKeyForProvider({ + provider: "amazon-bedrock", + cfg: { + models: { + providers: { + "amazon-bedrock": { + baseUrl: "http://127.0.0.1:8080/v1", + models: [], + }, + }, + }, + }, + }); + + expect(auth.mode).toBe("aws-sdk"); + expect(auth.apiKey).toBeUndefined(); + }); +}); + +describe("applyLocalNoAuthHeaderOverride", () => { + const originalFetch = globalThis.fetch; + + afterEach(() => { + globalThis.fetch = originalFetch; + vi.restoreAllMocks(); + }); + + it("clears Authorization for synthetic local OpenAI-compatible auth markers", async () => { + let capturedAuthorization: string | null | undefined; + let capturedXTest: string | null | undefined; + let resolveRequest: (() => void) | undefined; + const requestSeen = new Promise((resolve) => { + resolveRequest = resolve; + }); + globalThis.fetch = vi.fn(async (_input, init) => { + const headers = new Headers(init?.headers); + capturedAuthorization = headers.get("Authorization"); + capturedXTest = headers.get("X-Test"); + resolveRequest?.(); + return new Response(JSON.stringify({ error: { message: "unauthorized" } }), { + status: 401, + headers: { "content-type": "application/json" }, + }); + }) as typeof fetch; + + const model = 
applyLocalNoAuthHeaderOverride( + { + id: "local-llm", + name: "local-llm", + api: "openai-completions", + provider: "custom", + baseUrl: "http://127.0.0.1:8080/v1", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 4096, + headers: { "X-Test": "1" }, + } as Model<"openai-completions">, + { + apiKey: CUSTOM_LOCAL_AUTH_MARKER, + source: "models.providers.custom (synthetic local key)", + mode: "api-key", + }, + ); + + streamSimpleOpenAICompletions( + model, + { + messages: [ + { + role: "user", + content: "hello", + timestamp: Date.now(), + }, + ], + }, + { + apiKey: CUSTOM_LOCAL_AUTH_MARKER, + }, + ); + + await requestSeen; + + expect(capturedAuthorization).toBeNull(); + expect(capturedXTest).toBe("1"); + }); +}); diff --git a/src/agents/model-auth.ts b/src/agents/model-auth.ts index 51ba332ed7f..fb3abd1571e 100644 --- a/src/agents/model-auth.ts +++ b/src/agents/model-auth.ts @@ -3,7 +3,9 @@ import { type Api, getEnvApiKey, type Model } from "@mariozechner/pi-ai"; import { formatCliCommand } from "../cli/command-format.js"; import type { OpenClawConfig } from "../config/config.js"; import type { ModelProviderAuthMode, ModelProviderConfig } from "../config/types.js"; +import { coerceSecretRef } from "../config/types.secrets.js"; import { getShellEnvAppliedKeys } from "../infra/shell-env.js"; +import { createSubsystemLogger } from "../logging/subsystem.js"; import { normalizeOptionalSecretInput, normalizeSecretInput, @@ -17,11 +19,18 @@ import { resolveAuthStorePathForDisplay, } from "./auth-profiles.js"; import { PROVIDER_ENV_API_KEY_CANDIDATES } from "./model-auth-env-vars.js"; -import { OLLAMA_LOCAL_AUTH_MARKER } from "./model-auth-markers.js"; +import { + CUSTOM_LOCAL_AUTH_MARKER, + isKnownEnvApiKeyMarker, + isNonSecretApiKeyMarker, + OLLAMA_LOCAL_AUTH_MARKER, +} from "./model-auth-markers.js"; import { normalizeProviderId } from "./model-selection.js"; export { 
ensureAuthProfileStore, resolveAuthProfileOrder } from "./auth-profiles.js"; +const log = createSubsystemLogger("model-auth"); + const AWS_BEARER_ENV = "AWS_BEARER_TOKEN_BEDROCK"; const AWS_ACCESS_KEY_ENV = "AWS_ACCESS_KEY_ID"; const AWS_SECRET_KEY_ENV = "AWS_SECRET_ACCESS_KEY"; @@ -57,6 +66,49 @@ export function getCustomProviderApiKey( return normalizeOptionalSecretInput(entry?.apiKey); } +type ResolvedCustomProviderApiKey = { + apiKey: string; + source: string; +}; + +export function resolveUsableCustomProviderApiKey(params: { + cfg: OpenClawConfig | undefined; + provider: string; + env?: NodeJS.ProcessEnv; +}): ResolvedCustomProviderApiKey | null { + const customKey = getCustomProviderApiKey(params.cfg, params.provider); + if (!customKey) { + return null; + } + if (!isNonSecretApiKeyMarker(customKey)) { + return { apiKey: customKey, source: "models.json" }; + } + if (!isKnownEnvApiKeyMarker(customKey)) { + return null; + } + const envValue = normalizeOptionalSecretInput((params.env ?? 
process.env)[customKey]); + if (!envValue) { + return null; + } + const applied = new Set(getShellEnvAppliedKeys()); + return { + apiKey: envValue, + source: resolveEnvSourceLabel({ + applied, + envVars: [customKey], + label: `${customKey} (models.json marker)`, + }), + }; +} + +export function hasUsableCustomProviderApiKey( + cfg: OpenClawConfig | undefined, + provider: string, + env?: NodeJS.ProcessEnv, +): boolean { + return Boolean(resolveUsableCustomProviderApiKey({ cfg, provider, env })); +} + function resolveProviderAuthOverride( cfg: OpenClawConfig | undefined, provider: string, @@ -69,15 +121,44 @@ function resolveProviderAuthOverride( return undefined; } +function isLocalBaseUrl(baseUrl: string): boolean { + try { + const host = new URL(baseUrl).hostname.toLowerCase(); + return ( + host === "localhost" || + host === "127.0.0.1" || + host === "0.0.0.0" || + host === "[::1]" || + host === "[::ffff:7f00:1]" || + host === "[::ffff:127.0.0.1]" + ); + } catch { + return false; + } +} + +function hasExplicitProviderApiKeyConfig(providerConfig: ModelProviderConfig): boolean { + return ( + normalizeOptionalSecretInput(providerConfig.apiKey) !== undefined || + coerceSecretRef(providerConfig.apiKey) !== null + ); +} + +function isCustomLocalProviderConfig(providerConfig: ModelProviderConfig): boolean { + return ( + typeof providerConfig.baseUrl === "string" && + providerConfig.baseUrl.trim().length > 0 && + typeof providerConfig.api === "string" && + providerConfig.api.trim().length > 0 && + Array.isArray(providerConfig.models) && + providerConfig.models.length > 0 + ); +} + function resolveSyntheticLocalProviderAuth(params: { cfg: OpenClawConfig | undefined; provider: string; }): ResolvedProviderAuth | null { - const normalizedProvider = normalizeProviderId(params.provider); - if (normalizedProvider !== "ollama") { - return null; - } - const providerConfig = resolveProviderConfig(params.cfg, params.provider); if (!providerConfig) { return null; @@ -91,11 +172,38 @@ 
function resolveSyntheticLocalProviderAuth(params: { return null; } - return { - apiKey: OLLAMA_LOCAL_AUTH_MARKER, - source: "models.providers.ollama (synthetic local key)", - mode: "api-key", - }; + const normalizedProvider = normalizeProviderId(params.provider); + if (normalizedProvider === "ollama") { + return { + apiKey: OLLAMA_LOCAL_AUTH_MARKER, + source: "models.providers.ollama (synthetic local key)", + mode: "api-key", + }; + } + + const authOverride = resolveProviderAuthOverride(params.cfg, params.provider); + if (authOverride && authOverride !== "api-key") { + return null; + } + if (!isCustomLocalProviderConfig(providerConfig)) { + return null; + } + if (hasExplicitProviderApiKeyConfig(providerConfig)) { + return null; + } + + // Custom providers pointing at a local server (e.g. llama.cpp, vLLM, LocalAI) + // typically don't require auth. Synthesize a local key so the auth resolver + // doesn't reject them when the user left the API key blank during onboarding. + if (providerConfig.baseUrl && isLocalBaseUrl(providerConfig.baseUrl)) { + return { + apiKey: CUSTOM_LOCAL_AUTH_MARKER, + source: `models.providers.${params.provider} (synthetic local key)`, + mode: "api-key", + }; + } + + return null; } function resolveEnvSourceLabel(params: { @@ -221,7 +329,9 @@ export async function resolveApiKeyForProvider(params: { mode: mode === "oauth" ? "oauth" : mode === "token" ? 
"token" : "api-key", }; } - } catch {} + } catch (err) { + log.debug?.(`auth profile "${candidate}" failed for provider "${provider}": ${String(err)}`); + } } const envResolved = resolveEnvApiKey(provider); @@ -233,9 +343,9 @@ export async function resolveApiKeyForProvider(params: { }; } - const customKey = getCustomProviderApiKey(cfg, provider); + const customKey = resolveUsableCustomProviderApiKey({ cfg, provider }); if (customKey) { - return { apiKey: customKey, source: "models.json", mode: "api-key" }; + return { apiKey: customKey.apiKey, source: customKey.source, mode: "api-key" }; } const syntheticLocalAuth = resolveSyntheticLocalProviderAuth({ cfg, provider }); @@ -355,7 +465,7 @@ export function resolveModelAuthMode( return envKey.source.includes("OAUTH_TOKEN") ? "oauth" : "api-key"; } - if (getCustomProviderApiKey(cfg, resolved)) { + if (hasUsableCustomProviderApiKey(cfg, resolved)) { return "api-key"; } @@ -387,3 +497,25 @@ export function requireApiKey(auth: ResolvedProviderAuth, provider: string): str } throw new Error(`No API key resolved for provider "${provider}" (auth mode: ${auth.mode}).`); } + +export function applyLocalNoAuthHeaderOverride>( + model: T, + auth: ResolvedProviderAuth | null | undefined, +): T { + if (auth?.apiKey !== CUSTOM_LOCAL_AUTH_MARKER || model.api !== "openai-completions") { + return model; + } + + // OpenAI's SDK always generates Authorization from apiKey. Keep the non-secret + // placeholder so construction succeeds, then clear the header at request build + // time for local servers that intentionally do not require auth. 
+ const headers = { + ...model.headers, + Authorization: null, + } as unknown as Record; + + return { + ...model, + headers, + }; +} diff --git a/src/agents/model-catalog.test.ts b/src/agents/model-catalog.test.ts index b891af4ed2d..cf7d6e444f2 100644 --- a/src/agents/model-catalog.test.ts +++ b/src/agents/model-catalog.test.ts @@ -114,6 +114,55 @@ describe("loadModelCatalog", () => { expect(spark?.reasoning).toBe(true); }); + it("filters stale openai gpt-5.3-codex-spark built-ins from the catalog", async () => { + mockPiDiscoveryModels([ + { + id: "gpt-5.3-codex-spark", + provider: "openai", + name: "GPT-5.3 Codex Spark", + reasoning: true, + contextWindow: 128000, + input: ["text", "image"], + }, + { + id: "gpt-5.3-codex-spark", + provider: "azure-openai-responses", + name: "GPT-5.3 Codex Spark", + reasoning: true, + contextWindow: 128000, + input: ["text", "image"], + }, + { + id: "gpt-5.3-codex-spark", + provider: "openai-codex", + name: "GPT-5.3 Codex Spark", + reasoning: true, + contextWindow: 128000, + input: ["text"], + }, + ]); + + const result = await loadModelCatalog({ config: {} as OpenClawConfig }); + expect(result).not.toContainEqual( + expect.objectContaining({ + provider: "openai", + id: "gpt-5.3-codex-spark", + }), + ); + expect(result).not.toContainEqual( + expect.objectContaining({ + provider: "azure-openai-responses", + id: "gpt-5.3-codex-spark", + }), + ); + expect(result).toContainEqual( + expect.objectContaining({ + provider: "openai-codex", + id: "gpt-5.3-codex-spark", + }), + ); + }); + it("adds gpt-5.4 forward-compat catalog entries when template models exist", async () => { mockPiDiscoveryModels([ { diff --git a/src/agents/model-catalog.ts b/src/agents/model-catalog.ts index 06423b0604b..6f66e85c49c 100644 --- a/src/agents/model-catalog.ts +++ b/src/agents/model-catalog.ts @@ -1,6 +1,7 @@ import { type OpenClawConfig, loadConfig } from "../config/config.js"; import { createSubsystemLogger } from "../logging/subsystem.js"; import { 
resolveOpenClawAgentDir } from "./agent-paths.js"; +import { shouldSuppressBuiltInModel } from "./model-suppression.js"; import { ensureOpenClawModelsJson } from "./models-config.js"; const log = createSubsystemLogger("model-catalog"); @@ -29,7 +30,7 @@ type PiSdkModule = typeof import("./pi-model-discovery.js"); let modelCatalogPromise: Promise | null = null; let hasLoggedModelCatalogError = false; -const defaultImportPiSdk = () => import("./pi-model-discovery.js"); +const defaultImportPiSdk = () => import("./pi-model-discovery-runtime.js"); let importPiSdk = defaultImportPiSdk; const CODEX_PROVIDER = "openai-codex"; @@ -242,6 +243,9 @@ export async function loadModelCatalog(params?: { if (!provider) { continue; } + if (shouldSuppressBuiltInModel({ provider, id })) { + continue; + } const name = String(entry?.name ?? id).trim() || id; const contextWindow = typeof entry?.contextWindow === "number" && entry.contextWindow > 0 diff --git a/src/agents/model-compat.test.ts b/src/agents/model-compat.test.ts index 3c1894bb390..56b9c16203c 100644 --- a/src/agents/model-compat.test.ts +++ b/src/agents/model-compat.test.ts @@ -251,7 +251,7 @@ describe("normalizeModelCompat", () => { }); }); - it("overrides explicit supportsDeveloperRole true on non-native endpoints", () => { + it("respects explicit supportsDeveloperRole true on non-native endpoints", () => { const model = { ...baseModel(), provider: "custom-cpa", @@ -259,10 +259,10 @@ describe("normalizeModelCompat", () => { compat: { supportsDeveloperRole: true }, }; const normalized = normalizeModelCompat(model); - expect(supportsDeveloperRole(normalized)).toBe(false); + expect(supportsDeveloperRole(normalized)).toBe(true); }); - it("overrides explicit supportsUsageInStreaming true on non-native endpoints", () => { + it("respects explicit supportsUsageInStreaming true on non-native endpoints", () => { const model = { ...baseModel(), provider: "custom-cpa", @@ -270,6 +270,18 @@ describe("normalizeModelCompat", () => { 
compat: { supportsUsageInStreaming: true }, }; const normalized = normalizeModelCompat(model); + expect(supportsUsageInStreaming(normalized)).toBe(true); + }); + + it("still forces flags off when not explicitly set by user", () => { + const model = { + ...baseModel(), + provider: "custom-cpa", + baseUrl: "https://proxy.example.com/v1", + }; + delete (model as { compat?: unknown }).compat; + const normalized = normalizeModelCompat(model); + expect(supportsDeveloperRole(normalized)).toBe(false); expect(supportsUsageInStreaming(normalized)).toBe(false); }); @@ -313,6 +325,12 @@ describe("isModernModelRef", () => { expect(isModernModelRef({ provider: "opencode", id: "claude-opus-4-6" })).toBe(true); expect(isModernModelRef({ provider: "opencode", id: "gemini-3-pro" })).toBe(true); }); + + it("accepts all opencode-go models without zen exclusions", () => { + expect(isModernModelRef({ provider: "opencode-go", id: "kimi-k2.5" })).toBe(true); + expect(isModernModelRef({ provider: "opencode-go", id: "glm-5" })).toBe(true); + expect(isModernModelRef({ provider: "opencode-go", id: "minimax-m2.5" })).toBe(true); + }); }); describe("resolveForwardCompatModel", () => { diff --git a/src/agents/model-compat.ts b/src/agents/model-compat.ts index 7bad084fe57..72deb0c655f 100644 --- a/src/agents/model-compat.ts +++ b/src/agents/model-compat.ts @@ -55,17 +55,22 @@ export function normalizeModelCompat(model: Model): Model { // The `developer` role and stream usage chunks are OpenAI-native behaviors. // Many OpenAI-compatible backends reject `developer` and/or emit usage-only // chunks that break strict parsers expecting choices[0]. For non-native - // openai-completions endpoints, force both compat flags off. + // openai-completions endpoints, force both compat flags off — unless the + // user has explicitly opted in via their model config. const compat = model.compat ?? 
undefined; // When baseUrl is empty the pi-ai library defaults to api.openai.com, so // leave compat unchanged and let default native behavior apply. - // Note: explicit true values are intentionally overridden for non-native - // endpoints for safety. const needsForce = baseUrl ? !isOpenAINativeEndpoint(baseUrl) : false; if (!needsForce) { return model; } - if (compat?.supportsDeveloperRole === false && compat?.supportsUsageInStreaming === false) { + + // Respect explicit user overrides: if the user has set a compat flag to + // true in their model definition, they know their endpoint supports it. + const forcedDeveloperRole = compat?.supportsDeveloperRole === true; + const forcedUsageStreaming = compat?.supportsUsageInStreaming === true; + + if (forcedDeveloperRole && forcedUsageStreaming) { return model; } @@ -73,7 +78,11 @@ export function normalizeModelCompat(model: Model): Model { return { ...model, compat: compat - ? { ...compat, supportsDeveloperRole: false, supportsUsageInStreaming: false } + ? 
{ + ...compat, + supportsDeveloperRole: forcedDeveloperRole || false, + supportsUsageInStreaming: forcedUsageStreaming || false, + } : { supportsDeveloperRole: false, supportsUsageInStreaming: false }, } as typeof model; } diff --git a/src/agents/model-fallback-observation.ts b/src/agents/model-fallback-observation.ts new file mode 100644 index 00000000000..450e047c7d7 --- /dev/null +++ b/src/agents/model-fallback-observation.ts @@ -0,0 +1,93 @@ +import { createSubsystemLogger } from "../logging/subsystem.js"; +import { sanitizeForLog } from "../terminal/ansi.js"; +import type { FallbackAttempt, ModelCandidate } from "./model-fallback.types.js"; +import { buildTextObservationFields } from "./pi-embedded-error-observation.js"; +import type { FailoverReason } from "./pi-embedded-helpers.js"; + +const decisionLog = createSubsystemLogger("model-fallback").child("decision"); + +function buildErrorObservationFields(error?: string): { + errorPreview?: string; + errorHash?: string; + errorFingerprint?: string; + httpCode?: string; + providerErrorType?: string; + providerErrorMessagePreview?: string; + requestIdHash?: string; +} { + const observed = buildTextObservationFields(error); + return { + errorPreview: observed.textPreview, + errorHash: observed.textHash, + errorFingerprint: observed.textFingerprint, + httpCode: observed.httpCode, + providerErrorType: observed.providerErrorType, + providerErrorMessagePreview: observed.providerErrorMessagePreview, + requestIdHash: observed.requestIdHash, + }; +} + +export function logModelFallbackDecision(params: { + decision: + | "skip_candidate" + | "probe_cooldown_candidate" + | "candidate_failed" + | "candidate_succeeded"; + runId?: string; + requestedProvider: string; + requestedModel: string; + candidate: ModelCandidate; + attempt?: number; + total?: number; + reason?: FailoverReason | null; + status?: number; + code?: string; + error?: string; + nextCandidate?: ModelCandidate; + isPrimary?: boolean; + requestedModelMatched?: 
boolean; + fallbackConfigured?: boolean; + allowTransientCooldownProbe?: boolean; + profileCount?: number; + previousAttempts?: FallbackAttempt[]; +}): void { + const nextText = params.nextCandidate + ? `${sanitizeForLog(params.nextCandidate.provider)}/${sanitizeForLog(params.nextCandidate.model)}` + : "none"; + const reasonText = params.reason ?? "unknown"; + const observedError = buildErrorObservationFields(params.error); + decisionLog.warn("model fallback decision", { + event: "model_fallback_decision", + tags: ["error_handling", "model_fallback", params.decision], + runId: params.runId, + decision: params.decision, + requestedProvider: params.requestedProvider, + requestedModel: params.requestedModel, + candidateProvider: params.candidate.provider, + candidateModel: params.candidate.model, + attempt: params.attempt, + total: params.total, + reason: params.reason, + status: params.status, + code: params.code, + ...observedError, + nextCandidateProvider: params.nextCandidate?.provider, + nextCandidateModel: params.nextCandidate?.model, + isPrimary: params.isPrimary, + requestedModelMatched: params.requestedModelMatched, + fallbackConfigured: params.fallbackConfigured, + allowTransientCooldownProbe: params.allowTransientCooldownProbe, + profileCount: params.profileCount, + previousAttempts: params.previousAttempts?.map((attempt) => ({ + provider: attempt.provider, + model: attempt.model, + reason: attempt.reason, + status: attempt.status, + code: attempt.code, + ...buildErrorObservationFields(attempt.error), + })), + consoleMessage: + `model fallback decision: decision=${params.decision} requested=${sanitizeForLog(params.requestedProvider)}/${sanitizeForLog(params.requestedModel)} ` + + `candidate=${sanitizeForLog(params.candidate.provider)}/${sanitizeForLog(params.candidate.model)} reason=${reasonText} next=${nextText}`, + }); +} diff --git a/src/agents/model-fallback.probe.test.ts b/src/agents/model-fallback.probe.test.ts index 01bcb2dc3a8..e80c3e3edd4 100644 
--- a/src/agents/model-fallback.probe.test.ts +++ b/src/agents/model-fallback.probe.test.ts @@ -1,5 +1,8 @@ +import os from "node:os"; +import path from "node:path"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; +import { registerLogTransport, resetLogger, setLoggerOverride } from "../logging/logger.js"; import type { AuthProfileStore } from "./auth-profiles.js"; import { makeModelFallbackCfg } from "./test-helpers/model-fallback-config-fixture.js"; @@ -28,6 +31,7 @@ const mockedResolveProfilesUnavailableReason = vi.mocked(resolveProfilesUnavaila const mockedResolveAuthProfileOrder = vi.mocked(resolveAuthProfileOrder); const makeCfg = makeModelFallbackCfg; +let unregisterLogTransport: (() => void) | undefined; function expectFallbackUsed( result: { result: unknown; attempts: Array<{ reason?: string }> }, @@ -42,6 +46,20 @@ function expectFallbackUsed( expect(result.attempts[0]?.reason).toBe("rate_limit"); } +function expectPrimarySkippedForReason( + result: { result: unknown; attempts: Array<{ reason?: string }> }, + run: { + (...args: unknown[]): unknown; + mock: { calls: unknown[][] }; + }, + reason: string, +) { + expect(result.result).toBe("ok"); + expect(run).toHaveBeenCalledTimes(1); + expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5"); + expect(result.attempts[0]?.reason).toBe(reason); +} + function expectPrimaryProbeSuccess( result: { result: unknown }, run: { @@ -149,6 +167,10 @@ describe("runWithModelFallback – probe logic", () => { afterEach(() => { Date.now = realDateNow; + unregisterLogTransport?.(); + unregisterLogTransport = undefined; + setLoggerOverride(null); + resetLogger(); vi.restoreAllMocks(); }); @@ -175,11 +197,7 @@ describe("runWithModelFallback – probe logic", () => { const run = vi.fn().mockResolvedValue("ok"); const result = await runPrimaryCandidate(cfg, run); - - expect(result.result).toBe("ok"); - expect(run).toHaveBeenCalledTimes(1); 
- expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5"); - expect(result.attempts[0]?.reason).toBe("billing"); + expectPrimarySkippedForReason(result, run, "billing"); }); it("probes primary model when within 2-min margin of cooldown expiry", async () => { @@ -194,6 +212,99 @@ describe("runWithModelFallback – probe logic", () => { expectPrimaryProbeSuccess(result, run, "probed-ok"); }); + it("logs primary metadata on probe success and failure fallback decisions", async () => { + const cfg = makeCfg(); + const records: Array<Record<string, unknown>> = []; + mockedGetSoonestCooldownExpiry.mockReturnValue(NOW + 60 * 1000); + setLoggerOverride({ + level: "trace", + consoleLevel: "silent", + file: path.join(os.tmpdir(), `openclaw-model-fallback-probe-${Date.now()}.log`), + }); + unregisterLogTransport = registerLogTransport((record) => { + records.push(record); + }); + + const run = vi.fn().mockResolvedValue("probed-ok"); + + const result = await runPrimaryCandidate(cfg, run); + + expectPrimaryProbeSuccess(result, run, "probed-ok"); + + _probeThrottleInternals.lastProbeAttempt.clear(); + + const fallbackCfg = makeCfg({ + agents: { + defaults: { + model: { + primary: "openai/gpt-4.1-mini", + fallbacks: ["anthropic/claude-haiku-3-5", "google/gemini-2-flash"], + }, + }, + }, + } as Partial<OpenClawConfig>); + mockedGetSoonestCooldownExpiry.mockReturnValue(NOW + 60 * 1000); + const fallbackRun = vi + .fn() + .mockRejectedValueOnce(Object.assign(new Error("rate limited"), { status: 429 })) + .mockResolvedValueOnce("fallback-ok"); + + const fallbackResult = await runPrimaryCandidate(fallbackCfg, fallbackRun); + + expect(fallbackResult.result).toBe("fallback-ok"); + expect(fallbackRun).toHaveBeenNthCalledWith(1, "openai", "gpt-4.1-mini", { + allowTransientCooldownProbe: true, + }); + expect(fallbackRun).toHaveBeenNthCalledWith(2, "anthropic", "claude-haiku-3-5"); + + const decisionPayloads = records + .filter( + (record) => + record["2"] === "model fallback decision" && + record["1"] && + typeof 
record["1"] === "object", + ) + .map((record) => record["1"] as Record<string, unknown>); + + expect(decisionPayloads).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + event: "model_fallback_decision", + decision: "probe_cooldown_candidate", + candidateProvider: "openai", + candidateModel: "gpt-4.1-mini", + allowTransientCooldownProbe: true, + }), + expect.objectContaining({ + event: "model_fallback_decision", + decision: "candidate_succeeded", + candidateProvider: "openai", + candidateModel: "gpt-4.1-mini", + isPrimary: true, + requestedModelMatched: true, + }), + expect.objectContaining({ + event: "model_fallback_decision", + decision: "candidate_failed", + candidateProvider: "openai", + candidateModel: "gpt-4.1-mini", + isPrimary: true, + requestedModelMatched: true, + nextCandidateProvider: "anthropic", + nextCandidateModel: "claude-haiku-3-5", + }), + expect.objectContaining({ + event: "model_fallback_decision", + decision: "candidate_succeeded", + candidateProvider: "anthropic", + candidateModel: "claude-haiku-3-5", + isPrimary: false, + requestedModelMatched: false, + }), + ]), + ); + }); + it("probes primary model when cooldown already expired", async () => { const cfg = makeCfg(); // Cooldown expired 5 min ago @@ -220,6 +331,77 @@ describe("runWithModelFallback – probe logic", () => { }); }); + it("keeps walking remaining fallbacks after an abort-wrapped RESOURCE_EXHAUSTED probe failure", async () => { + const cfg = makeCfg({ + agents: { + defaults: { + model: { + primary: "google/gemini-3-flash-preview", + fallbacks: ["anthropic/claude-haiku-3-5", "deepseek/deepseek-chat"], + }, + }, + }, + } as Partial<OpenClawConfig>); + + mockedResolveAuthProfileOrder.mockImplementation(({ provider }: { provider: string }) => { + if (provider === "google") { + return ["google-profile-1"]; + } + if (provider === "anthropic") { + return ["anthropic-profile-1"]; + } + if (provider === "deepseek") { + return ["deepseek-profile-1"]; + } + return []; + }); + 
mockedIsProfileInCooldown.mockImplementation((_store, profileId: string) => + profileId.startsWith("google"), + ); + mockedGetSoonestCooldownExpiry.mockReturnValue(NOW + 30 * 1000); + mockedResolveProfilesUnavailableReason.mockReturnValue("rate_limit"); + + // Simulate Google Vertex abort-wrapped RESOURCE_EXHAUSTED (the shape that was + // previously swallowed by shouldRethrowAbort before the fallback loop could continue) + const primaryAbort = Object.assign(new Error("request aborted"), { + name: "AbortError", + cause: { + error: { + code: 429, + message: "Resource has been exhausted (e.g. check quota).", + status: "RESOURCE_EXHAUSTED", + }, + }, + }); + const run = vi + .fn() + .mockRejectedValueOnce(primaryAbort) + .mockRejectedValueOnce( + Object.assign(new Error("fallback still rate limited"), { status: 429 }), + ) + .mockRejectedValueOnce( + Object.assign(new Error("final fallback still rate limited"), { status: 429 }), + ); + + await expect( + runWithModelFallback({ + cfg, + provider: "google", + model: "gemini-3-flash-preview", + run, + }), + ).rejects.toThrow(/All models failed \(3\)/); + + // All three candidates must be attempted — the abort must not short-circuit + expect(run).toHaveBeenCalledTimes(3); + + expect(run).toHaveBeenNthCalledWith(1, "google", "gemini-3-flash-preview", { + allowTransientCooldownProbe: true, + }); + expect(run).toHaveBeenNthCalledWith(2, "anthropic", "claude-haiku-3-5"); + expect(run).toHaveBeenNthCalledWith(3, "deepseek", "deepseek-chat"); + }); + it("throttles probe when called within 30s interval", async () => { const cfg = makeCfg(); // Cooldown just about to expire (within probe margin) @@ -251,6 +433,36 @@ describe("runWithModelFallback – probe logic", () => { expectPrimaryProbeSuccess(result, run, "probed-ok"); }); + it("prunes stale probe throttle entries before checking eligibility", () => { + _probeThrottleInternals.lastProbeAttempt.set( + "stale", + NOW - _probeThrottleInternals.PROBE_STATE_TTL_MS - 1, + ); + 
_probeThrottleInternals.lastProbeAttempt.set("fresh", NOW - 5_000); + + expect(_probeThrottleInternals.lastProbeAttempt.has("stale")).toBe(true); + + expect(_probeThrottleInternals.isProbeThrottleOpen(NOW, "fresh")).toBe(false); + + expect(_probeThrottleInternals.lastProbeAttempt.has("stale")).toBe(false); + expect(_probeThrottleInternals.lastProbeAttempt.has("fresh")).toBe(true); + }); + + it("caps probe throttle state by evicting the oldest entries", () => { + for (let i = 0; i < _probeThrottleInternals.MAX_PROBE_KEYS; i += 1) { + _probeThrottleInternals.lastProbeAttempt.set(`key-${i}`, NOW - (i + 1)); + } + + _probeThrottleInternals.markProbeAttempt(NOW, "freshest"); + + expect(_probeThrottleInternals.lastProbeAttempt.size).toBe( + _probeThrottleInternals.MAX_PROBE_KEYS, + ); + expect(_probeThrottleInternals.lastProbeAttempt.has("freshest")).toBe(true); + expect(_probeThrottleInternals.lastProbeAttempt.has("key-255")).toBe(false); + expect(_probeThrottleInternals.lastProbeAttempt.has("key-0")).toBe(true); + }); + it("handles non-finite soonest safely (treats as probe-worthy)", async () => { const cfg = makeCfg(); @@ -346,7 +558,7 @@ describe("runWithModelFallback – probe logic", () => { }); }); - it("skips billing-cooldowned primary when no fallback candidates exist", async () => { + it("probes billing-cooldowned primary when no fallback candidates exist", async () => { const cfg = makeCfg({ agents: { defaults: { @@ -358,20 +570,28 @@ describe("runWithModelFallback – probe logic", () => { }, } as Partial); - // Billing cooldown far from expiry — would normally be skipped + // Single-provider setups need periodic probes even when the billing + // cooldown is far from expiry, otherwise topping up credits never recovers + // without a restart. 
const expiresIn30Min = NOW + 30 * 60 * 1000; mockedGetSoonestCooldownExpiry.mockReturnValue(expiresIn30Min); mockedResolveProfilesUnavailableReason.mockReturnValue("billing"); - await expect( - runWithModelFallback({ - cfg, - provider: "openai", - model: "gpt-4.1-mini", - fallbacksOverride: [], - run: vi.fn().mockResolvedValue("billing-recovered"), - }), - ).rejects.toThrow("All models failed"); + const run = vi.fn().mockResolvedValue("billing-recovered"); + + const result = await runWithModelFallback({ + cfg, + provider: "openai", + model: "gpt-4.1-mini", + fallbacksOverride: [], + run, + }); + + expect(result.result).toBe("billing-recovered"); + expect(run).toHaveBeenCalledTimes(1); + expect(run).toHaveBeenCalledWith("openai", "gpt-4.1-mini", { + allowTransientCooldownProbe: true, + }); }); it("probes billing-cooldowned primary with fallbacks when near cooldown expiry", async () => { @@ -401,10 +621,6 @@ describe("runWithModelFallback – probe logic", () => { const run = vi.fn().mockResolvedValue("ok"); const result = await runPrimaryCandidate(cfg, run); - - expect(result.result).toBe("ok"); - expect(run).toHaveBeenCalledTimes(1); - expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5"); - expect(result.attempts[0]?.reason).toBe("billing"); + expectPrimarySkippedForReason(result, run, "billing"); }); }); diff --git a/src/agents/model-fallback.run-embedded.e2e.test.ts b/src/agents/model-fallback.run-embedded.e2e.test.ts index 2e5a8202e95..504b1457143 100644 --- a/src/agents/model-fallback.run-embedded.e2e.test.ts +++ b/src/agents/model-fallback.run-embedded.e2e.test.ts @@ -207,6 +207,7 @@ async function runEmbeddedFallback(params: { cfg, provider: "openai", model: "mock-1", + runId: params.runId, agentDir: params.agentDir, run: (provider, model, options) => runEmbeddedPiAgent({ diff --git a/src/agents/model-fallback.test.ts b/src/agents/model-fallback.test.ts index c99d0a9bed9..f8422b4aa14 100644 --- a/src/agents/model-fallback.test.ts +++ 
b/src/agents/model-fallback.test.ts @@ -536,7 +536,9 @@ describe("runWithModelFallback", () => { }); expect(result.result).toBe("ok"); - const warning = warnSpy.mock.calls[0]?.[0] as string; + const warning = warnSpy.mock.calls + .map((call) => call[0] as string) + .find((value) => value.includes('Model "openai/gpt-6spoof" not found')); expect(warning).toContain('Model "openai/gpt-6spoof" not found'); expect(warning).not.toContain("\u001B"); expect(warning).not.toContain("\n"); @@ -553,7 +555,7 @@ describe("runWithModelFallback", () => { usageStat: { cooldownUntil: Date.now() + 5 * 60_000, }, - expectedReason: "rate_limit", + expectedReason: "unknown", }); }); @@ -1316,6 +1318,86 @@ describe("runWithModelFallback", () => { }); // Rate limit allows attempt expect(run).toHaveBeenNthCalledWith(2, "groq", "llama-3.3-70b-versatile"); // Cross-provider works }); + + it("limits cooldown probes to one per provider before moving to cross-provider fallback", async () => { + const { dir } = await makeAuthStoreWithCooldown("anthropic", "rate_limit"); + const cfg = makeCfg({ + agents: { + defaults: { + model: { + primary: "anthropic/claude-opus-4-6", + fallbacks: [ + "anthropic/claude-sonnet-4-5", + "anthropic/claude-haiku-3-5", + "groq/llama-3.3-70b-versatile", + ], + }, + }, + }, + }); + + const run = vi + .fn() + .mockRejectedValueOnce(new Error("Still rate limited")) // First same-provider probe fails + .mockResolvedValueOnce("groq success"); // Next provider succeeds + + const result = await runWithModelFallback({ + cfg, + provider: "anthropic", + model: "claude-opus-4-6", + run, + agentDir: dir, + }); + + expect(result.result).toBe("groq success"); + // Primary is skipped, first same-provider fallback is probed, second same-provider fallback + // is skipped (probe already attempted), then cross-provider fallback runs. 
+ expect(run).toHaveBeenCalledTimes(2); + expect(run).toHaveBeenNthCalledWith(1, "anthropic", "claude-sonnet-4-5", { + allowTransientCooldownProbe: true, + }); + expect(run).toHaveBeenNthCalledWith(2, "groq", "llama-3.3-70b-versatile"); + }); + + it("does not consume transient probe slot when first same-provider probe fails with model_not_found", async () => { + const { dir } = await makeAuthStoreWithCooldown("anthropic", "rate_limit"); + const cfg = makeCfg({ + agents: { + defaults: { + model: { + primary: "anthropic/claude-opus-4-6", + fallbacks: [ + "anthropic/claude-sonnet-4-5", + "anthropic/claude-haiku-3-5", + "groq/llama-3.3-70b-versatile", + ], + }, + }, + }, + }); + + const run = vi + .fn() + .mockRejectedValueOnce(new Error("Model not found: anthropic/claude-sonnet-4-5")) + .mockResolvedValueOnce("haiku success"); + + const result = await runWithModelFallback({ + cfg, + provider: "anthropic", + model: "claude-opus-4-6", + run, + agentDir: dir, + }); + + expect(result.result).toBe("haiku success"); + expect(run).toHaveBeenCalledTimes(2); + expect(run).toHaveBeenNthCalledWith(1, "anthropic", "claude-sonnet-4-5", { + allowTransientCooldownProbe: true, + }); + expect(run).toHaveBeenNthCalledWith(2, "anthropic", "claude-haiku-3-5", { + allowTransientCooldownProbe: true, + }); + }); }); }); diff --git a/src/agents/model-fallback.ts b/src/agents/model-fallback.ts index ad2b5759233..5fd6e533a1a 100644 --- a/src/agents/model-fallback.ts +++ b/src/agents/model-fallback.ts @@ -19,6 +19,8 @@ import { isFailoverError, isTimeoutError, } from "./failover-error.js"; +import { logModelFallbackDecision } from "./model-fallback-observation.js"; +import type { FallbackAttempt, ModelCandidate } from "./model-fallback.types.js"; import { buildConfiguredAllowlistKeys, buildModelAliasIndex, @@ -32,11 +34,6 @@ import { isLikelyContextOverflowError } from "./pi-embedded-helpers.js"; const log = createSubsystemLogger("model-fallback"); -type ModelCandidate = { - provider: string; - 
model: string; -}; - export type ModelFallbackRunOptions = { allowTransientCooldownProbe?: boolean; }; @@ -47,15 +44,6 @@ type ModelFallbackRunFn = ( options?: ModelFallbackRunOptions, ) => Promise; -type FallbackAttempt = { - provider: string; - model: string; - error: string; - reason?: FailoverReason; - status?: number; - code?: string; -}; - /** * Fallback abort check. Only treats explicit AbortError names as user aborts. * Message-based checks (e.g., "aborted") can mask timeouts and skip fallback. @@ -152,10 +140,16 @@ async function runFallbackCandidate(params: { result, }; } catch (err) { - if (shouldRethrowAbort(err)) { + // Normalize abort-wrapped rate-limit errors (e.g. Google Vertex RESOURCE_EXHAUSTED) + // so they become FailoverErrors and continue the fallback loop instead of aborting. + const normalizedFailover = coerceToFailoverError(err, { + provider: params.provider, + model: params.model, + }); + if (shouldRethrowAbort(err) && !normalizedFailover) { throw err; } - return { ok: false, error: err }; + return { ok: false, error: normalizedFailover ?? err }; } } @@ -342,12 +336,51 @@ const lastProbeAttempt = new Map(); const MIN_PROBE_INTERVAL_MS = 30_000; // 30 seconds between probes per key const PROBE_MARGIN_MS = 2 * 60 * 1000; const PROBE_SCOPE_DELIMITER = "::"; +const PROBE_STATE_TTL_MS = 24 * 60 * 60 * 1000; +const MAX_PROBE_KEYS = 256; function resolveProbeThrottleKey(provider: string, agentDir?: string): string { const scope = String(agentDir ?? "").trim(); return scope ? 
`${scope}${PROBE_SCOPE_DELIMITER}${provider}` : provider; } +function pruneProbeState(now: number): void { + for (const [key, ts] of lastProbeAttempt) { + if (!Number.isFinite(ts) || ts <= 0 || now - ts > PROBE_STATE_TTL_MS) { + lastProbeAttempt.delete(key); + } + } +} + +function enforceProbeStateCap(): void { + while (lastProbeAttempt.size > MAX_PROBE_KEYS) { + let oldestKey: string | null = null; + let oldestTs = Number.POSITIVE_INFINITY; + for (const [key, ts] of lastProbeAttempt) { + if (ts < oldestTs) { + oldestKey = key; + oldestTs = ts; + } + } + if (!oldestKey) { + break; + } + lastProbeAttempt.delete(oldestKey); + } +} + +function isProbeThrottleOpen(now: number, throttleKey: string): boolean { + pruneProbeState(now); + const lastProbe = lastProbeAttempt.get(throttleKey) ?? 0; + return now - lastProbe >= MIN_PROBE_INTERVAL_MS; +} + +function markProbeAttempt(now: number, throttleKey: string): void { + pruneProbeState(now); + lastProbeAttempt.set(throttleKey, now); + enforceProbeStateCap(); +} + function shouldProbePrimaryDuringCooldown(params: { isPrimary: boolean; hasFallbackCandidates: boolean; @@ -360,8 +393,7 @@ function shouldProbePrimaryDuringCooldown(params: { return false; } - const lastProbe = lastProbeAttempt.get(params.throttleKey) ?? 0; - if (params.now - lastProbe < MIN_PROBE_INTERVAL_MS) { + if (!isProbeThrottleOpen(params.now, params.throttleKey)) { return false; } @@ -379,7 +411,12 @@ export const _probeThrottleInternals = { lastProbeAttempt, MIN_PROBE_INTERVAL_MS, PROBE_MARGIN_MS, + PROBE_STATE_TTL_MS, + MAX_PROBE_KEYS, resolveProbeThrottleKey, + isProbeThrottleOpen, + pruneProbeState, + markProbeAttempt, } as const; type CooldownDecision = @@ -418,7 +455,7 @@ function resolveCooldownDecision(params: { store: params.authStore, profileIds: params.profileIds, now: params.now, - }) ?? "rate_limit"; + }) ?? 
"unknown"; const isPersistentAuthIssue = inferredReason === "auth" || inferredReason === "auth_permanent"; if (isPersistentAuthIssue) { return { @@ -429,11 +466,15 @@ function resolveCooldownDecision(params: { } // Billing is semi-persistent: the user may fix their balance, or a transient - // 402 might have been misclassified. Probe the primary only when fallbacks - // exist; otherwise repeated single-provider probes just churn the disabled - // auth state without opening any recovery path. + // 402 might have been misclassified. Probe single-provider setups on the + // standard throttle so they can recover without a restart; when fallbacks + // exist, only probe near cooldown expiry so the fallback chain stays preferred. if (inferredReason === "billing") { - if (params.isPrimary && params.hasFallbackCandidates && shouldProbe) { + const shouldProbeSingleProviderBilling = + params.isPrimary && + !params.hasFallbackCandidates && + isProbeThrottleOpen(params.now, params.probeThrottleKey); + if (params.isPrimary && (shouldProbe || shouldProbeSingleProviderBilling)) { return { type: "attempt", reason: inferredReason, markProbe: true }; } return { @@ -448,7 +489,10 @@ function resolveCooldownDecision(params: { // limits, which are often model-scoped and can recover on a sibling model. const shouldAttemptDespiteCooldown = (params.isPrimary && (!params.requestedModel || shouldProbe)) || - (!params.isPrimary && (inferredReason === "rate_limit" || inferredReason === "overloaded")); + (!params.isPrimary && + (inferredReason === "rate_limit" || + inferredReason === "overloaded" || + inferredReason === "unknown")); if (!shouldAttemptDespiteCooldown) { return { type: "skip", @@ -468,6 +512,7 @@ export async function runWithModelFallback(params: { cfg: OpenClawConfig | undefined; provider: string; model: string; + runId?: string; agentDir?: string; /** Optional explicit fallbacks list; when provided (even empty), replaces agents.defaults.model.fallbacks. 
*/ fallbacksOverride?: string[]; @@ -485,12 +530,18 @@ export async function runWithModelFallback(params: { : null; const attempts: FallbackAttempt[] = []; let lastError: unknown; + const cooldownProbeUsedProviders = new Set(); const hasFallbackCandidates = candidates.length > 1; for (let i = 0; i < candidates.length; i += 1) { const candidate = candidates[i]; + const isPrimary = i === 0; + const requestedModel = + params.provider === candidate.provider && params.model === candidate.model; let runOptions: ModelFallbackRunOptions | undefined; + let attemptedDuringCooldown = false; + let transientProbeProviderForAttempt: string | null = null; if (authStore) { const profileIds = resolveAuthProfileOrder({ cfg: params.cfg, @@ -501,9 +552,6 @@ export async function runWithModelFallback(params: { if (profileIds.length > 0 && !isAnyProfileAvailable) { // All profiles for this provider are in cooldown. - const isPrimary = i === 0; - const requestedModel = - params.provider === candidate.provider && params.model === candidate.model; const now = Date.now(); const probeThrottleKey = resolveProbeThrottleKey(candidate.provider, params.agentDir); const decision = resolveCooldownDecision({ @@ -524,19 +572,89 @@ export async function runWithModelFallback(params: { error: decision.error, reason: decision.reason, }); + logModelFallbackDecision({ + decision: "skip_candidate", + runId: params.runId, + requestedProvider: params.provider, + requestedModel: params.model, + candidate, + attempt: i + 1, + total: candidates.length, + reason: decision.reason, + error: decision.error, + nextCandidate: candidates[i + 1], + isPrimary, + requestedModelMatched: requestedModel, + fallbackConfigured: hasFallbackCandidates, + profileCount: profileIds.length, + }); continue; } if (decision.markProbe) { - lastProbeAttempt.set(probeThrottleKey, now); + markProbeAttempt(now, probeThrottleKey); } if ( decision.reason === "rate_limit" || decision.reason === "overloaded" || - decision.reason === "billing" + 
decision.reason === "billing" || + decision.reason === "unknown" ) { + // Probe at most once per provider per fallback run when all profiles + // are cooldowned. Re-probing every same-provider candidate can stall + // cross-provider fallback on providers with long internal retries. + const isTransientCooldownReason = + decision.reason === "rate_limit" || + decision.reason === "overloaded" || + decision.reason === "unknown"; + if (isTransientCooldownReason && cooldownProbeUsedProviders.has(candidate.provider)) { + const error = `Provider ${candidate.provider} is in cooldown (probe already attempted this run)`; + attempts.push({ + provider: candidate.provider, + model: candidate.model, + error, + reason: decision.reason, + }); + logModelFallbackDecision({ + decision: "skip_candidate", + runId: params.runId, + requestedProvider: params.provider, + requestedModel: params.model, + candidate, + attempt: i + 1, + total: candidates.length, + reason: decision.reason, + error, + nextCandidate: candidates[i + 1], + isPrimary, + requestedModelMatched: requestedModel, + fallbackConfigured: hasFallbackCandidates, + profileCount: profileIds.length, + }); + continue; + } runOptions = { allowTransientCooldownProbe: true }; + if (isTransientCooldownReason) { + transientProbeProviderForAttempt = candidate.provider; + } } + attemptedDuringCooldown = true; + logModelFallbackDecision({ + decision: "probe_cooldown_candidate", + runId: params.runId, + requestedProvider: params.provider, + requestedModel: params.model, + candidate, + attempt: i + 1, + total: candidates.length, + reason: decision.reason, + nextCandidate: candidates[i + 1], + isPrimary, + requestedModelMatched: requestedModel, + fallbackConfigured: hasFallbackCandidates, + allowTransientCooldownProbe: runOptions?.allowTransientCooldownProbe, + profileCount: profileIds.length, + }); } } @@ -547,6 +665,21 @@ export async function runWithModelFallback(params: { options: runOptions, }); if ("success" in attemptRun) { + if (i > 0 
|| attempts.length > 0 || attemptedDuringCooldown) { + logModelFallbackDecision({ + decision: "candidate_succeeded", + runId: params.runId, + requestedProvider: params.provider, + requestedModel: params.model, + candidate, + attempt: i + 1, + total: candidates.length, + previousAttempts: attempts, + isPrimary, + requestedModelMatched: requestedModel, + fallbackConfigured: hasFallbackCandidates, + }); + } const notFoundAttempt = i > 0 ? attempts.find((a) => a.reason === "model_not_found") : undefined; if (notFoundAttempt) { @@ -558,6 +691,18 @@ export async function runWithModelFallback(params: { } const err = attemptRun.error; { + if (transientProbeProviderForAttempt) { + const probeFailureReason = describeFailoverError(err).reason; + const shouldPreserveTransientProbeSlot = + probeFailureReason === "model_not_found" || + probeFailureReason === "format" || + probeFailureReason === "auth" || + probeFailureReason === "auth_permanent" || + probeFailureReason === "session_expired"; + if (!shouldPreserveTransientProbeSlot) { + cooldownProbeUsedProviders.add(transientProbeProviderForAttempt); + } + } // Context overflow errors should be handled by the inner runner's // compaction/retry logic, not by model fallback. 
If one escapes as a // throw, rethrow it immediately rather than trying a different model @@ -590,6 +735,23 @@ export async function runWithModelFallback(params: { status: described.status, code: described.code, }); + logModelFallbackDecision({ + decision: "candidate_failed", + runId: params.runId, + requestedProvider: params.provider, + requestedModel: params.model, + candidate, + attempt: i + 1, + total: candidates.length, + reason: described.reason, + status: described.status, + code: described.code, + error: described.message, + nextCandidate: candidates[i + 1], + isPrimary, + requestedModelMatched: requestedModel, + fallbackConfigured: hasFallbackCandidates, + }); await params.onError?.({ provider: candidate.provider, model: candidate.model, diff --git a/src/agents/model-fallback.types.ts b/src/agents/model-fallback.types.ts new file mode 100644 index 00000000000..92b5f974788 --- /dev/null +++ b/src/agents/model-fallback.types.ts @@ -0,0 +1,15 @@ +import type { FailoverReason } from "./pi-embedded-helpers.js"; + +export type ModelCandidate = { + provider: string; + model: string; +}; + +export type FallbackAttempt = { + provider: string; + model: string; + error: string; + reason?: FailoverReason; + status?: number; + code?: string; +}; diff --git a/src/agents/model-forward-compat.ts b/src/agents/model-forward-compat.ts index 8735193346e..4afaff4a7a9 100644 --- a/src/agents/model-forward-compat.ts +++ b/src/agents/model-forward-compat.ts @@ -16,6 +16,9 @@ const OPENAI_CODEX_GPT_54_CONTEXT_TOKENS = 1_050_000; const OPENAI_CODEX_GPT_54_MAX_TOKENS = 128_000; const OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.3-codex", "gpt-5.2-codex"] as const; const OPENAI_CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex"; +const OPENAI_CODEX_GPT_53_SPARK_MODEL_ID = "gpt-5.3-codex-spark"; +const OPENAI_CODEX_GPT_53_SPARK_CONTEXT_TOKENS = 128_000; +const OPENAI_CODEX_GPT_53_SPARK_MAX_TOKENS = 128_000; const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const; const 
ANTHROPIC_OPUS_46_MODEL_ID = "claude-opus-4-6"; @@ -133,6 +136,19 @@ function resolveOpenAICodexForwardCompatModel( contextWindow: OPENAI_CODEX_GPT_54_CONTEXT_TOKENS, maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS, }; + } else if (lower === OPENAI_CODEX_GPT_53_SPARK_MODEL_ID) { + templateIds = [OPENAI_CODEX_GPT_53_MODEL_ID, ...OPENAI_CODEX_TEMPLATE_MODEL_IDS]; + eligibleProviders = CODEX_GPT54_ELIGIBLE_PROVIDERS; + patch = { + api: "openai-codex-responses", + provider: normalizedProvider, + baseUrl: "https://chatgpt.com/backend-api", + reasoning: true, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: OPENAI_CODEX_GPT_53_SPARK_CONTEXT_TOKENS, + maxTokens: OPENAI_CODEX_GPT_53_SPARK_MAX_TOKENS, + }; } else if (lower === OPENAI_CODEX_GPT_53_MODEL_ID) { templateIds = OPENAI_CODEX_TEMPLATE_MODEL_IDS; eligibleProviders = CODEX_GPT53_ELIGIBLE_PROVIDERS; diff --git a/src/agents/model-scan.ts b/src/agents/model-scan.ts index a0f05e05475..dec46b4db21 100644 --- a/src/agents/model-scan.ts +++ b/src/agents/model-scan.ts @@ -326,12 +326,12 @@ async function probeImage( } function ensureImageInput(model: OpenAIModel): OpenAIModel { - if (model.input.includes("image")) { + if (model.input?.includes("image")) { return model; } return { ...model, - input: Array.from(new Set([...model.input, "image"])), + input: Array.from(new Set([...(model.input ?? []), "image"])), }; } @@ -472,7 +472,7 @@ export async function scanOpenRouterModels( }; const toolResult = await probeTool(model, apiKey, timeoutMs); - const imageResult = model.input.includes("image") + const imageResult = model.input?.includes("image") ? 
await probeImage(ensureImageInput(model), apiKey, timeoutMs) : { ok: false, latencyMs: null, skipped: true }; diff --git a/src/agents/model-selection.test.ts b/src/agents/model-selection.test.ts index a9029540ee1..7fa8832e0e7 100644 --- a/src/agents/model-selection.test.ts +++ b/src/agents/model-selection.test.ts @@ -50,6 +50,60 @@ function resolveAnthropicOpusThinking(cfg: OpenClawConfig) { }); } +function createAgentFallbackConfig(params: { + primary?: string; + fallbacks?: string[]; + agentFallbacks?: string[]; +}) { + return { + agents: { + defaults: { + models: { + "openai/gpt-4o": {}, + }, + model: { + primary: params.primary ?? "openai/gpt-4o", + fallbacks: params.fallbacks ?? [], + }, + }, + ...(params.agentFallbacks + ? { + list: [ + { + id: "coder", + model: { + primary: params.primary ?? "openai/gpt-4o", + fallbacks: params.agentFallbacks, + }, + }, + ], + } + : {}), + }, + } as OpenClawConfig; +} + +function createProviderWithModelsConfig(provider: string, models: Array<Record<string, unknown>>) { + return { + models: { + providers: { + [provider]: { + baseUrl: `https://${provider}.example.com`, + models, + }, + }, + }, + } as Partial<OpenClawConfig>; +} + +function resolveConfiguredRefForTest(cfg: Partial<OpenClawConfig>) { + return resolveConfiguredModelRef({ + cfg: cfg as OpenClawConfig, + defaultProvider: "anthropic", + defaultModel: "claude-opus-4-6", + }); +} + describe("model-selection", () => { describe("normalizeProviderId", () => { it("should normalize provider names", () => { @@ -73,132 +127,139 @@ describe("model-selection", () => { }); }); + describe("modelKey", () => { + it("keeps canonical OpenRouter native ids without duplicating the provider", () => { + expect(modelKey("openrouter", "openrouter/hunter-alpha")).toBe("openrouter/hunter-alpha"); + }); + }); + describe("parseModelRef", () => { - it("should parse full model refs", () => { - expect(parseModelRef("anthropic/claude-3-5-sonnet", "openai")).toEqual({ - provider: "anthropic", - model: "claude-3-5-sonnet", - }); + const 
expectParsedModelVariants = ( + variants: string[], + defaultProvider: string, + expected: { provider: string; model: string }, + ) => { + for (const raw of variants) { + expect(parseModelRef(raw, defaultProvider), raw).toEqual(expected); + } + }; + + it.each([ + { + name: "parses explicit provider/model refs", + variants: ["anthropic/claude-3-5-sonnet"], + defaultProvider: "openai", + expected: { provider: "anthropic", model: "claude-3-5-sonnet" }, + }, + { + name: "uses the default provider when omitted", + variants: ["claude-3-5-sonnet"], + defaultProvider: "anthropic", + expected: { provider: "anthropic", model: "claude-3-5-sonnet" }, + }, + { + name: "preserves nested model ids after the provider prefix", + variants: ["nvidia/moonshotai/kimi-k2.5"], + defaultProvider: "anthropic", + expected: { provider: "nvidia", model: "moonshotai/kimi-k2.5" }, + }, + { + name: "normalizes anthropic shorthand aliases", + variants: ["anthropic/opus-4.6", "opus-4.6", " anthropic / opus-4.6 "], + defaultProvider: "anthropic", + expected: { provider: "anthropic", model: "claude-opus-4-6" }, + }, + { + name: "normalizes anthropic sonnet aliases", + variants: ["anthropic/sonnet-4.6", "sonnet-4.6"], + defaultProvider: "anthropic", + expected: { provider: "anthropic", model: "claude-sonnet-4-6" }, + }, + { + name: "keeps dated anthropic model ids unchanged", + variants: ["anthropic/claude-sonnet-4-20250514", "claude-sonnet-4-20250514"], + defaultProvider: "anthropic", + expected: { provider: "anthropic", model: "claude-sonnet-4-20250514" }, + }, + { + name: "normalizes deprecated google flash preview ids", + variants: ["google/gemini-3.1-flash-preview", "gemini-3.1-flash-preview"], + defaultProvider: "google", + expected: { provider: "google", model: "gemini-3-flash-preview" }, + }, + { + name: "normalizes gemini 3.1 flash-lite ids", + variants: ["google/gemini-3.1-flash-lite", "gemini-3.1-flash-lite"], + defaultProvider: "google", + expected: { provider: "google", model: 
"gemini-3.1-flash-lite-preview" }, + }, + { + name: "keeps OpenAI codex refs on the openai provider", + variants: ["openai/gpt-5.3-codex", "gpt-5.3-codex"], + defaultProvider: "openai", + expected: { provider: "openai", model: "gpt-5.3-codex" }, + }, + { + name: "preserves openrouter native model prefixes", + variants: ["openrouter/aurora-alpha"], + defaultProvider: "openai", + expected: { provider: "openrouter", model: "openrouter/aurora-alpha" }, + }, + { + name: "passes through openrouter upstream provider ids", + variants: ["openrouter/anthropic/claude-sonnet-4-5"], + defaultProvider: "openai", + expected: { provider: "openrouter", model: "anthropic/claude-sonnet-4-5" }, + }, + { + name: "normalizes Vercel Claude shorthand to anthropic-prefixed model ids", + variants: ["vercel-ai-gateway/claude-opus-4.6"], + defaultProvider: "openai", + expected: { provider: "vercel-ai-gateway", model: "anthropic/claude-opus-4.6" }, + }, + { + name: "normalizes Vercel Anthropic aliases without double-prefixing", + variants: ["vercel-ai-gateway/opus-4.6"], + defaultProvider: "openai", + expected: { provider: "vercel-ai-gateway", model: "anthropic/claude-opus-4-6" }, + }, + { + name: "keeps already-prefixed Vercel Anthropic models unchanged", + variants: ["vercel-ai-gateway/anthropic/claude-opus-4.6"], + defaultProvider: "openai", + expected: { provider: "vercel-ai-gateway", model: "anthropic/claude-opus-4.6" }, + }, + { + name: "passes through non-Claude Vercel model ids unchanged", + variants: ["vercel-ai-gateway/openai/gpt-5.2"], + defaultProvider: "openai", + expected: { provider: "vercel-ai-gateway", model: "openai/gpt-5.2" }, + }, + { + name: "keeps already-suffixed codex variants unchanged", + variants: ["openai/gpt-5.3-codex-codex"], + defaultProvider: "anthropic", + expected: { provider: "openai", model: "gpt-5.3-codex-codex" }, + }, + { + name: "normalizes gemini 3.1 flash-lite ids for google-vertex", + variants: ["google-vertex/gemini-3.1-flash-lite", 
"gemini-3.1-flash-lite"], + defaultProvider: "google-vertex", + expected: { provider: "google-vertex", model: "gemini-3.1-flash-lite-preview" }, + }, + ])("$name", ({ variants, defaultProvider, expected }) => { + expectParsedModelVariants(variants, defaultProvider, expected); }); - it("preserves nested model ids after provider prefix", () => { - expect(parseModelRef("nvidia/moonshotai/kimi-k2.5", "anthropic")).toEqual({ - provider: "nvidia", - model: "moonshotai/kimi-k2.5", - }); + it("round-trips normalized refs through modelKey", () => { + const parsed = parseModelRef(" opus-4.6 ", "anthropic"); + expect(parsed).toEqual({ provider: "anthropic", model: "claude-opus-4-6" }); + expect(modelKey(parsed?.provider ?? "", parsed?.model ?? "")).toBe( + "anthropic/claude-opus-4-6", + ); }); - - it("normalizes anthropic alias refs to canonical model ids", () => { - expect(parseModelRef("anthropic/opus-4.6", "openai")).toEqual({ - provider: "anthropic", - model: "claude-opus-4-6", - }); - expect(parseModelRef("opus-4.6", "anthropic")).toEqual({ - provider: "anthropic", - model: "claude-opus-4-6", - }); - expect(parseModelRef("anthropic/sonnet-4.6", "openai")).toEqual({ - provider: "anthropic", - model: "claude-sonnet-4-6", - }); - expect(parseModelRef("sonnet-4.6", "anthropic")).toEqual({ - provider: "anthropic", - model: "claude-sonnet-4-6", - }); - }); - - it("should use default provider if none specified", () => { - expect(parseModelRef("claude-3-5-sonnet", "anthropic")).toEqual({ - provider: "anthropic", - model: "claude-3-5-sonnet", - }); - }); - - it("normalizes deprecated google flash preview ids to the working model id", () => { - expect(parseModelRef("google/gemini-3.1-flash-preview", "openai")).toEqual({ - provider: "google", - model: "gemini-3-flash-preview", - }); - expect(parseModelRef("gemini-3.1-flash-preview", "google")).toEqual({ - provider: "google", - model: "gemini-3-flash-preview", - }); - }); - - it("normalizes gemini 3.1 flash-lite to the preview model 
id", () => { - expect(parseModelRef("google/gemini-3.1-flash-lite", "openai")).toEqual({ - provider: "google", - model: "gemini-3.1-flash-lite-preview", - }); - expect(parseModelRef("gemini-3.1-flash-lite", "google")).toEqual({ - provider: "google", - model: "gemini-3.1-flash-lite-preview", - }); - }); - - it("keeps openai gpt-5.3 codex refs on the openai provider", () => { - expect(parseModelRef("openai/gpt-5.3-codex", "anthropic")).toEqual({ - provider: "openai", - model: "gpt-5.3-codex", - }); - expect(parseModelRef("gpt-5.3-codex", "openai")).toEqual({ - provider: "openai", - model: "gpt-5.3-codex", - }); - expect(parseModelRef("openai/gpt-5.3-codex-codex", "anthropic")).toEqual({ - provider: "openai", - model: "gpt-5.3-codex-codex", - }); - }); - - it("should return null for empty strings", () => { - expect(parseModelRef("", "anthropic")).toBeNull(); - expect(parseModelRef(" ", "anthropic")).toBeNull(); - }); - - it("should preserve openrouter/ prefix for native models", () => { - expect(parseModelRef("openrouter/aurora-alpha", "openai")).toEqual({ - provider: "openrouter", - model: "openrouter/aurora-alpha", - }); - }); - - it("should pass through openrouter external provider models as-is", () => { - expect(parseModelRef("openrouter/anthropic/claude-sonnet-4-5", "openai")).toEqual({ - provider: "openrouter", - model: "anthropic/claude-sonnet-4-5", - }); - }); - - it("normalizes Vercel Claude shorthand to anthropic-prefixed model ids", () => { - expect(parseModelRef("vercel-ai-gateway/claude-opus-4.6", "openai")).toEqual({ - provider: "vercel-ai-gateway", - model: "anthropic/claude-opus-4.6", - }); - expect(parseModelRef("vercel-ai-gateway/opus-4.6", "openai")).toEqual({ - provider: "vercel-ai-gateway", - model: "anthropic/claude-opus-4-6", - }); - }); - - it("keeps already-prefixed Vercel Anthropic models unchanged", () => { - expect(parseModelRef("vercel-ai-gateway/anthropic/claude-opus-4.6", "openai")).toEqual({ - provider: "vercel-ai-gateway", - model: 
"anthropic/claude-opus-4.6", - }); - }); - - it("passes through non-Claude Vercel model ids unchanged", () => { - expect(parseModelRef("vercel-ai-gateway/openai/gpt-5.2", "openai")).toEqual({ - provider: "vercel-ai-gateway", - model: "openai/gpt-5.2", - }); - }); - - it("should handle invalid slash usage", () => { - expect(parseModelRef("/", "anthropic")).toBeNull(); - expect(parseModelRef("anthropic/", "anthropic")).toBeNull(); - expect(parseModelRef("/model", "anthropic")).toBeNull(); + it.each(["", " ", "/", "anthropic/", "/model"])("returns null for invalid ref %j", (raw) => { + expect(parseModelRef(raw, "anthropic")).toBeNull(); }); }); @@ -322,6 +383,58 @@ describe("model-selection", () => { { provider: "anthropic", id: "claude-sonnet-4-6", name: "claude-sonnet-4-6" }, ]); }); + + it("includes fallback models in allowed set", () => { + const cfg = createAgentFallbackConfig({ + fallbacks: ["anthropic/claude-sonnet-4-6", "google/gemini-3-pro"], + }); + + const result = buildAllowedModelSet({ + cfg, + catalog: [], + defaultProvider: "openai", + defaultModel: "gpt-4o", + }); + + expect(result.allowedKeys.has("openai/gpt-4o")).toBe(true); + expect(result.allowedKeys.has("anthropic/claude-sonnet-4-6")).toBe(true); + expect(result.allowedKeys.has("google/gemini-3-pro-preview")).toBe(true); + expect(result.allowAny).toBe(false); + }); + + it("handles empty fallbacks gracefully", () => { + const cfg = createAgentFallbackConfig({}); + + const result = buildAllowedModelSet({ + cfg, + catalog: [], + defaultProvider: "openai", + defaultModel: "gpt-4o", + }); + + expect(result.allowedKeys.has("openai/gpt-4o")).toBe(true); + expect(result.allowAny).toBe(false); + }); + + it("prefers per-agent fallback overrides when agentId is provided", () => { + const cfg = createAgentFallbackConfig({ + fallbacks: ["google/gemini-3-pro"], + agentFallbacks: ["anthropic/claude-sonnet-4-6"], + }); + + const result = buildAllowedModelSet({ + cfg, + catalog: [], + defaultProvider: "openai", + 
defaultModel: "gpt-4o", + agentId: "coder", + }); + + expect(result.allowedKeys.has("openai/gpt-4o")).toBe(true); + expect(result.allowedKeys.has("anthropic/claude-sonnet-4-6")).toBe(true); + expect(result.allowedKeys.has("google/gemini-3-pro-preview")).toBe(false); + expect(result.allowAny).toBe(false); + }); }); describe("resolveAllowedModelRef", () => { @@ -538,79 +651,40 @@ describe("model-selection", () => { }); it("should prefer configured custom provider when default provider is not in models.providers", () => { - const cfg: Partial = { - models: { - providers: { - n1n: { - baseUrl: "https://n1n.example.com", - models: [ - { - id: "gpt-5.4", - name: "GPT 5.4", - reasoning: false, - input: ["text"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 128000, - maxTokens: 4096, - }, - ], - }, - }, + const cfg = createProviderWithModelsConfig("n1n", [ + { + id: "gpt-5.4", + name: "GPT 5.4", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 128000, + maxTokens: 4096, }, - }; - const result = resolveConfiguredModelRef({ - cfg: cfg as OpenClawConfig, - defaultProvider: "anthropic", - defaultModel: "claude-opus-4-6", - }); + ]); + const result = resolveConfiguredRefForTest(cfg); expect(result).toEqual({ provider: "n1n", model: "gpt-5.4" }); }); it("should keep default provider when it is in models.providers", () => { - const cfg: Partial = { - models: { - providers: { - anthropic: { - baseUrl: "https://api.anthropic.com", - models: [ - { - id: "claude-opus-4-6", - name: "Claude Opus 4.6", - reasoning: true, - input: ["text", "image"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 200000, - maxTokens: 4096, - }, - ], - }, - }, + const cfg = createProviderWithModelsConfig("anthropic", [ + { + id: "claude-opus-4-6", + name: "Claude Opus 4.6", + reasoning: true, + input: ["text", "image"], + cost: { input: 0, output: 0, cacheRead: 0, 
cacheWrite: 0 }, + contextWindow: 200000, + maxTokens: 4096, }, - }; - const result = resolveConfiguredModelRef({ - cfg: cfg as OpenClawConfig, - defaultProvider: "anthropic", - defaultModel: "claude-opus-4-6", - }); + ]); + const result = resolveConfiguredRefForTest(cfg); expect(result).toEqual({ provider: "anthropic", model: "claude-opus-4-6" }); }); it("should fall back to hardcoded default when no custom providers have models", () => { - const cfg: Partial = { - models: { - providers: { - "empty-provider": { - baseUrl: "https://example.com", - models: [], - }, - }, - }, - }; - const result = resolveConfiguredModelRef({ - cfg: cfg as OpenClawConfig, - defaultProvider: "anthropic", - defaultModel: "claude-opus-4-6", - }); + const cfg = createProviderWithModelsConfig("empty-provider", []); + const result = resolveConfiguredRefForTest(cfg); expect(result).toEqual({ provider: "anthropic", model: "claude-opus-4-6" }); }); @@ -662,6 +736,28 @@ describe("model-selection", () => { expect(resolveAnthropicOpusThinking(cfg)).toBe("high"); }); + it("accepts legacy duplicated OpenRouter keys for per-model thinking", () => { + const cfg = { + agents: { + defaults: { + models: { + "openrouter/openrouter/hunter-alpha": { + params: { thinking: "high" }, + }, + }, + }, + }, + } as OpenClawConfig; + + expect( + resolveThinkingDefault({ + cfg, + provider: "openrouter", + model: "openrouter/hunter-alpha", + }), + ).toBe("high"); + }); + it("accepts per-model params.thinking=adaptive", () => { const cfg = { agents: { diff --git a/src/agents/model-selection.ts b/src/agents/model-selection.ts index 75df5ed22fa..72cd5951292 100644 --- a/src/agents/model-selection.ts +++ b/src/agents/model-selection.ts @@ -1,8 +1,17 @@ +import { resolveThinkingDefaultForModel } from "../auto-reply/thinking.js"; import type { OpenClawConfig } from "../config/config.js"; -import { resolveAgentModelPrimaryValue, toAgentModelListLike } from "../config/model-input.js"; +import { + 
resolveAgentModelFallbackValues,
+  resolveAgentModelPrimaryValue,
+  toAgentModelListLike,
+} from "../config/model-input.js";
 import { createSubsystemLogger } from "../logging/subsystem.js";
 import { sanitizeForLog } from "../terminal/ansi.js";
-import { resolveAgentConfig, resolveAgentEffectiveModelPrimary } from "./agent-scope.js";
+import {
+  resolveAgentConfig,
+  resolveAgentEffectiveModelPrimary,
+  resolveAgentModelFallbacksOverride,
+} from "./agent-scope.js";
 import { DEFAULT_MODEL, DEFAULT_PROVIDER } from "./defaults.js";
 import type { ModelCatalogEntry } from "./model-catalog.js";
 import { splitTrailingAuthProfile } from "./model-ref-profile.js";
@@ -22,20 +31,33 @@ export type ModelAliasIndex = {
   byKey: Map;
 };
 
-const ANTHROPIC_MODEL_ALIASES: Record<string, string> = {
-  "opus-4.6": "claude-opus-4-6",
-  "opus-4.5": "claude-opus-4-5",
-  "sonnet-4.6": "claude-sonnet-4-6",
-  "sonnet-4.5": "claude-sonnet-4-5",
-};
-const CLAUDE_46_MODEL_RE = /claude-(?:opus|sonnet)-4(?:\.|-)6(?:$|[-.])/i;
-
 function normalizeAliasKey(value: string): string {
   return value.trim().toLowerCase();
 }
 
 export function modelKey(provider: string, model: string) {
-  return `${provider}/${model}`;
+  const providerId = provider.trim();
+  const modelId = model.trim();
+  if (!providerId) {
+    return modelId;
+  }
+  if (!modelId) {
+    return providerId;
+  }
+  return modelId.toLowerCase().startsWith(`${providerId.toLowerCase()}/`)
+    ? modelId
+    : `${providerId}/${modelId}`;
+}
+
+export function legacyModelKey(provider: string, model: string): string | null {
+  const providerId = provider.trim();
+  const modelId = model.trim();
+  if (!providerId || !modelId) {
+    return null;
+  }
+  const rawKey = `${providerId}/${modelId}`;
+  const canonicalKey = modelKey(providerId, modelId);
+  return rawKey === canonicalKey ?
null : rawKey; } export function normalizeProviderId(provider: string): string { @@ -46,6 +68,9 @@ export function normalizeProviderId(provider: string): string { if (normalized === "opencode-zen") { return "opencode"; } + if (normalized === "opencode-go-auth") { + return "opencode-go"; + } if (normalized === "qwen") { return "qwen-portal"; } @@ -119,7 +144,20 @@ function normalizeAnthropicModelId(model: string): string { return trimmed; } const lower = trimmed.toLowerCase(); - return ANTHROPIC_MODEL_ALIASES[lower] ?? trimmed; + // Keep alias resolution local so bundled startup paths cannot trip a TDZ on + // a module-level alias table while config parsing is still initializing. + switch (lower) { + case "opus-4.6": + return "claude-opus-4-6"; + case "opus-4.5": + return "claude-opus-4-5"; + case "sonnet-4.6": + return "claude-sonnet-4-6"; + case "sonnet-4.5": + return "claude-sonnet-4-5"; + default: + return trimmed; + } } function normalizeProviderModelId(provider: string, model: string): string { @@ -133,7 +171,7 @@ function normalizeProviderModelId(provider: string, model: string): string { return `anthropic/${normalizedAnthropicModel}`; } } - if (provider === "google") { + if (provider === "google" || provider === "google-vertex") { return normalizeGoogleModelId(model); } // OpenRouter-native models (e.g. 
"openrouter/aurora-alpha") need the full @@ -379,6 +417,16 @@ export function resolveDefaultModelForAgent(params: { }); } +function resolveAllowedFallbacks(params: { cfg: OpenClawConfig; agentId?: string }): string[] { + if (params.agentId) { + const override = resolveAgentModelFallbacksOverride(params.cfg, params.agentId); + if (override !== undefined) { + return override; + } + } + return resolveAgentModelFallbackValues(params.cfg.agents?.defaults?.model); +} + export function resolveSubagentConfiguredModelSelection(params: { cfg: OpenClawConfig; agentId: string; @@ -416,6 +464,7 @@ export function buildAllowedModelSet(params: { catalog: ModelCatalogEntry[]; defaultProvider: string; defaultModel?: string; + agentId?: string; }): { allowAny: boolean; allowedCatalog: ModelCatalogEntry[]; @@ -466,6 +515,25 @@ export function buildAllowedModelSet(params: { } } + for (const fallback of resolveAllowedFallbacks({ + cfg: params.cfg, + agentId: params.agentId, + })) { + const parsed = parseModelRef(String(fallback), params.defaultProvider); + if (parsed) { + const key = modelKey(parsed.provider, parsed.model); + allowedKeys.add(key); + + if (!catalogKeys.has(key) && !syntheticCatalogEntries.has(key)) { + syntheticCatalogEntries.set(key, { + id: parsed.model, + name: parsed.model, + provider: parsed.provider, + }); + } + } + } + if (defaultKey) { allowedKeys.add(defaultKey); } @@ -567,11 +635,14 @@ export function resolveThinkingDefault(params: { model: string; catalog?: ModelCatalogEntry[]; }): ThinkLevel { - const normalizedProvider = normalizeProviderId(params.provider); - const modelLower = params.model.toLowerCase(); + const _normalizedProvider = normalizeProviderId(params.provider); + const _modelLower = params.model.toLowerCase(); + const configuredModels = params.cfg.agents?.defaults?.models; + const canonicalKey = modelKey(params.provider, params.model); + const legacyKey = legacyModelKey(params.provider, params.model); const perModelThinking = - 
params.cfg.agents?.defaults?.models?.[modelKey(params.provider, params.model)]?.params - ?.thinking; + configuredModels?.[canonicalKey]?.params?.thinking ?? + (legacyKey ? configuredModels?.[legacyKey]?.params?.thinking : undefined); if ( perModelThinking === "off" || perModelThinking === "minimal" || @@ -587,21 +658,11 @@ export function resolveThinkingDefault(params: { if (configured) { return configured; } - const isAnthropicFamilyModel = - normalizedProvider === "anthropic" || - normalizedProvider === "amazon-bedrock" || - modelLower.includes("anthropic/") || - modelLower.includes(".anthropic."); - if (isAnthropicFamilyModel && CLAUDE_46_MODEL_RE.test(modelLower)) { - return "adaptive"; - } - const candidate = params.catalog?.find( - (entry) => entry.provider === params.provider && entry.id === params.model, - ); - if (candidate?.reasoning) { - return "low"; - } - return "off"; + return resolveThinkingDefaultForModel({ + provider: params.provider, + model: params.model, + catalog: params.catalog, + }); } /** Default reasoning level when session/directive do not set it: "on" if model supports reasoning, else "off". */ diff --git a/src/agents/model-suppression.ts b/src/agents/model-suppression.ts new file mode 100644 index 00000000000..378096ea732 --- /dev/null +++ b/src/agents/model-suppression.ts @@ -0,0 +1,27 @@ +import { normalizeProviderId } from "./model-selection.js"; + +const OPENAI_DIRECT_SPARK_MODEL_ID = "gpt-5.3-codex-spark"; +const SUPPRESSED_SPARK_PROVIDERS = new Set(["openai", "azure-openai-responses"]); + +export function shouldSuppressBuiltInModel(params: { + provider?: string | null; + id?: string | null; +}) { + const provider = normalizeProviderId(params.provider?.trim().toLowerCase() ?? ""); + const id = params.id?.trim().toLowerCase() ?? ""; + + // pi-ai still ships non-Codex Spark rows, but OpenClaw treats Spark as + // Codex-only until upstream availability is proven on direct API paths. 
+ return SUPPRESSED_SPARK_PROVIDERS.has(provider) && id === OPENAI_DIRECT_SPARK_MODEL_ID; +} + +export function buildSuppressedBuiltInModelError(params: { + provider?: string | null; + id?: string | null; +}): string | undefined { + if (!shouldSuppressBuiltInModel(params)) { + return undefined; + } + const provider = normalizeProviderId(params.provider?.trim().toLowerCase() ?? "") || "openai"; + return `Unknown model: ${provider}/${OPENAI_DIRECT_SPARK_MODEL_ID}. ${OPENAI_DIRECT_SPARK_MODEL_ID} is only supported via openai-codex OAuth. Use openai-codex/${OPENAI_DIRECT_SPARK_MODEL_ID}.`; +} diff --git a/src/agents/models-config.e2e-harness.ts b/src/agents/models-config.e2e-harness.ts index 71577b27e69..81518ec9aee 100644 --- a/src/agents/models-config.e2e-harness.ts +++ b/src/agents/models-config.e2e-harness.ts @@ -101,6 +101,7 @@ export const MODELS_CONFIG_IMPLICIT_ENV_VARS = [ "OPENROUTER_API_KEY", "PI_CODING_AGENT_DIR", "QIANFAN_API_KEY", + "MODELSTUDIO_API_KEY", "QWEN_OAUTH_TOKEN", "QWEN_PORTAL_API_KEY", "SYNTHETIC_API_KEY", diff --git a/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts b/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts index ef03fb3863b..036f4d00824 100644 --- a/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts +++ b/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts @@ -60,13 +60,31 @@ function createMergeConfigProvider() { }; } -async function runCustomProviderMergeTest(params: { - seedProvider: { - baseUrl: string; - apiKey: string; - api: string; - models: Array<{ id: string; name: string; input: string[]; api?: string }>; +type MergeSeedProvider = { + baseUrl: string; + apiKey: string; + api: string; + models: Array<{ id: string; name: string; input: string[]; api?: string }>; +}; + +type MergeConfigApiKeyRef = { + source: "env"; + provider: "default"; + id: string; +}; + +function createAgentSeedProvider(overrides: Partial = {}): 
MergeSeedProvider {
+  return {
+    baseUrl: "https://agent.example/v1",
+    apiKey: "AGENT_KEY", // pragma: allowlist secret
+    api: "openai-responses",
+    models: [{ id: "agent-model", name: "Agent model", input: ["text"] }],
+    ...overrides,
+  };
+}
+
+async function runCustomProviderMergeTest(params: {
+  seedProvider: MergeSeedProvider;
   existingProviderKey?: string;
   configProviderKey?: string;
 }) {
@@ -86,6 +104,56 @@ async function runCustomProviderMergeTest(params: {
   }>();
 }
 
+async function expectCustomProviderMergeResult(params: {
+  seedProvider?: MergeSeedProvider;
+  existingProviderKey?: string;
+  configProviderKey?: string;
+  expectedApiKey: string;
+  expectedBaseUrl: string;
+}) {
+  await withTempHome(async () => {
+    const parsed = await runCustomProviderMergeTest({
+      seedProvider: params.seedProvider ?? createAgentSeedProvider(),
+      existingProviderKey: params.existingProviderKey,
+      configProviderKey: params.configProviderKey,
+    });
+    expect(parsed.providers.custom?.apiKey).toBe(params.expectedApiKey);
+    expect(parsed.providers.custom?.baseUrl).toBe(params.expectedBaseUrl);
+  });
+}
+
+async function expectCustomProviderApiKeyRewrite(params: {
+  existingApiKey: string;
+  configuredApiKey: string | MergeConfigApiKeyRef;
+  expectedApiKey: string;
+}) {
+  await withTempHome(async () => {
+    await writeAgentModelsJson({
+      providers: {
+        custom: createAgentSeedProvider({ apiKey: params.existingApiKey }),
+      },
+    });
+
+    await ensureOpenClawModelsJson({
+      models: {
+        mode: "merge",
+        providers: {
+          custom: {
+            ...createMergeConfigProvider(),
+            apiKey: params.configuredApiKey,
+          },
+        },
+      },
+    });
+
+    const parsed = await readGeneratedModelsJson<{
+      providers: Record<string, { apiKey?: string; baseUrl?: string }>;
+    }>();
+    expect(parsed.providers.custom?.apiKey).toBe(params.expectedApiKey);
+    expect(parsed.providers.custom?.baseUrl).toBe("https://config.example/v1");
+  });
+}
+
 function createMoonshotConfig(overrides: {
   contextWindow: number;
   maxTokens: number;
@@ -113,6 +181,92 @@ function
createMoonshotConfig(overrides: { }; } +function createOpenAiConfigWithResolvedApiKey(mergeMode = false): OpenClawConfig { + return { + models: { + ...(mergeMode ? { mode: "merge" as const } : {}), + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; simulates resolved ${OPENAI_API_KEY} + api: "openai-completions", + models: [ + { + id: "gpt-4.1", + name: "GPT-4.1", + input: ["text"], + reasoning: false, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 128000, + maxTokens: 16384, + }, + ], + }, + }, + }, + }; +} + +async function expectOpenAiEnvMarkerApiKey(options?: { seedMergedProvider?: boolean }) { + await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => { + await withTempHome(async () => { + if (options?.seedMergedProvider) { + await writeAgentModelsJson({ + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret + api: "openai-completions", + models: [{ id: "gpt-4.1", name: "GPT-4.1", input: ["text"] }], + }, + }, + }); + } + + await ensureOpenClawModelsJson( + createOpenAiConfigWithResolvedApiKey(options?.seedMergedProvider), + ); + const result = await readGeneratedModelsJson<{ + providers: Record; + }>(); + expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret + }); + }); +} + +async function expectMoonshotTokenLimits(params: { + contextWindow: number; + maxTokens: number; + expectedContextWindow: number; + expectedMaxTokens: number; +}) { + await withTempHome(async () => { + await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => { + await ensureOpenClawModelsJson( + createMoonshotConfig({ + contextWindow: params.contextWindow, + maxTokens: params.maxTokens, + }), + ); + const parsed = await readGeneratedModelsJson<{ + providers: Record< + string, + { + models?: Array<{ + id: string; + 
contextWindow?: number; + maxTokens?: number; + }>; + } + >; + }>(); + const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5"); + expect(kimi?.contextWindow).toBe(params.expectedContextWindow); + expect(kimi?.maxTokens).toBe(params.expectedMaxTokens); + }); + }); +} + describe("models-config", () => { it("keeps anthropic api defaults when model entries omit api", async () => { await withTempHome(async () => { @@ -215,49 +369,26 @@ describe("models-config", () => { }); it("preserves non-empty agent apiKey but lets explicit config baseUrl win in merge mode", async () => { - await withTempHome(async () => { - const parsed = await runCustomProviderMergeTest({ - seedProvider: { - baseUrl: "https://agent.example/v1", - apiKey: "AGENT_KEY", // pragma: allowlist secret - api: "openai-responses", - models: [{ id: "agent-model", name: "Agent model", input: ["text"] }], - }, - }); - expect(parsed.providers.custom?.apiKey).toBe("AGENT_KEY"); - expect(parsed.providers.custom?.baseUrl).toBe("https://config.example/v1"); + await expectCustomProviderMergeResult({ + expectedApiKey: "AGENT_KEY", + expectedBaseUrl: "https://config.example/v1", }); }); it("lets explicit config baseUrl win in merge mode when the config provider key is normalized", async () => { - await withTempHome(async () => { - const parsed = await runCustomProviderMergeTest({ - seedProvider: { - baseUrl: "https://agent.example/v1", - apiKey: "AGENT_KEY", // pragma: allowlist secret - api: "openai-responses", - models: [{ id: "agent-model", name: "Agent model", input: ["text"] }], - }, - existingProviderKey: "custom", - configProviderKey: " custom ", - }); - expect(parsed.providers.custom?.apiKey).toBe("AGENT_KEY"); - expect(parsed.providers.custom?.baseUrl).toBe("https://config.example/v1"); + await expectCustomProviderMergeResult({ + existingProviderKey: "custom", + configProviderKey: " custom ", + expectedApiKey: "AGENT_KEY", + expectedBaseUrl: "https://config.example/v1", }); 
}); it("replaces stale merged baseUrl when the provider api changes", async () => { - await withTempHome(async () => { - const parsed = await runCustomProviderMergeTest({ - seedProvider: { - baseUrl: "https://agent.example/v1", - apiKey: "AGENT_KEY", // pragma: allowlist secret - api: "openai-completions", - models: [{ id: "agent-model", name: "Agent model", input: ["text"] }], - }, - }); - expect(parsed.providers.custom?.apiKey).toBe("AGENT_KEY"); - expect(parsed.providers.custom?.baseUrl).toBe("https://config.example/v1"); + await expectCustomProviderMergeResult({ + seedProvider: createAgentSeedProvider({ api: "openai-completions" }), + expectedApiKey: "AGENT_KEY", + expectedBaseUrl: "https://config.example/v1", }); }); @@ -284,34 +415,14 @@ describe("models-config", () => { }); it("replaces stale merged apiKey when provider is SecretRef-managed in current config", async () => { - await withTempHome(async () => { - await writeAgentModelsJson({ - providers: { - custom: { - baseUrl: "https://agent.example/v1", - apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret - api: "openai-responses", - models: [{ id: "agent-model", name: "Agent model", input: ["text"] }], - }, - }, - }); - await ensureOpenClawModelsJson({ - models: { - mode: "merge", - providers: { - custom: { - ...createMergeConfigProvider(), - apiKey: { source: "env", provider: "default", id: "CUSTOM_PROVIDER_API_KEY" }, // pragma: allowlist secret - }, - }, - }, - }); - - const parsed = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(parsed.providers.custom?.apiKey).toBe("CUSTOM_PROVIDER_API_KEY"); // pragma: allowlist secret - expect(parsed.providers.custom?.baseUrl).toBe("https://config.example/v1"); + await expectCustomProviderApiKeyRewrite({ + existingApiKey: "STALE_AGENT_KEY", // pragma: allowlist secret + configuredApiKey: { + source: "env", + provider: "default", + id: "CUSTOM_PROVIDER_API_KEY", // pragma: allowlist secret + }, + expectedApiKey: "CUSTOM_PROVIDER_API_KEY", // 
pragma: allowlist secret }); }); @@ -363,34 +474,10 @@ describe("models-config", () => { }); it("replaces stale non-env marker when provider transitions back to plaintext config", async () => { - await withTempHome(async () => { - await writeAgentModelsJson({ - providers: { - custom: { - baseUrl: "https://agent.example/v1", - apiKey: NON_ENV_SECRETREF_MARKER, - api: "openai-responses", - models: [{ id: "agent-model", name: "Agent model", input: ["text"] }], - }, - }, - }); - - await ensureOpenClawModelsJson({ - models: { - mode: "merge", - providers: { - custom: { - ...createMergeConfigProvider(), - apiKey: "ALLCAPS_SAMPLE", // pragma: allowlist secret - }, - }, - }, - }); - - const parsed = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(parsed.providers.custom?.apiKey).toBe("ALLCAPS_SAMPLE"); + await expectCustomProviderApiKeyRewrite({ + existingApiKey: NON_ENV_SECRETREF_MARKER, + configuredApiKey: "ALLCAPS_SAMPLE", // pragma: allowlist secret + expectedApiKey: "ALLCAPS_SAMPLE", // pragma: allowlist secret }); }); @@ -444,86 +531,28 @@ describe("models-config", () => { }); it("does not persist resolved env var value as plaintext in models.json", async () => { - await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => { - await withTempHome(async () => { - const cfg: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; already resolved by loadConfig - api: "openai-completions", - models: [ - { - id: "gpt-4.1", - name: "GPT-4.1", - input: ["text"], - reasoning: false, - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 128000, - maxTokens: 16384, - }, - ], - }, - }, - }, - }; - await ensureOpenClawModelsJson(cfg); - const result = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); - }); - }); + await 
expectOpenAiEnvMarkerApiKey(); + }); + + it("replaces stale merged apiKey when config key normalizes to a known env marker", async () => { + await expectOpenAiEnvMarkerApiKey({ seedMergedProvider: true }); }); it("preserves explicit larger token limits when they exceed implicit catalog defaults", async () => { - await withTempHome(async () => { - await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => { - const cfg = createMoonshotConfig({ contextWindow: 350000, maxTokens: 16384 }); - - await ensureOpenClawModelsJson(cfg); - const parsed = await readGeneratedModelsJson<{ - providers: Record< - string, - { - models?: Array<{ - id: string; - contextWindow?: number; - maxTokens?: number; - }>; - } - >; - }>(); - const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5"); - expect(kimi?.contextWindow).toBe(350000); - expect(kimi?.maxTokens).toBe(16384); - }); + await expectMoonshotTokenLimits({ + contextWindow: 350000, + maxTokens: 16384, + expectedContextWindow: 350000, + expectedMaxTokens: 16384, }); }); it("falls back to implicit token limits when explicit values are invalid", async () => { - await withTempHome(async () => { - await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => { - const cfg = createMoonshotConfig({ contextWindow: 0, maxTokens: -1 }); - - await ensureOpenClawModelsJson(cfg); - const parsed = await readGeneratedModelsJson<{ - providers: Record< - string, - { - models?: Array<{ - id: string; - contextWindow?: number; - maxTokens?: number; - }>; - } - >; - }>(); - const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5"); - expect(kimi?.contextWindow).toBe(256000); - expect(kimi?.maxTokens).toBe(8192); - }); + await expectMoonshotTokenLimits({ + contextWindow: 0, + maxTokens: -1, + expectedContextWindow: 256000, + expectedMaxTokens: 8192, }); }); }); diff --git a/src/agents/models-config.merge.test.ts b/src/agents/models-config.merge.test.ts index 
5e0483fdb59..b84d4e363d6 100644 --- a/src/agents/models-config.merge.test.ts +++ b/src/agents/models-config.merge.test.ts @@ -66,6 +66,42 @@ describe("models-config merge helpers", () => { }); }); + it("preserves implicit provider headers when explicit config adds extra headers", () => { + const merged = mergeProviderModels( + { + baseUrl: "https://api.example.com", + api: "anthropic-messages", + headers: { "User-Agent": "claude-code/0.1.0" }, + models: [ + { + id: "k2p5", + name: "Kimi for Coding", + input: ["text", "image"], + reasoning: true, + }, + ], + } as unknown as ProviderConfig, + { + baseUrl: "https://api.example.com", + api: "anthropic-messages", + headers: { "X-Kimi-Tenant": "tenant-a" }, + models: [ + { + id: "k2p5", + name: "Kimi for Coding", + input: ["text", "image"], + reasoning: true, + }, + ], + } as unknown as ProviderConfig, + ); + + expect(merged.headers).toEqual({ + "User-Agent": "claude-code/0.1.0", + "X-Kimi-Tenant": "tenant-a", + }); + }); + it("replaces stale baseUrl when model api surface changes", () => { const merged = mergeWithExistingProviderSecrets({ nextProviders: { @@ -92,4 +128,25 @@ describe("models-config merge helpers", () => { }), ); }); + + it("does not preserve stale plaintext apiKey when next entry is a marker", () => { + const merged = mergeWithExistingProviderSecrets({ + nextProviders: { + custom: { + apiKey: "OPENAI_API_KEY", // pragma: allowlist secret + models: [{ id: "model", api: "openai-responses" }], + } as ProviderConfig, + }, + existingProviders: { + custom: { + apiKey: preservedApiKey, + models: [{ id: "model", api: "openai-responses" }], + } as ExistingProviderConfig, + }, + secretRefManagedProviders: new Set(), + explicitBaseUrlProviders: new Set(), + }); + + expect(merged.custom?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret + }); }); diff --git a/src/agents/models-config.merge.ts b/src/agents/models-config.merge.ts index da8a4abdaa2..da4f0e8a005 100644 --- a/src/agents/models-config.merge.ts 
+++ b/src/agents/models-config.merge.ts @@ -39,8 +39,27 @@ export function mergeProviderModels( ): ProviderConfig { const implicitModels = Array.isArray(implicit.models) ? implicit.models : []; const explicitModels = Array.isArray(explicit.models) ? explicit.models : []; + const implicitHeaders = + implicit.headers && typeof implicit.headers === "object" && !Array.isArray(implicit.headers) + ? implicit.headers + : undefined; + const explicitHeaders = + explicit.headers && typeof explicit.headers === "object" && !Array.isArray(explicit.headers) + ? explicit.headers + : undefined; if (implicitModels.length === 0) { - return { ...implicit, ...explicit }; + return { + ...implicit, + ...explicit, + ...(implicitHeaders || explicitHeaders + ? { + headers: { + ...implicitHeaders, + ...explicitHeaders, + }, + } + : {}), + }; } const implicitById = new Map( @@ -93,6 +112,14 @@ export function mergeProviderModels( return { ...implicit, ...explicit, + ...(implicitHeaders || explicitHeaders + ? { + headers: { + ...implicitHeaders, + ...explicitHeaders, + }, + } + : {}), models: mergedModels, }; } @@ -148,9 +175,14 @@ function resolveProviderApiSurface( function shouldPreserveExistingApiKey(params: { providerKey: string; existing: ExistingProviderConfig; + nextEntry: ProviderConfig; secretRefManagedProviders: ReadonlySet; }): boolean { - const { providerKey, existing, secretRefManagedProviders } = params; + const { providerKey, existing, nextEntry, secretRefManagedProviders } = params; + const nextApiKey = typeof nextEntry.apiKey === "string" ? 
nextEntry.apiKey : ""; + if (nextApiKey && isNonSecretApiKeyMarker(nextApiKey)) { + return false; + } return ( !secretRefManagedProviders.has(providerKey) && typeof existing.apiKey === "string" && @@ -198,7 +230,14 @@ export function mergeWithExistingProviderSecrets(params: { continue; } const preserved: Record = {}; - if (shouldPreserveExistingApiKey({ providerKey: key, existing, secretRefManagedProviders })) { + if ( + shouldPreserveExistingApiKey({ + providerKey: key, + existing, + nextEntry: newEntry, + secretRefManagedProviders, + }) + ) { preserved.apiKey = existing.apiKey; } if ( diff --git a/src/agents/models-config.normalizes-gemini-3-ids-preview-google-providers.test.ts b/src/agents/models-config.normalizes-gemini-3-ids-preview-google-providers.test.ts index 8414fb10d08..890be151c6f 100644 --- a/src/agents/models-config.normalizes-gemini-3-ids-preview-google-providers.test.ts +++ b/src/agents/models-config.normalizes-gemini-3-ids-preview-google-providers.test.ts @@ -1,91 +1,82 @@ import { describe, expect, it } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; +import type { ModelDefinitionConfig } from "../config/types.models.js"; import { installModelsConfigTestHooks, withModelsTempHome } from "./models-config.e2e-harness.js"; import { ensureOpenClawModelsJson } from "./models-config.js"; import { readGeneratedModelsJson } from "./models-config.test-utils.js"; +function createGoogleModelsConfig(models: ModelDefinitionConfig[]): OpenClawConfig { + return { + models: { + providers: { + google: { + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + apiKey: "GEMINI_KEY", // pragma: allowlist secret + api: "google-generative-ai", + models, + }, + }, + }, + }; +} + +async function expectGeneratedGoogleModelIds(ids: string[]) { + const parsed = await readGeneratedModelsJson<{ + providers: Record }>; + }>(); + expect(parsed.providers.google?.models?.map((model) => model.id)).toEqual(ids); +} + describe("models-config", () => 
{ installModelsConfigTestHooks(); it("normalizes gemini 3 ids to preview for google providers", async () => { await withModelsTempHome(async () => { - const cfg: OpenClawConfig = { - models: { - providers: { - google: { - baseUrl: "https://generativelanguage.googleapis.com/v1beta", - apiKey: "GEMINI_KEY", // pragma: allowlist secret - api: "google-generative-ai", - models: [ - { - id: "gemini-3-pro", - name: "Gemini 3 Pro", - api: "google-generative-ai", - reasoning: true, - input: ["text", "image"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 1048576, - maxTokens: 65536, - }, - { - id: "gemini-3-flash", - name: "Gemini 3 Flash", - api: "google-generative-ai", - reasoning: false, - input: ["text", "image"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 1048576, - maxTokens: 65536, - }, - ], - }, - }, + const cfg = createGoogleModelsConfig([ + { + id: "gemini-3-pro", + name: "Gemini 3 Pro", + api: "google-generative-ai", + reasoning: true, + input: ["text", "image"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 1048576, + maxTokens: 65536, }, - }; + { + id: "gemini-3-flash", + name: "Gemini 3 Flash", + api: "google-generative-ai", + reasoning: false, + input: ["text", "image"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 1048576, + maxTokens: 65536, + }, + ]); await ensureOpenClawModelsJson(cfg); - - const parsed = await readGeneratedModelsJson<{ - providers: Record }>; - }>(); - const ids = parsed.providers.google?.models?.map((model) => model.id); - expect(ids).toEqual(["gemini-3-pro-preview", "gemini-3-flash-preview"]); + await expectGeneratedGoogleModelIds(["gemini-3-pro-preview", "gemini-3-flash-preview"]); }); }); it("normalizes the deprecated google flash preview id to the working preview id", async () => { await withModelsTempHome(async () => { - const cfg: OpenClawConfig = { - models: { - providers: { - google: { - 
baseUrl: "https://generativelanguage.googleapis.com/v1beta", - apiKey: "GEMINI_KEY", // pragma: allowlist secret - api: "google-generative-ai", - models: [ - { - id: "gemini-3.1-flash-preview", - name: "Gemini 3.1 Flash Preview", - api: "google-generative-ai", - reasoning: false, - input: ["text", "image"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 1048576, - maxTokens: 65536, - }, - ], - }, - }, + const cfg = createGoogleModelsConfig([ + { + id: "gemini-3.1-flash-preview", + name: "Gemini 3.1 Flash Preview", + api: "google-generative-ai", + reasoning: false, + input: ["text", "image"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 1048576, + maxTokens: 65536, }, - }; + ]); await ensureOpenClawModelsJson(cfg); - - const parsed = await readGeneratedModelsJson<{ - providers: Record }>; - }>(); - const ids = parsed.providers.google?.models?.map((model) => model.id); - expect(ids).toEqual(["gemini-3-flash-preview"]); + await expectGeneratedGoogleModelIds(["gemini-3-flash-preview"]); }); }); }); diff --git a/src/agents/models-config.plan.ts b/src/agents/models-config.plan.ts index 40777c2cd0d..601a0edfda1 100644 --- a/src/agents/models-config.plan.ts +++ b/src/agents/models-config.plan.ts @@ -6,6 +6,7 @@ import { type ExistingProviderConfig, } from "./models-config.merge.js"; import { + enforceSourceManagedProviderSecrets, normalizeProviders, resolveImplicitProviders, type ProviderConfig, @@ -86,6 +87,7 @@ async function resolveProvidersForMode(params: { export async function planOpenClawModelsJson(params: { cfg: OpenClawConfig; + sourceConfigForSecrets?: OpenClawConfig; agentDir: string; env: NodeJS.ProcessEnv; existingRaw: string; @@ -106,6 +108,8 @@ export async function planOpenClawModelsJson(params: { agentDir, env, secretDefaults: cfg.secrets?.defaults, + sourceProviders: params.sourceConfigForSecrets?.models?.providers, + sourceSecretDefaults: params.sourceConfigForSecrets?.secrets?.defaults, 
secretRefManagedProviders, }) ?? providers; const mergedProviders = await resolveProvidersForMode({ @@ -115,7 +119,14 @@ export async function planOpenClawModelsJson(params: { secretRefManagedProviders, explicitBaseUrlProviders: resolveExplicitBaseUrlProviders(cfg.models), }); - const nextContents = `${JSON.stringify({ providers: mergedProviders }, null, 2)}\n`; + const secretEnforcedProviders = + enforceSourceManagedProviderSecrets({ + providers: mergedProviders, + sourceProviders: params.sourceConfigForSecrets?.models?.providers, + sourceSecretDefaults: params.sourceConfigForSecrets?.secrets?.defaults, + secretRefManagedProviders, + }) ?? mergedProviders; + const nextContents = `${JSON.stringify({ providers: secretEnforcedProviders }, null, 2)}\n`; if (params.existingRaw === nextContents) { return { action: "noop" }; diff --git a/src/agents/models-config.providers.discovery-auth.test.ts b/src/agents/models-config.providers.discovery-auth.test.ts index e6aebc0d7cb..6fc492c1565 100644 --- a/src/agents/models-config.providers.discovery-auth.test.ts +++ b/src/agents/models-config.providers.discovery-auth.test.ts @@ -6,6 +6,11 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import { NON_ENV_SECRETREF_MARKER } from "./model-auth-markers.js"; import { resolveImplicitProvidersForTest } from "./models-config.e2e-harness.js"; +type AuthProfilesFile = { + version: 1; + profiles: Record>; +}; + describe("provider discovery auth marker guardrails", () => { let originalVitest: string | undefined; let originalNodeEnv: string | undefined; @@ -35,33 +40,35 @@ describe("provider discovery auth marker guardrails", () => { delete process.env.NODE_ENV; } - it("does not send marker value as vLLM bearer token during discovery", async () => { - enableDiscovery(); - const fetchMock = vi.fn().mockResolvedValue({ - ok: true, - json: async () => ({ data: [] }), - }); + function installFetchMock(response?: unknown) { + const fetchMock = + response === undefined + ? 
vi.fn() + : vi.fn().mockResolvedValue({ ok: true, json: async () => response }); globalThis.fetch = fetchMock as unknown as typeof fetch; + return fetchMock; + } + async function createAgentDirWithAuthProfiles(profiles: AuthProfilesFile["profiles"]) { const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); await writeFile( join(agentDir, "auth-profiles.json"), - JSON.stringify( - { - version: 1, - profiles: { - "vllm:default": { - type: "api_key", - provider: "vllm", - keyRef: { source: "file", provider: "vault", id: "/vllm/apiKey" }, - }, - }, - }, - null, - 2, - ), + JSON.stringify({ version: 1, profiles } satisfies AuthProfilesFile, null, 2), "utf8", ); + return agentDir; + } + + it("does not send marker value as vLLM bearer token during discovery", async () => { + enableDiscovery(); + const fetchMock = installFetchMock({ data: [] }); + const agentDir = await createAgentDirWithAuthProfiles({ + "vllm:default": { + type: "api_key", + provider: "vllm", + keyRef: { source: "file", provider: "vault", id: "/vllm/apiKey" }, + }, + }); const providers = await resolveImplicitProvidersForTest({ agentDir, env: {} }); expect(providers?.vllm?.apiKey).toBe(NON_ENV_SECRETREF_MARKER); @@ -73,28 +80,14 @@ describe("provider discovery auth marker guardrails", () => { it("does not call Hugging Face discovery with marker-backed credentials", async () => { enableDiscovery(); - const fetchMock = vi.fn(); - globalThis.fetch = fetchMock as unknown as typeof fetch; - - const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); - await writeFile( - join(agentDir, "auth-profiles.json"), - JSON.stringify( - { - version: 1, - profiles: { - "huggingface:default": { - type: "api_key", - provider: "huggingface", - keyRef: { source: "exec", provider: "vault", id: "providers/hf/token" }, - }, - }, - }, - null, - 2, - ), - "utf8", - ); + const fetchMock = installFetchMock(); + const agentDir = await createAgentDirWithAuthProfiles({ + "huggingface:default": { + type: "api_key", + 
provider: "huggingface", + keyRef: { source: "exec", provider: "vault", id: "providers/hf/token" }, + }, + }); const providers = await resolveImplicitProvidersForTest({ agentDir, env: {} }); expect(providers?.huggingface?.apiKey).toBe(NON_ENV_SECRETREF_MARKER); @@ -106,31 +99,14 @@ describe("provider discovery auth marker guardrails", () => { it("keeps all-caps plaintext API keys for authenticated discovery", async () => { enableDiscovery(); - const fetchMock = vi.fn().mockResolvedValue({ - ok: true, - json: async () => ({ data: [{ id: "vllm/test-model" }] }), + const fetchMock = installFetchMock({ data: [{ id: "vllm/test-model" }] }); + const agentDir = await createAgentDirWithAuthProfiles({ + "vllm:default": { + type: "api_key", + provider: "vllm", + key: "ALLCAPS_SAMPLE", + }, }); - globalThis.fetch = fetchMock as unknown as typeof fetch; - - const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); - await writeFile( - join(agentDir, "auth-profiles.json"), - JSON.stringify( - { - version: 1, - profiles: { - "vllm:default": { - type: "api_key", - provider: "vllm", - key: "ALLCAPS_SAMPLE", - }, - }, - }, - null, - 2, - ), - "utf8", - ); await resolveImplicitProvidersForTest({ agentDir, env: {} }); const vllmCall = fetchMock.mock.calls.find(([url]) => String(url).includes(":8000")); diff --git a/src/agents/models-config.providers.discovery.ts b/src/agents/models-config.providers.discovery.ts index caab5cafb4e..a6d99afa89f 100644 --- a/src/agents/models-config.providers.discovery.ts +++ b/src/agents/models-config.providers.discovery.ts @@ -9,108 +9,47 @@ import { buildHuggingfaceModelDefinition, } from "./huggingface-models.js"; import { discoverKilocodeModels } from "./kilocode-models.js"; -import { OLLAMA_NATIVE_BASE_URL } from "./ollama-stream.js"; +import { + enrichOllamaModelsWithContext, + OLLAMA_DEFAULT_CONTEXT_WINDOW, + OLLAMA_DEFAULT_COST, + OLLAMA_DEFAULT_MAX_TOKENS, + isReasoningModelHeuristic, + resolveOllamaApiBase, + type OllamaTagsResponse, +} 
from "./ollama-models.js"; import { discoverVeniceModels, VENICE_BASE_URL } from "./venice-models.js"; import { discoverVercelAiGatewayModels, VERCEL_AI_GATEWAY_BASE_URL } from "./vercel-ai-gateway.js"; +export { resolveOllamaApiBase } from "./ollama-models.js"; + type ModelsConfig = NonNullable; type ProviderConfig = NonNullable[string]; const log = createSubsystemLogger("agents/model-providers"); -const OLLAMA_BASE_URL = OLLAMA_NATIVE_BASE_URL; -const OLLAMA_API_BASE_URL = OLLAMA_BASE_URL; const OLLAMA_SHOW_CONCURRENCY = 8; const OLLAMA_SHOW_MAX_MODELS = 200; -const OLLAMA_DEFAULT_CONTEXT_WINDOW = 128000; -const OLLAMA_DEFAULT_MAX_TOKENS = 8192; -const OLLAMA_DEFAULT_COST = { + +const OPENAI_COMPAT_LOCAL_DEFAULT_CONTEXT_WINDOW = 128000; +const OPENAI_COMPAT_LOCAL_DEFAULT_MAX_TOKENS = 8192; +const OPENAI_COMPAT_LOCAL_DEFAULT_COST = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, }; +const SGLANG_BASE_URL = "http://127.0.0.1:30000/v1"; + const VLLM_BASE_URL = "http://127.0.0.1:8000/v1"; -const VLLM_DEFAULT_CONTEXT_WINDOW = 128000; -const VLLM_DEFAULT_MAX_TOKENS = 8192; -const VLLM_DEFAULT_COST = { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, -}; -interface OllamaModel { - name: string; - modified_at: string; - size: number; - digest: string; - details?: { - family?: string; - parameter_size?: string; - }; -} - -interface OllamaTagsResponse { - models: OllamaModel[]; -} - -type VllmModelsResponse = { +type OpenAICompatModelsResponse = { data?: Array<{ id?: string; }>; }; -/** - * Derive the Ollama native API base URL from a configured base URL. - * - * Users typically configure `baseUrl` with a `/v1` suffix (e.g. - * `http://192.168.20.14:11434/v1`) for the OpenAI-compatible endpoint. - * The native Ollama API lives at the root (e.g. `/api/tags`), so we - * strip the `/v1` suffix when present. 
- */ -export function resolveOllamaApiBase(configuredBaseUrl?: string): string { - if (!configuredBaseUrl) { - return OLLAMA_API_BASE_URL; - } - // Strip trailing slash, then strip /v1 suffix if present - const trimmed = configuredBaseUrl.replace(/\/+$/, ""); - return trimmed.replace(/\/v1$/i, ""); -} - -async function queryOllamaContextWindow( - apiBase: string, - modelName: string, -): Promise { - try { - const response = await fetch(`${apiBase}/api/show`, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ name: modelName }), - signal: AbortSignal.timeout(3000), - }); - if (!response.ok) { - return undefined; - } - const data = (await response.json()) as { model_info?: Record }; - if (!data.model_info) { - return undefined; - } - for (const [key, value] of Object.entries(data.model_info)) { - if (key.endsWith(".context_length") && typeof value === "number" && Number.isFinite(value)) { - const contextWindow = Math.floor(value); - if (contextWindow > 0) { - return contextWindow; - } - } - } - return undefined; - } catch { - return undefined; - } -} - async function discoverOllamaModels( baseUrl?: string, opts?: { quiet?: boolean }, @@ -140,29 +79,18 @@ async function discoverOllamaModels( `Capping Ollama /api/show inspection to ${OLLAMA_SHOW_MAX_MODELS} models (received ${data.models.length})`, ); } - const discovered: ModelDefinitionConfig[] = []; - for (let index = 0; index < modelsToInspect.length; index += OLLAMA_SHOW_CONCURRENCY) { - const batch = modelsToInspect.slice(index, index + OLLAMA_SHOW_CONCURRENCY); - const batchDiscovered = await Promise.all( - batch.map(async (model) => { - const modelId = model.name; - const contextWindow = await queryOllamaContextWindow(apiBase, modelId); - const isReasoning = - modelId.toLowerCase().includes("r1") || modelId.toLowerCase().includes("reasoning"); - return { - id: modelId, - name: modelId, - reasoning: isReasoning, - input: ["text"], - cost: OLLAMA_DEFAULT_COST, - 
contextWindow: contextWindow ?? OLLAMA_DEFAULT_CONTEXT_WINDOW, - maxTokens: OLLAMA_DEFAULT_MAX_TOKENS, - } satisfies ModelDefinitionConfig; - }), - ); - discovered.push(...batchDiscovered); - } - return discovered; + const discovered = await enrichOllamaModelsWithContext(apiBase, modelsToInspect, { + concurrency: OLLAMA_SHOW_CONCURRENCY, + }); + return discovered.map((model) => ({ + id: model.name, + name: model.name, + reasoning: isReasoningModelHeuristic(model.name), + input: ["text"], + cost: OLLAMA_DEFAULT_COST, + contextWindow: model.contextWindow ?? OLLAMA_DEFAULT_CONTEXT_WINDOW, + maxTokens: OLLAMA_DEFAULT_MAX_TOKENS, + })); } catch (error) { if (!opts?.quiet) { log.warn(`Failed to discover Ollama models: ${String(error)}`); @@ -171,31 +99,34 @@ async function discoverOllamaModels( } } -async function discoverVllmModels( - baseUrl: string, - apiKey?: string, -): Promise { +async function discoverOpenAICompatibleLocalModels(params: { + baseUrl: string; + apiKey?: string; + label: string; + contextWindow?: number; + maxTokens?: number; +}): Promise { if (process.env.VITEST || process.env.NODE_ENV === "test") { return []; } - const trimmedBaseUrl = baseUrl.trim().replace(/\/+$/, ""); + const trimmedBaseUrl = params.baseUrl.trim().replace(/\/+$/, ""); const url = `${trimmedBaseUrl}/models`; try { - const trimmedApiKey = apiKey?.trim(); + const trimmedApiKey = params.apiKey?.trim(); const response = await fetch(url, { headers: trimmedApiKey ? { Authorization: `Bearer ${trimmedApiKey}` } : undefined, signal: AbortSignal.timeout(5000), }); if (!response.ok) { - log.warn(`Failed to discover vLLM models: ${response.status}`); + log.warn(`Failed to discover ${params.label} models: ${response.status}`); return []; } - const data = (await response.json()) as VllmModelsResponse; + const data = (await response.json()) as OpenAICompatModelsResponse; const models = data.data ?? 
[]; if (models.length === 0) { - log.warn("No vLLM models found on local instance"); + log.warn(`No ${params.label} models found on local instance`); return []; } @@ -204,21 +135,18 @@ async function discoverVllmModels( .filter((model) => Boolean(model.id)) .map((model) => { const modelId = model.id; - const lower = modelId.toLowerCase(); - const isReasoning = - lower.includes("r1") || lower.includes("reasoning") || lower.includes("think"); return { id: modelId, name: modelId, - reasoning: isReasoning, + reasoning: isReasoningModelHeuristic(modelId), input: ["text"], - cost: VLLM_DEFAULT_COST, - contextWindow: VLLM_DEFAULT_CONTEXT_WINDOW, - maxTokens: VLLM_DEFAULT_MAX_TOKENS, + cost: OPENAI_COMPAT_LOCAL_DEFAULT_COST, + contextWindow: params.contextWindow ?? OPENAI_COMPAT_LOCAL_DEFAULT_CONTEXT_WINDOW, + maxTokens: params.maxTokens ?? OPENAI_COMPAT_LOCAL_DEFAULT_MAX_TOKENS, } satisfies ModelDefinitionConfig; }); } catch (error) { - log.warn(`Failed to discover vLLM models: ${String(error)}`); + log.warn(`Failed to discover ${params.label} models: ${String(error)}`); return []; } } @@ -270,7 +198,28 @@ export async function buildVllmProvider(params?: { apiKey?: string; }): Promise { const baseUrl = (params?.baseUrl?.trim() || VLLM_BASE_URL).replace(/\/+$/, ""); - const models = await discoverVllmModels(baseUrl, params?.apiKey); + const models = await discoverOpenAICompatibleLocalModels({ + baseUrl, + apiKey: params?.apiKey, + label: "vLLM", + }); + return { + baseUrl, + api: "openai-completions", + models, + }; +} + +export async function buildSglangProvider(params?: { + baseUrl?: string; + apiKey?: string; +}): Promise { + const baseUrl = (params?.baseUrl?.trim() || SGLANG_BASE_URL).replace(/\/+$/, ""); + const models = await discoverOpenAICompatibleLocalModels({ + baseUrl, + apiKey: params?.apiKey, + label: "SGLang", + }); return { baseUrl, api: "openai-completions", diff --git a/src/agents/models-config.providers.google-antigravity.test.ts 
b/src/agents/models-config.providers.google-antigravity.test.ts index 3886b237e27..ea20608b866 100644 --- a/src/agents/models-config.providers.google-antigravity.test.ts +++ b/src/agents/models-config.providers.google-antigravity.test.ts @@ -97,3 +97,33 @@ describe("google-antigravity provider normalization", () => { expect(normalized).toBe(providers); }); }); + +describe("google-vertex provider normalization", () => { + it("normalizes gemini flash-lite IDs for google-vertex providers", () => { + const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); + const providers = { + "google-vertex": buildProvider(["gemini-3.1-flash-lite", "gemini-3-flash-preview"]), + openai: buildProvider(["gpt-5"]), + }; + + const normalized = normalizeProviders({ providers, agentDir }); + + expect(normalized).not.toBe(providers); + expect(normalized?.["google-vertex"]?.models.map((model) => model.id)).toEqual([ + "gemini-3.1-flash-lite-preview", + "gemini-3-flash-preview", + ]); + expect(normalized?.openai).toBe(providers.openai); + }); + + it("returns original providers object when no google-vertex IDs need normalization", () => { + const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); + const providers = { + "google-vertex": buildProvider(["gemini-3.1-flash-lite-preview", "gemini-3-flash-preview"]), + }; + + const normalized = normalizeProviders({ providers, agentDir }); + + expect(normalized).toBe(providers); + }); +}); diff --git a/src/agents/models-config.providers.kimi-coding.test.ts b/src/agents/models-config.providers.kimi-coding.test.ts index 33e94a2f1c3..91ca62f34e2 100644 --- a/src/agents/models-config.providers.kimi-coding.test.ts +++ b/src/agents/models-config.providers.kimi-coding.test.ts @@ -26,6 +26,7 @@ describe("kimi-coding implicit provider (#22409)", () => { const provider = buildKimiCodingProvider(); expect(provider.api).toBe("anthropic-messages"); expect(provider.baseUrl).toBe("https://api.kimi.com/coding/"); + expect(provider.headers).toEqual({ 
"User-Agent": "claude-code/0.1.0" }); expect(provider.models).toBeDefined(); expect(provider.models.length).toBeGreaterThan(0); expect(provider.models[0].id).toBe("k2p5"); @@ -43,4 +44,55 @@ describe("kimi-coding implicit provider (#22409)", () => { envSnapshot.restore(); } }); + + it("uses explicit kimi-coding baseUrl when provided", async () => { + const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); + const envSnapshot = captureEnv(["KIMI_API_KEY"]); + process.env.KIMI_API_KEY = "test-key"; + + try { + const providers = await resolveImplicitProvidersForTest({ + agentDir, + explicitProviders: { + "kimi-coding": { + baseUrl: "https://kimi.example.test/coding/", + api: "anthropic-messages", + models: buildKimiCodingProvider().models, + }, + }, + }); + expect(providers?.["kimi-coding"]?.baseUrl).toBe("https://kimi.example.test/coding/"); + } finally { + envSnapshot.restore(); + } + }); + + it("merges explicit kimi-coding headers on top of the built-in user agent", async () => { + const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); + const envSnapshot = captureEnv(["KIMI_API_KEY"]); + process.env.KIMI_API_KEY = "test-key"; + + try { + const providers = await resolveImplicitProvidersForTest({ + agentDir, + explicitProviders: { + "kimi-coding": { + baseUrl: "https://api.kimi.com/coding/", + api: "anthropic-messages", + headers: { + "User-Agent": "custom-kimi-client/1.0", + "X-Kimi-Tenant": "tenant-a", + }, + models: buildKimiCodingProvider().models, + }, + }, + }); + expect(providers?.["kimi-coding"]?.headers).toEqual({ + "User-Agent": "custom-kimi-client/1.0", + "X-Kimi-Tenant": "tenant-a", + }); + } finally { + envSnapshot.restore(); + } + }); }); diff --git a/src/agents/models-config.providers.modelstudio.test.ts b/src/agents/models-config.providers.modelstudio.test.ts new file mode 100644 index 00000000000..df4000cc27d --- /dev/null +++ b/src/agents/models-config.providers.modelstudio.test.ts @@ -0,0 +1,32 @@ +import { mkdtempSync } from 
"node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; +import { describe, expect, it } from "vitest"; +import { withEnvAsync } from "../test-utils/env.js"; +import { resolveImplicitProvidersForTest } from "./models-config.e2e-harness.js"; +import { buildModelStudioProvider } from "./models-config.providers.js"; + +const modelStudioApiKeyEnv = ["MODELSTUDIO_API", "KEY"].join("_"); + +describe("Model Studio implicit provider", () => { + it("should include modelstudio when MODELSTUDIO_API_KEY is configured", async () => { + const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); + const modelStudioApiKey = "test-key"; // pragma: allowlist secret + await withEnvAsync({ [modelStudioApiKeyEnv]: modelStudioApiKey }, async () => { + const providers = await resolveImplicitProvidersForTest({ agentDir }); + expect(providers?.modelstudio).toBeDefined(); + expect(providers?.modelstudio?.apiKey).toBe("MODELSTUDIO_API_KEY"); + expect(providers?.modelstudio?.baseUrl).toBe("https://coding-intl.dashscope.aliyuncs.com/v1"); + }); + }); + + it("should build the static Model Studio provider catalog", () => { + const provider = buildModelStudioProvider(); + const modelIds = provider.models.map((model) => model.id); + expect(provider.api).toBe("openai-completions"); + expect(provider.baseUrl).toBe("https://coding-intl.dashscope.aliyuncs.com/v1"); + expect(modelIds).toContain("qwen3.5-plus"); + expect(modelIds).toContain("qwen3-coder-plus"); + expect(modelIds).toContain("kimi-k2.5"); + }); +}); diff --git a/src/agents/models-config.providers.moonshot.test.ts b/src/agents/models-config.providers.moonshot.test.ts new file mode 100644 index 00000000000..00e1f5949c6 --- /dev/null +++ b/src/agents/models-config.providers.moonshot.test.ts @@ -0,0 +1,60 @@ +import { mkdtempSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; +import { describe, expect, it } from "vitest"; +import { + MOONSHOT_BASE_URL as 
MOONSHOT_AI_BASE_URL, + MOONSHOT_CN_BASE_URL, +} from "../commands/onboard-auth.models.js"; +import { captureEnv } from "../test-utils/env.js"; +import { resolveImplicitProviders } from "./models-config.providers.js"; + +describe("moonshot implicit provider (#33637)", () => { + it("uses explicit CN baseUrl when provided", async () => { + const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); + const envSnapshot = captureEnv(["MOONSHOT_API_KEY"]); + process.env.MOONSHOT_API_KEY = "sk-test-cn"; + + try { + const providers = await resolveImplicitProviders({ + agentDir, + explicitProviders: { + moonshot: { + baseUrl: MOONSHOT_CN_BASE_URL, + api: "openai-completions", + models: [ + { + id: "kimi-k2.5", + name: "Kimi K2.5", + reasoning: false, + input: ["text", "image"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 256000, + maxTokens: 8192, + }, + ], + }, + }, + }); + expect(providers?.moonshot).toBeDefined(); + expect(providers?.moonshot?.baseUrl).toBe(MOONSHOT_CN_BASE_URL); + expect(providers?.moonshot?.apiKey).toBeDefined(); + } finally { + envSnapshot.restore(); + } + }); + + it("defaults to .ai baseUrl when no explicit provider", async () => { + const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-")); + const envSnapshot = captureEnv(["MOONSHOT_API_KEY"]); + process.env.MOONSHOT_API_KEY = "sk-test"; + + try { + const providers = await resolveImplicitProviders({ agentDir }); + expect(providers?.moonshot).toBeDefined(); + expect(providers?.moonshot?.baseUrl).toBe(MOONSHOT_AI_BASE_URL); + } finally { + envSnapshot.restore(); + } + }); +}); diff --git a/src/agents/models-config.providers.normalize-keys.test.ts b/src/agents/models-config.providers.normalize-keys.test.ts index be92bbcd474..b39705d8ec2 100644 --- a/src/agents/models-config.providers.normalize-keys.test.ts +++ b/src/agents/models-config.providers.normalize-keys.test.ts @@ -4,7 +4,10 @@ import path from "node:path"; import { describe, expect, it } from 
"vitest"; import type { OpenClawConfig } from "../config/config.js"; import { NON_ENV_SECRETREF_MARKER } from "./model-auth-markers.js"; -import { normalizeProviders } from "./models-config.providers.js"; +import { + enforceSourceManagedProviderSecrets, + normalizeProviders, +} from "./models-config.providers.js"; describe("normalizeProviders", () => { it("trims provider keys so image models remain discoverable for custom providers", async () => { @@ -78,6 +81,7 @@ describe("normalizeProviders", () => { const agentDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-agent-")); const original = process.env.OPENAI_API_KEY; process.env.OPENAI_API_KEY = "sk-test-secret-value-12345"; // pragma: allowlist secret + const secretRefManagedProviders = new Set(); try { const providers: NonNullable["providers"]> = { openai: { @@ -97,8 +101,9 @@ describe("normalizeProviders", () => { ], }, }; - const normalized = normalizeProviders({ providers, agentDir }); + const normalized = normalizeProviders({ providers, agentDir, secretRefManagedProviders }); expect(normalized?.openai?.apiKey).toBe("OPENAI_API_KEY"); + expect(secretRefManagedProviders.has("openai")).toBe(true); } finally { if (original === undefined) { delete process.env.OPENAI_API_KEY; @@ -134,4 +139,38 @@ describe("normalizeProviders", () => { await fs.rm(agentDir, { recursive: true, force: true }); } }); + + it("ignores non-object provider entries during source-managed enforcement", () => { + const providers = { + openai: null, + moonshot: { + baseUrl: "https://api.moonshot.ai/v1", + api: "openai-completions", + apiKey: "sk-runtime-moonshot", // pragma: allowlist secret + models: [], + }, + } as unknown as NonNullable["providers"]>; + + const sourceProviders: NonNullable["providers"]> = { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions", + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret + models: [], + }, + moonshot: { + baseUrl: 
"https://api.moonshot.ai/v1", + api: "openai-completions", + apiKey: { source: "env", provider: "default", id: "MOONSHOT_API_KEY" }, // pragma: allowlist secret + models: [], + }, + }; + + const enforced = enforceSourceManagedProviderSecrets({ + providers, + sourceProviders, + }); + expect((enforced as Record).openai).toBeNull(); + expect(enforced?.moonshot?.apiKey).toBe("MOONSHOT_API_KEY"); // pragma: allowlist secret + }); }); diff --git a/src/agents/models-config.providers.static.ts b/src/agents/models-config.providers.static.ts index 0a766fe983e..a0aa879c727 100644 --- a/src/agents/models-config.providers.static.ts +++ b/src/agents/models-config.providers.static.ts @@ -95,6 +95,7 @@ const MOONSHOT_DEFAULT_COST = { }; const KIMI_CODING_BASE_URL = "https://api.kimi.com/coding/"; +const KIMI_CODING_USER_AGENT = "claude-code/0.1.0"; const KIMI_CODING_DEFAULT_MODEL_ID = "k2p5"; const KIMI_CODING_DEFAULT_CONTEXT_WINDOW = 262144; const KIMI_CODING_DEFAULT_MAX_TOKENS = 32768; @@ -137,6 +138,90 @@ const QIANFAN_DEFAULT_COST = { cacheWrite: 0, }; +export const MODELSTUDIO_BASE_URL = "https://coding-intl.dashscope.aliyuncs.com/v1"; +export const MODELSTUDIO_DEFAULT_MODEL_ID = "qwen3.5-plus"; +const MODELSTUDIO_DEFAULT_COST = { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, +}; + +const MODELSTUDIO_MODEL_CATALOG: ReadonlyArray = [ + { + id: "qwen3.5-plus", + name: "qwen3.5-plus", + reasoning: false, + input: ["text", "image"], + cost: MODELSTUDIO_DEFAULT_COST, + contextWindow: 1_000_000, + maxTokens: 65_536, + }, + { + id: "qwen3-max-2026-01-23", + name: "qwen3-max-2026-01-23", + reasoning: false, + input: ["text"], + cost: MODELSTUDIO_DEFAULT_COST, + contextWindow: 262_144, + maxTokens: 65_536, + }, + { + id: "qwen3-coder-next", + name: "qwen3-coder-next", + reasoning: false, + input: ["text"], + cost: MODELSTUDIO_DEFAULT_COST, + contextWindow: 262_144, + maxTokens: 65_536, + }, + { + id: "qwen3-coder-plus", + name: "qwen3-coder-plus", + reasoning: false, + 
input: ["text"], + cost: MODELSTUDIO_DEFAULT_COST, + contextWindow: 1_000_000, + maxTokens: 65_536, + }, + { + id: "MiniMax-M2.5", + name: "MiniMax-M2.5", + reasoning: true, + input: ["text"], + cost: MODELSTUDIO_DEFAULT_COST, + contextWindow: 1_000_000, + maxTokens: 65_536, + }, + { + id: "glm-5", + name: "glm-5", + reasoning: false, + input: ["text"], + cost: MODELSTUDIO_DEFAULT_COST, + contextWindow: 202_752, + maxTokens: 16_384, + }, + { + id: "glm-4.7", + name: "glm-4.7", + reasoning: false, + input: ["text"], + cost: MODELSTUDIO_DEFAULT_COST, + contextWindow: 202_752, + maxTokens: 16_384, + }, + { + id: "kimi-k2.5", + name: "kimi-k2.5", + reasoning: false, + input: ["text", "image"], + cost: MODELSTUDIO_DEFAULT_COST, + contextWindow: 262_144, + maxTokens: 32_768, + }, +]; + const NVIDIA_BASE_URL = "https://integrate.api.nvidia.com/v1"; const NVIDIA_DEFAULT_MODEL_ID = "nvidia/llama-3.1-nemotron-70b-instruct"; const NVIDIA_DEFAULT_CONTEXT_WINDOW = 131072; @@ -224,6 +309,9 @@ export function buildKimiCodingProvider(): ProviderConfig { return { baseUrl: KIMI_CODING_BASE_URL, api: "anthropic-messages", + headers: { + "User-Agent": KIMI_CODING_USER_AGENT, + }, models: [ { id: KIMI_CODING_DEFAULT_MODEL_ID, @@ -345,6 +433,24 @@ export function buildOpenrouterProvider(): ProviderConfig { contextWindow: OPENROUTER_DEFAULT_CONTEXT_WINDOW, maxTokens: OPENROUTER_DEFAULT_MAX_TOKENS, }, + { + id: "openrouter/hunter-alpha", + name: "Hunter Alpha", + reasoning: true, + input: ["text"], + cost: OPENROUTER_DEFAULT_COST, + contextWindow: 1048576, + maxTokens: 65536, + }, + { + id: "openrouter/healer-alpha", + name: "Healer Alpha", + reasoning: true, + input: ["text", "image"], + cost: OPENROUTER_DEFAULT_COST, + contextWindow: 262144, + maxTokens: 65536, + }, ], }; } @@ -384,6 +490,14 @@ export function buildQianfanProvider(): ProviderConfig { }; } +export function buildModelStudioProvider(): ProviderConfig { + return { + baseUrl: MODELSTUDIO_BASE_URL, + api: 
"openai-completions", + models: MODELSTUDIO_MODEL_CATALOG.map((model) => ({ ...model })), + }; +} + export function buildNvidiaProvider(): ProviderConfig { return { baseUrl: NVIDIA_BASE_URL, diff --git a/src/agents/models-config.providers.ts b/src/agents/models-config.providers.ts index 8f8ffb9201c..b4ef8f4b0b1 100644 --- a/src/agents/models-config.providers.ts +++ b/src/agents/models-config.providers.ts @@ -4,6 +4,7 @@ import { DEFAULT_COPILOT_API_BASE_URL, resolveCopilotApiToken, } from "../providers/github-copilot-token.js"; +import { isRecord } from "../utils.js"; import { normalizeOptionalSecretInput } from "../utils/normalize-secret-input.js"; import { ensureAuthProfileStore, listProfilesForProvider } from "./auth-profiles.js"; import { discoverBedrockModels } from "./bedrock-discovery.js"; @@ -14,10 +15,8 @@ import { import { buildHuggingfaceProvider, buildKilocodeProviderWithDiscovery, - buildOllamaProvider, buildVeniceProvider, buildVercelAiGatewayProvider, - buildVllmProvider, resolveOllamaApiBase, } from "./models-config.providers.discovery.js"; import { @@ -29,6 +28,7 @@ import { buildKilocodeProvider, buildMinimaxPortalProvider, buildMinimaxProvider, + buildModelStudioProvider, buildMoonshotProvider, buildNvidiaProvider, buildOpenAICodexProvider, @@ -46,15 +46,22 @@ export { buildKimiCodingProvider, buildKilocodeProvider, buildNvidiaProvider, + buildModelStudioProvider, buildQianfanProvider, buildXiaomiProvider, + MODELSTUDIO_BASE_URL, + MODELSTUDIO_DEFAULT_MODEL_ID, QIANFAN_BASE_URL, QIANFAN_DEFAULT_MODEL_ID, XIAOMI_DEFAULT_MODEL_ID, } from "./models-config.providers.static.js"; +import { + groupPluginDiscoveryProvidersByOrder, + normalizePluginDiscoveryResult, + resolvePluginDiscoveryProviders, +} from "../plugins/provider-discovery.js"; import { MINIMAX_OAUTH_MARKER, - OLLAMA_LOCAL_AUTH_MARKER, QWEN_OAUTH_MARKER, isNonSecretApiKeyMarker, resolveNonEnvSecretRefApiKeyMarker, @@ -66,6 +73,11 @@ export { resolveOllamaApiBase } from 
"./models-config.providers.discovery.js"; type ModelsConfig = NonNullable; export type ProviderConfig = NonNullable[string]; +type SecretDefaults = { + env?: string; + file?: string; + exec?: string; +}; const ENV_VAR_NAME_RE = /^[A-Z_][A-Z0-9_]*$/; @@ -93,13 +105,7 @@ function resolveAwsSdkApiKeyVarName(env: NodeJS.ProcessEnv = process.env): strin function normalizeHeaderValues(params: { headers: ProviderConfig["headers"] | undefined; - secretDefaults: - | { - env?: string; - file?: string; - exec?: string; - } - | undefined; + secretDefaults: SecretDefaults | undefined; }): { headers: ProviderConfig["headers"] | undefined; mutated: boolean } { const { headers } = params; if (!headers) { @@ -272,15 +278,155 @@ function normalizeAntigravityProvider(provider: ProviderConfig): ProviderConfig return normalizeProviderModels(provider, normalizeAntigravityModelId); } +function normalizeSourceProviderLookup( + providers: ModelsConfig["providers"] | undefined, +): Record { + if (!providers) { + return {}; + } + const out: Record = {}; + for (const [key, provider] of Object.entries(providers)) { + const normalizedKey = key.trim(); + if (!normalizedKey || !isRecord(provider)) { + continue; + } + out[normalizedKey] = provider; + } + return out; +} + +function resolveSourceManagedApiKeyMarker(params: { + sourceProvider: ProviderConfig | undefined; + sourceSecretDefaults: SecretDefaults | undefined; +}): string | undefined { + const sourceApiKeyRef = resolveSecretInputRef({ + value: params.sourceProvider?.apiKey, + defaults: params.sourceSecretDefaults, + }).ref; + if (!sourceApiKeyRef || !sourceApiKeyRef.id.trim()) { + return undefined; + } + return sourceApiKeyRef.source === "env" + ? 
sourceApiKeyRef.id.trim() + : resolveNonEnvSecretRefApiKeyMarker(sourceApiKeyRef.source); +} + +function resolveSourceManagedHeaderMarkers(params: { + sourceProvider: ProviderConfig | undefined; + sourceSecretDefaults: SecretDefaults | undefined; +}): Record { + const sourceHeaders = isRecord(params.sourceProvider?.headers) + ? (params.sourceProvider.headers as Record) + : undefined; + if (!sourceHeaders) { + return {}; + } + const markers: Record = {}; + for (const [headerName, headerValue] of Object.entries(sourceHeaders)) { + const sourceHeaderRef = resolveSecretInputRef({ + value: headerValue, + defaults: params.sourceSecretDefaults, + }).ref; + if (!sourceHeaderRef || !sourceHeaderRef.id.trim()) { + continue; + } + markers[headerName] = + sourceHeaderRef.source === "env" + ? resolveEnvSecretRefHeaderValueMarker(sourceHeaderRef.id) + : resolveNonEnvSecretRefHeaderValueMarker(sourceHeaderRef.source); + } + return markers; +} + +export function enforceSourceManagedProviderSecrets(params: { + providers: ModelsConfig["providers"]; + sourceProviders: ModelsConfig["providers"] | undefined; + sourceSecretDefaults?: SecretDefaults; + secretRefManagedProviders?: Set; +}): ModelsConfig["providers"] { + const { providers } = params; + if (!providers) { + return providers; + } + const sourceProvidersByKey = normalizeSourceProviderLookup(params.sourceProviders); + if (Object.keys(sourceProvidersByKey).length === 0) { + return providers; + } + + let nextProviders: Record | null = null; + for (const [providerKey, provider] of Object.entries(providers)) { + if (!isRecord(provider)) { + continue; + } + const sourceProvider = sourceProvidersByKey[providerKey.trim()]; + if (!sourceProvider) { + continue; + } + let nextProvider = provider; + let providerMutated = false; + + const sourceApiKeyMarker = resolveSourceManagedApiKeyMarker({ + sourceProvider, + sourceSecretDefaults: params.sourceSecretDefaults, + }); + if (sourceApiKeyMarker) { + 
params.secretRefManagedProviders?.add(providerKey.trim()); + if (nextProvider.apiKey !== sourceApiKeyMarker) { + providerMutated = true; + nextProvider = { + ...nextProvider, + apiKey: sourceApiKeyMarker, + }; + } + } + + const sourceHeaderMarkers = resolveSourceManagedHeaderMarkers({ + sourceProvider, + sourceSecretDefaults: params.sourceSecretDefaults, + }); + if (Object.keys(sourceHeaderMarkers).length > 0) { + const currentHeaders = isRecord(nextProvider.headers) + ? (nextProvider.headers as Record) + : undefined; + const nextHeaders = { + ...(currentHeaders as Record[string]>), + }; + let headersMutated = !currentHeaders; + for (const [headerName, marker] of Object.entries(sourceHeaderMarkers)) { + if (nextHeaders[headerName] === marker) { + continue; + } + headersMutated = true; + nextHeaders[headerName] = marker; + } + if (headersMutated) { + providerMutated = true; + nextProvider = { + ...nextProvider, + headers: nextHeaders, + }; + } + } + + if (!providerMutated) { + continue; + } + if (!nextProviders) { + nextProviders = { ...providers }; + } + nextProviders[providerKey] = nextProvider; + } + + return nextProviders ?? 
providers; +} + export function normalizeProviders(params: { providers: ModelsConfig["providers"]; agentDir: string; env?: NodeJS.ProcessEnv; - secretDefaults?: { - env?: string; - file?: string; - exec?: string; - }; + secretDefaults?: SecretDefaults; + sourceProviders?: ModelsConfig["providers"]; + sourceSecretDefaults?: SecretDefaults; secretRefManagedProviders?: Set; }): ModelsConfig["providers"] { const { providers } = params; @@ -343,6 +489,9 @@ export function normalizeProviders(params: { apiKey: normalizedConfiguredApiKey, }; } + if (isNonSecretApiKeyMarker(normalizedConfiguredApiKey)) { + params.secretRefManagedProviders?.add(normalizedKey); + } if ( profileApiKey && profileApiKey.source !== "plaintext" && @@ -366,6 +515,7 @@ export function normalizeProviders(params: { if (envVarName && env[envVarName] === currentApiKey) { mutated = true; normalizedProvider = { ...normalizedProvider, apiKey: envVarName }; + params.secretRefManagedProviders?.add(normalizedKey); } } @@ -395,7 +545,7 @@ export function normalizeProviders(params: { } } - if (normalizedKey === "google") { + if (normalizedKey === "google" || normalizedKey === "google-vertex") { const googleNormalized = normalizeGoogleProvider(normalizedProvider); if (googleNormalized !== normalizedProvider) { mutated = true; @@ -426,13 +576,20 @@ export function normalizeProviders(params: { next[normalizedKey] = normalizedProvider; } - return mutated ? next : providers; + const normalizedProviders = mutated ? 
next : providers; + return enforceSourceManagedProviderSecrets({ + providers: normalizedProviders, + sourceProviders: params.sourceProviders, + sourceSecretDefaults: params.sourceSecretDefaults, + secretRefManagedProviders: params.secretRefManagedProviders, + }); } type ImplicitProviderParams = { agentDir: string; config?: OpenClawConfig; env?: NodeJS.ProcessEnv; + workspaceDir?: string; explicitProviders?: Record | null; }; @@ -456,6 +613,7 @@ function withApiKey( build: (params: { apiKey: string; discoveryApiKey?: string; + explicitProvider?: ProviderConfig; }) => ProviderConfig | Promise, ): ImplicitProviderLoader { return async (ctx) => { @@ -464,7 +622,11 @@ function withApiKey( return undefined; } return { - [providerKey]: await build({ apiKey, discoveryApiKey }), + [providerKey]: await build({ + apiKey, + discoveryApiKey, + explicitProvider: ctx.explicitProviders?.[providerKey], + }), }; }; } @@ -497,8 +659,38 @@ function mergeImplicitProviderSet( const SIMPLE_IMPLICIT_PROVIDER_LOADERS: ImplicitProviderLoader[] = [ withApiKey("minimax", async ({ apiKey }) => ({ ...buildMinimaxProvider(), apiKey })), - withApiKey("moonshot", async ({ apiKey }) => ({ ...buildMoonshotProvider(), apiKey })), - withApiKey("kimi-coding", async ({ apiKey }) => ({ ...buildKimiCodingProvider(), apiKey })), + withApiKey("moonshot", async ({ apiKey, explicitProvider }) => { + const explicitBaseUrl = explicitProvider?.baseUrl; + return { + ...buildMoonshotProvider(), + ...(typeof explicitBaseUrl === "string" && explicitBaseUrl.trim() + ? { baseUrl: explicitBaseUrl.trim() } + : {}), + apiKey, + }; + }), + withApiKey("kimi-coding", async ({ apiKey, explicitProvider }) => { + const builtInProvider = buildKimiCodingProvider(); + const explicitBaseUrl = explicitProvider?.baseUrl; + const explicitHeaders = isRecord(explicitProvider?.headers) + ? 
(explicitProvider.headers as ProviderConfig["headers"]) + : undefined; + return { + ...builtInProvider, + ...(typeof explicitBaseUrl === "string" && explicitBaseUrl.trim() + ? { baseUrl: explicitBaseUrl.trim() } + : {}), + ...(explicitHeaders + ? { + headers: { + ...builtInProvider.headers, + ...explicitHeaders, + }, + } + : {}), + apiKey, + }; + }), withApiKey("synthetic", async ({ apiKey }) => ({ ...buildSyntheticProvider(), apiKey })), withApiKey("venice", async ({ apiKey }) => ({ ...(await buildVeniceProvider()), apiKey })), withApiKey("xiaomi", async ({ apiKey }) => ({ ...buildXiaomiProvider(), apiKey })), @@ -512,6 +704,7 @@ const SIMPLE_IMPLICIT_PROVIDER_LOADERS: ImplicitProviderLoader[] = [ apiKey, })), withApiKey("qianfan", async ({ apiKey }) => ({ ...buildQianfanProvider(), apiKey })), + withApiKey("modelstudio", async ({ apiKey }) => ({ ...buildModelStudioProvider(), apiKey })), withApiKey("openrouter", async ({ apiKey }) => ({ ...buildOpenrouterProvider(), apiKey })), withApiKey("nvidia", async ({ apiKey }) => ({ ...buildNvidiaProvider(), apiKey })), withApiKey("kilocode", async ({ apiKey }) => ({ @@ -606,56 +799,35 @@ async function resolveCloudflareAiGatewayImplicitProvider( return undefined; } -async function resolveOllamaImplicitProvider( +async function resolvePluginImplicitProviders( ctx: ImplicitProviderContext, + order: import("../plugins/types.js").ProviderDiscoveryOrder, ): Promise | undefined> { - const ollamaKey = ctx.resolveProviderApiKey("ollama").apiKey; - const explicitOllama = ctx.explicitProviders?.ollama; - const hasExplicitModels = - Array.isArray(explicitOllama?.models) && explicitOllama.models.length > 0; - if (hasExplicitModels && explicitOllama) { - return { - ollama: { - ...explicitOllama, - baseUrl: resolveOllamaApiBase(explicitOllama.baseUrl), - api: explicitOllama.api ?? "ollama", - apiKey: ollamaKey ?? explicitOllama.apiKey ?? 
OLLAMA_LOCAL_AUTH_MARKER, - }, - }; - } - - const ollamaBaseUrl = explicitOllama?.baseUrl; - const hasExplicitOllamaConfig = Boolean(explicitOllama); - const ollamaProvider = await buildOllamaProvider(ollamaBaseUrl, { - quiet: !ollamaKey && !hasExplicitOllamaConfig, + const providers = resolvePluginDiscoveryProviders({ + config: ctx.config, + workspaceDir: ctx.workspaceDir, + env: ctx.env, }); - if (ollamaProvider.models.length === 0 && !ollamaKey && !explicitOllama?.apiKey) { - return undefined; + const byOrder = groupPluginDiscoveryProvidersByOrder(providers); + const discovered: Record = {}; + for (const provider of byOrder[order]) { + const result = await provider.discovery?.run({ + config: ctx.config ?? {}, + agentDir: ctx.agentDir, + workspaceDir: ctx.workspaceDir, + env: ctx.env, + resolveProviderApiKey: (providerId) => + ctx.resolveProviderApiKey(providerId?.trim() || provider.id), + }); + mergeImplicitProviderSet( + discovered, + normalizePluginDiscoveryResult({ + provider, + result, + }), + ); } - return { - ollama: { - ...ollamaProvider, - apiKey: ollamaKey ?? explicitOllama?.apiKey ?? OLLAMA_LOCAL_AUTH_MARKER, - }, - }; -} - -async function resolveVllmImplicitProvider( - ctx: ImplicitProviderContext, -): Promise | undefined> { - if (ctx.explicitProviders?.vllm) { - return undefined; - } - const { apiKey: vllmKey, discoveryApiKey } = ctx.resolveProviderApiKey("vllm"); - if (!vllmKey) { - return undefined; - } - return { - vllm: { - ...(await buildVllmProvider({ apiKey: discoveryApiKey })), - apiKey: vllmKey, - }, - }; + return Object.keys(discovered).length > 0 ? 
discovered : undefined; } export async function resolveImplicitProviders( @@ -692,15 +864,17 @@ export async function resolveImplicitProviders( for (const loader of SIMPLE_IMPLICIT_PROVIDER_LOADERS) { mergeImplicitProviderSet(providers, await loader(context)); } + mergeImplicitProviderSet(providers, await resolvePluginImplicitProviders(context, "simple")); for (const loader of PROFILE_IMPLICIT_PROVIDER_LOADERS) { mergeImplicitProviderSet(providers, await loader(context)); } + mergeImplicitProviderSet(providers, await resolvePluginImplicitProviders(context, "profile")); for (const loader of PAIRED_IMPLICIT_PROVIDER_LOADERS) { mergeImplicitProviderSet(providers, await loader(context)); } + mergeImplicitProviderSet(providers, await resolvePluginImplicitProviders(context, "paired")); mergeImplicitProviderSet(providers, await resolveCloudflareAiGatewayImplicitProvider(context)); - mergeImplicitProviderSet(providers, await resolveOllamaImplicitProvider(context)); - mergeImplicitProviderSet(providers, await resolveVllmImplicitProvider(context)); + mergeImplicitProviderSet(providers, await resolvePluginImplicitProviders(context, "late")); if (!providers["github-copilot"]) { const implicitCopilot = await resolveImplicitCopilotProvider({ diff --git a/src/agents/models-config.runtime-source-snapshot.test.ts b/src/agents/models-config.runtime-source-snapshot.test.ts index 6d6ea0284ee..a80ac010e86 100644 --- a/src/agents/models-config.runtime-source-snapshot.test.ts +++ b/src/agents/models-config.runtime-source-snapshot.test.ts @@ -16,47 +16,137 @@ import { readGeneratedModelsJson } from "./models-config.test-utils.js"; installModelsConfigTestHooks(); +function createOpenAiApiKeySourceConfig(): OpenClawConfig { + return { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret + api: "openai-completions" as const, + models: [], + }, + }, + }, + }; +} + 
+function createOpenAiApiKeyRuntimeConfig(): OpenClawConfig { + return { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "sk-runtime-resolved", // pragma: allowlist secret + api: "openai-completions" as const, + models: [], + }, + }, + }, + }; +} + +function createOpenAiHeaderSourceConfig(): OpenClawConfig { + return { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions" as const, + headers: { + Authorization: { + source: "env", + provider: "default", + id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret + }, + "X-Tenant-Token": { + source: "file", + provider: "vault", + id: "/providers/openai/tenantToken", + }, + }, + models: [], + }, + }, + }, + }; +} + +function createOpenAiHeaderRuntimeConfig(): OpenClawConfig { + return { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions" as const, + headers: { + Authorization: "Bearer runtime-openai-token", + "X-Tenant-Token": "runtime-tenant-token", + }, + models: [], + }, + }, + }, + }; +} + +function withGatewayTokenMode(config: OpenClawConfig): OpenClawConfig { + return { + ...config, + gateway: { + auth: { + mode: "token", + }, + }, + }; +} + +async function withGeneratedModelsFromRuntimeSource( + params: { + sourceConfig: OpenClawConfig; + runtimeConfig: OpenClawConfig; + candidateConfig?: OpenClawConfig; + }, + runAssertions: () => Promise, +) { + await withTempHome(async () => { + try { + setRuntimeConfigSnapshot(params.runtimeConfig, params.sourceConfig); + await ensureOpenClawModelsJson(params.candidateConfig ?? 
loadConfig()); + await runAssertions(); + } finally { + clearRuntimeConfigSnapshot(); + clearConfigCache(); + } + }); +} + +async function expectGeneratedProviderApiKey(providerId: string, expected: string) { + const parsed = await readGeneratedModelsJson<{ + providers: Record; + }>(); + expect(parsed.providers[providerId]?.apiKey).toBe(expected); +} + +async function expectGeneratedOpenAiHeaderMarkers() { + const parsed = await readGeneratedModelsJson<{ + providers: Record }>; + }>(); + expect(parsed.providers.openai?.headers?.Authorization).toBe( + "secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret + ); + expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER); +} + describe("models-config runtime source snapshot", () => { it("uses runtime source snapshot markers when passed the active runtime config", async () => { - await withTempHome(async () => { - const sourceConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret - api: "openai-completions" as const, - models: [], - }, - }, - }, - }; - const runtimeConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "sk-runtime-resolved", // pragma: allowlist secret - api: "openai-completions" as const, - models: [], - }, - }, - }, - }; - - try { - setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); - await ensureOpenClawModelsJson(loadConfig()); - - const parsed = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret - } finally { - clearRuntimeConfigSnapshot(); - clearConfigCache(); - } - }); + await withGeneratedModelsFromRuntimeSource( + { + sourceConfig: createOpenAiApiKeySourceConfig(), + runtimeConfig: createOpenAiApiKeyRuntimeConfig(), + }, + async () => 
expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"), // pragma: allowlist secret + ); }); it("uses non-env marker from runtime source snapshot for file refs", async () => { @@ -101,58 +191,71 @@ describe("models-config runtime source snapshot", () => { }); }); - it("uses header markers from runtime source snapshot instead of resolved runtime values", async () => { + it("projects cloned runtime configs onto source snapshot when preserving provider auth", async () => { await withTempHome(async () => { - const sourceConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions" as const, - headers: { - Authorization: { - source: "env", - provider: "default", - id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret - }, - "X-Tenant-Token": { - source: "file", - provider: "vault", - id: "/providers/openai/tenantToken", - }, - }, - models: [], - }, - }, - }, - }; - const runtimeConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions" as const, - headers: { - Authorization: "Bearer runtime-openai-token", - "X-Tenant-Token": "runtime-tenant-token", - }, - models: [], - }, + const sourceConfig = createOpenAiApiKeySourceConfig(); + const runtimeConfig = createOpenAiApiKeyRuntimeConfig(); + const clonedRuntimeConfig: OpenClawConfig = { + ...runtimeConfig, + agents: { + defaults: { + imageModel: "openai/gpt-image-1", }, }, }; try { setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); - await ensureOpenClawModelsJson(loadConfig()); + await ensureOpenClawModelsJson(clonedRuntimeConfig); + await expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"); // pragma: allowlist secret + } finally { + clearRuntimeConfigSnapshot(); + clearConfigCache(); + } + }); + }); - const parsed = await readGeneratedModelsJson<{ - providers: Record }>; - }>(); - expect(parsed.providers.openai?.headers?.Authorization).toBe( - 
"secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret - ); - expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER); + it("uses header markers from runtime source snapshot instead of resolved runtime values", async () => { + await withGeneratedModelsFromRuntimeSource( + { + sourceConfig: createOpenAiHeaderSourceConfig(), + runtimeConfig: createOpenAiHeaderRuntimeConfig(), + }, + expectGeneratedOpenAiHeaderMarkers, + ); + }); + + it("keeps source markers when runtime projection is skipped for incompatible top-level shape", async () => { + await withTempHome(async () => { + const sourceConfig = withGatewayTokenMode(createOpenAiApiKeySourceConfig()); + const runtimeConfig = withGatewayTokenMode(createOpenAiApiKeyRuntimeConfig()); + const incompatibleCandidate: OpenClawConfig = { + ...createOpenAiApiKeyRuntimeConfig(), + }; + + try { + setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); + await ensureOpenClawModelsJson(incompatibleCandidate); + await expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"); // pragma: allowlist secret + } finally { + clearRuntimeConfigSnapshot(); + clearConfigCache(); + } + }); + }); + + it("keeps source header markers when runtime projection is skipped for incompatible top-level shape", async () => { + await withTempHome(async () => { + const sourceConfig = withGatewayTokenMode(createOpenAiHeaderSourceConfig()); + const runtimeConfig = withGatewayTokenMode(createOpenAiHeaderRuntimeConfig()); + const incompatibleCandidate: OpenClawConfig = { + ...createOpenAiHeaderRuntimeConfig(), + }; + + try { + setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); + await ensureOpenClawModelsJson(incompatibleCandidate); + await expectGeneratedOpenAiHeaderMarkers(); } finally { clearRuntimeConfigSnapshot(); clearConfigCache(); diff --git a/src/agents/models-config.ts b/src/agents/models-config.ts index b9b8a7316d3..3e013799b0b 100644 --- a/src/agents/models-config.ts +++ 
b/src/agents/models-config.ts @@ -1,8 +1,8 @@ import fs from "node:fs/promises"; import path from "node:path"; import { - getRuntimeConfigSnapshot, getRuntimeConfigSourceSnapshot, + projectConfigOntoRuntimeSourceSnapshot, type OpenClawConfig, loadConfig, } from "../config/config.js"; @@ -42,19 +42,31 @@ async function writeModelsFileAtomic(targetPath: string, contents: string): Prom await fs.rename(tempPath, targetPath); } -function resolveModelsConfigInput(config?: OpenClawConfig): OpenClawConfig { +function resolveModelsConfigInput(config?: OpenClawConfig): { + config: OpenClawConfig; + sourceConfigForSecrets: OpenClawConfig; +} { const runtimeSource = getRuntimeConfigSourceSnapshot(); - if (!runtimeSource) { - return config ?? loadConfig(); - } if (!config) { - return runtimeSource; + const loaded = loadConfig(); + return { + config: runtimeSource ?? loaded, + sourceConfigForSecrets: runtimeSource ?? loaded, + }; } - const runtimeResolved = getRuntimeConfigSnapshot(); - if (runtimeResolved && config === runtimeResolved) { - return runtimeSource; + if (!runtimeSource) { + return { + config, + sourceConfigForSecrets: config, + }; } - return config; + const projected = projectConfigOntoRuntimeSourceSnapshot(config); + return { + config: projected, + // If projection is skipped (for example incompatible top-level shape), + // keep managed secret persistence anchored to the active source snapshot. + sourceConfigForSecrets: projected === config ? runtimeSource : projected, + }; } async function withModelsJsonWriteLock(targetPath: string, run: () => Promise): Promise { @@ -80,7 +92,8 @@ export async function ensureOpenClawModelsJson( config?: OpenClawConfig, agentDirOverride?: string, ): Promise<{ agentDir: string; wrote: boolean }> { - const cfg = resolveModelsConfigInput(config); + const resolved = resolveModelsConfigInput(config); + const cfg = resolved.config; const agentDir = agentDirOverride?.trim() ? 
agentDirOverride.trim() : resolveOpenClawAgentDir(); const targetPath = path.join(agentDir, "models.json"); @@ -91,6 +104,7 @@ export async function ensureOpenClawModelsJson( const existingModelsFile = await readExistingModelsFile(targetPath); const plan = await planOpenClawModelsJson({ cfg, + sourceConfigForSecrets: resolved.sourceConfigForSecrets, agentDir, env, existingRaw: existingModelsFile.raw, diff --git a/src/agents/models.profiles.live.test.ts b/src/agents/models.profiles.live.test.ts index 6386eaef158..515d2b48ce6 100644 --- a/src/agents/models.profiles.live.test.ts +++ b/src/agents/models.profiles.live.test.ts @@ -9,12 +9,9 @@ import { isAnthropicBillingError, isAnthropicRateLimitError, } from "./live-auth-keys.js"; -import { - isMiniMaxModelNotFoundErrorMessage, - isModelNotFoundErrorMessage, -} from "./live-model-errors.js"; import { isModernModelRef } from "./live-model-filter.js"; import { getApiKeyForModel, requireApiKey } from "./model-auth.js"; +import { shouldSuppressBuiltInModel } from "./model-suppression.js"; import { ensureOpenClawModelsJson } from "./models-config.js"; import { isRateLimitErrorMessage } from "./pi-embedded-helpers/errors.js"; import { discoverAuthStorage, discoverModels } from "./pi-model-discovery.js"; @@ -86,6 +83,35 @@ function isGoogleModelNotFoundError(err: unknown): boolean { return false; } +function isModelNotFoundErrorMessage(raw: string): boolean { + const msg = raw.trim(); + if (!msg) { + return false; + } + if (/\b404\b/.test(msg) && /not(?:[\s_-]+)?found/i.test(msg)) { + return true; + } + if (/not_found_error/i.test(msg)) { + return true; + } + if (/model:\s*[a-z0-9._-]+/i.test(msg) && /not(?:[\s_-]+)?found/i.test(msg)) { + return true; + } + return false; +} + +describe("isModelNotFoundErrorMessage", () => { + it("matches whitespace-separated not found errors", () => { + expect(isModelNotFoundErrorMessage("404 model not found")).toBe(true); + expect(isModelNotFoundErrorMessage("model: minimax-text-01 not 
found")).toBe(true); + }); + + it("still matches underscore and hyphen variants", () => { + expect(isModelNotFoundErrorMessage("404 model not_found")).toBe(true); + expect(isModelNotFoundErrorMessage("404 model not-found")).toBe(true); + }); +}); + function isChatGPTUsageLimitErrorMessage(raw: string): boolean { const msg = raw.toLowerCase(); return msg.includes("hit your chatgpt usage limit") && msg.includes("try again in"); @@ -177,6 +203,31 @@ function resolveTestReasoning( return "low"; } +function resolveLiveSystemPrompt(model: Model): string | undefined { + if (model.provider === "openai-codex") { + return "You are a concise assistant. Follow the user's instruction exactly."; + } + return undefined; +} + +describe("resolveLiveSystemPrompt", () => { + it("adds instructions for openai-codex probes", () => { + expect( + resolveLiveSystemPrompt({ + provider: "openai-codex", + } as Model), + ).toContain("Follow the user's instruction exactly."); + }); + + it("keeps other providers unchanged", () => { + expect( + resolveLiveSystemPrompt({ + provider: "openai", + } as Model), + ).toBeUndefined(); + }); +}); + async function completeSimpleWithTimeout( model: Model, context: Parameters>[1], @@ -221,6 +272,7 @@ async function completeOkWithRetry(params: { const res = await completeSimpleWithTimeout( params.model, { + systemPrompt: resolveLiveSystemPrompt(params.model), messages: [ { role: "user", @@ -292,6 +344,9 @@ describeLive("live models (profile keys)", () => { }> = []; for (const model of models) { + if (shouldSuppressBuiltInModel({ provider: model.provider, id: model.id })) { + continue; + } if (providers && !providers.has(model.provider)) { continue; } @@ -475,11 +530,7 @@ describeLive("live models (profile keys)", () => { if (ok.res.stopReason === "error") { const msg = ok.res.errorMessage ?? 
""; - if ( - allowNotFoundSkip && - (isModelNotFoundErrorMessage(msg) || - (model.provider === "minimax" && isMiniMaxModelNotFoundErrorMessage(msg))) - ) { + if (allowNotFoundSkip && isModelNotFoundErrorMessage(msg)) { skipped.push({ model: id, reason: msg }); logProgress(`${progressLabel}: skip (model not found)`); break; @@ -500,7 +551,9 @@ describeLive("live models (profile keys)", () => { } if ( ok.text.length === 0 && - (model.provider === "openrouter" || model.provider === "opencode") + (model.provider === "openrouter" || + model.provider === "opencode" || + model.provider === "opencode-go") ) { skipped.push({ model: id, @@ -563,15 +616,6 @@ describeLive("live models (profile keys)", () => { logProgress(`${progressLabel}: skip (google model not found)`); break; } - if ( - allowNotFoundSkip && - model.provider === "minimax" && - isMiniMaxModelNotFoundErrorMessage(message) - ) { - skipped.push({ model: id, reason: message }); - logProgress(`${progressLabel}: skip (model not found)`); - break; - } if ( allowNotFoundSkip && model.provider === "minimax" && @@ -592,7 +636,7 @@ describeLive("live models (profile keys)", () => { } if ( allowNotFoundSkip && - model.provider === "opencode" && + (model.provider === "opencode" || model.provider === "opencode-go") && isRateLimitErrorMessage(message) ) { skipped.push({ model: id, reason: message }); diff --git a/src/agents/ollama-models.test.ts b/src/agents/ollama-models.test.ts new file mode 100644 index 00000000000..d7b7d066c6f --- /dev/null +++ b/src/agents/ollama-models.test.ts @@ -0,0 +1,41 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { jsonResponse, requestBodyText, requestUrl } from "../test-helpers/http.js"; +import { + enrichOllamaModelsWithContext, + resolveOllamaApiBase, + type OllamaTagModel, +} from "./ollama-models.js"; + +describe("ollama-models", () => { + afterEach(() => { + vi.unstubAllGlobals(); + }); + + it("strips /v1 when resolving the Ollama API base", () => { + 
expect(resolveOllamaApiBase("http://127.0.0.1:11434/v1")).toBe("http://127.0.0.1:11434"); + expect(resolveOllamaApiBase("http://127.0.0.1:11434///")).toBe("http://127.0.0.1:11434"); + }); + + it("enriches discovered models with context windows from /api/show", async () => { + const models: OllamaTagModel[] = [{ name: "llama3:8b" }, { name: "deepseek-r1:14b" }]; + const fetchMock = vi.fn(async (input: string | URL | Request, init?: RequestInit) => { + const url = requestUrl(input); + if (!url.endsWith("/api/show")) { + throw new Error(`Unexpected fetch: ${url}`); + } + const body = JSON.parse(requestBodyText(init?.body)) as { name?: string }; + if (body.name === "llama3:8b") { + return jsonResponse({ model_info: { "llama.context_length": 65536 } }); + } + return jsonResponse({}); + }); + vi.stubGlobal("fetch", fetchMock); + + const enriched = await enrichOllamaModelsWithContext("http://127.0.0.1:11434", models); + + expect(enriched).toEqual([ + { name: "llama3:8b", contextWindow: 65536 }, + { name: "deepseek-r1:14b", contextWindow: undefined }, + ]); + }); +}); diff --git a/src/agents/ollama-models.ts b/src/agents/ollama-models.ts new file mode 100644 index 00000000000..20406b3a80e --- /dev/null +++ b/src/agents/ollama-models.ts @@ -0,0 +1,143 @@ +import type { ModelDefinitionConfig } from "../config/types.models.js"; +import { OLLAMA_NATIVE_BASE_URL } from "./ollama-stream.js"; + +export const OLLAMA_DEFAULT_BASE_URL = OLLAMA_NATIVE_BASE_URL; +export const OLLAMA_DEFAULT_CONTEXT_WINDOW = 128000; +export const OLLAMA_DEFAULT_MAX_TOKENS = 8192; +export const OLLAMA_DEFAULT_COST = { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, +}; + +export type OllamaTagModel = { + name: string; + modified_at?: string; + size?: number; + digest?: string; + remote_host?: string; + details?: { + family?: string; + parameter_size?: string; + }; +}; + +export type OllamaTagsResponse = { + models?: OllamaTagModel[]; +}; + +export type OllamaModelWithContext = OllamaTagModel & 
{ + contextWindow?: number; +}; + +const OLLAMA_SHOW_CONCURRENCY = 8; + +/** + * Derive the Ollama native API base URL from a configured base URL. + * + * Users typically configure `baseUrl` with a `/v1` suffix (e.g. + * `http://192.168.20.14:11434/v1`) for the OpenAI-compatible endpoint. + * The native Ollama API lives at the root (e.g. `/api/tags`), so we + * strip the `/v1` suffix when present. + */ +export function resolveOllamaApiBase(configuredBaseUrl?: string): string { + if (!configuredBaseUrl) { + return OLLAMA_DEFAULT_BASE_URL; + } + const trimmed = configuredBaseUrl.replace(/\/+$/, ""); + return trimmed.replace(/\/v1$/i, ""); +} + +export async function queryOllamaContextWindow( + apiBase: string, + modelName: string, +): Promise { + try { + const response = await fetch(`${apiBase}/api/show`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ name: modelName }), + signal: AbortSignal.timeout(3000), + }); + if (!response.ok) { + return undefined; + } + const data = (await response.json()) as { model_info?: Record }; + if (!data.model_info) { + return undefined; + } + for (const [key, value] of Object.entries(data.model_info)) { + if (key.endsWith(".context_length") && typeof value === "number" && Number.isFinite(value)) { + const contextWindow = Math.floor(value); + if (contextWindow > 0) { + return contextWindow; + } + } + } + return undefined; + } catch { + return undefined; + } +} + +export async function enrichOllamaModelsWithContext( + apiBase: string, + models: OllamaTagModel[], + opts?: { concurrency?: number }, +): Promise { + const concurrency = Math.max(1, Math.floor(opts?.concurrency ?? 
OLLAMA_SHOW_CONCURRENCY)); + const enriched: OllamaModelWithContext[] = []; + for (let index = 0; index < models.length; index += concurrency) { + const batch = models.slice(index, index + concurrency); + const batchResults = await Promise.all( + batch.map(async (model) => ({ + ...model, + contextWindow: await queryOllamaContextWindow(apiBase, model.name), + })), + ); + enriched.push(...batchResults); + } + return enriched; +} + +/** Heuristic: treat models with "r1", "reasoning", or "think" in the name as reasoning models. */ +export function isReasoningModelHeuristic(modelId: string): boolean { + return /r1|reasoning|think|reason/i.test(modelId); +} + +/** Build a ModelDefinitionConfig for an Ollama model with default values. */ +export function buildOllamaModelDefinition( + modelId: string, + contextWindow?: number, +): ModelDefinitionConfig { + return { + id: modelId, + name: modelId, + reasoning: isReasoningModelHeuristic(modelId), + input: ["text"], + cost: OLLAMA_DEFAULT_COST, + contextWindow: contextWindow ?? OLLAMA_DEFAULT_CONTEXT_WINDOW, + maxTokens: OLLAMA_DEFAULT_MAX_TOKENS, + }; +} + +/** Fetch the model list from a running Ollama instance. */ +export async function fetchOllamaModels( + baseUrl: string, +): Promise<{ reachable: boolean; models: OllamaTagModel[] }> { + try { + const apiBase = resolveOllamaApiBase(baseUrl); + const response = await fetch(`${apiBase}/api/tags`, { + signal: AbortSignal.timeout(5000), + }); + if (!response.ok) { + return { reachable: true, models: [] }; + } + const data = (await response.json()) as OllamaTagsResponse; + const models = (data.models ?? 
[]).filter((m) => m.name); + return { reachable: true, models }; + } catch { + return { reachable: false, models: [] }; + } +} diff --git a/src/agents/ollama-stream.test.ts b/src/agents/ollama-stream.test.ts index 2af5e490c7f..ded8064ea19 100644 --- a/src/agents/ollama-stream.test.ts +++ b/src/agents/ollama-stream.test.ts @@ -106,7 +106,7 @@ describe("buildAssistantMessage", () => { expect(result.usage.totalTokens).toBe(15); }); - it("falls back to thinking when content is empty", () => { + it("drops thinking-only output when content is empty", () => { const response = { model: "qwen3:32b", created_at: "2026-01-01T00:00:00Z", @@ -119,10 +119,10 @@ describe("buildAssistantMessage", () => { }; const result = buildAssistantMessage(response, modelInfo); expect(result.stopReason).toBe("stop"); - expect(result.content).toEqual([{ type: "text", text: "Thinking output" }]); + expect(result.content).toEqual([]); }); - it("falls back to reasoning when content and thinking are empty", () => { + it("drops reasoning-only output when content and thinking are empty", () => { const response = { model: "qwen3:32b", created_at: "2026-01-01T00:00:00Z", @@ -135,7 +135,7 @@ describe("buildAssistantMessage", () => { }; const result = buildAssistantMessage(response, modelInfo); expect(result.stopReason).toBe("stop"); - expect(result.content).toEqual([{ type: "text", text: "Reasoning output" }]); + expect(result.content).toEqual([]); }); it("builds response with tool calls", () => { @@ -203,6 +203,20 @@ function mockNdjsonReader(lines: string[]): ReadableStreamDefaultReader; } +async function expectDoneEventContent(lines: string[], expectedContent: unknown) { + await withMockNdjsonFetch(lines, async () => { + const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); + const events = await collectStreamEvents(stream); + + const doneEvent = events.at(-1); + if (!doneEvent || doneEvent.type !== "done") { + throw new Error("Expected done event"); + } + + 
expect(doneEvent.message.content).toEqual(expectedContent); + }); +} + describe("parseNdjsonStream", () => { it("parses text-only streaming chunks", async () => { const reader = mockNdjsonReader([ @@ -485,89 +499,49 @@ describe("createOllamaStreamFn", () => { ); }); - it("accumulates thinking chunks when content is empty", async () => { - await withMockNdjsonFetch( + it("drops thinking chunks when no final content is emitted", async () => { + await expectDoneEventContent( [ '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":"reasoned"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":" output"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}', ], - async () => { - const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); - const events = await collectStreamEvents(stream); - - const doneEvent = events.at(-1); - if (!doneEvent || doneEvent.type !== "done") { - throw new Error("Expected done event"); - } - - expect(doneEvent.message.content).toEqual([{ type: "text", text: "reasoned output" }]); - }, + [], ); }); it("prefers streamed content over earlier thinking chunks", async () => { - await withMockNdjsonFetch( + await expectDoneEventContent( [ '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":"internal"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":"final"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":" answer"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}', ], - async () => { - const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); - const events = await collectStreamEvents(stream); - - const doneEvent = events.at(-1); 
- if (!doneEvent || doneEvent.type !== "done") { - throw new Error("Expected done event"); - } - - expect(doneEvent.message.content).toEqual([{ type: "text", text: "final answer" }]); - }, + [{ type: "text", text: "final answer" }], ); }); - it("accumulates reasoning chunks when thinking is absent", async () => { - await withMockNdjsonFetch( + it("drops reasoning chunks when no final content is emitted", async () => { + await expectDoneEventContent( [ '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"reasoned"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":" output"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}', ], - async () => { - const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); - const events = await collectStreamEvents(stream); - - const doneEvent = events.at(-1); - if (!doneEvent || doneEvent.type !== "done") { - throw new Error("Expected done event"); - } - - expect(doneEvent.message.content).toEqual([{ type: "text", text: "reasoned output" }]); - }, + [], ); }); it("prefers streamed content over earlier reasoning chunks", async () => { - await withMockNdjsonFetch( + await expectDoneEventContent( [ '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"internal"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":"final"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":" answer"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}', ], - async () => { - const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); - const events = await collectStreamEvents(stream); - - const doneEvent = events.at(-1); - if (!doneEvent || 
doneEvent.type !== "done") { - throw new Error("Expected done event"); - } - - expect(doneEvent.message.content).toEqual([{ type: "text", text: "final answer" }]); - }, + [{ type: "text", text: "final answer" }], ); }); }); diff --git a/src/agents/ollama-stream.ts b/src/agents/ollama-stream.ts index 9d23852bb31..70a2ef33cf1 100644 --- a/src/agents/ollama-stream.ts +++ b/src/agents/ollama-stream.ts @@ -340,10 +340,9 @@ export function buildAssistantMessage( ): AssistantMessage { const content: (TextContent | ToolCall)[] = []; - // Ollama-native reasoning models may emit their answer in `thinking` or - // `reasoning` with an empty `content`. Fall back so replies are not dropped. - const text = - response.message.content || response.message.thinking || response.message.reasoning || ""; + // Native Ollama reasoning fields are internal model output. The reply text + // must come from `content`; reasoning visibility is controlled elsewhere. + const text = response.message.content || ""; if (text) { content.push({ type: "text", text }); } @@ -497,20 +496,12 @@ export function createOllamaStreamFn( const reader = response.body.getReader(); let accumulatedContent = ""; - let fallbackContent = ""; - let sawContent = false; const accumulatedToolCalls: OllamaToolCall[] = []; let finalResponse: OllamaChatResponse | undefined; for await (const chunk of parseNdjsonStream(reader)) { if (chunk.message?.content) { - sawContent = true; accumulatedContent += chunk.message.content; - } else if (!sawContent && chunk.message?.thinking) { - fallbackContent += chunk.message.thinking; - } else if (!sawContent && chunk.message?.reasoning) { - // Backward compatibility for older/native variants that still use reasoning. 
- fallbackContent += chunk.message.reasoning; } // Ollama sends tool_calls in intermediate (done:false) chunks, @@ -529,7 +520,7 @@ export function createOllamaStreamFn( throw new Error("Ollama API stream ended without a final response"); } - finalResponse.message.content = accumulatedContent || fallbackContent; + finalResponse.message.content = accumulatedContent; if (accumulatedToolCalls.length > 0) { finalResponse.message.tool_calls = accumulatedToolCalls; } diff --git a/src/agents/openai-responses.reasoning-replay.test.ts b/src/agents/openai-responses.reasoning-replay.test.ts index b5ccc50e4b4..0fcb02ece6d 100644 --- a/src/agents/openai-responses.reasoning-replay.test.ts +++ b/src/agents/openai-responses.reasoning-replay.test.ts @@ -30,6 +30,13 @@ function extractInputTypes(input: unknown[]) { .filter((t): t is string => typeof t === "string"); } +function extractInputMessages(input: unknown[]) { + return input.filter( + (item): item is Record => + !!item && typeof item === "object" && (item as Record).type === "message", + ); +} + const ZERO_USAGE = { input: 0, output: 0, @@ -184,4 +191,36 @@ describe("openai-responses reasoning replay", () => { expect(types).toContain("reasoning"); expect(types).toContain("message"); }); + + it.each(["commentary", "final_answer"] as const)( + "replays assistant message phase metadata for %s", + async (phase) => { + const assistantWithText = buildAssistantMessage({ + stopReason: "stop", + content: [ + buildReasoningPart(), + { + type: "text", + text: "hello", + textSignature: JSON.stringify({ v: 1, id: `msg_${phase}`, phase }), + }, + ], + }); + + const { input, types } = await runAbortedOpenAIResponsesStream({ + messages: [ + { role: "user", content: "Hi", timestamp: Date.now() }, + assistantWithText, + { role: "user", content: "Ok", timestamp: Date.now() }, + ], + }); + + expect(types).toContain("message"); + + const replayedMessage = extractInputMessages(input).find( + (item) => item.id === `msg_${phase}`, + ); + 
expect(replayedMessage?.phase).toBe(phase); + }, + ); }); diff --git a/src/agents/openai-ws-connection.test.ts b/src/agents/openai-ws-connection.test.ts index fb80f510ac1..2a7b95f7eb9 100644 --- a/src/agents/openai-ws-connection.test.ts +++ b/src/agents/openai-ws-connection.test.ts @@ -595,14 +595,12 @@ describe("OpenAIWebSocketManager", () => { manager.warmUp({ model: "gpt-5.2", - tools: [{ type: "function", function: { name: "exec", description: "Run a command" } }], + tools: [{ type: "function", name: "exec", description: "Run a command" }], }); const sent = JSON.parse(sock.sentMessages[0] ?? "{}") as Record; expect(sent["tools"]).toHaveLength(1); - expect((sent["tools"] as Array<{ function?: { name?: string } }>)[0]?.function?.name).toBe( - "exec", - ); + expect((sent["tools"] as Array<{ name?: string }>)[0]?.name).toBe("exec"); }); }); diff --git a/src/agents/openai-ws-connection.ts b/src/agents/openai-ws-connection.ts index a765c0f3780..2d9c6ffe7e6 100644 --- a/src/agents/openai-ws-connection.ts +++ b/src/agents/openai-ws-connection.ts @@ -37,12 +37,15 @@ export interface UsageInfo { total_tokens: number; } +export type OpenAIResponsesAssistantPhase = "commentary" | "final_answer"; + export type OutputItem = | { type: "message"; id: string; role: "assistant"; content: Array<{ type: "output_text"; text: string }>; + phase?: OpenAIResponsesAssistantPhase; status?: "in_progress" | "completed"; } | { @@ -190,6 +193,7 @@ export type InputItem = type: "message"; role: "system" | "developer" | "user" | "assistant"; content: string | ContentPart[]; + phase?: OpenAIResponsesAssistantPhase; } | { type: "function_call"; id?: string; call_id?: string; name: string; arguments: string } | { type: "function_call_output"; call_id: string; output: string } @@ -204,11 +208,10 @@ export type ToolChoice = export interface FunctionToolDefinition { type: "function"; - function: { - name: string; - description?: string; - parameters?: Record; - }; + name: string; + description?: 
string; + parameters?: Record; + strict?: boolean; } /** Standard response.create event payload (full turn) */ diff --git a/src/agents/openai-ws-stream.e2e.test.ts b/src/agents/openai-ws-stream.e2e.test.ts index 2b90d0dbc78..1146d71ffe3 100644 --- a/src/agents/openai-ws-stream.e2e.test.ts +++ b/src/agents/openai-ws-stream.e2e.test.ts @@ -14,6 +14,7 @@ * Skipped in CI — no API key available and we avoid billable external calls. */ +import type { AssistantMessage, Context } from "@mariozechner/pi-ai"; import { describe, it, expect, afterEach } from "vitest"; import { createOpenAIWebSocketStreamFn, @@ -28,14 +29,13 @@ const testFn = LIVE ? it : it.skip; const model = { api: "openai-responses" as const, provider: "openai", - id: "gpt-4o-mini", - name: "gpt-4o-mini", - baseUrl: "", - reasoning: false, - input: { maxTokens: 128_000 }, - output: { maxTokens: 16_384 }, - cache: false, - compat: {}, + id: "gpt-5.2", + name: "gpt-5.2", + contextWindow: 128_000, + maxTokens: 4_096, + reasoning: true, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, } as unknown as Parameters>[0]; type StreamFnParams = Parameters>; @@ -47,6 +47,61 @@ function makeContext(userMessage: string): StreamFnParams[1] { } as unknown as StreamFnParams[1]; } +function makeToolContext(userMessage: string): StreamFnParams[1] { + return { + systemPrompt: "You are a precise assistant. 
Follow tool instructions exactly.", + messages: [{ role: "user" as const, content: userMessage }], + tools: [ + { + name: "noop", + description: "Return the supplied tool result to the user.", + parameters: { + type: "object", + additionalProperties: false, + properties: {}, + }, + }, + ], + } as unknown as Context; +} + +function makeToolResultMessage( + callId: string, + output: string, +): StreamFnParams[1]["messages"][number] { + return { + role: "toolResult" as const, + toolCallId: callId, + toolName: "noop", + content: [{ type: "text" as const, text: output }], + isError: false, + timestamp: Date.now(), + } as unknown as StreamFnParams[1]["messages"][number]; +} + +async function collectEvents( + stream: ReturnType>, +): Promise> { + const events: Array<{ type: string; message?: AssistantMessage }> = []; + for await (const event of stream as AsyncIterable<{ type: string; message?: AssistantMessage }>) { + events.push(event); + } + return events; +} + +function expectDone(events: Array<{ type: string; message?: AssistantMessage }>): AssistantMessage { + const done = events.find((event) => event.type === "done")?.message; + expect(done).toBeDefined(); + return done!; +} + +function assistantText(message: AssistantMessage): string { + return message.content + .filter((block) => block.type === "text") + .map((block) => block.text) + .join(""); +} + /** Each test gets a unique session ID to avoid cross-test interference. 
*/ const sessions: string[] = []; function freshSession(name: string): string { @@ -68,26 +123,14 @@ describe("OpenAI WebSocket e2e", () => { async () => { const sid = freshSession("single"); const streamFn = createOpenAIWebSocketStreamFn(API_KEY!, sid); - const stream = streamFn(model, makeContext("What is 2+2?"), {}); + const stream = streamFn(model, makeContext("What is 2+2?"), { transport: "websocket" }); + const done = expectDone(await collectEvents(stream)); - const events: Array<{ type: string }> = []; - for await (const event of stream as AsyncIterable<{ type: string }>) { - events.push(event); - } - - const done = events.find((e) => e.type === "done") as - | { type: "done"; message: { content: Array<{ type: string; text?: string }> } } - | undefined; - expect(done).toBeDefined(); - expect(done!.message.content.length).toBeGreaterThan(0); - - const text = done!.message.content - .filter((c) => c.type === "text") - .map((c) => c.text) - .join(""); + expect(done.content.length).toBeGreaterThan(0); + const text = assistantText(done); expect(text).toMatch(/4/); }, - 30_000, + 45_000, ); testFn( @@ -96,19 +139,80 @@ describe("OpenAI WebSocket e2e", () => { const sid = freshSession("temp"); const streamFn = createOpenAIWebSocketStreamFn(API_KEY!, sid); const stream = streamFn(model, makeContext("Pick a random number between 1 and 1000."), { + transport: "websocket", temperature: 0.8, }); - - const events: Array<{ type: string }> = []; - for await (const event of stream as AsyncIterable<{ type: string }>) { - events.push(event); - } + const events = await collectEvents(stream); // Stream must complete (done or error with fallback) — must NOT hang. 
const hasTerminal = events.some((e) => e.type === "done" || e.type === "error"); expect(hasTerminal).toBe(true); }, - 30_000, + 45_000, + ); + + testFn( + "reuses the websocket session for tool-call follow-up turns", + async () => { + const sid = freshSession("tool-roundtrip"); + const streamFn = createOpenAIWebSocketStreamFn(API_KEY!, sid); + const firstContext = makeToolContext( + "Call the tool `noop` with {}. After the tool result arrives, reply with exactly the tool output and nothing else.", + ); + const firstEvents = await collectEvents( + streamFn(model, firstContext, { + transport: "websocket", + toolChoice: "required", + maxTokens: 128, + } as unknown as StreamFnParams[2]), + ); + const firstDone = expectDone(firstEvents); + const toolCall = firstDone.content.find((block) => block.type === "toolCall") as + | { type: "toolCall"; id: string; name: string } + | undefined; + expect(toolCall?.name).toBe("noop"); + expect(toolCall?.id).toBeTruthy(); + + const secondContext = { + ...firstContext, + messages: [ + ...firstContext.messages, + firstDone, + makeToolResultMessage(toolCall!.id, "TOOL_OK"), + ], + } as unknown as StreamFnParams[1]; + const secondDone = expectDone( + await collectEvents( + streamFn(model, secondContext, { + transport: "websocket", + maxTokens: 128, + }), + ), + ); + + expect(assistantText(secondDone)).toMatch(/TOOL_OK/); + }, + 60_000, + ); + + testFn( + "supports websocket warm-up before the first request", + async () => { + const sid = freshSession("warmup"); + const streamFn = createOpenAIWebSocketStreamFn(API_KEY!, sid); + const done = expectDone( + await collectEvents( + streamFn(model, makeContext("Reply with the word warmed."), { + transport: "websocket", + openaiWsWarmup: true, + maxTokens: 32, + } as unknown as StreamFnParams[2]), + ), + ); + + expect(assistantText(done).toLowerCase()).toContain("warmed"); + }, + 45_000, ); testFn( @@ -119,16 +223,13 @@ describe("OpenAI WebSocket e2e", () => { 
expect(hasWsSession(sid)).toBe(false); - const stream = streamFn(model, makeContext("Say hello."), {}); - for await (const _ of stream as AsyncIterable) { - /* consume */ - } + await collectEvents(streamFn(model, makeContext("Say hello."), { transport: "websocket" })); expect(hasWsSession(sid)).toBe(true); releaseWsSession(sid); expect(hasWsSession(sid)).toBe(false); }, - 30_000, + 45_000, ); testFn( @@ -137,15 +238,11 @@ describe("OpenAI WebSocket e2e", () => { const sid = freshSession("fallback"); const streamFn = createOpenAIWebSocketStreamFn("sk-invalid-key", sid); const stream = streamFn(model, makeContext("Hello"), {}); - - const events: Array<{ type: string }> = []; - for await (const event of stream as AsyncIterable<{ type: string }>) { - events.push(event); - } + const events = await collectEvents(stream); const hasTerminal = events.some((e) => e.type === "done" || e.type === "error"); expect(hasTerminal).toBe(true); }, - 30_000, + 45_000, ); }); diff --git a/src/agents/openai-ws-stream.test.ts b/src/agents/openai-ws-stream.test.ts index a9c3679f561..cd3425bec83 100644 --- a/src/agents/openai-ws-stream.test.ts +++ b/src/agents/openai-ws-stream.test.ts @@ -224,6 +224,7 @@ type FakeMessage = | { role: "assistant"; content: unknown[]; + phase?: "commentary" | "final_answer"; stopReason: string; api: string; provider: string; @@ -247,6 +248,7 @@ function userMsg(text: string): FakeMessage { function assistantMsg( textBlocks: string[], toolCalls: Array<{ id: string; name: string; args: Record }> = [], + phase?: "commentary" | "final_answer", ): FakeMessage { const content: unknown[] = []; for (const t of textBlocks) { @@ -258,6 +260,7 @@ function assistantMsg( return { role: "assistant", content, + phase, stopReason: toolCalls.length > 0 ? 
"toolUse" : "stop", api: "openai-responses", provider: "openai", @@ -302,6 +305,7 @@ function makeResponseObject( id: string, outputText?: string, toolCallName?: string, + phase?: "commentary" | "final_answer", ): ResponseObject { const output: ResponseObject["output"] = []; if (outputText) { @@ -310,6 +314,7 @@ function makeResponseObject( id: "item_1", role: "assistant", content: [{ type: "output_text", text: outputText }], + phase, }); } if (toolCallName) { @@ -357,18 +362,16 @@ describe("convertTools", () => { expect(result).toHaveLength(1); expect(result[0]).toMatchObject({ type: "function", - function: { - name: "exec", - description: "Run a command", - parameters: { type: "object", properties: { cmd: { type: "string" } } }, - }, + name: "exec", + description: "Run a command", + parameters: { type: "object", properties: { cmd: { type: "string" } } }, }); }); it("handles tools without description", () => { const tools = [{ name: "ping", description: "", parameters: {} }]; const result = convertTools(tools as Parameters[0]); - expect(result[0]?.function?.name).toBe("ping"); + expect(result[0]?.name).toBe("ping"); }); }); @@ -391,6 +394,19 @@ describe("convertMessagesToInputItems", () => { expect(items[0]).toMatchObject({ type: "message", role: "assistant", content: "Hi there." 
}); }); + it("preserves assistant phase on replayed assistant messages", () => { + const items = convertMessagesToInputItems([ + assistantMsg(["Working on it."], [], "commentary"), + ] as Parameters[0]); + expect(items).toHaveLength(1); + expect(items[0]).toMatchObject({ + type: "message", + role: "assistant", + content: "Working on it.", + phase: "commentary", + }); + }); + it("converts an assistant message with a tool call", () => { const msg = assistantMsg( ["Let me run that."], @@ -408,10 +424,58 @@ describe("convertMessagesToInputItems", () => { call_id: "call_1", name: "exec", }); + expect(textItem).not.toHaveProperty("phase"); const fc = fcItem as { arguments: string }; expect(JSON.parse(fc.arguments)).toEqual({ cmd: "ls" }); }); + it("preserves assistant phase on commentary text before tool calls", () => { + const msg = assistantMsg( + ["Let me run that."], + [{ id: "call_1", name: "exec", args: { cmd: "ls" } }], + "commentary", + ); + const items = convertMessagesToInputItems([msg] as Parameters< + typeof convertMessagesToInputItems + >[0]); + const textItem = items.find((i) => i.type === "message"); + expect(textItem).toMatchObject({ + type: "message", + role: "assistant", + content: "Let me run that.", + phase: "commentary", + }); + }); + + it("preserves assistant phase from textSignature metadata without local phase field", () => { + const msg = { + role: "assistant" as const, + content: [ + { + type: "text" as const, + text: "Working on it.", + textSignature: JSON.stringify({ v: 1, id: "msg_sig", phase: "commentary" }), + }, + ], + stopReason: "stop", + api: "openai-responses", + provider: "openai", + model: "gpt-5.2", + usage: {}, + timestamp: 0, + }; + const items = convertMessagesToInputItems([msg] as Parameters< + typeof convertMessagesToInputItems + >[0]); + expect(items).toHaveLength(1); + expect(items[0]).toMatchObject({ + type: "message", + role: "assistant", + content: "Working on it.", + phase: "commentary", + }); + }); + it("converts a tool 
result message", () => { const items = convertMessagesToInputItems([toolResultMsg("call_1", "file.txt")] as Parameters< typeof convertMessagesToInputItems @@ -518,6 +582,34 @@ describe("convertMessagesToInputItems", () => { expect((items[0] as { content?: unknown }).content).toBe("Here is my answer."); }); + it("replays reasoning blocks from thinking signatures", () => { + const msg = { + role: "assistant" as const, + content: [ + { + type: "thinking" as const, + thinking: "internal reasoning...", + thinkingSignature: JSON.stringify({ + type: "reasoning", + id: "rs_test", + summary: [], + }), + }, + { type: "text" as const, text: "Here is my answer." }, + ], + stopReason: "stop", + api: "openai-responses", + provider: "openai", + model: "gpt-5.2", + usage: {}, + timestamp: 0, + }; + const items = convertMessagesToInputItems([msg] as Parameters< + typeof convertMessagesToInputItems + >[0]); + expect(items.map((item) => item.type)).toEqual(["reasoning", "message"]); + }); + it("returns empty array for empty messages", () => { expect(convertMessagesToInputItems([])).toEqual([]); }); @@ -594,6 +686,16 @@ describe("buildAssistantMessageFromResponse", () => { expect(msg.content).toEqual([]); expect(msg.stopReason).toBe("stop"); }); + + it("preserves phase from assistant message output items", () => { + const response = makeResponseObject("resp_8", "Final answer", undefined, "final_answer"); + const msg = buildAssistantMessageFromResponse(response, modelInfo) as { + phase?: string; + content: Array<{ type: string; text?: string }>; + }; + expect(msg.phase).toBe("final_answer"); + expect(msg.content[0]?.text).toBe("Final answer"); + }); }); // ───────────────────────────────────────────────────────────────────────────── @@ -633,6 +735,7 @@ describe("createOpenAIWebSocketStreamFn", () => { releaseWsSession("sess-fallback"); releaseWsSession("sess-incremental"); releaseWsSession("sess-full"); + releaseWsSession("sess-phase"); releaseWsSession("sess-tools"); 
releaseWsSession("sess-store-default"); releaseWsSession("sess-store-compat"); @@ -795,6 +898,40 @@ describe("createOpenAIWebSocketStreamFn", () => { expect(doneEvent?.message.content[0]?.text).toBe("Hello back!"); }); + it("keeps assistant phase on completed WebSocket responses", async () => { + const streamFn = createOpenAIWebSocketStreamFn("sk-test", "sess-phase"); + const stream = streamFn( + modelStub as Parameters[0], + contextStub as Parameters[1], + ); + + const events: unknown[] = []; + const done = (async () => { + for await (const ev of await resolveStream(stream)) { + events.push(ev); + } + })(); + + await new Promise((r) => setImmediate(r)); + const manager = MockManager.lastInstance!; + manager.simulateEvent({ + type: "response.completed", + response: makeResponseObject("resp_phase", "Working...", "exec", "commentary"), + }); + + await done; + + const doneEvent = events.find((e) => (e as { type?: string }).type === "done") as + | { + type: string; + reason: string; + message: { phase?: string; stopReason: string }; + } + | undefined; + expect(doneEvent?.message.phase).toBe("commentary"); + expect(doneEvent?.message.stopReason).toBe("toolUse"); + }); + it("falls back to HTTP when WebSocket connect fails (session pre-broken via flag)", async () => { // Set the class-level flag BEFORE calling streamFn so the new instance // fails on connect(). We patch the static default via MockManager directly. 
diff --git a/src/agents/openai-ws-stream.ts b/src/agents/openai-ws-stream.ts index e04cac5a7b6..307812e6be5 100644 --- a/src/agents/openai-ws-stream.ts +++ b/src/agents/openai-ws-stream.ts @@ -37,6 +37,7 @@ import { type ContentPart, type FunctionToolDefinition, type InputItem, + type OpenAIResponsesAssistantPhase, type OpenAIWebSocketManagerOptions, type ResponseObject, } from "./openai-ws-connection.js"; @@ -100,6 +101,8 @@ export function hasWsSession(sessionId: string): boolean { // ───────────────────────────────────────────────────────────────────────────── type AnyMessage = Message & { role: string; content: unknown }; +type AssistantMessageWithPhase = AssistantMessage & { phase?: OpenAIResponsesAssistantPhase }; +type ReplayModelInfo = { input?: ReadonlyArray }; function toNonEmptyString(value: unknown): string | null { if (typeof value !== "string") { @@ -109,6 +112,50 @@ function toNonEmptyString(value: unknown): string | null { return trimmed.length > 0 ? trimmed : null; } +function normalizeAssistantPhase(value: unknown): OpenAIResponsesAssistantPhase | undefined { + return value === "commentary" || value === "final_answer" ? value : undefined; +} + +function encodeAssistantTextSignature(params: { + id: string; + phase?: OpenAIResponsesAssistantPhase; +}): string { + return JSON.stringify({ + v: 1, + id: params.id, + ...(params.phase ? { phase: params.phase } : {}), + }); +} + +function parseAssistantTextSignature( + value: unknown, +): { id: string; phase?: OpenAIResponsesAssistantPhase } | null { + if (typeof value !== "string" || value.trim().length === 0) { + return null; + } + if (!value.startsWith("{")) { + return { id: value }; + } + try { + const parsed = JSON.parse(value) as { v?: unknown; id?: unknown; phase?: unknown }; + if (parsed.v !== 1 || typeof parsed.id !== "string") { + return null; + } + return { + id: parsed.id, + ...(normalizeAssistantPhase(parsed.phase) + ? 
{ phase: normalizeAssistantPhase(parsed.phase) } + : {}), + }; + } catch { + return null; + } +} + +function supportsImageInput(modelOverride?: ReplayModelInfo): boolean { + return !Array.isArray(modelOverride?.input) || modelOverride.input.includes("image"); +} + /** Convert pi-ai content (string | ContentPart[]) to plain text. */ function contentToText(content: unknown): string { if (typeof content === "string") { @@ -117,30 +164,50 @@ function contentToText(content: unknown): string { if (!Array.isArray(content)) { return ""; } - return (content as Array<{ type?: string; text?: string }>) - .filter((p) => p.type === "text" && typeof p.text === "string") - .map((p) => p.text as string) + return content + .filter( + (part): part is { type?: string; text?: string } => Boolean(part) && typeof part === "object", + ) + .filter( + (part) => + (part.type === "text" || part.type === "input_text" || part.type === "output_text") && + typeof part.text === "string", + ) + .map((part) => part.text as string) .join(""); } /** Convert pi-ai content to OpenAI ContentPart[]. */ -function contentToOpenAIParts(content: unknown): ContentPart[] { +function contentToOpenAIParts(content: unknown, modelOverride?: ReplayModelInfo): ContentPart[] { if (typeof content === "string") { return content ? 
[{ type: "input_text", text: content }] : []; } if (!Array.isArray(content)) { return []; } + + const includeImages = supportsImageInput(modelOverride); const parts: ContentPart[] = []; for (const part of content as Array<{ type?: string; text?: string; data?: string; mimeType?: string; + source?: unknown; }>) { - if (part.type === "text" && typeof part.text === "string") { + if ( + (part.type === "text" || part.type === "input_text" || part.type === "output_text") && + typeof part.text === "string" + ) { parts.push({ type: "input_text", text: part.text }); - } else if (part.type === "image" && typeof part.data === "string") { + continue; + } + + if (!includeImages) { + continue; + } + + if (part.type === "image" && typeof part.data === "string") { parts.push({ type: "input_image", source: { @@ -149,11 +216,60 @@ function contentToOpenAIParts(content: unknown): ContentPart[] { data: part.data, }, }); + continue; + } + + if ( + part.type === "input_image" && + part.source && + typeof part.source === "object" && + typeof (part.source as { type?: unknown }).type === "string" + ) { + parts.push({ + type: "input_image", + source: part.source as + | { type: "url"; url: string } + | { type: "base64"; media_type: string; data: string }, + }); } } return parts; } +function parseReasoningItem(value: unknown): Extract | null { + if (!value || typeof value !== "object") { + return null; + } + const record = value as { + type?: unknown; + content?: unknown; + encrypted_content?: unknown; + summary?: unknown; + }; + if (record.type !== "reasoning") { + return null; + } + return { + type: "reasoning", + ...(typeof record.content === "string" ? { content: record.content } : {}), + ...(typeof record.encrypted_content === "string" + ? { encrypted_content: record.encrypted_content } + : {}), + ...(typeof record.summary === "string" ? 
{ summary: record.summary } : {}), + }; +} + +function parseThinkingSignature(value: unknown): Extract | null { + if (typeof value !== "string" || value.trim().length === 0) { + return null; + } + try { + return parseReasoningItem(JSON.parse(value)); + } catch { + return null; + } +} + /** Convert pi-ai tool array to OpenAI FunctionToolDefinition[]. */ export function convertTools(tools: Context["tools"]): FunctionToolDefinition[] { if (!tools || tools.length === 0) { @@ -161,11 +277,9 @@ export function convertTools(tools: Context["tools"]): FunctionToolDefinition[] } return tools.map((tool) => ({ type: "function" as const, - function: { - name: tool.name, - description: typeof tool.description === "string" ? tool.description : undefined, - parameters: (tool.parameters ?? {}) as Record, - }, + name: tool.name, + description: typeof tool.description === "string" ? tool.description : undefined, + parameters: (tool.parameters ?? {}) as Record, })); } @@ -173,14 +287,24 @@ export function convertTools(tools: Context["tools"]): FunctionToolDefinition[] * Convert the full pi-ai message history to an OpenAI `input` array. * Handles user messages, assistant text+tool-call messages, and tool results. 
*/ -export function convertMessagesToInputItems(messages: Message[]): InputItem[] { +export function convertMessagesToInputItems( + messages: Message[], + modelOverride?: ReplayModelInfo, +): InputItem[] { const items: InputItem[] = []; for (const msg of messages) { - const m = msg as AnyMessage; + const m = msg as AnyMessage & { + phase?: unknown; + toolCallId?: unknown; + toolUseId?: unknown; + }; if (m.role === "user") { - const parts = contentToOpenAIParts(m.content); + const parts = contentToOpenAIParts(m.content, modelOverride); + if (parts.length === 0) { + continue; + } items.push({ type: "message", role: "user", @@ -194,87 +318,116 @@ export function convertMessagesToInputItems(messages: Message[]): InputItem[] { if (m.role === "assistant") { const content = m.content; + let assistantPhase = normalizeAssistantPhase(m.phase); if (Array.isArray(content)) { - // Collect text blocks and tool calls separately const textParts: string[] = []; - for (const block of content as Array<{ - type?: string; - text?: string; - id?: string; - name?: string; - arguments?: Record; - thinking?: string; - }>) { - if (block.type === "text" && typeof block.text === "string") { - textParts.push(block.text); - } else if (block.type === "thinking" && typeof block.thinking === "string") { - // Skip thinking blocks — not sent back to the model - } else if (block.type === "toolCall") { - // Push accumulated text first - if (textParts.length > 0) { - items.push({ - type: "message", - role: "assistant", - content: textParts.join(""), - }); - textParts.length = 0; - } - const callId = toNonEmptyString(block.id); - const toolName = toNonEmptyString(block.name); - if (!callId || !toolName) { - continue; - } - // Push function_call item - items.push({ - type: "function_call", - call_id: callId, - name: toolName, - arguments: - typeof block.arguments === "string" - ? block.arguments - : JSON.stringify(block.arguments ?? 
{}), - }); + const pushAssistantText = () => { + if (textParts.length === 0) { + return; } - } - if (textParts.length > 0) { items.push({ type: "message", role: "assistant", content: textParts.join(""), + ...(assistantPhase ? { phase: assistantPhase } : {}), }); - } - } else { - const text = contentToText(m.content); - if (text) { + textParts.length = 0; + }; + + for (const block of content as Array<{ + type?: string; + text?: string; + textSignature?: unknown; + id?: unknown; + name?: unknown; + arguments?: unknown; + thinkingSignature?: unknown; + }>) { + if (block.type === "text" && typeof block.text === "string") { + const parsedSignature = parseAssistantTextSignature(block.textSignature); + if (!assistantPhase) { + assistantPhase = parsedSignature?.phase; + } + textParts.push(block.text); + continue; + } + + if (block.type === "thinking") { + pushAssistantText(); + const reasoningItem = parseThinkingSignature(block.thinkingSignature); + if (reasoningItem) { + items.push(reasoningItem); + } + continue; + } + + if (block.type !== "toolCall") { + continue; + } + + pushAssistantText(); + const callIdRaw = toNonEmptyString(block.id); + const toolName = toNonEmptyString(block.name); + if (!callIdRaw || !toolName) { + continue; + } + const [callId, itemId] = callIdRaw.split("|", 2); items.push({ - type: "message", - role: "assistant", - content: text, + type: "function_call", + ...(itemId ? { id: itemId } : {}), + call_id: callId, + name: toolName, + arguments: + typeof block.arguments === "string" + ? block.arguments + : JSON.stringify(block.arguments ?? {}), }); } + + pushAssistantText(); + continue; } + + const text = contentToText(content); + if (!text) { + continue; + } + items.push({ + type: "message", + role: "assistant", + content: text, + ...(assistantPhase ? 
{ phase: assistantPhase } : {}), + }); continue; } - if (m.role === "toolResult") { - const tr = m as unknown as { - toolCallId?: string; - toolUseId?: string; - content: unknown; - isError: boolean; - }; - const callId = toNonEmptyString(tr.toolCallId) ?? toNonEmptyString(tr.toolUseId); - if (!callId) { - continue; - } - const outputText = contentToText(tr.content); - items.push({ - type: "function_call_output", - call_id: callId, - output: outputText, - }); + if (m.role !== "toolResult") { continue; } + + const toolCallId = toNonEmptyString(m.toolCallId) ?? toNonEmptyString(m.toolUseId); + if (!toolCallId) { + continue; + } + const [callId] = toolCallId.split("|", 2); + const parts = Array.isArray(m.content) ? contentToOpenAIParts(m.content, modelOverride) : []; + const textOutput = contentToText(m.content); + const imageParts = parts.filter((part) => part.type === "input_image"); + items.push({ + type: "function_call_output", + call_id: callId, + output: textOutput || (imageParts.length > 0 ? "(see attached image)" : ""), + }); + if (imageParts.length > 0) { + items.push({ + type: "message", + role: "user", + content: [ + { type: "input_text", text: "Attached image(s) from tool result:" }, + ...imageParts, + ], + }); + } } return items; @@ -289,12 +442,24 @@ export function buildAssistantMessageFromResponse( modelInfo: { api: string; provider: string; id: string }, ): AssistantMessage { const content: (TextContent | ToolCall)[] = []; + let assistantPhase: OpenAIResponsesAssistantPhase | undefined; for (const item of response.output ?? []) { if (item.type === "message") { + const itemPhase = normalizeAssistantPhase(item.phase); + if (itemPhase) { + assistantPhase = itemPhase; + } for (const part of item.content ?? []) { if (part.type === "output_text" && part.text) { - content.push({ type: "text", text: part.text }); + content.push({ + type: "text", + text: part.text, + textSignature: encodeAssistantTextSignature({ + id: item.id, + ...(itemPhase ? 
{ phase: itemPhase } : {}), + }), + }); } } } else if (item.type === "function_call") { @@ -321,7 +486,7 @@ export function buildAssistantMessageFromResponse( const hasToolCalls = content.some((c) => c.type === "toolCall"); const stopReason: StopReason = hasToolCalls ? "toolUse" : "stop"; - return buildAssistantMessage({ + const message = buildAssistantMessage({ model: modelInfo, content, stopReason, @@ -331,6 +496,10 @@ export function buildAssistantMessageFromResponse( totalTokens: response.usage?.total_tokens ?? 0, }), }); + + return assistantPhase + ? ({ ...message, phase: assistantPhase } as AssistantMessageWithPhase) + : message; } // ───────────────────────────────────────────────────────────────────────────── @@ -504,6 +673,7 @@ export function createOpenAIWebSocketStreamFn( if (resolveWsWarmup(options) && !session.warmUpAttempted) { session.warmUpAttempted = true; + let warmupFailed = false; try { await runWarmUp({ manager: session.manager, @@ -517,10 +687,33 @@ export function createOpenAIWebSocketStreamFn( if (signal?.aborted) { throw warmErr instanceof Error ? warmErr : new Error(String(warmErr)); } + warmupFailed = true; log.warn( `[ws-stream] warm-up failed for session=${sessionId}; continuing without warm-up. error=${String(warmErr)}`, ); } + if (warmupFailed && !session.manager.isConnected()) { + try { + session.manager.close(); + } catch { + /* ignore */ + } + try { + await session.manager.connect(apiKey); + session.everConnected = true; + log.debug(`[ws-stream] reconnected after warm-up failure for session=${sessionId}`); + } catch (reconnectErr) { + session.broken = true; + wsRegistry.delete(sessionId); + if (transport === "websocket") { + throw reconnectErr instanceof Error ? reconnectErr : new Error(String(reconnectErr)); + } + log.warn( + `[ws-stream] reconnect after warm-up failed for session=${sessionId}; falling back to HTTP. 
error=${String(reconnectErr)}`, + ); + return fallbackToHttp(model, context, options, eventStream, opts.signal); + } + } } // ── 3. Compute incremental vs full input ───────────────────────────── @@ -537,16 +730,16 @@ export function createOpenAIWebSocketStreamFn( log.debug( `[ws-stream] session=${sessionId}: no new tool results found; sending full context`, ); - inputItems = buildFullInput(context); + inputItems = buildFullInput(context, model); } else { - inputItems = convertMessagesToInputItems(toolResults); + inputItems = convertMessagesToInputItems(toolResults, model); } log.debug( `[ws-stream] session=${sessionId}: incremental send (${inputItems.length} tool results) previous_response_id=${prevResponseId}`, ); } else { // First turn: send full context - inputItems = buildFullInput(context); + inputItems = buildFullInput(context, model); log.debug( `[ws-stream] session=${sessionId}: full context send (${inputItems.length} items)`, ); @@ -604,10 +797,13 @@ export function createOpenAIWebSocketStreamFn( ...(prevResponseId ? { previous_response_id: prevResponseId } : {}), ...extraParams, }; - options?.onPayload?.(payload, model); + const nextPayload = options?.onPayload?.(payload, model); + const requestPayload = (nextPayload ?? payload) as Parameters< + OpenAIWebSocketManager["send"] + >[0]; try { - session.manager.send(payload as Parameters[0]); + session.manager.send(requestPayload); } catch (sendErr) { if (transport === "websocket") { throw sendErr instanceof Error ? sendErr : new Error(String(sendErr)); @@ -730,8 +926,8 @@ export function createOpenAIWebSocketStreamFn( // ───────────────────────────────────────────────────────────────────────────── /** Build full input items from context (system prompt is passed via `instructions` field). 
*/ -function buildFullInput(context: Context): InputItem[] { - return convertMessagesToInputItems(context.messages); +function buildFullInput(context: Context, model: ReplayModelInfo): InputItem[] { + return convertMessagesToInputItems(context.messages, model); } /** diff --git a/src/agents/openclaw-tools.camera.test.ts b/src/agents/openclaw-tools.camera.test.ts index 83c4d3e48d6..5d3f14772fd 100644 --- a/src/agents/openclaw-tools.camera.test.ts +++ b/src/agents/openclaw-tools.camera.test.ts @@ -135,11 +135,10 @@ function setupNodeInvokeMock(params: { function createSystemRunPreparePayload(cwd: string | null) { return { payload: { - cmdText: "echo hi", plan: { argv: ["echo", "hi"], cwd, - rawCommand: "echo hi", + commandText: "echo hi", agentId: null, sessionKey: null, }, @@ -662,10 +661,9 @@ describe("nodes run", () => { onApprovalRequest: (approvalParams) => { expect(approvalParams).toMatchObject({ id: expect.any(String), - command: "echo hi", - commandArgv: ["echo", "hi"], systemRunPlan: expect.objectContaining({ argv: ["echo", "hi"], + commandText: "echo hi", }), nodeId: NODE_ID, host: "node", diff --git a/src/agents/openclaw-tools.owner-authorization.test.ts b/src/agents/openclaw-tools.owner-authorization.test.ts new file mode 100644 index 00000000000..47892235bb6 --- /dev/null +++ b/src/agents/openclaw-tools.owner-authorization.test.ts @@ -0,0 +1,22 @@ +import { describe, expect, it } from "vitest"; +import "./test-helpers/fast-core-tools.js"; +import { createOpenClawTools } from "./openclaw-tools.js"; + +function readToolByName() { + return new Map(createOpenClawTools().map((tool) => [tool.name, tool])); +} + +describe("createOpenClawTools owner authorization", () => { + it("marks owner-only core tools in raw registration", () => { + const tools = readToolByName(); + expect(tools.get("cron")?.ownerOnly).toBe(true); + expect(tools.get("gateway")?.ownerOnly).toBe(true); + expect(tools.get("nodes")?.ownerOnly).toBe(true); + }); + + it("keeps canvas 
non-owner-only in raw registration", () => { + const tools = readToolByName(); + expect(tools.get("canvas")).toBeDefined(); + expect(tools.get("canvas")?.ownerOnly).not.toBe(true); + }); +}); diff --git a/src/agents/openclaw-tools.session-status.test.ts b/src/agents/openclaw-tools.session-status.test.ts index dd361b70e67..0bc079d4ced 100644 --- a/src/agents/openclaw-tools.session-status.test.ts +++ b/src/agents/openclaw-tools.session-status.test.ts @@ -2,6 +2,23 @@ import { describe, expect, it, vi } from "vitest"; const loadSessionStoreMock = vi.fn(); const updateSessionStoreMock = vi.fn(); +const callGatewayMock = vi.fn(); +const loadCombinedSessionStoreForGatewayMock = vi.fn(); + +const createMockConfig = () => ({ + session: { mainKey: "main", scope: "per-sender" }, + agents: { + defaults: { + model: { primary: "anthropic/claude-opus-4-5" }, + models: {}, + }, + }, + tools: { + agentToAgent: { enabled: false }, + }, +}); + +let mockConfig: Record = createMockConfig(); vi.mock("../config/sessions.js", async (importOriginal) => { const actual = await importOriginal(); @@ -22,19 +39,24 @@ vi.mock("../config/sessions.js", async (importOriginal) => { }; }); +vi.mock("../gateway/call.js", () => ({ + callGateway: (opts: unknown) => callGatewayMock(opts), +})); + +vi.mock("../gateway/session-utils.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + loadCombinedSessionStoreForGateway: (cfg: unknown) => + loadCombinedSessionStoreForGatewayMock(cfg), + }; +}); + vi.mock("../config/config.js", async (importOriginal) => { const actual = await importOriginal(); return { ...actual, - loadConfig: () => ({ - session: { mainKey: "main", scope: "per-sender" }, - agents: { - defaults: { - model: { primary: "anthropic/claude-opus-4-5" }, - models: {}, - }, - }, - }), + loadConfig: () => mockConfig, }; }); @@ -63,7 +85,7 @@ vi.mock("../agents/auth-profiles.js", () => ({ vi.mock("../agents/model-auth.js", () => ({ resolveEnvApiKey: () => 
null, - getCustomProviderApiKey: () => null, + resolveUsableCustomProviderApiKey: () => null, resolveModelAuthMode: () => "api-key", })); @@ -82,13 +104,66 @@ import { createOpenClawTools } from "./openclaw-tools.js"; function resetSessionStore(store: Record) { loadSessionStoreMock.mockClear(); updateSessionStoreMock.mockClear(); + callGatewayMock.mockClear(); + loadCombinedSessionStoreForGatewayMock.mockClear(); loadSessionStoreMock.mockReturnValue(store); + loadCombinedSessionStoreForGatewayMock.mockReturnValue({ + storePath: "(multiple)", + store, + }); + callGatewayMock.mockResolvedValue({}); + mockConfig = createMockConfig(); } -function getSessionStatusTool(agentSessionKey = "main") { - const tool = createOpenClawTools({ agentSessionKey }).find( - (candidate) => candidate.name === "session_status", - ); +function installSandboxedSessionStatusConfig() { + mockConfig = { + session: { mainKey: "main", scope: "per-sender" }, + tools: { + sessions: { visibility: "all" }, + agentToAgent: { enabled: true, allow: ["*"] }, + }, + agents: { + defaults: { + model: { primary: "anthropic/claude-opus-4-5" }, + models: {}, + sandbox: { sessionToolsVisibility: "spawned" }, + }, + }, + }; +} + +function mockSpawnedSessionList( + resolveSessions: (spawnedBy: string | undefined) => Array>, +) { + callGatewayMock.mockImplementation(async (opts: unknown) => { + const request = opts as { method?: string; params?: Record }; + if (request.method === "sessions.list") { + return { sessions: resolveSessions(request.params?.spawnedBy as string | undefined) }; + } + return {}; + }); +} + +function expectSpawnedSessionLookupCalls(spawnedBy: string) { + const expectedCall = { + method: "sessions.list", + params: { + includeGlobal: false, + includeUnknown: false, + limit: 500, + spawnedBy, + }, + }; + expect(callGatewayMock).toHaveBeenCalledTimes(2); + expect(callGatewayMock).toHaveBeenNthCalledWith(1, expectedCall); + expect(callGatewayMock).toHaveBeenNthCalledWith(2, expectedCall); +} + 
+function getSessionStatusTool(agentSessionKey = "main", options?: { sandboxed?: boolean }) { + const tool = createOpenClawTools({ + agentSessionKey, + sandboxed: options?.sandboxed, + }).find((candidate) => candidate.name === "session_status"); expect(tool).toBeDefined(); if (!tool) { throw new Error("missing session_status tool"); @@ -145,6 +220,30 @@ describe("session_status tool", () => { expect(details.sessionKey).toBe("agent:main:main"); }); + it("resolves duplicate sessionId inputs deterministically", async () => { + resetSessionStore({ + "agent:main:main": { + sessionId: "current", + updatedAt: 10, + }, + "agent:main:other": { + sessionId: "run-dup", + updatedAt: 999, + }, + "agent:main:acp:run-dup": { + sessionId: "run-dup", + updatedAt: 100, + }, + }); + + const tool = getSessionStatusTool(); + + const result = await tool.execute("call-dup", { sessionKey: "run-dup" }); + const details = result.details as { ok?: boolean; sessionKey?: string }; + expect(details.ok).toBe(true); + expect(details.sessionKey).toBe("agent:main:acp:run-dup"); + }); + it("uses non-standard session keys without sessionId resolution", async () => { resetSessionStore({ "temp:slug-generator": { @@ -176,6 +275,78 @@ describe("session_status tool", () => { ); }); + it("blocks sandboxed child session_status access outside its tree before store lookup", async () => { + resetSessionStore({ + "agent:main:subagent:child": { + sessionId: "s-child", + updatedAt: 20, + }, + "agent:main:main": { + sessionId: "s-parent", + updatedAt: 10, + }, + }); + installSandboxedSessionStatusConfig(); + mockSpawnedSessionList(() => []); + + const tool = getSessionStatusTool("agent:main:subagent:child", { + sandboxed: true, + }); + const expectedError = "Session status visibility is restricted to the current session tree"; + + await expect( + tool.execute("call6", { + sessionKey: "agent:main:main", + model: "anthropic/claude-sonnet-4-5", + }), + ).rejects.toThrow(expectedError); + + await expect( + 
tool.execute("call7", { + sessionKey: "agent:main:subagent:missing", + }), + ).rejects.toThrow(expectedError); + + expect(loadSessionStoreMock).not.toHaveBeenCalled(); + expect(updateSessionStoreMock).not.toHaveBeenCalled(); + expectSpawnedSessionLookupCalls("agent:main:subagent:child"); + }); + + it("keeps legacy main requester keys for sandboxed session tree checks", async () => { + resetSessionStore({ + "agent:main:main": { + sessionId: "s-main", + updatedAt: 10, + }, + "agent:main:subagent:child": { + sessionId: "s-child", + updatedAt: 20, + }, + }); + installSandboxedSessionStatusConfig(); + mockSpawnedSessionList((spawnedBy) => + spawnedBy === "main" ? [{ key: "agent:main:subagent:child" }] : [], + ); + + const tool = getSessionStatusTool("main", { + sandboxed: true, + }); + + const mainResult = await tool.execute("call8", {}); + const mainDetails = mainResult.details as { ok?: boolean; sessionKey?: string }; + expect(mainDetails.ok).toBe(true); + expect(mainDetails.sessionKey).toBe("agent:main:main"); + + const childResult = await tool.execute("call9", { + sessionKey: "agent:main:subagent:child", + }); + const childDetails = childResult.details as { ok?: boolean; sessionKey?: string }; + expect(childDetails.ok).toBe(true); + expect(childDetails.sessionKey).toBe("agent:main:subagent:child"); + + expectSpawnedSessionLookupCalls("main"); + }); + it("scopes bare session keys to the requester agent", async () => { loadSessionStoreMock.mockClear(); updateSessionStoreMock.mockClear(); diff --git a/src/agents/openclaw-tools.subagents.scope.test.ts b/src/agents/openclaw-tools.subagents.scope.test.ts new file mode 100644 index 00000000000..fc233015064 --- /dev/null +++ b/src/agents/openclaw-tools.subagents.scope.test.ts @@ -0,0 +1,226 @@ +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; +import { beforeEach, describe, expect, it } from "vitest"; +import { + callGatewayMock, + resetSubagentsConfigOverride, + 
setSubagentsConfigOverride, +} from "./openclaw-tools.subagents.test-harness.js"; +import { addSubagentRunForTests, resetSubagentRegistryForTests } from "./subagent-registry.js"; +import "./test-helpers/fast-core-tools.js"; +import { createPerSenderSessionConfig } from "./test-helpers/session-config.js"; +import { createSubagentsTool } from "./tools/subagents-tool.js"; + +function writeStore(storePath: string, store: Record) { + fs.mkdirSync(path.dirname(storePath), { recursive: true }); + fs.writeFileSync(storePath, JSON.stringify(store, null, 2), "utf-8"); +} + +function seedLeafOwnedChildSession(storePath: string, leafKey = "agent:main:subagent:leaf") { + const childKey = `${leafKey}:subagent:child`; + writeStore(storePath, { + [leafKey]: { + sessionId: "leaf-session", + updatedAt: Date.now(), + spawnedBy: "agent:main:main", + subagentRole: "leaf", + subagentControlScope: "none", + }, + [childKey]: { + sessionId: "child-session", + updatedAt: Date.now(), + spawnedBy: leafKey, + subagentRole: "leaf", + subagentControlScope: "none", + }, + }); + + addSubagentRunForTests({ + runId: "run-child", + childSessionKey: childKey, + controllerSessionKey: leafKey, + requesterSessionKey: leafKey, + requesterDisplayKey: leafKey, + task: "impossible child", + cleanup: "keep", + createdAt: Date.now() - 30_000, + startedAt: Date.now() - 30_000, + }); + + return { + childKey, + tool: createSubagentsTool({ agentSessionKey: leafKey }), + }; +} + +async function expectLeafSubagentControlForbidden(params: { + storePath: string; + action: "kill" | "steer"; + callId: string; + message?: string; +}) { + const { childKey, tool } = seedLeafOwnedChildSession(params.storePath); + const result = await tool.execute(params.callId, { + action: params.action, + target: childKey, + ...(params.message ? 
{ message: params.message } : {}), + }); + + expect(result.details).toMatchObject({ + status: "forbidden", + error: "Leaf subagents cannot control other sessions.", + }); + expect(callGatewayMock).not.toHaveBeenCalled(); +} + +describe("openclaw-tools: subagents scope isolation", () => { + let storePath = ""; + + beforeEach(() => { + resetSubagentRegistryForTests(); + resetSubagentsConfigOverride(); + callGatewayMock.mockReset(); + storePath = path.join( + os.tmpdir(), + `openclaw-subagents-scope-${Date.now()}-${Math.random().toString(16).slice(2)}.json`, + ); + setSubagentsConfigOverride({ + session: createPerSenderSessionConfig({ store: storePath }), + }); + writeStore(storePath, {}); + }); + + it("leaf subagents do not inherit parent sibling control scope", async () => { + const leafKey = "agent:main:subagent:leaf"; + const siblingKey = "agent:main:subagent:unsandboxed"; + + writeStore(storePath, { + [leafKey]: { + sessionId: "leaf-session", + updatedAt: Date.now(), + spawnedBy: "agent:main:main", + }, + [siblingKey]: { + sessionId: "sibling-session", + updatedAt: Date.now(), + spawnedBy: "agent:main:main", + }, + }); + + addSubagentRunForTests({ + runId: "run-leaf", + childSessionKey: leafKey, + requesterSessionKey: "agent:main:main", + requesterDisplayKey: "main", + task: "sandboxed leaf", + cleanup: "keep", + createdAt: Date.now() - 30_000, + startedAt: Date.now() - 30_000, + }); + addSubagentRunForTests({ + runId: "run-sibling", + childSessionKey: siblingKey, + requesterSessionKey: "agent:main:main", + requesterDisplayKey: "main", + task: "unsandboxed sibling", + cleanup: "keep", + createdAt: Date.now() - 20_000, + startedAt: Date.now() - 20_000, + }); + + const tool = createSubagentsTool({ agentSessionKey: leafKey }); + const result = await tool.execute("call-leaf-list", { action: "list" }); + + expect(result.details).toMatchObject({ + status: "ok", + requesterSessionKey: leafKey, + callerSessionKey: leafKey, + callerIsSubagent: true, + total: 0, + active: 
[], + recent: [], + }); + expect(callGatewayMock).not.toHaveBeenCalled(); + }); + + it("orchestrator subagents still see children they spawned", async () => { + const orchestratorKey = "agent:main:subagent:orchestrator"; + const workerKey = `${orchestratorKey}:subagent:worker`; + const siblingKey = "agent:main:subagent:sibling"; + + writeStore(storePath, { + [orchestratorKey]: { + sessionId: "orchestrator-session", + updatedAt: Date.now(), + spawnedBy: "agent:main:main", + }, + [workerKey]: { + sessionId: "worker-session", + updatedAt: Date.now(), + spawnedBy: orchestratorKey, + }, + [siblingKey]: { + sessionId: "sibling-session", + updatedAt: Date.now(), + spawnedBy: "agent:main:main", + }, + }); + + addSubagentRunForTests({ + runId: "run-worker", + childSessionKey: workerKey, + requesterSessionKey: orchestratorKey, + requesterDisplayKey: orchestratorKey, + task: "worker child", + cleanup: "keep", + createdAt: Date.now() - 30_000, + startedAt: Date.now() - 30_000, + }); + addSubagentRunForTests({ + runId: "run-sibling", + childSessionKey: siblingKey, + requesterSessionKey: "agent:main:main", + requesterDisplayKey: "main", + task: "sibling of orchestrator", + cleanup: "keep", + createdAt: Date.now() - 20_000, + startedAt: Date.now() - 20_000, + }); + + const tool = createSubagentsTool({ agentSessionKey: orchestratorKey }); + const result = await tool.execute("call-orchestrator-list", { action: "list" }); + const details = result.details as { + status?: string; + requesterSessionKey?: string; + total?: number; + active?: Array<{ sessionKey?: string }>; + }; + + expect(details.status).toBe("ok"); + expect(details.requesterSessionKey).toBe(orchestratorKey); + expect(details.total).toBe(1); + expect(details.active).toEqual([ + expect.objectContaining({ + sessionKey: workerKey, + }), + ]); + }); + + it("leaf subagents cannot kill even explicitly-owned child sessions", async () => { + await expectLeafSubagentControlForbidden({ + storePath, + action: "kill", + callId: 
"call-leaf-kill", + }); + }); + + it("leaf subagents cannot steer even explicitly-owned child sessions", async () => { + await expectLeafSubagentControlForbidden({ + storePath, + action: "steer", + callId: "call-leaf-steer", + message: "continue", + }); + }); +}); diff --git a/src/agents/openclaw-tools.subagents.sessions-spawn-depth-limits.test.ts b/src/agents/openclaw-tools.subagents.sessions-spawn-depth-limits.test.ts index 7a5b93d7ae1..34fcbfbafd4 100644 --- a/src/agents/openclaw-tools.subagents.sessions-spawn-depth-limits.test.ts +++ b/src/agents/openclaw-tools.subagents.sessions-spawn-depth-limits.test.ts @@ -85,7 +85,10 @@ describe("sessions_spawn depth + child limits", () => { }); it("rejects spawning when caller depth reaches maxSpawnDepth", async () => { - const tool = createSessionsSpawnTool({ agentSessionKey: "agent:main:subagent:parent" }); + const tool = createSessionsSpawnTool({ + agentSessionKey: "agent:main:subagent:parent", + workspaceDir: "/parent/workspace", + }); const result = await tool.execute("call-depth-reject", { task: "hello" }); expect(result.details).toMatchObject({ @@ -109,13 +112,20 @@ describe("sessions_spawn depth + child limits", () => { const calls = callGatewayMock.mock.calls.map( (call) => call[0] as { method?: string; params?: Record }, ); - const agentCall = calls.find((entry) => entry.method === "agent"); - expect(agentCall?.params?.spawnedBy).toBe("agent:main:subagent:parent"); + const spawnedByPatch = calls.find( + (entry) => + entry.method === "sessions.patch" && + entry.params?.spawnedBy === "agent:main:subagent:parent", + ); + expect(spawnedByPatch?.params?.key).toMatch(/^agent:main:subagent:/); + expect(typeof spawnedByPatch?.params?.spawnedWorkspaceDir).toBe("string"); const spawnDepthPatch = calls.find( (entry) => entry.method === "sessions.patch" && entry.params?.spawnDepth === 2, ); expect(spawnDepthPatch?.params?.key).toMatch(/^agent:main:subagent:/); + expect(spawnDepthPatch?.params?.subagentRole).toBe("leaf"); + 
expect(spawnDepthPatch?.params?.subagentControlScope).toBe("none"); }); it("rejects depth-2 callers when maxSpawnDepth is 2 (using stored spawnDepth on flat keys)", async () => { diff --git a/src/agents/openclaw-tools.ts b/src/agents/openclaw-tools.ts index 17f8e6dadb4..25b5cae0f59 100644 --- a/src/agents/openclaw-tools.ts +++ b/src/agents/openclaw-tools.ts @@ -1,5 +1,6 @@ import type { OpenClawConfig } from "../config/config.js"; import { resolvePluginTools } from "../plugins/tools.js"; +import { getActiveRuntimeWebToolsMetadata } from "../secrets/runtime.js"; import type { GatewayMessageChannel } from "../utils/message-channel.js"; import { resolveSessionAgentId } from "./agent-scope.js"; import type { SandboxFsBridge } from "./sandbox/fs-bridge.js"; @@ -20,6 +21,7 @@ import { createSessionsHistoryTool } from "./tools/sessions-history-tool.js"; import { createSessionsListTool } from "./tools/sessions-list-tool.js"; import { createSessionsSendTool } from "./tools/sessions-send-tool.js"; import { createSessionsSpawnTool } from "./tools/sessions-spawn-tool.js"; +import { createSessionsYieldTool } from "./tools/sessions-yield-tool.js"; import { createSubagentsTool } from "./tools/subagents-tool.js"; import { createTtsTool } from "./tools/tts-tool.js"; import { createWebFetchTool, createWebSearchTool } from "./tools/web-tools.js"; @@ -69,18 +71,32 @@ export function createOpenClawTools( senderIsOwner?: boolean; /** Ephemeral session UUID — regenerated on /new and /reset. */ sessionId?: string; + /** + * Workspace directory to pass to spawned subagents for inheritance. + * Defaults to workspaceDir. Use this to pass the actual agent workspace when the + * session itself is running in a copied-workspace sandbox (`ro` or `none`) so + * subagents inherit the real workspace path instead of the sandbox copy. + */ + spawnWorkspaceDir?: string; + /** Callback invoked when sessions_yield tool is called. 
*/ + onYield?: (message: string) => Promise<void> | void; + } & SpawnedToolContext, ): AnyAgentTool[] { const workspaceDir = resolveWorkspaceRoot(options?.workspaceDir); + const spawnWorkspaceDir = resolveWorkspaceRoot( + options?.spawnWorkspaceDir ?? options?.workspaceDir, + ); + const runtimeWebTools = getActiveRuntimeWebToolsMetadata(); + const sandbox = + options?.sandboxRoot && options?.sandboxFsBridge + ? { root: options.sandboxRoot, bridge: options.sandboxFsBridge } + : undefined; const imageTool = options?.agentDir?.trim() ? createImageTool({ config: options?.config, agentDir: options.agentDir, workspaceDir, - sandbox: - options?.sandboxRoot && options?.sandboxFsBridge - ? { root: options.sandboxRoot, bridge: options.sandboxFsBridge } - : undefined, + sandbox, fsPolicy: options?.fsPolicy, modelHasVision: options?.modelHasVision, }) @@ -90,20 +106,19 @@ export function createOpenClawTools( config: options?.config, agentDir: options.agentDir, workspaceDir, - sandbox: - options?.sandboxRoot && options?.sandboxFsBridge - ? { root: options.sandboxRoot, bridge: options.sandboxFsBridge } - : undefined, + sandbox, fsPolicy: options?.fsPolicy, }) : null; const webSearchTool = createWebSearchTool({ config: options?.config, sandboxed: options?.sandboxed, + runtimeWebSearch: runtimeWebTools?.search, }); const webFetchTool = createWebFetchTool({ config: options?.config, sandboxed: options?.sandboxed, + runtimeFirecrawl: runtimeWebTools?.fetch.firecrawl, }); const messageTool = options?.disableMessageTool ?
null @@ -157,15 +172,22 @@ export function createOpenClawTools( createSessionsListTool({ agentSessionKey: options?.agentSessionKey, sandboxed: options?.sandboxed, + config: options?.config, }), createSessionsHistoryTool({ agentSessionKey: options?.agentSessionKey, sandboxed: options?.sandboxed, + config: options?.config, }), createSessionsSendTool({ agentSessionKey: options?.agentSessionKey, agentChannel: options?.agentChannel, sandboxed: options?.sandboxed, + config: options?.config, + }), + createSessionsYieldTool({ + sessionId: options?.sessionId, + onYield: options?.onYield, }), createSessionsSpawnTool({ agentSessionKey: options?.agentSessionKey, @@ -178,7 +200,7 @@ export function createOpenClawTools( agentGroupSpace: options?.agentGroupSpace, sandboxed: options?.sandboxed, requesterAgentIdOverride: options?.requesterAgentIdOverride, - workspaceDir, + workspaceDir: spawnWorkspaceDir, }), createSubagentsTool({ agentSessionKey: options?.agentSessionKey, @@ -186,6 +208,7 @@ export function createOpenClawTools( createSessionStatusTool({ agentSessionKey: options?.agentSessionKey, config: options?.config, + sandboxed: options?.sandboxed, }), ...(webSearchTool ? [webSearchTool] : []), ...(webFetchTool ? 
[webFetchTool] : []), diff --git a/src/agents/openclaw-tools.web-runtime.test.ts b/src/agents/openclaw-tools.web-runtime.test.ts new file mode 100644 index 00000000000..94478930cf1 --- /dev/null +++ b/src/agents/openclaw-tools.web-runtime.test.ts @@ -0,0 +1,135 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import type { OpenClawConfig } from "../config/config.js"; +import { + activateSecretsRuntimeSnapshot, + clearSecretsRuntimeSnapshot, + prepareSecretsRuntimeSnapshot, +} from "../secrets/runtime.js"; +import { withFetchPreconnect } from "../test-utils/fetch-mock.js"; +import { createOpenClawTools } from "./openclaw-tools.js"; + +vi.mock("../plugins/tools.js", () => ({ + resolvePluginTools: () => [], +})); + +function asConfig(value: unknown): OpenClawConfig { + return value as OpenClawConfig; +} + +function findTool(name: string, config: OpenClawConfig) { + const allTools = createOpenClawTools({ config, sandboxed: true }); + const tool = allTools.find((candidate) => candidate.name === name); + expect(tool).toBeDefined(); + if (!tool) { + throw new Error(`missing ${name} tool`); + } + return tool; +} + +function makeHeaders(map: Record<string, string>): { get: (key: string) => string | null } { + return { + get: (key) => map[key.toLowerCase()] ?? 
null, + }; +} + +async function prepareAndActivate(params: { config: OpenClawConfig; env?: NodeJS.ProcessEnv }) { + const snapshot = await prepareSecretsRuntimeSnapshot({ + config: params.config, + env: params.env, + agentDirs: ["/tmp/openclaw-agent-main"], + loadAuthStore: () => ({ version: 1, profiles: {} }), + }); + activateSecretsRuntimeSnapshot(snapshot); + return snapshot; +} + +describe("openclaw tools runtime web metadata wiring", () => { + const priorFetch = global.fetch; + + afterEach(() => { + global.fetch = priorFetch; + clearSecretsRuntimeSnapshot(); + }); + + it("uses runtime-selected provider when higher-precedence provider ref is unresolved", async () => { + const snapshot = await prepareAndActivate({ + config: asConfig({ + tools: { + web: { + search: { + apiKey: { source: "env", provider: "default", id: "MISSING_BRAVE_KEY_REF" }, + gemini: { + apiKey: { source: "env", provider: "default", id: "GEMINI_WEB_KEY_REF" }, + }, + }, + }, + }, + }), + env: { + GEMINI_WEB_KEY_REF: "gemini-runtime-key", + }, + }); + + expect(snapshot.webTools.search.selectedProvider).toBe("gemini"); + + const mockFetch = vi.fn((_input?: unknown, _init?: unknown) => + Promise.resolve({ + ok: true, + json: () => + Promise.resolve({ + candidates: [ + { + content: { parts: [{ text: "runtime gemini ok" }] }, + groundingMetadata: { groundingChunks: [] }, + }, + ], + }), + } as Response), + ); + global.fetch = withFetchPreconnect(mockFetch); + + const webSearch = findTool("web_search", snapshot.config); + const result = await webSearch.execute("call-runtime-search", { query: "runtime search" }); + + expect(mockFetch).toHaveBeenCalled(); + expect(String(mockFetch.mock.calls[0]?.[0])).toContain("generativelanguage.googleapis.com"); + expect((result.details as { provider?: string }).provider).toBe("gemini"); + }); + + it("skips Firecrawl key resolution when runtime marks Firecrawl inactive", async () => { + const snapshot = await prepareAndActivate({ + config: asConfig({ + tools: { + 
web: { + fetch: { + firecrawl: { + enabled: false, + apiKey: { source: "env", provider: "default", id: "MISSING_FIRECRAWL_KEY_REF" }, + }, + }, + }, + }, + }), + }); + + const mockFetch = vi.fn((_input?: unknown, _init?: unknown) => + Promise.resolve({ + ok: true, + status: 200, + headers: makeHeaders({ "content-type": "text/html; charset=utf-8" }), + text: () => + Promise.resolve( + "

<title>Runtime Off</title>
</head><body>
<p>Use direct fetch.</p>

", + ), + } as Response), + ); + global.fetch = withFetchPreconnect(mockFetch); + + const webFetch = findTool("web_fetch", snapshot.config); + await webFetch.execute("call-runtime-fetch", { url: "https://example.com/runtime-off" }); + + expect(mockFetch).toHaveBeenCalled(); + expect(mockFetch.mock.calls[0]?.[0]).toBe("https://example.com/runtime-off"); + expect(String(mockFetch.mock.calls[0]?.[0])).not.toContain("api.firecrawl.dev"); + }); +}); diff --git a/src/agents/pi-embedded-error-observation.test.ts b/src/agents/pi-embedded-error-observation.test.ts new file mode 100644 index 00000000000..4e1d6162d5c --- /dev/null +++ b/src/agents/pi-embedded-error-observation.test.ts @@ -0,0 +1,185 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import * as loggingConfigModule from "../logging/config.js"; +import { + buildApiErrorObservationFields, + buildTextObservationFields, + sanitizeForConsole, +} from "./pi-embedded-error-observation.js"; + +const OBSERVATION_BEARER_TOKEN = "sk-redact-test-token"; +const OBSERVATION_COOKIE_VALUE = "session-cookie-token"; + +afterEach(() => { + vi.restoreAllMocks(); +}); + +describe("buildApiErrorObservationFields", () => { + it("redacts request ids and exposes stable hashes instead of raw payloads", () => { + const observed = buildApiErrorObservationFields( + '{"type":"error","error":{"type":"overloaded_error","message":"Overloaded"},"request_id":"req_overload"}', + ); + + expect(observed).toMatchObject({ + rawErrorPreview: expect.stringContaining('"request_id":"sha256:'), + rawErrorHash: expect.stringMatching(/^sha256:/), + rawErrorFingerprint: expect.stringMatching(/^sha256:/), + providerErrorType: "overloaded_error", + providerErrorMessagePreview: "Overloaded", + requestIdHash: expect.stringMatching(/^sha256:/), + }); + expect(observed.rawErrorPreview).not.toContain("req_overload"); + }); + + it("forces token redaction for observation previews", () => { + const observed = buildApiErrorObservationFields( + 
`Authorization: Bearer ${OBSERVATION_BEARER_TOKEN}`, + ); + + expect(observed.rawErrorPreview).not.toContain(OBSERVATION_BEARER_TOKEN); + expect(observed.rawErrorPreview).toContain(OBSERVATION_BEARER_TOKEN.slice(0, 6)); + expect(observed.rawErrorHash).toMatch(/^sha256:/); + }); + + it("redacts observation-only header and cookie formats", () => { + const observed = buildApiErrorObservationFields( + `x-api-key: ${OBSERVATION_BEARER_TOKEN} Cookie: session=${OBSERVATION_COOKIE_VALUE}`, + ); + + expect(observed.rawErrorPreview).not.toContain(OBSERVATION_COOKIE_VALUE); + expect(observed.rawErrorPreview).toContain("x-api-key: ***"); + expect(observed.rawErrorPreview).toContain("Cookie: session="); + }); + + it("does not let cookie redaction consume unrelated fields on the same line", () => { + const observed = buildApiErrorObservationFields( + `Cookie: session=${OBSERVATION_COOKIE_VALUE} status=503 request_id=req_cookie`, + ); + + expect(observed.rawErrorPreview).toContain("Cookie: session="); + expect(observed.rawErrorPreview).toContain("status=503"); + expect(observed.rawErrorPreview).toContain("request_id=sha256:"); + }); + + it("builds sanitized generic text observation fields", () => { + const observed = buildTextObservationFields( + '{"type":"error","error":{"type":"overloaded_error","message":"Overloaded"},"request_id":"req_prev"}', + ); + + expect(observed).toMatchObject({ + textPreview: expect.stringContaining('"request_id":"sha256:'), + textHash: expect.stringMatching(/^sha256:/), + textFingerprint: expect.stringMatching(/^sha256:/), + providerErrorType: "overloaded_error", + providerErrorMessagePreview: "Overloaded", + requestIdHash: expect.stringMatching(/^sha256:/), + }); + expect(observed.textPreview).not.toContain("req_prev"); + }); + + it("redacts request ids in formatted plain-text errors", () => { + const observed = buildApiErrorObservationFields( + "LLM error overloaded_error: Overloaded (request_id: req_plaintext_123)", + ); + + 
expect(observed).toMatchObject({ + rawErrorPreview: expect.stringContaining("request_id: sha256:"), + rawErrorFingerprint: expect.stringMatching(/^sha256:/), + requestIdHash: expect.stringMatching(/^sha256:/), + }); + expect(observed.rawErrorPreview).not.toContain("req_plaintext_123"); + }); + + it("keeps fingerprints stable across request ids for equivalent errors", () => { + const first = buildApiErrorObservationFields( + '{"type":"error","error":{"type":"overloaded_error","message":"Overloaded"},"request_id":"req_001"}', + ); + const second = buildApiErrorObservationFields( + '{"type":"error","error":{"type":"overloaded_error","message":"Overloaded"},"request_id":"req_002"}', + ); + + expect(first.rawErrorFingerprint).toBe(second.rawErrorFingerprint); + expect(first.rawErrorHash).not.toBe(second.rawErrorHash); + }); + + it("truncates oversized raw and provider previews", () => { + const longMessage = "X".repeat(260); + const observed = buildApiErrorObservationFields( + `{"type":"error","error":{"type":"server_error","message":"${longMessage}"},"request_id":"req_long"}`, + ); + + expect(observed.rawErrorPreview).toBeDefined(); + expect(observed.providerErrorMessagePreview).toBeDefined(); + expect(observed.rawErrorPreview?.length).toBeLessThanOrEqual(401); + expect(observed.providerErrorMessagePreview?.length).toBeLessThanOrEqual(201); + expect(observed.providerErrorMessagePreview?.endsWith("…")).toBe(true); + }); + + it("caps oversized raw inputs before hashing and fingerprinting", () => { + const oversized = "X".repeat(70_000); + const bounded = "X".repeat(64_000); + + expect(buildApiErrorObservationFields(oversized)).toMatchObject({ + rawErrorHash: buildApiErrorObservationFields(bounded).rawErrorHash, + rawErrorFingerprint: buildApiErrorObservationFields(bounded).rawErrorFingerprint, + }); + }); + + it("returns empty observation fields for empty input", () => { + expect(buildApiErrorObservationFields(undefined)).toEqual({}); + 
expect(buildApiErrorObservationFields("")).toEqual({}); + expect(buildApiErrorObservationFields(" ")).toEqual({}); + }); + + it("re-reads configured redact patterns on each call", () => { + const readLoggingConfig = vi.spyOn(loggingConfigModule, "readLoggingConfig"); + readLoggingConfig.mockReturnValueOnce(undefined); + readLoggingConfig.mockReturnValueOnce({ + redactPatterns: [String.raw`\bcustom-secret-[A-Za-z0-9]+\b`], + }); + + const first = buildApiErrorObservationFields("custom-secret-abc123"); + const second = buildApiErrorObservationFields("custom-secret-abc123"); + + expect(first.rawErrorPreview).toContain("custom-secret-abc123"); + expect(second.rawErrorPreview).not.toContain("custom-secret-abc123"); + expect(second.rawErrorPreview).toContain("custom"); + }); + + it("fails closed when observation sanitization throws", () => { + vi.spyOn(loggingConfigModule, "readLoggingConfig").mockImplementation(() => { + throw new Error("boom"); + }); + + expect(buildApiErrorObservationFields("request_id=req_123")).toEqual({}); + expect(buildTextObservationFields("request_id=req_123")).toEqual({ + textPreview: undefined, + textHash: undefined, + textFingerprint: undefined, + httpCode: undefined, + providerErrorType: undefined, + providerErrorMessagePreview: undefined, + requestIdHash: undefined, + }); + }); + + it("ignores non-string configured redact patterns", () => { + vi.spyOn(loggingConfigModule, "readLoggingConfig").mockReturnValue({ + redactPatterns: [ + 123 as never, + { bad: true } as never, + String.raw`\bcustom-secret-[A-Za-z0-9]+\b`, + ], + }); + + const observed = buildApiErrorObservationFields("custom-secret-abc123"); + + expect(observed.rawErrorPreview).not.toContain("custom-secret-abc123"); + expect(observed.rawErrorPreview).toContain("custom"); + }); +}); + +describe("sanitizeForConsole", () => { + it("strips control characters from console-facing values", () => { + expect(sanitizeForConsole("run-1\nprovider\tmodel\rtest")).toBe("run-1 provider model 
test"); + }); +}); diff --git a/src/agents/pi-embedded-error-observation.ts b/src/agents/pi-embedded-error-observation.ts new file mode 100644 index 00000000000..260bf83f4c5 --- /dev/null +++ b/src/agents/pi-embedded-error-observation.ts @@ -0,0 +1,199 @@ +import { readLoggingConfig } from "../logging/config.js"; +import { redactIdentifier } from "../logging/redact-identifier.js"; +import { getDefaultRedactPatterns, redactSensitiveText } from "../logging/redact.js"; +import { getApiErrorPayloadFingerprint, parseApiErrorInfo } from "./pi-embedded-helpers.js"; +import { stableStringify } from "./stable-stringify.js"; + +const MAX_OBSERVATION_INPUT_CHARS = 64_000; +const MAX_FINGERPRINT_MESSAGE_CHARS = 8_000; +const RAW_ERROR_PREVIEW_MAX_CHARS = 400; +const PROVIDER_ERROR_PREVIEW_MAX_CHARS = 200; +const REQUEST_ID_RE = /\brequest[_ ]?id\b\s*[:=]\s*["'()]*([A-Za-z0-9._:-]+)/i; +const OBSERVATION_EXTRA_REDACT_PATTERNS = [ + String.raw`\b(?:x-)?api[-_]?key\b\s*[:=]\s*(["']?)([^\s"'\\;]+)\1`, + String.raw`"(?:api[-_]?key|api_key)"\s*:\s*"([^"]+)"`, + String.raw`(?:\bCookie\b\s*[:=]\s*[^;=\s]+=|;\s*[^;=\s]+=)([^;\s\r\n]+)`, +]; + +function resolveConfiguredRedactPatterns(): string[] { + const configured = readLoggingConfig()?.redactPatterns; + if (!Array.isArray(configured)) { + return []; + } + return configured.filter((pattern): pattern is string => typeof pattern === "string"); +} + +function truncateForObservation(text: string | undefined, maxChars: number): string | undefined { + const trimmed = text?.trim(); + if (!trimmed) { + return undefined; + } + return trimmed.length > maxChars ? `${trimmed.slice(0, maxChars)}…` : trimmed; +} + +function boundObservationInput(text: string | undefined): string | undefined { + const trimmed = text?.trim(); + if (!trimmed) { + return undefined; + } + return trimmed.length > MAX_OBSERVATION_INPUT_CHARS + ? 
trimmed.slice(0, MAX_OBSERVATION_INPUT_CHARS) + : trimmed; +} + +export function sanitizeForConsole(text: string | undefined, maxChars = 200): string | undefined { + const trimmed = text?.trim(); + if (!trimmed) { + return undefined; + } + const withoutControlChars = Array.from(trimmed) + .filter((char) => { + const code = char.charCodeAt(0); + return !( + code <= 0x08 || + code === 0x0b || + code === 0x0c || + (code >= 0x0e && code <= 0x1f) || + code === 0x7f + ); + }) + .join(""); + const sanitized = withoutControlChars + .replace(/[\r\n\t]+/g, " ") + .replace(/\s+/g, " ") + .trim(); + return sanitized.length > maxChars ? `${sanitized.slice(0, maxChars)}…` : sanitized; +} + +function replaceRequestIdPreview( + text: string | undefined, + requestId: string | undefined, +): string | undefined { + if (!text || !requestId) { + return text; + } + return text.split(requestId).join(redactIdentifier(requestId, { len: 12 })); +} + +function redactObservationText(text: string | undefined): string | undefined { + if (!text) { + return text; + } + // Observation logs must stay redacted even when operators disable general-purpose + // log redaction, otherwise raw provider payloads leak back into always-on logs. + const configuredPatterns = resolveConfiguredRedactPatterns(); + return redactSensitiveText(text, { + mode: "tools", + patterns: [ + ...getDefaultRedactPatterns(), + ...configuredPatterns, + ...OBSERVATION_EXTRA_REDACT_PATTERNS, + ], + }); +} + +function extractRequestId(text: string | undefined): string | undefined { + if (!text) { + return undefined; + } + const match = text.match(REQUEST_ID_RE); + return match?.[1]?.trim() || undefined; +} + +function buildObservationFingerprint(params: { + raw: string; + requestId?: string; + httpCode?: string; + type?: string; + message?: string; +}): string | null { + const boundedMessage = + params.message && params.message.length > MAX_FINGERPRINT_MESSAGE_CHARS + ? 
params.message.slice(0, MAX_FINGERPRINT_MESSAGE_CHARS) + : params.message; + const structured = + params.httpCode || params.type || boundedMessage + ? stableStringify({ + httpCode: params.httpCode, + type: params.type, + message: boundedMessage, + }) + : null; + if (structured) { + return structured; + } + if (params.requestId) { + return params.raw.split(params.requestId).join(""); + } + return getApiErrorPayloadFingerprint(params.raw); +} + +export function buildApiErrorObservationFields(rawError?: string): { + rawErrorPreview?: string; + rawErrorHash?: string; + rawErrorFingerprint?: string; + httpCode?: string; + providerErrorType?: string; + providerErrorMessagePreview?: string; + requestIdHash?: string; +} { + const trimmed = boundObservationInput(rawError); + if (!trimmed) { + return {}; + } + try { + const parsed = parseApiErrorInfo(trimmed); + const requestId = parsed?.requestId ?? extractRequestId(trimmed); + const requestIdHash = requestId ? redactIdentifier(requestId, { len: 12 }) : undefined; + const rawFingerprint = buildObservationFingerprint({ + raw: trimmed, + requestId, + httpCode: parsed?.httpCode, + type: parsed?.type, + message: parsed?.message, + }); + const redactedRawPreview = replaceRequestIdPreview(redactObservationText(trimmed), requestId); + const redactedProviderMessage = replaceRequestIdPreview( + redactObservationText(parsed?.message), + requestId, + ); + + return { + rawErrorPreview: truncateForObservation(redactedRawPreview, RAW_ERROR_PREVIEW_MAX_CHARS), + rawErrorHash: redactIdentifier(trimmed, { len: 12 }), + rawErrorFingerprint: rawFingerprint + ? 
redactIdentifier(rawFingerprint, { len: 12 }) + : undefined, + httpCode: parsed?.httpCode, + providerErrorType: parsed?.type, + providerErrorMessagePreview: truncateForObservation( + redactedProviderMessage, + PROVIDER_ERROR_PREVIEW_MAX_CHARS, + ), + requestIdHash, + }; + } catch { + return {}; + } +} + +export function buildTextObservationFields(text?: string): { + textPreview?: string; + textHash?: string; + textFingerprint?: string; + httpCode?: string; + providerErrorType?: string; + providerErrorMessagePreview?: string; + requestIdHash?: string; +} { + const observed = buildApiErrorObservationFields(text); + return { + textPreview: observed.rawErrorPreview, + textHash: observed.rawErrorHash, + textFingerprint: observed.rawErrorFingerprint, + httpCode: observed.httpCode, + providerErrorType: observed.providerErrorType, + providerErrorMessagePreview: observed.providerErrorMessagePreview, + requestIdHash: observed.requestIdHash, + }; +} diff --git a/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts b/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts index 86fd90e7161..8c0a0b1994d 100644 --- a/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts +++ b/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts @@ -2,6 +2,7 @@ import { describe, expect, it } from "vitest"; import { classifyFailoverReason, classifyFailoverReasonFromHttpStatus, + extractObservedOverflowTokenCount, isAuthErrorMessage, isAuthPermanentErrorMessage, isBillingErrorMessage, @@ -32,7 +33,7 @@ const OPENROUTER_CREDITS_MESSAGE = "Payment Required: insufficient credits"; // Issue-backed Anthropic/OpenAI-compatible insufficient_quota payload under HTTP 400: // https://github.com/openclaw/openclaw/issues/23440 const INSUFFICIENT_QUOTA_PAYLOAD = - '{"type":"error","error":{"type":"insufficient_quota","message":"Your account has insufficient quota balance to run this request."}}'; + '{"type":"error","error":{"type":"insufficient_quota","message":"Your account has 
insufficient quota balance to run this request."}}'; // pragma: allowlist secret // Together AI error code examples: https://docs.together.ai/docs/error-codes const TOGETHER_PAYMENT_REQUIRED_MESSAGE = "402 Payment Required: The account associated with this API key has reached its maximum allowed monthly spending limit."; @@ -42,94 +43,127 @@ const TOGETHER_ENGINE_OVERLOADED_MESSAGE = const GROQ_TOO_MANY_REQUESTS_MESSAGE = "429 Too Many Requests: Too many requests were sent in a given timeframe."; const GROQ_SERVICE_UNAVAILABLE_MESSAGE = - "503 Service Unavailable: The server is temporarily unable to handle the request due to overloading or maintenance."; + "503 Service Unavailable: The server is temporarily unable to handle the request due to overloading or maintenance."; // pragma: allowlist secret + +function expectMessageMatches( + matcher: (message: string) => boolean, + samples: readonly string[], + expected: boolean, +) { + for (const sample of samples) { + expect(matcher(sample), sample).toBe(expected); + } +} + +function expectTimeoutFailoverSamples(samples: readonly string[]) { + for (const sample of samples) { + expect(isTimeoutErrorMessage(sample)).toBe(true); + expect(classifyFailoverReason(sample)).toBe("timeout"); + expect(isFailoverErrorMessage(sample)).toBe(true); + } +} describe("isAuthPermanentErrorMessage", () => { - it("matches permanent auth failure patterns", () => { - const samples = [ - "invalid_api_key", - "api key revoked", - "api key deactivated", - "key has been disabled", - "key has been revoked", - "account has been deactivated", - "could not authenticate api key", - "could not validate credentials", - "API_KEY_REVOKED", - "api_key_deleted", - ]; - for (const sample of samples) { - expect(isAuthPermanentErrorMessage(sample)).toBe(true); - } - }); - it("does not match transient auth errors", () => { - const samples = [ - "unauthorized", - "invalid token", - "authentication failed", - "forbidden", - "access denied", - "token has 
expired", - ]; - for (const sample of samples) { - expect(isAuthPermanentErrorMessage(sample)).toBe(false); - } + it.each([ + { + name: "matches permanent auth failure patterns", + samples: [ + "invalid_api_key", + "api key revoked", + "api key deactivated", + "key has been disabled", + "key has been revoked", + "account has been deactivated", + "could not authenticate api key", + "could not validate credentials", + "API_KEY_REVOKED", + "api_key_deleted", + ], + expected: true, + }, + { + name: "does not match transient auth errors", + samples: [ + "unauthorized", + "invalid token", + "authentication failed", + "forbidden", + "access denied", + "token has expired", + ], + expected: false, + }, + ])("$name", ({ samples, expected }) => { + expectMessageMatches(isAuthPermanentErrorMessage, samples, expected); }); }); describe("isAuthErrorMessage", () => { - it("matches credential validation errors", () => { - const samples = [ - 'No credentials found for profile "anthropic:default".', - "No API key found for profile openai.", - ]; - for (const sample of samples) { - expect(isAuthErrorMessage(sample)).toBe(true); - } - }); - it("matches OAuth refresh failures", () => { - const samples = [ - "OAuth token refresh failed for anthropic: Failed to refresh OAuth token for anthropic. Please try again or re-authenticate.", - "Please re-authenticate to continue.", - ]; - for (const sample of samples) { - expect(isAuthErrorMessage(sample)).toBe(true); - } + it.each([ + 'No credentials found for profile "anthropic:default".', + "No API key found for profile openai.", + "OAuth token refresh failed for anthropic: Failed to refresh OAuth token for anthropic. 
Please try again or re-authenticate.", + "Please re-authenticate to continue.", + ])("matches auth errors for %j", (sample) => { + expect(isAuthErrorMessage(sample)).toBe(true); }); }); describe("isBillingErrorMessage", () => { - it("matches credit / payment failures", () => { - const samples = [ - "Your credit balance is too low to access the Anthropic API.", - "insufficient credits", - "Payment Required", - "HTTP 402 Payment Required", - "plans & billing", - ]; - for (const sample of samples) { - expect(isBillingErrorMessage(sample)).toBe(true); - } - }); - it("does not false-positive on issue IDs or text containing 402", () => { - const falsePositives = [ - "Fixed issue CHE-402 in the latest release", - "See ticket #402 for details", - "ISSUE-402 has been resolved", - "Room 402 is available", - "Error code 403 was returned, not 402-related", - "The building at 402 Main Street", - "processed 402 records", - "402 items found in the database", - "port 402 is open", - "Use a 402 stainless bolt", - "Book a 402 room", - "There is a 402 near me", - ]; - for (const sample of falsePositives) { - expect(isBillingErrorMessage(sample)).toBe(false); - } + it.each([ + { + name: "matches credit and payment failures", + samples: [ + "Your credit balance is too low to access the Anthropic API.", + "insufficient credits", + "Payment Required", + "HTTP 402 Payment Required", + "plans & billing", + "Insufficient USD or Diem balance to complete request. 
Visit https://venice.ai/settings/api to add credits.", + "This model requires more credits to use", + "This endpoint require more credits", + ], + expected: true, + }, + { + name: "does not false-positive on issue ids and numeric references", + samples: [ + "Fixed issue CHE-402 in the latest release", + "See ticket #402 for details", + "ISSUE-402 has been resolved", + "Room 402 is available", + "Error code 403 was returned, not 402-related", + "The building at 402 Main Street", + "processed 402 records", + "402 items found in the database", + "port 402 is open", + "Use a 402 stainless bolt", + "Book a 402 room", + "There is a 402 near me", + ], + expected: false, + }, + { + name: "still matches real HTTP 402 billing errors", + samples: [ + "HTTP 402 Payment Required", + "status: 402", + "error code 402", + "http 402", + "status=402 payment required", + "got a 402 from the API", + "returned 402", + "received a 402 response", + '{"status":402,"type":"error"}', + '{"code":402,"message":"payment required"}', + '{"error":{"code":402,"message":"billing hard limit reached"}}', + ], + expected: true, + }, + ])("$name", ({ samples, expected }) => { + expectMessageMatches(isBillingErrorMessage, samples, expected); }); + it("does not false-positive on long assistant responses mentioning billing keywords", () => { // Simulate a multi-paragraph assistant response that mentions billing terms const longResponse = @@ -149,6 +183,11 @@ describe("isBillingErrorMessage", () => { expect(longResponse.length).toBeGreaterThan(512); expect(isBillingErrorMessage(longResponse)).toBe(false); }); + it("does not false-positive on short non-billing text that mentions insufficient and balance", () => { + const sample = "The evidence is insufficient to reconcile the final balance after compaction."; + expect(isBillingErrorMessage(sample)).toBe(false); + expect(classifyFailoverReason(sample)).toBeNull(); + }); it("still matches explicit 402 markers in long payloads", () => { const 
longStructuredError = '{"error":{"code":402,"message":"payment required","details":"' + "x".repeat(700) + '"}}'; @@ -164,37 +203,27 @@ describe("isBillingErrorMessage", () => { expect(longNonError.length).toBeGreaterThan(512); expect(isBillingErrorMessage(longNonError)).toBe(false); }); - it("still matches real HTTP 402 billing errors", () => { - const realErrors = [ - "HTTP 402 Payment Required", - "status: 402", - "error code 402", - "http 402", - "status=402 payment required", - "got a 402 from the API", - "returned 402", - "received a 402 response", - '{"status":402,"type":"error"}', - '{"code":402,"message":"payment required"}', - '{"error":{"code":402,"message":"billing hard limit reached"}}', - ]; - for (const sample of realErrors) { - expect(isBillingErrorMessage(sample)).toBe(true); - } + + it("prefers billing when API-key and 402 hints both appear", () => { + const sample = + "402 Payment Required: The account associated with this API key has reached its maximum allowed monthly spending limit."; + expect(isBillingErrorMessage(sample)).toBe(true); + expect(classifyFailoverReason(sample)).toBe("billing"); }); }); describe("isCloudCodeAssistFormatError", () => { it("matches format errors", () => { - const samples = [ - "INVALID_REQUEST_ERROR: string should match pattern", - "messages.1.content.1.tool_use.id", - "tool_use.id should match pattern", - "invalid request format", - ]; - for (const sample of samples) { - expect(isCloudCodeAssistFormatError(sample)).toBe(true); - } + expectMessageMatches( + isCloudCodeAssistFormatError, + [ + "INVALID_REQUEST_ERROR: string should match pattern", + "messages.1.content.1.tool_use.id", + "tool_use.id should match pattern", + "invalid request format", + ], + true, + ); }); }); @@ -226,20 +255,24 @@ describe("isCloudflareOrHtmlErrorPage", () => { }); describe("isCompactionFailureError", () => { - it("matches compaction overflow failures", () => { - const samples = [ - 'Context overflow: Summarization failed: 400 
{"message":"prompt is too long"}', - "auto-compaction failed due to context overflow", - "Compaction failed: prompt is too long", - "Summarization failed: context window exceeded for this request", - ]; - for (const sample of samples) { - expect(isCompactionFailureError(sample)).toBe(true); - } - }); - it("ignores non-compaction overflow errors", () => { - expect(isCompactionFailureError("Context overflow: prompt too large")).toBe(false); - expect(isCompactionFailureError("rate limit exceeded")).toBe(false); + it.each([ + { + name: "matches compaction overflow failures", + samples: [ + 'Context overflow: Summarization failed: 400 {"message":"prompt is too long"}', + "auto-compaction failed due to context overflow", + "Compaction failed: prompt is too long", + "Summarization failed: context window exceeded for this request", + ], + expected: true, + }, + { + name: "ignores non-compaction overflow errors", + samples: ["Context overflow: prompt too large", "rate limit exceeded"], + expected: false, + }, + ])("$name", ({ samples, expected }) => { + expectMessageMatches(isCompactionFailureError, samples, expected); }); }); @@ -439,10 +472,46 @@ describe("isLikelyContextOverflowError", () => { expect(isLikelyContextOverflowError(sample)).toBe(false); } }); + + it("excludes billing errors even when text matches context overflow patterns", () => { + const samples = [ + "402 Payment Required: request token limit exceeded for this billing plan", + "insufficient credits: request size exceeds your current plan limits", + "Your credit balance is too low. 
Maximum request token limit exceeded.", + ]; + for (const sample of samples) { + expect(isBillingErrorMessage(sample)).toBe(true); + expect(isLikelyContextOverflowError(sample)).toBe(false); + } + }); +}); + +describe("extractObservedOverflowTokenCount", () => { + it("extracts provider-reported prompt token counts", () => { + expect( + extractObservedOverflowTokenCount( + '400 {"type":"error","error":{"message":"prompt is too long: 277403 tokens > 200000 maximum"}}', + ), + ).toBe(277403); + expect( + extractObservedOverflowTokenCount("Context window exceeded: requested 12000 tokens"), + ).toBe(12000); + expect( + extractObservedOverflowTokenCount( + "This model's maximum context length is 128000 tokens. However, your messages resulted in 145000 tokens.", + ), + ).toBe(145000); + }); + + it("returns undefined when overflow counts are not present", () => { + expect(extractObservedOverflowTokenCount("Prompt too large for this model")).toBeUndefined(); + expect(extractObservedOverflowTokenCount("rate limit exceeded")).toBeUndefined(); + }); }); describe("isTransientHttpError", () => { it("returns true for retryable 5xx status codes", () => { + expect(isTransientHttpError("499 Client Closed Request")).toBe(true); expect(isTransientHttpError("500 Internal Server Error")).toBe(true); expect(isTransientHttpError("502 Bad Gateway")).toBe(true); expect(isTransientHttpError("503 Service Unavailable")).toBe(true); @@ -457,6 +526,39 @@ describe("isTransientHttpError", () => { }); }); +describe("classifyFailoverReasonFromHttpStatus", () => { + it("treats HTTP 401 permanent auth failures as auth_permanent", () => { + expect(classifyFailoverReasonFromHttpStatus(401, "invalid_api_key")).toBe("auth_permanent"); + }); + + it("treats HTTP 422 as format error", () => { + expect(classifyFailoverReasonFromHttpStatus(422)).toBe("format"); + expect(classifyFailoverReasonFromHttpStatus(422, "check open ai req parameter error")).toBe( + "format", + ); + 
expect(classifyFailoverReasonFromHttpStatus(422, "Unprocessable Entity")).toBe("format"); + }); + + it("treats 422 with billing message as billing instead of format", () => { + expect(classifyFailoverReasonFromHttpStatus(422, "insufficient credits")).toBe("billing"); + }); + + it("treats HTTP 400 insufficient-quota payloads as billing instead of format", () => { + expect(classifyFailoverReasonFromHttpStatus(400, INSUFFICIENT_QUOTA_PAYLOAD)).toBe("billing"); + }); + + it("treats HTTP 499 as transient for structured errors", () => { + expect(classifyFailoverReasonFromHttpStatus(499)).toBe("timeout"); + expect(classifyFailoverReasonFromHttpStatus(499, "499 Client Closed Request")).toBe("timeout"); + expect( + classifyFailoverReasonFromHttpStatus( + 499, + '{"type":"error","error":{"type":"overloaded_error","message":"Overloaded"}}', + ), + ).toBe("overloaded"); + }); +}); + describe("isFailoverErrorMessage", () => { it("matches auth/rate/billing/timeout", () => { const samples = [ @@ -473,19 +575,49 @@ describe("isFailoverErrorMessage", () => { }); it("matches abort stop-reason timeout variants", () => { - const samples = [ + expectTimeoutFailoverSamples([ "Unhandled stop reason: abort", "Unhandled stop reason: error", "stop reason: abort", "stop reason: error", "reason: abort", "reason: error", - ]; - for (const sample of samples) { - expect(isTimeoutErrorMessage(sample)).toBe(true); - expect(classifyFailoverReason(sample)).toBe("timeout"); - expect(isFailoverErrorMessage(sample)).toBe(true); - } + ]); + }); + + it("matches Gemini MALFORMED_RESPONSE stop reason as timeout (#42149)", () => { + expectTimeoutFailoverSamples([ + "Unhandled stop reason: MALFORMED_RESPONSE", + "Unhandled stop reason: malformed_response", + "stop reason: MALFORMED_RESPONSE", + ]); + }); + + it("matches network errno codes in serialized error messages", () => { + expectTimeoutFailoverSamples([ + "Error: connect ETIMEDOUT 10.0.0.1:443", + "Error: connect ESOCKETTIMEDOUT 10.0.0.1:443", + 
"Error: connect EHOSTUNREACH 10.0.0.1:443", + "Error: connect ENETUNREACH 10.0.0.1:443", + "Error: write EPIPE", + "Error: read ENETRESET", + "Error: connect EHOSTDOWN 192.168.1.1:443", + ]); + }); + + it("matches z.ai network_error stop reason as timeout", () => { + expectTimeoutFailoverSamples([ + "Unhandled stop reason: network_error", + "stop reason: network_error", + "reason: network_error", + ]); + }); + + it("does not classify MALFORMED_FUNCTION_CALL as timeout", () => { + const sample = "Unhandled stop reason: MALFORMED_FUNCTION_CALL"; + expect(isTimeoutErrorMessage(sample)).toBe(false); + expect(classifyFailoverReason(sample)).toBe(null); + expect(isFailoverErrorMessage(sample)).toBe(false); }); }); @@ -604,6 +736,14 @@ describe("classifyFailoverReason", () => { expect(classifyFailoverReason(TOGETHER_ENGINE_OVERLOADED_MESSAGE)).toBe("overloaded"); expect(classifyFailoverReason(GROQ_TOO_MANY_REQUESTS_MESSAGE)).toBe("rate_limit"); expect(classifyFailoverReason(GROQ_SERVICE_UNAVAILABLE_MESSAGE)).toBe("overloaded"); + // Venice 402 billing error with extra words between "insufficient" and "balance" + expect( + classifyFailoverReason( + "Insufficient USD or Diem balance to complete request. Visit https://venice.ai/settings/api to add credits.", + ), + ).toBe("billing"); + // OpenRouter "requires more credits" billing text + expect(classifyFailoverReason("This model requires more credits to use")).toBe("billing"); }); it("classifies internal and compatibility error messages", () => { @@ -632,6 +772,12 @@ describe("classifyFailoverReason", () => { expect(classifyFailoverReason("402 Payment Required: Weekly/Monthly Limit Exhausted")).toBe( "billing", ); + // Poe returns 402 without "payment required"; must be recognized for fallback + expect( + classifyFailoverReason( + "402 You've used up your points! 
Visit https://poe.com/api/keys to get more.", + ), + ).toBe("billing"); expect(classifyFailoverReason(INSUFFICIENT_QUOTA_PAYLOAD)).toBe("billing"); expect(classifyFailoverReason("deadline exceeded")).toBe("timeout"); expect(classifyFailoverReason("request ended without sending any chunks")).toBe("timeout"); diff --git a/src/agents/pi-embedded-helpers.ts b/src/agents/pi-embedded-helpers.ts index 53f21814492..77ae492bc32 100644 --- a/src/agents/pi-embedded-helpers.ts +++ b/src/agents/pi-embedded-helpers.ts @@ -22,6 +22,7 @@ export { isAuthPermanentErrorMessage, isModelNotFoundErrorMessage, isBillingAssistantError, + extractObservedOverflowTokenCount, parseApiErrorInfo, sanitizeUserFacingText, isBillingErrorMessage, diff --git a/src/agents/pi-embedded-helpers/errors.ts b/src/agents/pi-embedded-helpers/errors.ts index 4cf347150bf..6e38d831ad9 100644 --- a/src/agents/pi-embedded-helpers/errors.ts +++ b/src/agents/pi-embedded-helpers/errors.ts @@ -138,6 +138,13 @@ export function isLikelyContextOverflowError(errorMessage?: string): boolean { return false; } + // Billing/quota errors can contain patterns like "request size exceeds" or + // "maximum token limit exceeded" that match the context overflow heuristic. + // Billing is a more specific error class — exclude it early. 
+ if (isBillingErrorMessage(errorMessage)) { + return false; + } + if (CONTEXT_WINDOW_TOO_SMALL_RE.test(errorMessage)) { return false; } @@ -178,6 +185,32 @@ export function isCompactionFailureError(errorMessage?: string): boolean { return lower.includes("context overflow"); } +const OBSERVED_OVERFLOW_TOKEN_PATTERNS = [ + /prompt is too long:\s*([\d,]+)\s+tokens\s*>\s*[\d,]+\s+maximum/i, + /requested\s+([\d,]+)\s+tokens/i, + /resulted in\s+([\d,]+)\s+tokens/i, +]; + +export function extractObservedOverflowTokenCount(errorMessage?: string): number | undefined { + if (!errorMessage) { + return undefined; + } + + for (const pattern of OBSERVED_OVERFLOW_TOKEN_PATTERNS) { + const match = errorMessage.match(pattern); + const rawCount = match?.[1]?.replaceAll(",", ""); + if (!rawCount) { + continue; + } + const parsed = Number(rawCount); + if (Number.isFinite(parsed) && parsed > 0) { + return Math.floor(parsed); + } + } + + return undefined; +} + const ERROR_PAYLOAD_PREFIX_RE = /^(?:error|api\s*error|apierror|openai\s*error|anthropic\s*error|gateway\s*error)[:\s-]+/i; const FINAL_TAG_RE = /<\s*\/?\s*final\s*>/gi; @@ -189,7 +222,7 @@ const HTTP_STATUS_PREFIX_RE = /^(?:http\s*)?(\d{3})\s+(.+)$/i; const HTTP_STATUS_CODE_PREFIX_RE = /^(?:http\s*)?(\d{3})(?:\s+([\s\S]+))?$/i; const HTML_ERROR_PREFIX_RE = /^\s*(?: { @@ -65,6 +69,79 @@ describeLive("pi embedded extra params (live)", () => { // Should respect maxTokens from config (16) — allow a small buffer for provider rounding. expect(outputTokens ?? 
0).toBeLessThanOrEqual(20); }, 30_000); + + it("verifies OpenAI fast-mode service_tier semantics against the live API", async () => { + const headers = { + "content-type": "application/json", + authorization: `Bearer ${OPENAI_KEY}`, + }; + + const runProbe = async (serviceTier: "default" | "priority") => { + const res = await fetch("https://api.openai.com/v1/responses", { + method: "POST", + headers, + body: JSON.stringify({ + model: "gpt-5.4", + input: "Reply with OK.", + max_output_tokens: 32, + service_tier: serviceTier, + }), + }); + const json = (await res.json()) as { + error?: { message?: string }; + service_tier?: string; + status?: string; + }; + expect(res.ok, json.error?.message ?? `HTTP ${res.status}`).toBe(true); + return json; + }; + + const standard = await runProbe("default"); + expect(standard.service_tier).toBe("default"); + expect(standard.status).toBe("completed"); + + const fast = await runProbe("priority"); + expect(fast.service_tier).toBe("priority"); + expect(fast.status).toBe("completed"); + }, 45_000); +}); + +describeAnthropicLive("pi embedded extra params (anthropic live)", () => { + it("verifies Anthropic fast-mode service_tier semantics against the live API", async () => { + const headers = { + "content-type": "application/json", + "x-api-key": ANTHROPIC_KEY, + "anthropic-version": "2023-06-01", + }; + + const runProbe = async (serviceTier: "auto" | "standard_only") => { + const res = await fetch("https://api.anthropic.com/v1/messages", { + method: "POST", + headers, + body: JSON.stringify({ + model: "claude-sonnet-4-5", + max_tokens: 32, + service_tier: serviceTier, + messages: [{ role: "user", content: "Reply with OK." }], + }), + }); + const json = (await res.json()) as { + error?: { message?: string }; + stop_reason?: string; + usage?: { service_tier?: string }; + }; + expect(res.ok, json.error?.message ?? 
`HTTP ${res.status}`).toBe(true); + return json; + }; + + const standard = await runProbe("standard_only"); + expect(standard.usage?.service_tier).toBe("standard"); + expect(standard.stop_reason).toBe("end_turn"); + + const fast = await runProbe("auto"); + expect(["standard", "priority"]).toContain(fast.usage?.service_tier); + expect(fast.stop_reason).toBe("end_turn"); + }, 45_000); }); describeGeminiLive("pi embedded extra params (gemini live)", () => { @@ -101,7 +178,7 @@ describeGeminiLive("pi embedded extra params (gemini live)", () => { oneByOneRedPngBase64: string; includeImage?: boolean; prompt: string; - onPayload?: (payload: Record, model: Model<"google-generative-ai">) => void; + onPayload?: (payload: Record) => void; }): Promise<{ sawDone: boolean; stopReason?: string; errorMessage?: string }> { const userContent: Array< { type: "text"; text: string } | { type: "image"; mimeType: string; data: string } @@ -129,11 +206,8 @@ describeGeminiLive("pi embedded extra params (gemini live)", () => { apiKey: params.apiKey, reasoning: "high", maxTokens: 64, - onPayload: (payload, streamModel) => { - params.onPayload?.( - payload as Record, - streamModel as Model<"google-generative-ai">, - ); + onPayload: (payload) => { + params.onPayload?.(payload as Record); }, }, ); diff --git a/src/agents/pi-embedded-runner-extraparams.test.ts b/src/agents/pi-embedded-runner-extraparams.test.ts index c0541116075..7a29f30f9eb 100644 --- a/src/agents/pi-embedded-runner-extraparams.test.ts +++ b/src/agents/pi-embedded-runner-extraparams.test.ts @@ -201,9 +201,11 @@ describe("applyExtraParamsToAgent", () => { model: | Model<"openai-responses"> | Model<"openai-codex-responses"> - | Model<"openai-completions">; + | Model<"openai-completions"> + | Model<"anthropic-messages">; options?: SimpleStreamOptions; cfg?: Record; + extraParamsOverride?: Record; payload?: Record; }) { const payload = params.payload ?? 
{ store: false }; @@ -217,6 +219,7 @@ describe("applyExtraParamsToAgent", () => { params.cfg as Parameters[1], params.applyProvider, params.applyModelId, + params.extraParamsOverride, ); const context: Context = { messages: [] }; void agent.streamFn?.(params.model, context, params.options ?? {}); @@ -276,7 +279,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { model: "deepseek/deepseek-r1" }; - options?.onPayload?.(payload, model); + options?.onPayload?.(payload, _model); payloads.push(payload); return {} as ReturnType; }; @@ -308,7 +311,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = {}; - options?.onPayload?.(payload, model); + options?.onPayload?.(payload, _model); payloads.push(payload); return {} as ReturnType; }; @@ -332,7 +335,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { reasoning_effort: "high" }; - options?.onPayload?.(payload, model); + options?.onPayload?.(payload, _model); payloads.push(payload); return {} as ReturnType; }; @@ -357,7 +360,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { reasoning: { max_tokens: 256 } }; - options?.onPayload?.(payload, model); + options?.onPayload?.(payload, _model); payloads.push(payload); return {} as ReturnType; }; @@ -381,7 +384,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { reasoning_effort: "medium" }; - options?.onPayload?.(payload, model); + options?.onPayload?.(payload, _model); payloads.push(payload); return {} as 
ReturnType; }; @@ -588,7 +591,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { thinking: "off" }; - options?.onPayload?.(payload, model); + options?.onPayload?.(payload, _model); payloads.push(payload); return {} as ReturnType; }; @@ -619,7 +622,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { thinking: "off" }; - options?.onPayload?.(payload, model); + options?.onPayload?.(payload, _model); payloads.push(payload); return {} as ReturnType; }; @@ -650,7 +653,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = {}; - options?.onPayload?.(payload, model); + options?.onPayload?.(payload, _model); payloads.push(payload); return {} as ReturnType; }; @@ -674,7 +677,7 @@ describe("applyExtraParamsToAgent", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = { tool_choice: "required" }; - options?.onPayload?.(payload, model); + options?.onPayload?.(payload, _model); payloads.push(payload); return {} as ReturnType; }; @@ -695,11 +698,38 @@ describe("applyExtraParamsToAgent", () => { expect(payloads[0]?.tool_choice).toBe("auto"); }); + it("disables thinking instead of broadening pinned Moonshot tool_choice", () => { + const payloads: Record[] = []; + const baseStreamFn: StreamFn = (_model, _context, options) => { + const payload: Record = { + tool_choice: { type: "tool", name: "read" }, + }; + options?.onPayload?.(payload, _model); + payloads.push(payload); + return {} as ReturnType; + }; + const agent = { streamFn: baseStreamFn }; + + applyExtraParamsToAgent(agent, undefined, "moonshot", "kimi-k2.5", undefined, "low"); + + const model = { + api: 
"openai-completions", + provider: "moonshot", + id: "kimi-k2.5", + } as Model<"openai-completions">; + const context: Context = { messages: [] }; + void agent.streamFn?.(model, context, {}); + + expect(payloads).toHaveLength(1); + expect(payloads[0]?.thinking).toEqual({ type: "disabled" }); + expect(payloads[0]?.tool_choice).toEqual({ type: "tool", name: "read" }); + }); + it("respects explicit Moonshot thinking param from model config", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { const payload: Record = {}; - options?.onPayload?.(payload, model); + options?.onPayload?.(payload, _model); payloads.push(payload); return {} as ReturnType; }; @@ -732,6 +762,85 @@ describe("applyExtraParamsToAgent", () => { expect(payloads[0]?.thinking).toEqual({ type: "disabled" }); }); + it("applies Moonshot payload compatibility to Ollama Kimi cloud models", () => { + const payloads: Record[] = []; + const baseStreamFn: StreamFn = (_model, _context, options) => { + const payload: Record = { tool_choice: "required" }; + options?.onPayload?.(payload, _model); + payloads.push(payload); + return {} as ReturnType; + }; + const agent = { streamFn: baseStreamFn }; + + applyExtraParamsToAgent(agent, undefined, "ollama", "kimi-k2.5:cloud", undefined, "low"); + + const model = { + api: "openai-completions", + provider: "ollama", + id: "kimi-k2.5:cloud", + } as Model<"openai-completions">; + const context: Context = { messages: [] }; + void agent.streamFn?.(model, context, {}); + + expect(payloads).toHaveLength(1); + expect(payloads[0]?.thinking).toEqual({ type: "enabled" }); + expect(payloads[0]?.tool_choice).toBe("auto"); + }); + + it("maps thinkingLevel=off for Ollama Kimi cloud models through Moonshot compatibility", () => { + const payloads: Record[] = []; + const baseStreamFn: StreamFn = (_model, _context, options) => { + const payload: Record = {}; + options?.onPayload?.(payload, _model); + payloads.push(payload); + return {} as 
ReturnType; + }; + const agent = { streamFn: baseStreamFn }; + + applyExtraParamsToAgent(agent, undefined, "ollama", "kimi-k2.5:cloud", undefined, "off"); + + const model = { + api: "openai-completions", + provider: "ollama", + id: "kimi-k2.5:cloud", + } as Model<"openai-completions">; + const context: Context = { messages: [] }; + void agent.streamFn?.(model, context, {}); + + expect(payloads).toHaveLength(1); + expect(payloads[0]?.thinking).toEqual({ type: "disabled" }); + }); + + it("disables thinking instead of broadening pinned Ollama Kimi cloud tool_choice", () => { + const payloads: Record[] = []; + const baseStreamFn: StreamFn = (_model, _context, options) => { + const payload: Record = { + tool_choice: { type: "function", function: { name: "read" } }, + }; + options?.onPayload?.(payload, _model); + payloads.push(payload); + return {} as ReturnType; + }; + const agent = { streamFn: baseStreamFn }; + + applyExtraParamsToAgent(agent, undefined, "ollama", "kimi-k2.5:cloud", undefined, "low"); + + const model = { + api: "openai-completions", + provider: "ollama", + id: "kimi-k2.5:cloud", + } as Model<"openai-completions">; + const context: Context = { messages: [] }; + void agent.streamFn?.(model, context, {}); + + expect(payloads).toHaveLength(1); + expect(payloads[0]?.thinking).toEqual({ type: "disabled" }); + expect(payloads[0]?.tool_choice).toEqual({ + type: "function", + function: { name: "read" }, + }); + }); + it("does not rewrite tool schema for kimi-coding (native Anthropic format)", () => { const payloads: Record[] = []; const baseStreamFn: StreamFn = (_model, _context, options) => { @@ -749,7 +858,7 @@ describe("applyExtraParamsToAgent", () => { ], tool_choice: { type: "tool", name: "read" }, }; - options?.onPayload?.(payload, model); + options?.onPayload?.(payload, _model); payloads.push(payload); return {} as ReturnType; }; @@ -793,7 +902,7 @@ describe("applyExtraParamsToAgent", () => { }, ], }; - options?.onPayload?.(payload, model); + 
options?.onPayload?.(payload, _model); payloads.push(payload); return {} as ReturnType; }; @@ -832,7 +941,7 @@ describe("applyExtraParamsToAgent", () => { }, ], }; - options?.onPayload?.(payload, model); + options?.onPayload?.(payload, _model); payloads.push(payload); return {} as ReturnType; }; @@ -896,7 +1005,7 @@ describe("applyExtraParamsToAgent", () => { }, }, }; - options?.onPayload?.(payload, model); + options?.onPayload?.(payload, _model); payloads.push(payload); return {} as ReturnType; }; @@ -943,7 +1052,7 @@ describe("applyExtraParamsToAgent", () => { }, }, }; - options?.onPayload?.(payload, model); + options?.onPayload?.(payload, _model); payloads.push(payload); return {} as ReturnType; }; @@ -1081,7 +1190,7 @@ describe("applyExtraParamsToAgent", () => { expect(calls).toHaveLength(1); expect(calls[0]?.transport).toBe("auto"); - expect(calls[0]?.openaiWsWarmup).toBe(true); + expect(calls[0]?.openaiWsWarmup).toBe(false); }); it("lets runtime options override OpenAI default transport", () => { @@ -1449,6 +1558,20 @@ describe("applyExtraParamsToAgent", () => { expect(payload.store).toBe(true); }); + it("forces store=true for azure-openai provider with openai-responses API (#42800)", () => { + const payload = runResponsesPayloadMutationCase({ + applyProvider: "azure-openai", + applyModelId: "gpt-5-mini", + model: { + api: "openai-responses", + provider: "azure-openai", + id: "gpt-5-mini", + baseUrl: "https://myresource.openai.azure.com/openai/v1", + } as unknown as Model<"openai-responses">, + }); + expect(payload.store).toBe(true); + }); + it("injects configured OpenAI service_tier into Responses payloads", () => { const payload = runResponsesPayloadMutationCase({ applyProvider: "openai", @@ -1507,6 +1630,165 @@ describe("applyExtraParamsToAgent", () => { expect(payload.service_tier).toBe("default"); }); + it("injects fast-mode payload defaults for direct OpenAI Responses", () => { + const payload = runResponsesPayloadMutationCase({ + applyProvider: 
"openai", + applyModelId: "gpt-5.4", + cfg: { + agents: { + defaults: { + models: { + "openai/gpt-5.4": { + params: { + fastMode: true, + }, + }, + }, + }, + }, + }, + model: { + api: "openai-responses", + provider: "openai", + id: "gpt-5.4", + baseUrl: "https://api.openai.com/v1", + } as unknown as Model<"openai-responses">, + payload: { + store: false, + }, + }); + expect(payload.reasoning).toEqual({ effort: "low" }); + expect(payload.text).toEqual({ verbosity: "low" }); + expect(payload.service_tier).toBe("priority"); + }); + + it("preserves caller-provided OpenAI payload fields when fast mode is enabled", () => { + const payload = runResponsesPayloadMutationCase({ + applyProvider: "openai", + applyModelId: "gpt-5.4", + extraParamsOverride: { fastMode: true }, + model: { + api: "openai-responses", + provider: "openai", + id: "gpt-5.4", + baseUrl: "https://api.openai.com/v1", + } as unknown as Model<"openai-responses">, + payload: { + reasoning: { effort: "medium" }, + text: { verbosity: "high" }, + service_tier: "default", + }, + }); + expect(payload.reasoning).toEqual({ effort: "medium" }); + expect(payload.text).toEqual({ verbosity: "high" }); + expect(payload.service_tier).toBe("default"); + }); + + it("injects service_tier=auto for Anthropic fast mode on direct API-key models", () => { + const payload = runResponsesPayloadMutationCase({ + applyProvider: "anthropic", + applyModelId: "claude-sonnet-4-5", + extraParamsOverride: { fastMode: true }, + model: { + api: "anthropic-messages", + provider: "anthropic", + id: "claude-sonnet-4-5", + baseUrl: "https://api.anthropic.com", + } as unknown as Model<"anthropic-messages">, + payload: {}, + }); + expect(payload.service_tier).toBe("auto"); + }); + + it("injects service_tier=standard_only for Anthropic fast mode off", () => { + const payload = runResponsesPayloadMutationCase({ + applyProvider: "anthropic", + applyModelId: "claude-sonnet-4-5", + extraParamsOverride: { fastMode: false }, + model: { + api: 
"anthropic-messages", + provider: "anthropic", + id: "claude-sonnet-4-5", + baseUrl: "https://api.anthropic.com", + } as unknown as Model<"anthropic-messages">, + payload: {}, + }); + expect(payload.service_tier).toBe("standard_only"); + }); + + it("preserves caller-provided Anthropic service_tier values", () => { + const payload = runResponsesPayloadMutationCase({ + applyProvider: "anthropic", + applyModelId: "claude-sonnet-4-5", + extraParamsOverride: { fastMode: true }, + model: { + api: "anthropic-messages", + provider: "anthropic", + id: "claude-sonnet-4-5", + baseUrl: "https://api.anthropic.com", + } as unknown as Model<"anthropic-messages">, + payload: { + service_tier: "standard_only", + }, + }); + expect(payload.service_tier).toBe("standard_only"); + }); + + it("does not inject Anthropic fast mode service_tier for OAuth auth", () => { + const payload = runResponsesPayloadMutationCase({ + applyProvider: "anthropic", + applyModelId: "claude-sonnet-4-5", + extraParamsOverride: { fastMode: true }, + model: { + api: "anthropic-messages", + provider: "anthropic", + id: "claude-sonnet-4-5", + baseUrl: "https://api.anthropic.com", + } as unknown as Model<"anthropic-messages">, + options: { + apiKey: "sk-ant-oat-test-token", + }, + payload: {}, + }); + expect(payload).not.toHaveProperty("service_tier"); + }); + + it("does not inject Anthropic fast mode service_tier for proxied base URLs", () => { + const payload = runResponsesPayloadMutationCase({ + applyProvider: "anthropic", + applyModelId: "claude-sonnet-4-5", + extraParamsOverride: { fastMode: true }, + model: { + api: "anthropic-messages", + provider: "anthropic", + id: "claude-sonnet-4-5", + baseUrl: "https://proxy.example.com/anthropic", + } as unknown as Model<"anthropic-messages">, + payload: {}, + }); + expect(payload).not.toHaveProperty("service_tier"); + }); + + it("applies fast-mode defaults for openai-codex responses without service_tier", () => { + const payload = runResponsesPayloadMutationCase({ + 
applyProvider: "openai-codex", + applyModelId: "gpt-5.4", + extraParamsOverride: { fastMode: true }, + model: { + api: "openai-codex-responses", + provider: "openai-codex", + id: "gpt-5.4", + baseUrl: "https://chatgpt.com/backend-api", + } as unknown as Model<"openai-codex-responses">, + payload: { + store: false, + }, + }); + expect(payload.reasoning).toEqual({ effort: "low" }); + expect(payload.text).toEqual({ verbosity: "low" }); + expect(payload).not.toHaveProperty("service_tier"); + }); + it("does not inject service_tier for non-openai providers", () => { const payload = runResponsesPayloadMutationCase({ applyProvider: "azure-openai-responses", diff --git a/src/agents/pi-embedded-runner.e2e.test.ts b/src/agents/pi-embedded-runner.e2e.test.ts index 31056f6ffe1..5c7722b5d16 100644 --- a/src/agents/pi-embedded-runner.e2e.test.ts +++ b/src/agents/pi-embedded-runner.e2e.test.ts @@ -1,9 +1,14 @@ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import "./test-helpers/fast-coding-tools.js"; import { afterAll, beforeAll, describe, expect, it, vi } from "vitest"; -import type { OpenClawConfig } from "../config/config.js"; +import { + cleanupEmbeddedPiRunnerTestWorkspace, + createEmbeddedPiRunnerOpenAiConfig, + createEmbeddedPiRunnerTestWorkspace, + type EmbeddedPiRunnerTestWorkspace, + immediateEnqueue, +} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js"; function createMockUsage(input: number, output: number) { return { @@ -88,7 +93,7 @@ vi.mock("@mariozechner/pi-ai", async () => { let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent; let SessionManager: typeof import("@mariozechner/pi-coding-agent").SessionManager; -let tempRoot: string | undefined; +let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined; let agentDir: string; let workspaceDir: string; let sessionCounter = 0; @@ -98,50 +103,21 @@ beforeAll(async () => { vi.useRealTimers(); ({ runEmbeddedPiAgent } = await 
import("./pi-embedded-runner/run.js")); ({ SessionManager } = await import("@mariozechner/pi-coding-agent")); - tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-embedded-agent-")); - agentDir = path.join(tempRoot, "agent"); - workspaceDir = path.join(tempRoot, "workspace"); - await fs.mkdir(agentDir, { recursive: true }); - await fs.mkdir(workspaceDir, { recursive: true }); + e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-embedded-agent-"); + ({ agentDir, workspaceDir } = e2eWorkspace); }, 180_000); afterAll(async () => { - if (!tempRoot) { - return; - } - await fs.rm(tempRoot, { recursive: true, force: true }); - tempRoot = undefined; + await cleanupEmbeddedPiRunnerTestWorkspace(e2eWorkspace); + e2eWorkspace = undefined; }); -const makeOpenAiConfig = (modelIds: string[]) => - ({ - models: { - providers: { - openai: { - api: "openai-responses", - apiKey: "sk-test", - baseUrl: "https://example.com", - models: modelIds.map((id) => ({ - id, - name: `Mock ${id}`, - reasoning: false, - input: ["text"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 16_000, - maxTokens: 2048, - })), - }, - }, - }, - }) satisfies OpenClawConfig; - const nextSessionFile = () => { sessionCounter += 1; return path.join(workspaceDir, `session-${sessionCounter}.jsonl`); }; const nextRunId = (prefix = "run-embedded-test") => `${prefix}-${++runCounter}`; const nextSessionKey = () => `agent:test:embedded:${nextRunId("session-key")}`; -const immediateEnqueue = async (task: () => Promise) => task(); const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string) => { const sessionFile = nextSessionFile(); @@ -152,7 +128,7 @@ const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string timestamp: Date.now(), }); - const cfg = makeOpenAiConfig(["mock-1"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-1"]); return await runEmbeddedPiAgent({ sessionId: "session:test", sessionKey, @@ -197,7 
+173,7 @@ const readSessionMessages = async (sessionFile: string) => { }; const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessionKey: string) => { - const cfg = makeOpenAiConfig(["mock-error"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]); await runEmbeddedPiAgent({ sessionId: "session:test", sessionKey, @@ -217,7 +193,7 @@ const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessi describe("runEmbeddedPiAgent", () => { it("handles prompt error paths without dropping user state", async () => { const sessionFile = nextSessionFile(); - const cfg = makeOpenAiConfig(["mock-error"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]); const sessionKey = nextSessionKey(); const result = await runEmbeddedPiAgent({ sessionId: "session:test", diff --git a/src/agents/pi-embedded-runner.run-embedded-pi-agent.auth-profile-rotation.e2e.test.ts b/src/agents/pi-embedded-runner.run-embedded-pi-agent.auth-profile-rotation.e2e.test.ts index 75ce17eb197..0aa665e0635 100644 --- a/src/agents/pi-embedded-runner.run-embedded-pi-agent.auth-profile-rotation.e2e.test.ts +++ b/src/agents/pi-embedded-runner.run-embedded-pi-agent.auth-profile-rotation.e2e.test.ts @@ -2,8 +2,10 @@ import fs from "node:fs/promises"; import os from "node:os"; import path from "node:path"; import type { AssistantMessage } from "@mariozechner/pi-ai"; -import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; +import { registerLogTransport, resetLogger, setLoggerOverride } from "../logging/logger.js"; +import { redactIdentifier } from "../logging/redact-identifier.js"; import type { AuthProfileFailureReason } from "./auth-profiles.js"; import type { EmbeddedRunAttemptResult } from "./pi-embedded-runner/run/types.js"; @@ -51,6 +53,7 @@ vi.mock("./models-config.js", async 
(importOriginal) => { }); let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent; +let unregisterLogTransport: (() => void) | undefined; beforeAll(async () => { ({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js")); @@ -64,6 +67,13 @@ beforeEach(() => { sleepWithAbortMock.mockClear(); }); +afterEach(() => { + unregisterLogTransport?.(); + unregisterLogTransport = undefined; + setLoggerOverride(null); + resetLogger(); +}); + const baseUsage = { input: 0, output: 0, @@ -720,6 +730,61 @@ describe("runEmbeddedPiAgent auth profile rotation", () => { expect(sleepWithAbortMock).toHaveBeenCalledWith(321, undefined); }); + it("logs structured failover decision metadata for overloaded assistant rotation", async () => { + const records: Array> = []; + setLoggerOverride({ + level: "trace", + consoleLevel: "silent", + file: path.join(os.tmpdir(), `openclaw-auth-rotation-${Date.now()}.log`), + }); + unregisterLogTransport = registerLogTransport((record) => { + records.push(record); + }); + + await runAutoPinnedRotationCase({ + errorMessage: + '{"type":"error","error":{"type":"overloaded_error","message":"Overloaded"},"request_id":"req_overload"}', + sessionKey: "agent:test:overloaded-logging", + runId: "run:overloaded-logging", + }); + + const decisionRecord = records.find( + (record) => + record["2"] === "embedded run failover decision" && + record["1"] && + typeof record["1"] === "object" && + (record["1"] as Record).decision === "rotate_profile", + ); + + expect(decisionRecord).toBeDefined(); + const safeProfileId = redactIdentifier("openai:p1", { len: 12 }); + expect((decisionRecord as Record)["1"]).toMatchObject({ + event: "embedded_run_failover_decision", + runId: "run:overloaded-logging", + decision: "rotate_profile", + failoverReason: "overloaded", + profileId: safeProfileId, + providerErrorType: "overloaded_error", + rawErrorPreview: expect.stringContaining('"request_id":"sha256:'), + }); + + const stateRecord = 
records.find( + (record) => + record["2"] === "auth profile failure state updated" && + record["1"] && + typeof record["1"] === "object" && + (record["1"] as Record).profileId === safeProfileId, + ); + + expect(stateRecord).toBeDefined(); + expect((stateRecord as Record)["1"]).toMatchObject({ + event: "auth_profile_failure_state_updated", + runId: "run:overloaded-logging", + profileId: safeProfileId, + reason: "overloaded", + }); + }); + it("rotates for overloaded prompt failures across auto-pinned profiles", async () => { const { usageStats } = await runAutoPinnedPromptErrorRotationCase({ errorMessage: '{"type":"error","error":{"type":"overloaded_error","message":"Overloaded"}}', @@ -916,7 +981,7 @@ describe("runEmbeddedPiAgent auth profile rotation", () => { }), ).rejects.toMatchObject({ name: "FailoverError", - reason: "rate_limit", + reason: "unknown", provider: "openai", model: "mock-1", }); @@ -1013,6 +1078,54 @@ describe("runEmbeddedPiAgent auth profile rotation", () => { }); }); + it("can probe one billing-disabled profile when transient cooldown probe is allowed without fallback models", async () => { + await withTimedAgentWorkspace(async ({ agentDir, workspaceDir, now }) => { + await writeAuthStore(agentDir, { + usageStats: { + "openai:p1": { + lastUsed: 1, + disabledUntil: now + 60 * 60 * 1000, + disabledReason: "billing", + }, + "openai:p2": { + lastUsed: 2, + disabledUntil: now + 60 * 60 * 1000, + disabledReason: "billing", + }, + }, + }); + + runEmbeddedAttemptMock.mockResolvedValueOnce( + makeAttempt({ + assistantTexts: ["ok"], + lastAssistant: buildAssistant({ + stopReason: "stop", + content: [{ type: "text", text: "ok" }], + }), + }), + ); + + const result = await runEmbeddedPiAgent({ + sessionId: "session:test", + sessionKey: "agent:test:billing-cooldown-probe-no-fallbacks", + sessionFile: path.join(workspaceDir, "session.jsonl"), + workspaceDir, + agentDir, + config: makeConfig(), + prompt: "hello", + provider: "openai", + model: "mock-1", + 
authProfileIdSource: "auto", + allowTransientCooldownProbe: true, + timeoutMs: 5_000, + runId: "run:billing-cooldown-probe-no-fallbacks", + }); + + expect(runEmbeddedAttemptMock).toHaveBeenCalledTimes(1); + expect(result.payloads?.[0]?.text ?? "").toContain("ok"); + }); + }); + it("treats agent-level fallbacks as configured when defaults have none", async () => { await withTimedAgentWorkspace(async ({ agentDir, workspaceDir, now }) => { await writeAuthStore(agentDir, { @@ -1040,7 +1153,7 @@ describe("runEmbeddedPiAgent auth profile rotation", () => { }), ).rejects.toMatchObject({ name: "FailoverError", - reason: "rate_limit", + reason: "unknown", provider: "openai", model: "mock-1", }); diff --git a/src/agents/pi-embedded-runner.sanitize-session-history.policy.test.ts b/src/agents/pi-embedded-runner.sanitize-session-history.policy.test.ts index fceb809bbee..cd5238cf89b 100644 --- a/src/agents/pi-embedded-runner.sanitize-session-history.policy.test.ts +++ b/src/agents/pi-embedded-runner.sanitize-session-history.policy.test.ts @@ -1,10 +1,9 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; -import * as helpers from "./pi-embedded-helpers.js"; import { - expectGoogleModelApiFullSanitizeCall, loadSanitizeSessionHistoryWithCleanMocks, makeMockSessionManager, makeSimpleUserMessages, + type SanitizeSessionHistoryHarness, sanitizeSnapshotChangedOpenAIReasoning, sanitizeWithOpenAIResponses, } from "./pi-embedded-runner.sanitize-session-history.test-harness.js"; @@ -15,42 +14,43 @@ vi.mock("./pi-embedded-helpers.js", async () => ({ sanitizeSessionMessagesImages: vi.fn(async (msgs) => msgs), })); -type SanitizeSessionHistory = Awaited>; -let sanitizeSessionHistory: SanitizeSessionHistory; +let sanitizeSessionHistory: SanitizeSessionHistoryHarness["sanitizeSessionHistory"]; +let mockedHelpers: SanitizeSessionHistoryHarness["mockedHelpers"]; describe("sanitizeSessionHistory e2e smoke", () => { const mockSessionManager = makeMockSessionManager(); const 
mockMessages = makeSimpleUserMessages(); beforeEach(async () => { - sanitizeSessionHistory = await loadSanitizeSessionHistoryWithCleanMocks(); + const harness = await loadSanitizeSessionHistoryWithCleanMocks(); + sanitizeSessionHistory = harness.sanitizeSessionHistory; + mockedHelpers = harness.mockedHelpers; }); - it("applies full sanitize policy for google model APIs", async () => { - await expectGoogleModelApiFullSanitizeCall({ - sanitizeSessionHistory, + it("passes simple user-only history through for google model APIs", async () => { + vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(true); + + const result = await sanitizeSessionHistory({ messages: mockMessages, + modelApi: "google-generative-ai", + provider: "google-vertex", sessionManager: mockSessionManager, + sessionId: "test-session", }); + + expect(result).toEqual(mockMessages); }); - it("keeps images-only sanitize policy without tool-call id rewriting for openai-responses", async () => { - vi.mocked(helpers.isGoogleModelApi).mockReturnValue(false); + it("passes simple user-only history through for openai-responses", async () => { + vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false); - await sanitizeWithOpenAIResponses({ + const result = await sanitizeWithOpenAIResponses({ sanitizeSessionHistory, messages: mockMessages, sessionManager: mockSessionManager, }); - expect(helpers.sanitizeSessionMessagesImages).toHaveBeenCalledWith( - mockMessages, - "session:history", - expect.objectContaining({ - sanitizeMode: "images-only", - sanitizeToolCallIds: false, - }), - ); + expect(result).toEqual(mockMessages); }); it("downgrades openai reasoning blocks when the model snapshot changed", async () => { diff --git a/src/agents/pi-embedded-runner.sanitize-session-history.test-harness.ts b/src/agents/pi-embedded-runner.sanitize-session-history.test-harness.ts index 97750fc1dbc..c0321852236 100644 --- a/src/agents/pi-embedded-runner.sanitize-session-history.test-harness.ts +++ 
b/src/agents/pi-embedded-runner.sanitize-session-history.test-harness.ts @@ -1,7 +1,6 @@ import type { AgentMessage } from "@mariozechner/pi-agent-core"; import type { SessionManager } from "@mariozechner/pi-coding-agent"; import { expect, vi } from "vitest"; -import * as helpers from "./pi-embedded-helpers.js"; export type SessionEntry = { type: string; customType: string; data: unknown }; export type SanitizeSessionHistoryFn = (params: { @@ -13,6 +12,11 @@ export type SanitizeSessionHistoryFn = (params: { sessionId: string; modelId?: string; }) => Promise; +export type SanitizeSessionHistoryMockedHelpers = typeof import("./pi-embedded-helpers.js"); +export type SanitizeSessionHistoryHarness = { + sanitizeSessionHistory: SanitizeSessionHistoryFn; + mockedHelpers: SanitizeSessionHistoryMockedHelpers; +}; export const TEST_SESSION_ID = "test-session"; export function makeModelSnapshotEntry(data: { @@ -54,11 +58,16 @@ export function makeSimpleUserMessages(): AgentMessage[] { return messages as unknown as AgentMessage[]; } -export async function loadSanitizeSessionHistoryWithCleanMocks(): Promise { +export async function loadSanitizeSessionHistoryWithCleanMocks(): Promise { + vi.resetModules(); vi.resetAllMocks(); - vi.mocked(helpers.sanitizeSessionMessagesImages).mockImplementation(async (msgs) => msgs); + const mockedHelpers = await import("./pi-embedded-helpers.js"); + vi.mocked(mockedHelpers.sanitizeSessionMessagesImages).mockImplementation(async (msgs) => msgs); const mod = await import("./pi-embedded-runner/google.js"); - return mod.sanitizeSessionHistory; + return { + sanitizeSessionHistory: mod.sanitizeSessionHistory, + mockedHelpers, + }; } export function makeReasoningAssistantMessages(opts?: { @@ -118,26 +127,6 @@ export function expectOpenAIResponsesStrictSanitizeCall( ); } -export async function expectGoogleModelApiFullSanitizeCall(params: { - sanitizeSessionHistory: SanitizeSessionHistoryFn; - messages: AgentMessage[]; - sessionManager: SessionManager; 
-}) { - vi.mocked(helpers.isGoogleModelApi).mockReturnValue(true); - await params.sanitizeSessionHistory({ - messages: params.messages, - modelApi: "google-generative-ai", - provider: "google-vertex", - sessionManager: params.sessionManager, - sessionId: TEST_SESSION_ID, - }); - expect(helpers.sanitizeSessionMessagesImages).toHaveBeenCalledWith( - params.messages, - "session:history", - expect.objectContaining({ sanitizeMode: "full", sanitizeToolCallIds: true }), - ); -} - export function makeSnapshotChangedOpenAIReasoningScenario() { const sessionEntries = [ makeModelSnapshotEntry({ diff --git a/src/agents/pi-embedded-runner.sanitize-session-history.test.ts b/src/agents/pi-embedded-runner.sanitize-session-history.test.ts index 4fb4659c15d..2003523e03f 100644 --- a/src/agents/pi-embedded-runner.sanitize-session-history.test.ts +++ b/src/agents/pi-embedded-runner.sanitize-session-history.test.ts @@ -1,9 +1,7 @@ import type { AgentMessage } from "@mariozechner/pi-agent-core"; import type { AssistantMessage, UserMessage, Usage } from "@mariozechner/pi-ai"; import { beforeEach, describe, expect, it, vi } from "vitest"; -import * as helpers from "./pi-embedded-helpers.js"; import { - expectGoogleModelApiFullSanitizeCall, loadSanitizeSessionHistoryWithCleanMocks, makeMockSessionManager, makeInMemorySessionManager, @@ -11,6 +9,7 @@ import { makeReasoningAssistantMessages, makeSimpleUserMessages, sanitizeSnapshotChangedOpenAIReasoning, + type SanitizeSessionHistoryHarness, type SanitizeSessionHistoryFn, sanitizeWithOpenAIResponses, TEST_SESSION_ID, @@ -25,6 +24,7 @@ vi.mock("./pi-embedded-helpers.js", async () => ({ })); let sanitizeSessionHistory: SanitizeSessionHistoryFn; +let mockedHelpers: SanitizeSessionHistoryHarness["mockedHelpers"]; let testTimestamp = 1; const nextTimestamp = () => testTimestamp++; @@ -35,7 +35,7 @@ describe("sanitizeSessionHistory", () => { const mockSessionManager = makeMockSessionManager(); const mockMessages = makeSimpleUserMessages(); const 
setNonGoogleModelApi = () => { - vi.mocked(helpers.isGoogleModelApi).mockReturnValue(false); + vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false); }; const sanitizeGithubCopilotHistory = async (params: { @@ -52,6 +52,21 @@ describe("sanitizeSessionHistory", () => { sessionId: TEST_SESSION_ID, }); + const sanitizeAnthropicHistory = async (params: { + messages: AgentMessage[]; + provider?: string; + modelApi?: string; + modelId?: string; + }) => + sanitizeSessionHistory({ + messages: params.messages, + modelApi: params.modelApi ?? "anthropic-messages", + provider: params.provider ?? "anthropic", + modelId: params.modelId ?? "claude-opus-4-6", + sessionManager: makeMockSessionManager(), + sessionId: TEST_SESSION_ID, + }); + const getAssistantMessage = (messages: AgentMessage[]) => { expect(messages[1]?.role).toBe("assistant"); return messages[1] as Extract; @@ -162,23 +177,39 @@ describe("sanitizeSessionHistory", () => { AgentMessage & { usage?: unknown; content?: unknown } >; + const getSingleAssistantUsage = async (messages: AgentMessage[]) => { + vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false); + const result = await sanitizeOpenAIHistory(messages); + return result.find((message) => message.role === "assistant") as + | (AgentMessage & { usage?: unknown }) + | undefined; + }; + beforeEach(async () => { testTimestamp = 1; - sanitizeSessionHistory = await loadSanitizeSessionHistoryWithCleanMocks(); + const harness = await loadSanitizeSessionHistoryWithCleanMocks(); + sanitizeSessionHistory = harness.sanitizeSessionHistory; + mockedHelpers = harness.mockedHelpers; }); - it("sanitizes tool call ids for Google model APIs", async () => { - await expectGoogleModelApiFullSanitizeCall({ - sanitizeSessionHistory, + it("passes simple user-only history through for Google model APIs", async () => { + vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(true); + + const result = await sanitizeSessionHistory({ messages: mockMessages, + modelApi: 
"google-generative-ai", + provider: "google-vertex", sessionManager: mockSessionManager, + sessionId: TEST_SESSION_ID, }); + + expect(result).toEqual(mockMessages); }); - it("sanitizes tool call ids with strict9 for Mistral models", async () => { + it("passes simple user-only history through for Mistral models", async () => { setNonGoogleModelApi(); - await sanitizeSessionHistory({ + const result = await sanitizeSessionHistory({ messages: mockMessages, modelApi: "openai-responses", provider: "openrouter", @@ -187,21 +218,13 @@ describe("sanitizeSessionHistory", () => { sessionId: TEST_SESSION_ID, }); - expect(helpers.sanitizeSessionMessagesImages).toHaveBeenCalledWith( - mockMessages, - "session:history", - expect.objectContaining({ - sanitizeMode: "full", - sanitizeToolCallIds: true, - toolCallIdMode: "strict9", - }), - ); + expect(result).toEqual(mockMessages); }); - it("sanitizes tool call ids for Anthropic APIs", async () => { + it("passes simple user-only history through for Anthropic APIs", async () => { setNonGoogleModelApi(); - await sanitizeSessionHistory({ + const result = await sanitizeSessionHistory({ messages: mockMessages, modelApi: "anthropic-messages", provider: "anthropic", @@ -209,33 +232,25 @@ describe("sanitizeSessionHistory", () => { sessionId: TEST_SESSION_ID, }); - expect(helpers.sanitizeSessionMessagesImages).toHaveBeenCalledWith( - mockMessages, - "session:history", - expect.objectContaining({ sanitizeMode: "full", sanitizeToolCallIds: true }), - ); + expect(result).toEqual(mockMessages); }); - it("does not sanitize tool call ids for openai-responses", async () => { + it("passes simple user-only history through for openai-responses", async () => { setNonGoogleModelApi(); - await sanitizeWithOpenAIResponses({ + const result = await sanitizeWithOpenAIResponses({ sanitizeSessionHistory, messages: mockMessages, sessionManager: mockSessionManager, }); - expect(helpers.sanitizeSessionMessagesImages).toHaveBeenCalledWith( - mockMessages, - 
"session:history", - expect.objectContaining({ sanitizeMode: "images-only", sanitizeToolCallIds: false }), - ); + expect(result).toEqual(mockMessages); }); - it("sanitizes tool call ids for openai-completions", async () => { + it("passes simple user-only history through for openai-completions", async () => { setNonGoogleModelApi(); - await sanitizeSessionHistory({ + const result = await sanitizeSessionHistory({ messages: mockMessages, modelApi: "openai-completions", provider: "openai", @@ -244,15 +259,7 @@ describe("sanitizeSessionHistory", () => { sessionId: TEST_SESSION_ID, }); - expect(helpers.sanitizeSessionMessagesImages).toHaveBeenCalledWith( - mockMessages, - "session:history", - expect.objectContaining({ - sanitizeMode: "images-only", - sanitizeToolCallIds: true, - toolCallIdMode: "strict", - }), - ); + expect(result).toEqual(mockMessages); }); it("prepends a bootstrap user turn for strict OpenAI-compatible assistant-first history", async () => { @@ -314,7 +321,7 @@ describe("sanitizeSessionHistory", () => { }); it("drops stale assistant usage snapshots kept before latest compaction summary", async () => { - vi.mocked(helpers.isGoogleModelApi).mockReturnValue(false); + vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false); const messages = castAgentMessages([ { role: "user", content: "old context" }, @@ -335,7 +342,7 @@ describe("sanitizeSessionHistory", () => { }); it("preserves fresh assistant usage snapshots created after latest compaction summary", async () => { - vi.mocked(helpers.isGoogleModelApi).mockReturnValue(false); + vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false); const messages = castAgentMessages([ makeAssistantUsageMessage({ @@ -359,43 +366,33 @@ describe("sanitizeSessionHistory", () => { }); it("adds a zeroed assistant usage snapshot when usage is missing", async () => { - vi.mocked(helpers.isGoogleModelApi).mockReturnValue(false); - - const messages = castAgentMessages([ - { role: "user", content: "question" }, 
- { - role: "assistant", - content: [{ type: "text", text: "answer without usage" }], - }, - ]); - - const result = await sanitizeOpenAIHistory(messages); - const assistant = result.find((message) => message.role === "assistant") as - | (AgentMessage & { usage?: unknown }) - | undefined; + const assistant = await getSingleAssistantUsage( + castAgentMessages([ + { role: "user", content: "question" }, + { + role: "assistant", + content: [{ type: "text", text: "answer without usage" }], + }, + ]), + ); expect(assistant?.usage).toEqual(makeZeroUsageSnapshot()); }); it("normalizes mixed partial assistant usage fields to numeric totals", async () => { - vi.mocked(helpers.isGoogleModelApi).mockReturnValue(false); - - const messages = castAgentMessages([ - { role: "user", content: "question" }, - { - role: "assistant", - content: [{ type: "text", text: "answer with partial usage" }], - usage: { - output: 3, - cache_read_input_tokens: 9, + const assistant = await getSingleAssistantUsage( + castAgentMessages([ + { role: "user", content: "question" }, + { + role: "assistant", + content: [{ type: "text", text: "answer with partial usage" }], + usage: { + output: 3, + cache_read_input_tokens: 9, + }, }, - }, - ]); - - const result = await sanitizeOpenAIHistory(messages); - const assistant = result.find((message) => message.role === "assistant") as - | (AgentMessage & { usage?: unknown }) - | undefined; + ]), + ); expect(assistant?.usage).toEqual({ input: 0, @@ -407,31 +404,26 @@ describe("sanitizeSessionHistory", () => { }); it("preserves existing usage cost while normalizing token fields", async () => { - vi.mocked(helpers.isGoogleModelApi).mockReturnValue(false); - - const messages = castAgentMessages([ - { role: "user", content: "question" }, - { - role: "assistant", - content: [{ type: "text", text: "answer with partial usage and cost" }], - usage: { - output: 3, - cache_read_input_tokens: 9, - cost: { - input: 1.25, - output: 2.5, - cacheRead: 0.25, - cacheWrite: 0, - 
total: 4, + const assistant = await getSingleAssistantUsage( + castAgentMessages([ + { role: "user", content: "question" }, + { + role: "assistant", + content: [{ type: "text", text: "answer with partial usage and cost" }], + usage: { + output: 3, + cache_read_input_tokens: 9, + cost: { + input: 1.25, + output: 2.5, + cacheRead: 0.25, + cacheWrite: 0, + total: 4, + }, }, }, - }, - ]); - - const result = await sanitizeOpenAIHistory(messages); - const assistant = result.find((message) => message.role === "assistant") as - | (AgentMessage & { usage?: unknown }) - | undefined; + ]), + ); expect(assistant?.usage).toEqual({ ...makeZeroUsageSnapshot(), @@ -451,27 +443,22 @@ describe("sanitizeSessionHistory", () => { }); it("preserves unknown cost when token fields already match", async () => { - vi.mocked(helpers.isGoogleModelApi).mockReturnValue(false); - - const messages = castAgentMessages([ - { role: "user", content: "question" }, - { - role: "assistant", - content: [{ type: "text", text: "answer with complete numeric usage but no cost" }], - usage: { - input: 1, - output: 2, - cacheRead: 3, - cacheWrite: 4, - totalTokens: 10, + const assistant = await getSingleAssistantUsage( + castAgentMessages([ + { role: "user", content: "question" }, + { + role: "assistant", + content: [{ type: "text", text: "answer with complete numeric usage but no cost" }], + usage: { + input: 1, + output: 2, + cacheRead: 3, + cacheWrite: 4, + totalTokens: 10, + }, }, - }, - ]); - - const result = await sanitizeOpenAIHistory(messages); - const assistant = result.find((message) => message.role === "assistant") as - | (AgentMessage & { usage?: unknown }) - | undefined; + ]), + ); expect(assistant?.usage).toEqual({ input: 1, @@ -484,7 +471,7 @@ describe("sanitizeSessionHistory", () => { }); it("drops stale usage when compaction summary appears before kept assistant messages", async () => { - vi.mocked(helpers.isGoogleModelApi).mockReturnValue(false); + 
vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false); const compactionTs = Date.parse("2026-02-26T12:00:00.000Z"); const messages = castAgentMessages([ @@ -505,7 +492,7 @@ describe("sanitizeSessionHistory", () => { }); it("keeps fresh usage after compaction timestamp in summary-first ordering", async () => { - vi.mocked(helpers.isGoogleModelApi).mockReturnValue(false); + vi.mocked(mockedHelpers.isGoogleModelApi).mockReturnValue(false); const compactionTs = Date.parse("2026-02-26T12:00:00.000Z"); const messages = castAgentMessages([ @@ -776,22 +763,30 @@ describe("sanitizeSessionHistory", () => { expect(types).not.toContain("thinking"); }); - it("does not drop thinking blocks for non-copilot providers", async () => { + it("drops assistant thinking blocks for anthropic replay", async () => { setNonGoogleModelApi(); const messages = makeThinkingAndTextAssistantMessages(); - const result = await sanitizeSessionHistory({ + const result = await sanitizeAnthropicHistory({ messages }); + + const assistant = getAssistantMessage(result); + expect(assistant.content).toEqual([{ type: "text", text: "hi" }]); + }); + + it("drops assistant thinking blocks for amazon-bedrock replay", async () => { + setNonGoogleModelApi(); + + const messages = makeThinkingAndTextAssistantMessages(); + + const result = await sanitizeAnthropicHistory({ messages, - modelApi: "anthropic-messages", - provider: "anthropic", - modelId: "claude-opus-4-6", - sessionManager: makeMockSessionManager(), - sessionId: TEST_SESSION_ID, + provider: "amazon-bedrock", + modelApi: "bedrock-converse-stream", }); - const types = getAssistantContentTypes(result); - expect(types).toContain("thinking"); + const assistant = getAssistantMessage(result); + expect(assistant.content).toEqual([{ type: "text", text: "hi" }]); }); it("does not drop thinking blocks for non-claude copilot models", async () => { diff --git a/src/agents/pi-embedded-runner.sessions-yield.e2e.test.ts 
b/src/agents/pi-embedded-runner.sessions-yield.e2e.test.ts new file mode 100644 index 00000000000..d91cf63539b --- /dev/null +++ b/src/agents/pi-embedded-runner.sessions-yield.e2e.test.ts @@ -0,0 +1,345 @@ +/** + * End-to-end test proving that when sessions_yield is called: + * 1. The attempt completes with yieldDetected + * 2. The run exits with stopReason "end_turn" and no pendingToolCalls + * 3. The parent session is idle (clearActiveEmbeddedRun has run) + * + * This exercises the full path: mock LLM → agent loop → tool execution → callback → attempt result → run result. + * Follows the same pattern as pi-embedded-runner.e2e.test.ts. + */ +import fs from "node:fs/promises"; +import path from "node:path"; +import "./test-helpers/fast-coding-tools.js"; +import { afterAll, beforeAll, describe, expect, it, vi } from "vitest"; +import { isEmbeddedPiRunActive, queueEmbeddedPiMessage } from "./pi-embedded-runner/runs.js"; +import { + cleanupEmbeddedPiRunnerTestWorkspace, + createEmbeddedPiRunnerOpenAiConfig, + createEmbeddedPiRunnerTestWorkspace, + type EmbeddedPiRunnerTestWorkspace, + immediateEnqueue, +} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js"; + +function createMockUsage(input: number, output: number) { + return { + input, + output, + cacheRead: 0, + cacheWrite: 0, + totalTokens: input + output, + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + total: 0, + }, + }; +} + +let streamCallCount = 0; +let multiToolMode = false; +let responsePlan: Array<"toolUse" | "stop"> = []; +let observedContexts: Array> = []; + +vi.mock("@mariozechner/pi-coding-agent", async () => { + return await vi.importActual( + "@mariozechner/pi-coding-agent", + ); +}); + +vi.mock("@mariozechner/pi-ai", async () => { + const actual = await vi.importActual("@mariozechner/pi-ai"); + + const buildToolUseMessage = (model: { api: string; provider: string; id: string }) => { + const toolCalls: Array<{ + type: "toolCall"; + id: string; + name: string; + arguments: 
Record; + }> = [ + { + type: "toolCall" as const, + id: "tc-yield-e2e-1", + name: "sessions_yield", + arguments: { message: "Yielding turn." }, + }, + ]; + if (multiToolMode) { + toolCalls.push({ + type: "toolCall" as const, + id: "tc-post-yield-2", + name: "read", + arguments: { file_path: "/etc/hostname" }, + }); + } + return { + role: "assistant" as const, + content: toolCalls, + stopReason: "toolUse" as const, + api: model.api, + provider: model.provider, + model: model.id, + usage: createMockUsage(1, 1), + timestamp: Date.now(), + }; + }; + + const buildStopMessage = (model: { api: string; provider: string; id: string }) => ({ + role: "assistant" as const, + content: [{ type: "text" as const, text: "Acknowledged." }], + stopReason: "stop" as const, + api: model.api, + provider: model.provider, + model: model.id, + usage: createMockUsage(1, 1), + timestamp: Date.now(), + }); + + return { + ...actual, + complete: async (model: { api: string; provider: string; id: string }) => { + streamCallCount++; + const next = responsePlan.shift() ?? "stop"; + return next === "toolUse" ? buildToolUseMessage(model) : buildStopMessage(model); + }, + completeSimple: async (model: { api: string; provider: string; id: string }) => { + streamCallCount++; + const next = responsePlan.shift() ?? "stop"; + return next === "toolUse" ? buildToolUseMessage(model) : buildStopMessage(model); + }, + streamSimple: ( + model: { api: string; provider: string; id: string }, + context: { messages?: Array<{ role?: string; content?: unknown }> }, + ) => { + streamCallCount++; + observedContexts.push((context.messages ?? []).map((message) => ({ ...message }))); + const next = responsePlan.shift() ?? "stop"; + const message = next === "toolUse" ? buildToolUseMessage(model) : buildStopMessage(model); + const stream = actual.createAssistantMessageEventStream(); + queueMicrotask(() => { + stream.push({ + type: "done", + reason: next === "toolUse" ? 
"toolUse" : "stop", + message, + }); + stream.end(); + }); + return stream; + }, + }; +}); + +let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent; +let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined; +let agentDir: string; +let workspaceDir: string; + +beforeAll(async () => { + vi.useRealTimers(); + streamCallCount = 0; + responsePlan = []; + observedContexts = []; + ({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js")); + e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-yield-e2e-"); + ({ agentDir, workspaceDir } = e2eWorkspace); +}, 180_000); + +afterAll(async () => { + await cleanupEmbeddedPiRunnerTestWorkspace(e2eWorkspace); + e2eWorkspace = undefined; +}); + +const readSessionMessages = async (sessionFile: string) => { + const raw = await fs.readFile(sessionFile, "utf-8"); + return raw + .split(/\r?\n/) + .filter(Boolean) + .map( + (line) => + JSON.parse(line) as { type?: string; message?: { role?: string; content?: unknown } }, + ) + .filter((entry) => entry.type === "message") + .map((entry) => entry.message) as Array<{ role?: string; content?: unknown }>; +}; + +const readSessionEntries = async (sessionFile: string) => + (await fs.readFile(sessionFile, "utf-8")) + .split(/\r?\n/) + .filter(Boolean) + .map((line) => JSON.parse(line) as Record); + +describe("sessions_yield e2e", () => { + it( + "parent session is idle after yield and preserves the follow-up payload", + { timeout: 15_000 }, + async () => { + streamCallCount = 0; + responsePlan = ["toolUse"]; + observedContexts = []; + + const sessionId = "yield-e2e-parent"; + const sessionFile = path.join(workspaceDir, "session-yield-e2e.jsonl"); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-yield"]); + + const result = await runEmbeddedPiAgent({ + sessionId, + sessionKey: "agent:test:yield-e2e", + sessionFile, + workspaceDir, + config: cfg, + prompt: "Spawn subagent and yield.", + provider: "openai", + model: 
"mock-yield", + timeoutMs: 10_000, + agentDir, + runId: "run-yield-e2e-1", + enqueue: immediateEnqueue, + }); + + // 1. Run completed with end_turn (yield causes clean exit) + expect(result.meta.stopReason).toBe("end_turn"); + + // 2. No pending tool calls (yield is NOT a client tool call) + expect(result.meta.pendingToolCalls).toBeUndefined(); + + // 3. Parent session is IDLE — clearActiveEmbeddedRun ran in finally block + expect(isEmbeddedPiRunActive(sessionId)).toBe(false); + + // 4. Steer would fail — session not in ACTIVE_EMBEDDED_RUNS + expect(queueEmbeddedPiMessage(sessionId, "subagent result")).toBe(false); + + // 5. The yield stops at tool time — there is no second provider call. + expect(streamCallCount).toBe(1); + + // 6. Session transcript contains only the original assistant tool call. + const messages = await readSessionMessages(sessionFile); + const roles = messages.map((m) => m?.role); + expect(roles).toContain("user"); + expect(roles.filter((r) => r === "assistant")).toHaveLength(1); + + const firstAssistant = messages.find((m) => m?.role === "assistant"); + const content = firstAssistant?.content; + expect(Array.isArray(content)).toBe(true); + const toolCall = (content as Array<{ type?: string; name?: string }>).find( + (c) => c.type === "toolCall" && c.name === "sessions_yield", + ); + expect(toolCall).toBeDefined(); + + const entries = await readSessionEntries(sessionFile); + const yieldContext = entries.find( + (entry) => + entry.type === "custom_message" && entry.customType === "openclaw.sessions_yield", + ); + expect(yieldContext).toMatchObject({ + content: expect.stringContaining("Yielding turn."), + }); + + streamCallCount = 0; + responsePlan = ["stop"]; + observedContexts = []; + await runEmbeddedPiAgent({ + sessionId, + sessionKey: "agent:test:yield-e2e", + sessionFile, + workspaceDir, + config: cfg, + prompt: "Subagent finished with the requested result.", + provider: "openai", + model: "mock-yield", + timeoutMs: 10_000, + agentDir, + 
runId: "run-yield-e2e-2", + enqueue: immediateEnqueue, + }); + + const resumeContext = observedContexts[0] ?? []; + const resumeTexts = resumeContext.flatMap((message) => + Array.isArray(message.content) + ? (message.content as Array<{ type?: string; text?: string }>) + .filter((part) => part.type === "text" && typeof part.text === "string") + .map((part) => part.text ?? "") + : [], + ); + expect(resumeTexts.some((text) => text.includes("Yielding turn."))).toBe(true); + expect( + resumeTexts.some((text) => text.includes("Subagent finished with the requested result.")), + ).toBe(true); + }, + ); + + it( + "abort prevents subsequent tool calls from executing after yield", + { timeout: 15_000 }, + async () => { + streamCallCount = 0; + multiToolMode = true; + responsePlan = ["toolUse"]; + observedContexts = []; + + const sessionId = "yield-e2e-abort"; + const sessionFile = path.join(workspaceDir, "session-yield-abort.jsonl"); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-yield-abort"]); + + const result = await runEmbeddedPiAgent({ + sessionId, + sessionKey: "agent:test:yield-abort", + sessionFile, + workspaceDir, + config: cfg, + prompt: "Yield and then read a file.", + provider: "openai", + model: "mock-yield-abort", + timeoutMs: 10_000, + agentDir, + runId: "run-yield-abort-1", + enqueue: immediateEnqueue, + }); + + // Reset for other tests + multiToolMode = false; + + // 1. Run completed with end_turn despite the extra queued tool call + expect(result.meta.stopReason).toBe("end_turn"); + + // 2. Session is idle + expect(isEmbeddedPiRunActive(sessionId)).toBe(false); + + // 3. The yield prevented a post-tool provider call. + expect(streamCallCount).toBe(1); + + // 4. Transcript should contain sessions_yield but NOT a successful read result + const messages = await readSessionMessages(sessionFile); + const allContent = messages.flatMap((m) => + Array.isArray(m?.content) ? 
(m.content as Array<{ type?: string; name?: string }>) : [], + ); + const yieldCall = allContent.find( + (c) => c.type === "toolCall" && c.name === "sessions_yield", + ); + expect(yieldCall).toBeDefined(); + + // The read tool call should be in the assistant message (LLM requested it), + // but its result should NOT show a successful file read. + const readCall = allContent.find((c) => c.type === "toolCall" && c.name === "read"); + expect(readCall).toBeDefined(); // LLM asked for it... + + // ...but the file was never actually read (no tool result with file contents) + const toolResults = messages.filter((m) => m?.role === "toolResult"); + const readResult = toolResults.find((tr) => { + const content = tr?.content; + if (typeof content === "string") { + return content.includes("/etc/hostname"); + } + if (Array.isArray(content)) { + return (content as Array<{ text?: string }>).some((c) => + c.text?.includes("/etc/hostname"), + ); + } + return false; + }); + // If the read tool ran, its result would reference the file path. + // The abort should have prevented it from executing. 
+ expect(readResult).toBeUndefined(); + }, + ); +}); diff --git a/src/agents/pi-embedded-runner/anthropic-stream-wrappers.ts b/src/agents/pi-embedded-runner/anthropic-stream-wrappers.ts index 8add7890b41..e04de8a5d6b 100644 --- a/src/agents/pi-embedded-runner/anthropic-stream-wrappers.ts +++ b/src/agents/pi-embedded-runner/anthropic-stream-wrappers.ts @@ -1,11 +1,13 @@ import type { StreamFn } from "@mariozechner/pi-agent-core"; import { streamSimple } from "@mariozechner/pi-ai"; +import { resolveFastModeParam } from "../fast-mode.js"; import { requiresOpenAiCompatibleAnthropicToolPayload, usesOpenAiFunctionAnthropicToolSchema, usesOpenAiStringModeAnthropicToolChoice, } from "../provider-capabilities.js"; import { log } from "./logger.js"; +import { streamWithPayloadPatch } from "./stream-payload-utils.js"; const ANTHROPIC_CONTEXT_1M_BETA = "context-1m-2025-08-07"; const ANTHROPIC_1M_MODEL_PREFIXES = ["claude-opus-4", "claude-sonnet-4"] as const; @@ -18,6 +20,7 @@ const PI_AI_OAUTH_ANTHROPIC_BETAS = [ "oauth-2025-04-20", ...PI_AI_DEFAULT_ANTHROPIC_BETAS, ] as const; +type AnthropicServiceTier = "auto" | "standard_only"; type CacheRetention = "none" | "short" | "long"; @@ -53,6 +56,36 @@ function isAnthropicOAuthApiKey(apiKey: unknown): boolean { return typeof apiKey === "string" && apiKey.includes("sk-ant-oat"); } +function isAnthropicPublicApiBaseUrl(baseUrl: unknown): boolean { + if (baseUrl == null) { + return true; + } + if (typeof baseUrl !== "string" || !baseUrl.trim()) { + return true; + } + + try { + return new URL(baseUrl).hostname.toLowerCase() === "api.anthropic.com"; + } catch { + return baseUrl.toLowerCase().includes("api.anthropic.com"); + } +} + +function resolveAnthropicFastServiceTier(enabled: boolean): AnthropicServiceTier { + return enabled ? 
"auto" : "standard_only"; +} + +function hasOpenAiAnthropicToolPayloadCompatFlag(model: { compat?: unknown }): boolean { + if (!model.compat || typeof model.compat !== "object" || Array.isArray(model.compat)) { + return false; + } + + return ( + (model.compat as { requiresOpenAiAnthropicToolPayload?: unknown }) + .requiresOpenAiAnthropicToolPayload === true + ); +} + function requiresAnthropicToolPayloadCompatibilityForModel(model: { api?: unknown; provider?: unknown; @@ -68,15 +101,7 @@ function requiresAnthropicToolPayloadCompatibilityForModel(model: { ) { return true; } - - if (!model.compat || typeof model.compat !== "object" || Array.isArray(model.compat)) { - return false; - } - - return ( - (model.compat as { requiresOpenAiAnthropicToolPayload?: unknown }) - .requiresOpenAiAnthropicToolPayload === true - ); + return hasOpenAiAnthropicToolPayloadCompatFlag(model); } function usesOpenAiFunctionAnthropicToolSchemaForModel(model: { @@ -86,13 +111,7 @@ function usesOpenAiFunctionAnthropicToolSchemaForModel(model: { if (typeof model.provider === "string" && usesOpenAiFunctionAnthropicToolSchema(model.provider)) { return true; } - if (!model.compat || typeof model.compat !== "object" || Array.isArray(model.compat)) { - return false; - } - return ( - (model.compat as { requiresOpenAiAnthropicToolPayload?: unknown }) - .requiresOpenAiAnthropicToolPayload === true - ); + return hasOpenAiAnthropicToolPayloadCompatFlag(model); } function usesOpenAiStringModeAnthropicToolChoiceForModel(model: { @@ -105,13 +124,7 @@ function usesOpenAiStringModeAnthropicToolChoiceForModel(model: { ) { return true; } - if (!model.compat || typeof model.compat !== "object" || Array.isArray(model.compat)) { - return false; - } - return ( - (model.compat as { requiresOpenAiAnthropicToolPayload?: unknown }) - .requiresOpenAiAnthropicToolPayload === true - ); + return hasOpenAiAnthropicToolPayloadCompatFlag(model); } function normalizeOpenAiFunctionAnthropicToolDefinition( @@ -277,7 +290,7 @@ 
export function createAnthropicToolPayloadCompatibilityWrapper( const originalOnPayload = options?.onPayload; return underlying(model, context, { ...options, - onPayload: (payload, payloadModel) => { + onPayload: (payload) => { if ( payload && typeof payload === "object" && @@ -298,12 +311,42 @@ export function createAnthropicToolPayloadCompatibilityWrapper( ); } } - return originalOnPayload?.(payload, payloadModel); + return originalOnPayload?.(payload, model); }, }); }; } +export function createAnthropicFastModeWrapper( + baseStreamFn: StreamFn | undefined, + enabled: boolean, +): StreamFn { + const underlying = baseStreamFn ?? streamSimple; + const serviceTier = resolveAnthropicFastServiceTier(enabled); + return (model, context, options) => { + if ( + model.api !== "anthropic-messages" || + model.provider !== "anthropic" || + !isAnthropicPublicApiBaseUrl(model.baseUrl) || + isAnthropicOAuthApiKey(options?.apiKey) + ) { + return underlying(model, context, options); + } + + return streamWithPayloadPatch(underlying, model, context, options, (payloadObj) => { + if (payloadObj.service_tier === undefined) { + payloadObj.service_tier = serviceTier; + } + }); + }; +} + +export function resolveAnthropicFastMode( + extraParams: Record | undefined, +): boolean | undefined { + return resolveFastModeParam(extraParams); +} + export function createBedrockNoCacheWrapper(baseStreamFn: StreamFn | undefined): StreamFn { const underlying = baseStreamFn ?? 
streamSimple; return (model, context, options) => diff --git a/src/agents/pi-embedded-runner/compact.hooks.test.ts b/src/agents/pi-embedded-runner/compact.hooks.test.ts index 9ef2a3efe76..af7cfd7e1bf 100644 --- a/src/agents/pi-embedded-runner/compact.hooks.test.ts +++ b/src/agents/pi-embedded-runner/compact.hooks.test.ts @@ -1,34 +1,72 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; +import { onSessionTranscriptUpdate } from "../../sessions/transcript-events.js"; const { hookRunner, ensureRuntimePluginsLoaded, + resolveContextEngineMock, resolveModelMock, sessionCompactImpl, triggerInternalHook, sanitizeSessionHistoryMock, -} = vi.hoisted(() => ({ - hookRunner: { - hasHooks: vi.fn(), - runBeforeCompaction: vi.fn(), - runAfterCompaction: vi.fn(), - }, - ensureRuntimePluginsLoaded: vi.fn(), - resolveModelMock: vi.fn(() => ({ - model: { provider: "openai", api: "responses", id: "fake", input: [] }, - error: null, - authStorage: { setRuntimeApiKey: vi.fn() }, - modelRegistry: {}, - })), - sessionCompactImpl: vi.fn(async () => ({ - summary: "summary", - firstKeptEntryId: "entry-1", - tokensBefore: 120, - details: { ok: true }, - })), - triggerInternalHook: vi.fn(), - sanitizeSessionHistoryMock: vi.fn(async (params: { messages: unknown[] }) => params.messages), -})); + contextEngineCompactMock, + getMemorySearchManagerMock, + resolveMemorySearchConfigMock, + resolveSessionAgentIdMock, + estimateTokensMock, +} = vi.hoisted(() => { + const contextEngineCompactMock = vi.fn(async () => ({ + ok: true as boolean, + compacted: true as boolean, + reason: undefined as string | undefined, + result: { summary: "engine-summary", tokensAfter: 50 } as + | { summary: string; tokensAfter: number } + | undefined, + })); + + return { + hookRunner: { + hasHooks: vi.fn(), + runBeforeCompaction: vi.fn(), + runAfterCompaction: vi.fn(), + }, + ensureRuntimePluginsLoaded: vi.fn(), + resolveContextEngineMock: vi.fn(async () => ({ + info: { ownsCompaction: true }, + compact: 
contextEngineCompactMock, + })), + resolveModelMock: vi.fn(() => ({ + model: { provider: "openai", api: "responses", id: "fake", input: [] }, + error: null, + authStorage: { setRuntimeApiKey: vi.fn() }, + modelRegistry: {}, + })), + sessionCompactImpl: vi.fn(async () => ({ + summary: "summary", + firstKeptEntryId: "entry-1", + tokensBefore: 120, + details: { ok: true }, + })), + triggerInternalHook: vi.fn(), + sanitizeSessionHistoryMock: vi.fn(async (params: { messages: unknown[] }) => params.messages), + contextEngineCompactMock, + getMemorySearchManagerMock: vi.fn(async () => ({ + manager: { + sync: vi.fn(async () => {}), + }, + })), + resolveMemorySearchConfigMock: vi.fn(() => ({ + sources: ["sessions"], + sync: { + sessions: { + postCompactionForce: true, + }, + }, + })), + resolveSessionAgentIdMock: vi.fn(() => "main"), + estimateTokensMock: vi.fn((_message?: unknown) => 10), + }; +}); vi.mock("../../plugins/hook-runner-global.js", () => ({ getGlobalHookRunner: () => hookRunner, @@ -48,8 +86,15 @@ vi.mock("../../hooks/internal-hooks.js", async () => { }; }); +vi.mock("@mariozechner/pi-ai/oauth", () => ({ + getOAuthApiKey: vi.fn(), + getOAuthProviders: vi.fn(() => []), +})); + vi.mock("@mariozechner/pi-coding-agent", () => { return { + AuthStorage: class AuthStorage {}, + ModelRegistry: class ModelRegistry {}, createAgentSession: vi.fn(async () => { const session = { sessionId: "session-1", @@ -86,7 +131,7 @@ vi.mock("@mariozechner/pi-coding-agent", () => { SettingsManager: { create: vi.fn(() => ({})), }, - estimateTokens: vi.fn(() => 10), + estimateTokens: estimateTokensMock, }; }); @@ -123,6 +168,24 @@ vi.mock("../session-write-lock.js", () => ({ resolveSessionLockMaxHoldFromTimeout: vi.fn(() => 0), })); +vi.mock("../../context-engine/index.js", () => ({ + ensureContextEnginesInitialized: vi.fn(), + resolveContextEngine: resolveContextEngineMock, +})); + +vi.mock("../../process/command-queue.js", () => ({ + enqueueCommandInLane: vi.fn((_lane: unknown, task: 
() => unknown) => task()), +})); + +vi.mock("./lanes.js", () => ({ + resolveSessionLane: vi.fn(() => "test-session-lane"), + resolveGlobalLane: vi.fn(() => "test-global-lane"), +})); + +vi.mock("../context-window-guard.js", () => ({ + resolveContextWindowInfo: vi.fn(() => ({ tokens: 128_000 })), +})); + vi.mock("../bootstrap-files.js", () => ({ makeBootstrapWarn: vi.fn(() => () => {}), resolveBootstrapContextForRun: vi.fn(async () => ({ contextFiles: [] })), @@ -160,7 +223,7 @@ vi.mock("../transcript-policy.js", () => ({ })); vi.mock("./extensions.js", () => ({ - buildEmbeddedExtensionFactories: vi.fn(() => []), + buildEmbeddedExtensionFactories: vi.fn(() => ({ factories: [] })), })); vi.mock("./history.js", () => ({ @@ -180,9 +243,18 @@ vi.mock("../agent-paths.js", () => ({ })); vi.mock("../agent-scope.js", () => ({ + resolveSessionAgentId: resolveSessionAgentIdMock, resolveSessionAgentIds: vi.fn(() => ({ defaultAgentId: "main", sessionAgentId: "main" })), })); +vi.mock("../memory-search.js", () => ({ + resolveMemorySearchConfig: resolveMemorySearchConfigMock, +})); + +vi.mock("../../memory/index.js", () => ({ + getMemorySearchManager: getMemorySearchManagerMock, +})); + vi.mock("../date-time.js", () => ({ formatUserTime: vi.fn(() => ""), resolveUserTimeFormat: vi.fn(() => ""), @@ -208,6 +280,7 @@ vi.mock("../../config/channel-capabilities.js", () => ({ })); vi.mock("../../utils/message-channel.js", () => ({ + INTERNAL_MESSAGE_CHANNEL: "webchat", normalizeMessageChannel: vi.fn(() => undefined), })); @@ -251,7 +324,58 @@ vi.mock("./utils.js", () => ({ import { getApiProvider, unregisterApiProviders } from "@mariozechner/pi-ai"; import { getCustomApiRegistrySourceId } from "../custom-api-registry.js"; -import { compactEmbeddedPiSessionDirect } from "./compact.js"; +import { compactEmbeddedPiSessionDirect, compactEmbeddedPiSession } from "./compact.js"; + +const TEST_SESSION_ID = "session-1"; +const TEST_SESSION_KEY = "agent:main:session-1"; +const TEST_SESSION_FILE 
= "/tmp/session.jsonl"; +const TEST_WORKSPACE_DIR = "/tmp"; +const TEST_CUSTOM_INSTRUCTIONS = "focus on decisions"; + +function mockResolvedModel() { + resolveModelMock.mockReset(); + resolveModelMock.mockReturnValue({ + model: { provider: "openai", api: "responses", id: "fake", input: [] }, + error: null, + authStorage: { setRuntimeApiKey: vi.fn() }, + modelRegistry: {}, + }); +} + +function compactionConfig(mode: "await" | "off" | "async") { + return { + agents: { + defaults: { + compaction: { + postIndexSync: mode, + }, + }, + }, + } as never; +} + +function directCompactionArgs(overrides: Record = {}) { + return { + sessionId: TEST_SESSION_ID, + sessionKey: TEST_SESSION_KEY, + sessionFile: TEST_SESSION_FILE, + workspaceDir: TEST_WORKSPACE_DIR, + customInstructions: TEST_CUSTOM_INSTRUCTIONS, + ...overrides, + }; +} + +function wrappedCompactionArgs(overrides: Record = {}) { + return { + sessionId: TEST_SESSION_ID, + sessionKey: TEST_SESSION_KEY, + sessionFile: TEST_SESSION_FILE, + workspaceDir: TEST_WORKSPACE_DIR, + customInstructions: TEST_CUSTOM_INSTRUCTIONS, + enqueue: async (task: () => Promise | T) => await task(), + ...overrides, + }; +} const sessionHook = (action: string) => triggerInternalHook.mock.calls.find( @@ -265,13 +389,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { hookRunner.hasHooks.mockReset(); hookRunner.runBeforeCompaction.mockReset(); hookRunner.runAfterCompaction.mockReset(); - resolveModelMock.mockReset(); - resolveModelMock.mockReturnValue({ - model: { provider: "openai", api: "responses", id: "fake", input: [] }, - error: null, - authStorage: { setRuntimeApiKey: vi.fn() }, - modelRegistry: {}, - }); + mockResolvedModel(); sessionCompactImpl.mockReset(); sessionCompactImpl.mockResolvedValue({ summary: "summary", @@ -283,9 +401,36 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { sanitizeSessionHistoryMock.mockImplementation(async (params: { messages: unknown[] }) => { return params.messages; }); + 
getMemorySearchManagerMock.mockReset(); + getMemorySearchManagerMock.mockResolvedValue({ + manager: { + sync: vi.fn(async () => {}), + }, + }); + resolveMemorySearchConfigMock.mockReset(); + resolveMemorySearchConfigMock.mockReturnValue({ + sources: ["sessions"], + sync: { + sessions: { + postCompactionForce: true, + }, + }, + }); + resolveSessionAgentIdMock.mockReset(); + resolveSessionAgentIdMock.mockReturnValue("main"); + estimateTokensMock.mockReset(); + estimateTokensMock.mockReturnValue(10); unregisterApiProviders(getCustomApiRegistrySourceId("ollama")); }); + async function runDirectCompaction(customInstructions = TEST_CUSTOM_INSTRUCTIONS) { + return await compactEmbeddedPiSessionDirect( + directCompactionArgs({ + customInstructions, + }), + ); + } + it("bootstraps runtime plugins with the resolved workspace", async () => { await compactEmbeddedPiSessionDirect({ sessionId: "session-1", @@ -383,13 +528,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { hookRunner.hasHooks.mockReturnValue(true); sanitizeSessionHistoryMock.mockResolvedValue([]); - const result = await compactEmbeddedPiSessionDirect({ - sessionId: "session-1", - sessionKey: "agent:main:session-1", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - customInstructions: "focus on decisions", - }); + const result = await runDirectCompaction(); expect(result.ok).toBe(true); const beforeContext = sessionHook("compact:before")?.context; @@ -400,6 +539,204 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { tokenCount: 0, }); }); + it("emits a transcript update after successful compaction", async () => { + const listener = vi.fn(); + const cleanup = onSessionTranscriptUpdate(listener); + + try { + const result = await compactEmbeddedPiSessionDirect({ + sessionId: "session-1", + sessionKey: "agent:main:session-1", + sessionFile: " /tmp/session.jsonl ", + workspaceDir: "/tmp", + customInstructions: "focus on decisions", + }); + + expect(result.ok).toBe(true); + 
expect(listener).toHaveBeenCalledTimes(1); + expect(listener).toHaveBeenCalledWith({ sessionFile: "/tmp/session.jsonl" }); + } finally { + cleanup(); + } + }); + + it("preserves tokensAfter when full-session context exceeds result.tokensBefore", async () => { + estimateTokensMock.mockImplementation((message: unknown) => { + const role = (message as { role?: string }).role; + if (role === "user") { + return 30; + } + if (role === "assistant") { + return 20; + } + return 5; + }); + sessionCompactImpl.mockResolvedValue({ + summary: "summary", + firstKeptEntryId: "entry-1", + tokensBefore: 20, + details: { ok: true }, + }); + + const result = await runDirectCompaction(); + + expect(result).toMatchObject({ + ok: true, + compacted: true, + result: { + tokensBefore: 20, + tokensAfter: 30, + }, + }); + expect(sessionHook("compact:after")?.context?.tokenCount).toBe(30); + }); + + it("treats pre-compaction token estimation failures as a no-op sanity check", async () => { + estimateTokensMock.mockImplementation((message: unknown) => { + const role = (message as { role?: string }).role; + if (role === "assistant") { + throw new Error("legacy message"); + } + if (role === "user") { + return 30; + } + return 5; + }); + sessionCompactImpl.mockResolvedValue({ + summary: "summary", + firstKeptEntryId: "entry-1", + tokensBefore: 20, + details: { ok: true }, + }); + + const result = await compactEmbeddedPiSessionDirect({ + sessionId: "session-1", + sessionKey: "agent:main:session-1", + sessionFile: "/tmp/session.jsonl", + workspaceDir: "/tmp", + customInstructions: "focus on decisions", + }); + + expect(result).toMatchObject({ + ok: true, + compacted: true, + result: { + tokensAfter: 30, + }, + }); + expect(sessionHook("compact:after")?.context?.tokenCount).toBe(30); + }); + + it("skips sync in await mode when postCompactionForce is false", async () => { + const sync = vi.fn(async () => {}); + getMemorySearchManagerMock.mockResolvedValue({ manager: { sync } }); + 
resolveMemorySearchConfigMock.mockReturnValue({ + sources: ["sessions"], + sync: { + sessions: { + postCompactionForce: false, + }, + }, + }); + + const result = await compactEmbeddedPiSessionDirect( + directCompactionArgs({ + config: compactionConfig("await"), + }), + ); + + expect(result.ok).toBe(true); + expect(resolveSessionAgentIdMock).toHaveBeenCalledWith({ + sessionKey: TEST_SESSION_KEY, + config: expect.any(Object), + }); + expect(getMemorySearchManagerMock).not.toHaveBeenCalled(); + expect(sync).not.toHaveBeenCalled(); + }); + + it("awaits post-compaction memory sync in await mode when postCompactionForce is true", async () => { + let releaseSync: (() => void) | undefined; + const syncGate = new Promise((resolve) => { + releaseSync = resolve; + }); + const sync = vi.fn(() => syncGate); + getMemorySearchManagerMock.mockResolvedValue({ manager: { sync } }); + let settled = false; + + const resultPromise = compactEmbeddedPiSessionDirect( + directCompactionArgs({ + config: compactionConfig("await"), + }), + ); + + void resultPromise.then(() => { + settled = true; + }); + await vi.waitFor(() => { + expect(sync).toHaveBeenCalledWith({ + reason: "post-compaction", + sessionFiles: [TEST_SESSION_FILE], + }); + }); + expect(settled).toBe(false); + releaseSync?.(); + const result = await resultPromise; + expect(result.ok).toBe(true); + expect(settled).toBe(true); + }); + + it("skips post-compaction memory sync when the mode is off", async () => { + const sync = vi.fn(async () => {}); + getMemorySearchManagerMock.mockResolvedValue({ manager: { sync } }); + + const result = await compactEmbeddedPiSessionDirect( + directCompactionArgs({ + config: compactionConfig("off"), + }), + ); + + expect(result.ok).toBe(true); + expect(resolveSessionAgentIdMock).not.toHaveBeenCalled(); + expect(getMemorySearchManagerMock).not.toHaveBeenCalled(); + expect(sync).not.toHaveBeenCalled(); + }); + + it("fires post-compaction memory sync without awaiting it in async mode", async () => { + 
const sync = vi.fn(async () => {}); + let resolveManager: ((value: { manager: { sync: typeof sync } }) => void) | undefined; + const managerGate = new Promise<{ manager: { sync: typeof sync } }>((resolve) => { + resolveManager = resolve; + }); + getMemorySearchManagerMock.mockImplementation(() => managerGate); + let settled = false; + + const resultPromise = compactEmbeddedPiSessionDirect( + directCompactionArgs({ + config: compactionConfig("async"), + }), + ); + + await vi.waitFor(() => { + expect(getMemorySearchManagerMock).toHaveBeenCalledTimes(1); + }); + void resultPromise.then(() => { + settled = true; + }); + await vi.waitFor(() => { + expect(settled).toBe(true); + }); + expect(sync).not.toHaveBeenCalled(); + resolveManager?.({ manager: { sync } }); + await managerGate; + await vi.waitFor(() => { + expect(sync).toHaveBeenCalledWith({ + reason: "post-compaction", + sessionFiles: [TEST_SESSION_FILE], + }); + }); + const result = await resultPromise; + expect(result.ok).toBe(true); + }); it("registers the Ollama api provider before compaction", async () => { resolveModelMock.mockReturnValue({ @@ -436,3 +773,138 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { expect(result.ok).toBe(true); }); }); + +describe("compactEmbeddedPiSession hooks (ownsCompaction engine)", () => { + beforeEach(() => { + hookRunner.hasHooks.mockReset(); + hookRunner.runBeforeCompaction.mockReset(); + hookRunner.runAfterCompaction.mockReset(); + resolveContextEngineMock.mockReset(); + resolveContextEngineMock.mockResolvedValue({ + info: { ownsCompaction: true }, + compact: contextEngineCompactMock, + }); + contextEngineCompactMock.mockReset(); + contextEngineCompactMock.mockResolvedValue({ + ok: true, + compacted: true, + reason: undefined, + result: { summary: "engine-summary", tokensAfter: 50 }, + }); + mockResolvedModel(); + }); + + it("fires before_compaction with sentinel -1 and after_compaction on success", async () => { + hookRunner.hasHooks.mockReturnValue(true); + + 
const result = await compactEmbeddedPiSession( + wrappedCompactionArgs({ + messageChannel: "telegram", + }), + ); + + expect(result.ok).toBe(true); + expect(result.compacted).toBe(true); + + expect(hookRunner.runBeforeCompaction).toHaveBeenCalledWith( + { messageCount: -1, sessionFile: TEST_SESSION_FILE }, + expect.objectContaining({ + sessionKey: TEST_SESSION_KEY, + messageProvider: "telegram", + }), + ); + expect(hookRunner.runAfterCompaction).toHaveBeenCalledWith( + { + messageCount: -1, + compactedCount: -1, + tokenCount: 50, + sessionFile: TEST_SESSION_FILE, + }, + expect.objectContaining({ + sessionKey: TEST_SESSION_KEY, + messageProvider: "telegram", + }), + ); + }); + + it("emits a transcript update and post-compaction memory sync on the engine-owned path", async () => { + const listener = vi.fn(); + const cleanup = onSessionTranscriptUpdate(listener); + const sync = vi.fn(async () => {}); + getMemorySearchManagerMock.mockResolvedValue({ manager: { sync } }); + + try { + const result = await compactEmbeddedPiSession( + wrappedCompactionArgs({ + sessionFile: ` ${TEST_SESSION_FILE} `, + config: compactionConfig("await"), + }), + ); + + expect(result.ok).toBe(true); + expect(listener).toHaveBeenCalledTimes(1); + expect(listener).toHaveBeenCalledWith({ sessionFile: TEST_SESSION_FILE }); + expect(sync).toHaveBeenCalledWith({ + reason: "post-compaction", + sessionFiles: [TEST_SESSION_FILE], + }); + } finally { + cleanup(); + } + }); + + it("does not fire after_compaction when compaction fails", async () => { + hookRunner.hasHooks.mockReturnValue(true); + const sync = vi.fn(async () => {}); + getMemorySearchManagerMock.mockResolvedValue({ manager: { sync } }); + contextEngineCompactMock.mockResolvedValue({ + ok: false, + compacted: false, + reason: "nothing to compact", + result: undefined, + }); + + const result = await compactEmbeddedPiSession(wrappedCompactionArgs()); + + expect(result.ok).toBe(false); + 
expect(hookRunner.runBeforeCompaction).toHaveBeenCalled(); + expect(hookRunner.runAfterCompaction).not.toHaveBeenCalled(); + expect(sync).not.toHaveBeenCalled(); + }); + + it("does not duplicate transcript updates or sync in the wrapper when the engine delegates compaction", async () => { + const listener = vi.fn(); + const cleanup = onSessionTranscriptUpdate(listener); + const sync = vi.fn(async () => {}); + getMemorySearchManagerMock.mockResolvedValue({ manager: { sync } }); + resolveContextEngineMock.mockResolvedValue({ + info: { ownsCompaction: false }, + compact: contextEngineCompactMock, + }); + + try { + const result = await compactEmbeddedPiSession( + wrappedCompactionArgs({ + config: compactionConfig("await"), + }), + ); + + expect(result.ok).toBe(true); + expect(listener).not.toHaveBeenCalled(); + expect(sync).not.toHaveBeenCalled(); + } finally { + cleanup(); + } + }); + + it("catches and logs hook exceptions without aborting compaction", async () => { + hookRunner.hasHooks.mockReturnValue(true); + hookRunner.runBeforeCompaction.mockRejectedValue(new Error("hook boom")); + + const result = await compactEmbeddedPiSession(wrappedCompactionArgs()); + + expect(result.ok).toBe(true); + expect(result.compacted).toBe(true); + expect(contextEngineCompactMock).toHaveBeenCalled(); + }); +}); diff --git a/src/agents/pi-embedded-runner/compact.ts b/src/agents/pi-embedded-runner/compact.ts index 91f99571db4..8c490e113d4 100644 --- a/src/agents/pi-embedded-runner/compact.ts +++ b/src/agents/pi-embedded-runner/compact.ts @@ -18,9 +18,11 @@ import { import { createInternalHookEvent, triggerInternalHook } from "../../hooks/internal-hooks.js"; import { getMachineDisplayName } from "../../infra/machine-name.js"; import { generateSecureToken } from "../../infra/secure-random.js"; +import { getMemorySearchManager } from "../../memory/index.js"; import { getGlobalHookRunner } from "../../plugins/hook-runner-global.js"; import { type enqueueCommand, enqueueCommandInLane } from 
"../../process/command-queue.js"; import { isCronSessionKey, isSubagentSessionKey } from "../../routing/session-key.js"; +import { emitSessionTranscriptUpdate } from "../../sessions/transcript-events.js"; import { resolveSignalReactionLevel } from "../../signal/reaction-level.js"; import { resolveTelegramInlineButtonsScope } from "../../telegram/inline-buttons.js"; import { resolveTelegramReactionLevel } from "../../telegram/reaction-level.js"; @@ -29,7 +31,7 @@ import { resolveUserPath } from "../../utils.js"; import { normalizeMessageChannel } from "../../utils/message-channel.js"; import { isReasoningTagProvider } from "../../utils/provider-utils.js"; import { resolveOpenClawAgentDir } from "../agent-paths.js"; -import { resolveSessionAgentIds } from "../agent-scope.js"; +import { resolveSessionAgentId, resolveSessionAgentIds } from "../agent-scope.js"; import type { ExecElevatedDefaults } from "../bash-tools.js"; import { makeBootstrapWarn, resolveBootstrapContextForRun } from "../bootstrap-files.js"; import { listChannelSupportedActions, resolveChannelMessageToolHints } from "../channel-tools.js"; @@ -38,7 +40,12 @@ import { ensureCustomApiRegistered } from "../custom-api-registry.js"; import { formatUserTime, resolveUserTimeFormat, resolveUserTimezone } from "../date-time.js"; import { DEFAULT_CONTEXT_TOKENS, DEFAULT_MODEL, DEFAULT_PROVIDER } from "../defaults.js"; import { resolveOpenClawDocsPath } from "../docs-path.js"; -import { getApiKeyForModel, resolveModelAuthMode } from "../model-auth.js"; +import { resolveMemorySearchConfig } from "../memory-search.js"; +import { + applyLocalNoAuthHeaderOverride, + getApiKeyForModel, + resolveModelAuthMode, +} from "../model-auth.js"; import { supportsModelTools } from "../model-tool-support.js"; import { ensureOpenClawModelsJson } from "../models-config.js"; import { createConfiguredOllamaStreamFn } from "../ollama-stream.js"; @@ -114,6 +121,8 @@ export type CompactEmbeddedPiSessionParams = { /** Whether the sender 
is an owner (required for owner-only tools). */ senderIsOwner?: boolean; sessionFile: string; + /** Optional caller-observed live prompt tokens used for compaction diagnostics. */ + currentTokenCount?: number; workspaceDir: string; agentDir?: string; config?: OpenClawConfig; @@ -152,6 +161,12 @@ function createCompactionDiagId(): string { return `cmp-${Date.now().toString(36)}-${generateSecureToken(4)}`; } +function normalizeObservedTokenCount(value: unknown): number | undefined { + return typeof value === "number" && Number.isFinite(value) && value > 0 + ? Math.floor(value) + : undefined; +} + function getMessageTextChars(msg: AgentMessage): number { const content = (msg as { content?: unknown }).content; if (typeof content === "string") { @@ -228,6 +243,9 @@ function classifyCompactionReason(reason?: string): string { if (text.includes("already compacted")) { return "already_compacted_recently"; } + if (text.includes("still exceeds target")) { + return "live_context_still_exceeds_target"; + } if (text.includes("guard")) { return "guard_blocked"; } @@ -256,6 +274,95 @@ function classifyCompactionReason(reason?: string): string { return "unknown"; } +function resolvePostCompactionIndexSyncMode(config?: OpenClawConfig): "off" | "async" | "await" { + const mode = config?.agents?.defaults?.compaction?.postIndexSync; + if (mode === "off" || mode === "async" || mode === "await") { + return mode; + } + return "async"; +} + +async function runPostCompactionSessionMemorySync(params: { + config?: OpenClawConfig; + sessionKey?: string; + sessionFile: string; +}): Promise { + if (!params.config) { + return; + } + try { + const sessionFile = params.sessionFile.trim(); + if (!sessionFile) { + return; + } + const agentId = resolveSessionAgentId({ + sessionKey: params.sessionKey, + config: params.config, + }); + const resolvedMemory = resolveMemorySearchConfig(params.config, agentId); + if (!resolvedMemory || !resolvedMemory.sources.includes("sessions")) { + return; + } + if 
(!resolvedMemory.sync.sessions.postCompactionForce) { + return; + } + const { manager } = await getMemorySearchManager({ + cfg: params.config, + agentId, + }); + if (!manager?.sync) { + return; + } + const syncTask = manager.sync({ + reason: "post-compaction", + sessionFiles: [sessionFile], + }); + await syncTask; + } catch (err) { + log.warn(`memory sync skipped (post-compaction): ${String(err)}`); + } +} + +function syncPostCompactionSessionMemory(params: { + config?: OpenClawConfig; + sessionKey?: string; + sessionFile: string; + mode: "off" | "async" | "await"; +}): Promise { + if (params.mode === "off" || !params.config) { + return Promise.resolve(); + } + + const syncTask = runPostCompactionSessionMemorySync({ + config: params.config, + sessionKey: params.sessionKey, + sessionFile: params.sessionFile, + }); + if (params.mode === "await") { + return syncTask; + } + void syncTask; + return Promise.resolve(); +} + +async function runPostCompactionSideEffects(params: { + config?: OpenClawConfig; + sessionKey?: string; + sessionFile: string; +}): Promise { + const sessionFile = params.sessionFile.trim(); + if (!sessionFile) { + return; + } + emitSessionTranscriptUpdate(sessionFile); + await syncPostCompactionSessionMemory({ + config: params.config, + sessionKey: params.sessionKey, + sessionFile, + mode: resolvePostCompactionIndexSyncMode(params.config), + }); +} + /** * Core compaction logic without lane queueing. * Use this when already inside a session/global lane to avoid deadlocks. @@ -326,8 +433,9 @@ export async function compactEmbeddedPiSessionDirect( const reason = error ?? 
`Unknown model: ${provider}/${modelId}`; return fail(reason); } + let apiKeyInfo: Awaited> | null = null; try { - const apiKeyInfo = await getApiKeyForModel({ + apiKeyInfo = await getApiKeyForModel({ model, cfg: params.config, profileId: authProfileId, @@ -415,10 +523,12 @@ export async function compactEmbeddedPiSessionDirect( modelContextWindow: model.contextWindow, defaultTokens: DEFAULT_CONTEXT_TOKENS, }); - const effectiveModel = + const effectiveModel = applyLocalNoAuthHeaderOverride( ctxInfo.tokens < (model.contextWindow ?? Infinity) ? { ...model, contextWindow: ctxInfo.tokens } - : model; + : model, + apiKeyInfo, + ); const runAbortController = new AbortController(); const toolsRaw = createOpenClawCodingTools({ @@ -701,6 +811,7 @@ export async function compactEmbeddedPiSessionDirect( const missingSessionKey = !params.sessionKey || !params.sessionKey.trim(); const hookSessionKey = params.sessionKey?.trim() || params.sessionId; const hookRunner = getGlobalHookRunner(); + const observedTokenCount = normalizeObservedTokenCount(params.currentTokenCount); const messageCountOriginal = originalMessages.length; let tokenCountOriginal: number | undefined; try { @@ -712,14 +823,16 @@ export async function compactEmbeddedPiSessionDirect( tokenCountOriginal = undefined; } const messageCountBefore = session.messages.length; - let tokenCountBefore: number | undefined; - try { - tokenCountBefore = 0; - for (const message of session.messages) { - tokenCountBefore += estimateTokens(message); + let tokenCountBefore = observedTokenCount; + if (tokenCountBefore === undefined) { + try { + tokenCountBefore = 0; + for (const message of session.messages) { + tokenCountBefore += estimateTokens(message); + } + } catch { + tokenCountBefore = undefined; } - } catch { - tokenCountBefore = undefined; } // TODO(#7175): Consider exposing full message snapshots or pre-compaction injection // hooks; current events only report counts/metadata. 
@@ -791,9 +904,25 @@ export async function compactEmbeddedPiSessionDirect( // Measure compactedCount from the original pre-limiting transcript so compaction // lifecycle metrics represent total reduction through the compaction pipeline. const messageCountCompactionInput = messageCountOriginal; + // Estimate full session tokens BEFORE compaction (including system prompt, + // bootstrap context, workspace files, and all history). This is needed for + // a correct sanity check — result.tokensBefore only covers the summarizable + // history subset, not the full session. + let fullSessionTokensBefore = 0; + try { + fullSessionTokensBefore = limited.reduce((sum, msg) => sum + estimateTokens(msg), 0); + } catch { + // If token estimation throws on a malformed message, fall back to 0 so + // the sanity check below becomes a no-op instead of crashing compaction. + } const result = await compactWithSafetyTimeout(() => session.compact(params.customInstructions), ); + await runPostCompactionSideEffects({ + config: params.config, + sessionKey: params.sessionKey, + sessionFile: params.sessionFile, + }); // Estimate tokens after compaction by summing token estimates for remaining messages let tokensAfter: number | undefined; try { @@ -801,8 +930,15 @@ export async function compactEmbeddedPiSessionDirect( for (const message of session.messages) { tokensAfter += estimateTokens(message); } - // Sanity check: tokensAfter should be less than tokensBefore - if (tokensAfter > result.tokensBefore) { + // Sanity check: compare against the best full-session pre-compaction baseline. + // Prefer the provider-observed live count when available; otherwise use the + // heuristic full-session estimate with a 10% margin for counter jitter. + const sanityCheckBaseline = observedTokenCount ?? fullSessionTokensBefore; + if ( + sanityCheckBaseline > 0 && + tokensAfter > + (observedTokenCount !== undefined ? 
sanityCheckBaseline : sanityCheckBaseline * 1.1) + ) { tokensAfter = undefined; // Don't trust the estimate } } catch { @@ -876,7 +1012,7 @@ export async function compactEmbeddedPiSessionDirect( result: { summary: result.summary, firstKeptEntryId: result.firstKeptEntryId, - tokensBefore: result.tokensBefore, + tokensBefore: observedTokenCount ?? result.tokensBefore, tokensAfter, details: result.details, }, @@ -936,14 +1072,77 @@ export async function compactEmbeddedPiSession( modelContextWindow: ceModel?.contextWindow, defaultTokens: DEFAULT_CONTEXT_TOKENS, }); + // When the context engine owns compaction, its compact() implementation + // bypasses compactEmbeddedPiSessionDirect (which fires the hooks internally). + // Fire before_compaction / after_compaction hooks here so plugin subscribers + // are notified regardless of which engine is active. + const engineOwnsCompaction = contextEngine.info.ownsCompaction === true; + const hookRunner = engineOwnsCompaction ? getGlobalHookRunner() : null; + const hookSessionKey = params.sessionKey?.trim() || params.sessionId; + const { sessionAgentId } = resolveSessionAgentIds({ + sessionKey: params.sessionKey, + config: params.config, + }); + const resolvedMessageProvider = params.messageChannel ?? params.messageProvider; + const hookCtx = { + sessionId: params.sessionId, + agentId: sessionAgentId, + sessionKey: hookSessionKey, + workspaceDir: resolveUserPath(params.workspaceDir), + messageProvider: resolvedMessageProvider, + }; + // Engine-owned compaction doesn't load the transcript at this level, so + // message counts are unavailable. We pass sessionFile so hook subscribers + // can read the transcript themselves if they need exact counts. + if (hookRunner?.hasHooks("before_compaction")) { + try { + await hookRunner.runBeforeCompaction( + { + messageCount: -1, + sessionFile: params.sessionFile, + }, + hookCtx, + ); + } catch (err) { + log.warn("before_compaction hook failed", { + errorMessage: err instanceof Error ? 
err.message : String(err), + }); + } + } const result = await contextEngine.compact({ sessionId: params.sessionId, + sessionKey: params.sessionKey, sessionFile: params.sessionFile, tokenBudget: ceCtxInfo.tokens, + currentTokenCount: params.currentTokenCount, customInstructions: params.customInstructions, force: params.trigger === "manual", runtimeContext: params as Record, }); + if (engineOwnsCompaction && result.ok && result.compacted) { + await runPostCompactionSideEffects({ + config: params.config, + sessionKey: params.sessionKey, + sessionFile: params.sessionFile, + }); + } + if (result.ok && result.compacted && hookRunner?.hasHooks("after_compaction")) { + try { + await hookRunner.runAfterCompaction( + { + messageCount: -1, + compactedCount: -1, + tokenCount: result.result?.tokensAfter, + sessionFile: params.sessionFile, + }, + hookCtx, + ); + } catch (err) { + log.warn("after_compaction hook failed", { + errorMessage: err instanceof Error ? err.message : String(err), + }); + } + } return { ok: result.ok, compacted: result.compacted, diff --git a/src/agents/pi-embedded-runner/extensions.ts b/src/agents/pi-embedded-runner/extensions.ts index 251063c6f19..08c1b0a3f70 100644 --- a/src/agents/pi-embedded-runner/extensions.ts +++ b/src/agents/pi-embedded-runner/extensions.ts @@ -84,6 +84,7 @@ export function buildEmbeddedExtensionFactories(params: { contextWindowTokens: contextWindowInfo.tokens, identifierPolicy: compactionCfg?.identifierPolicy, identifierInstructions: compactionCfg?.identifierInstructions, + customInstructions: compactionCfg?.customInstructions, qualityGuardEnabled: qualityGuardCfg?.enabled ?? 
false, qualityGuardMaxRetries: qualityGuardCfg?.maxRetries, model: params.model, diff --git a/src/agents/pi-embedded-runner/extra-params.kilocode.test.ts b/src/agents/pi-embedded-runner/extra-params.kilocode.test.ts index b2b5174fff4..35a6cefcbd4 100644 --- a/src/agents/pi-embedded-runner/extra-params.kilocode.test.ts +++ b/src/agents/pi-embedded-runner/extra-params.kilocode.test.ts @@ -17,7 +17,7 @@ function applyAndCapture(params: { }): CapturedCall { const captured: CapturedCall = {}; - const baseStreamFn: StreamFn = (_model, _context, options) => { + const baseStreamFn: StreamFn = (model, _context, options) => { captured.headers = options?.headers; options?.onPayload?.({}, model); return createAssistantMessageEventStream(); @@ -95,7 +95,7 @@ describe("extra-params: Kilocode kilo/auto reasoning", () => { it("does not inject reasoning.effort for kilo/auto", () => { let capturedPayload: Record | undefined; - const baseStreamFn: StreamFn = (_model, _context, options) => { + const baseStreamFn: StreamFn = (model, _context, options) => { const payload: Record = { reasoning_effort: "high" }; options?.onPayload?.(payload, model); capturedPayload = payload; @@ -123,7 +123,7 @@ describe("extra-params: Kilocode kilo/auto reasoning", () => { it("injects reasoning.effort for non-auto kilocode models", () => { let capturedPayload: Record | undefined; - const baseStreamFn: StreamFn = (_model, _context, options) => { + const baseStreamFn: StreamFn = (model, _context, options) => { const payload: Record = {}; options?.onPayload?.(payload, model); capturedPayload = payload; @@ -156,7 +156,7 @@ describe("extra-params: Kilocode kilo/auto reasoning", () => { it("does not inject reasoning.effort for x-ai models", () => { let capturedPayload: Record | undefined; - const baseStreamFn: StreamFn = (_model, _context, options) => { + const baseStreamFn: StreamFn = (model, _context, options) => { const payload: Record = { reasoning_effort: "high" }; options?.onPayload?.(payload, model); 
capturedPayload = payload; diff --git a/src/agents/pi-embedded-runner/extra-params.openrouter-cache-control.test.ts b/src/agents/pi-embedded-runner/extra-params.openrouter-cache-control.test.ts index 5be99b1fe80..5a36c9c5a4d 100644 --- a/src/agents/pi-embedded-runner/extra-params.openrouter-cache-control.test.ts +++ b/src/agents/pi-embedded-runner/extra-params.openrouter-cache-control.test.ts @@ -12,7 +12,7 @@ type StreamPayload = { }; function runOpenRouterPayload(payload: StreamPayload, modelId: string) { - const baseStreamFn: StreamFn = (_model, _context, options) => { + const baseStreamFn: StreamFn = (model, _context, options) => { options?.onPayload?.(payload, model); return createAssistantMessageEventStream(); }; diff --git a/src/agents/pi-embedded-runner/extra-params.ts b/src/agents/pi-embedded-runner/extra-params.ts index ad1e1ef916a..a9d5085e013 100644 --- a/src/agents/pi-embedded-runner/extra-params.ts +++ b/src/agents/pi-embedded-runner/extra-params.ts @@ -5,9 +5,11 @@ import type { ThinkLevel } from "../../auto-reply/thinking.js"; import type { OpenClawConfig } from "../../config/config.js"; import { createAnthropicBetaHeadersWrapper, + createAnthropicFastModeWrapper, createAnthropicToolPayloadCompatibilityWrapper, createBedrockNoCacheWrapper, isAnthropicBedrockModel, + resolveAnthropicFastMode, resolveAnthropicBetas, resolveCacheRetention, } from "./anthropic-stream-wrappers.js"; @@ -16,13 +18,16 @@ import { createMoonshotThinkingWrapper, createSiliconFlowThinkingWrapper, resolveMoonshotThinkingType, + shouldApplyMoonshotPayloadCompat, shouldApplySiliconFlowThinkingOffCompat, } from "./moonshot-stream-wrappers.js"; import { createCodexDefaultTransportWrapper, createOpenAIDefaultTransportWrapper, + createOpenAIFastModeWrapper, createOpenAIResponsesContextManagementWrapper, createOpenAIServiceTierWrapper, + resolveOpenAIFastMode, resolveOpenAIServiceTier, } from "./openai-stream-wrappers.js"; import { @@ -222,7 +227,7 @@ function 
createGoogleThinkingPayloadWrapper( const onPayload = options?.onPayload; return underlying(model, context, { ...options, - onPayload: (payload, payloadModel) => { + onPayload: (payload) => { if (model.api === "google-generative-ai") { sanitizeGoogleThinkingPayload({ payload, @@ -230,7 +235,7 @@ function createGoogleThinkingPayloadWrapper( thinkingLevel, }); } - return onPayload?.(payload, payloadModel); + return onPayload?.(payload, model); }, }); }; @@ -258,12 +263,12 @@ function createZaiToolStreamWrapper( const originalOnPayload = options?.onPayload; return underlying(model, context, { ...options, - onPayload: (payload, payloadModel) => { + onPayload: (payload) => { if (payload && typeof payload === "object") { // Inject tool_stream: true for Z.AI API (payload as Record).tool_stream = true; } - return originalOnPayload?.(payload, payloadModel); + return originalOnPayload?.(payload, model); }, }); }; @@ -306,11 +311,11 @@ function createParallelToolCallsWrapper( const originalOnPayload = options?.onPayload; return underlying(model, context, { ...options, - onPayload: (payload, payloadModel) => { + onPayload: (payload) => { if (payload && typeof payload === "object") { (payload as Record).parallel_tool_calls = enabled; } - return originalOnPayload?.(payload, payloadModel); + return originalOnPayload?.(payload, model); }, }); }; @@ -373,7 +378,7 @@ export function applyExtraParamsToAgent( agent.streamFn = createSiliconFlowThinkingWrapper(agent.streamFn); } - if (provider === "moonshot") { + if (shouldApplyMoonshotPayloadCompat({ provider, modelId })) { const moonshotThinkingType = resolveMoonshotThinkingType({ configuredThinking: merged?.thinking, thinkingLevel, @@ -436,6 +441,18 @@ export function applyExtraParamsToAgent( // upstream model-ID heuristics for Gemini 3.1 variants. 
agent.streamFn = createGoogleThinkingPayloadWrapper(agent.streamFn, thinkingLevel); + const anthropicFastMode = resolveAnthropicFastMode(merged); + if (anthropicFastMode !== undefined) { + log.debug(`applying Anthropic fast mode=${anthropicFastMode} for ${provider}/${modelId}`); + agent.streamFn = createAnthropicFastModeWrapper(agent.streamFn, anthropicFastMode); + } + + const openAIFastMode = resolveOpenAIFastMode(merged); + if (openAIFastMode) { + log.debug(`applying OpenAI fast mode for ${provider}/${modelId}`); + agent.streamFn = createOpenAIFastModeWrapper(agent.streamFn); + } + const openAIServiceTier = resolveOpenAIServiceTier(merged); if (openAIServiceTier) { log.debug(`applying OpenAI service_tier=${openAIServiceTier} for ${provider}/${modelId}`); diff --git a/src/agents/pi-embedded-runner/lanes.test.ts b/src/agents/pi-embedded-runner/lanes.test.ts new file mode 100644 index 00000000000..f3625ddc6ec --- /dev/null +++ b/src/agents/pi-embedded-runner/lanes.test.ts @@ -0,0 +1,44 @@ +import { describe, expect, it } from "vitest"; +import { CommandLane } from "../../process/lanes.js"; +import { resolveGlobalLane, resolveSessionLane } from "./lanes.js"; + +describe("resolveGlobalLane", () => { + it("defaults to main lane when no lane is provided", () => { + expect(resolveGlobalLane()).toBe(CommandLane.Main); + expect(resolveGlobalLane("")).toBe(CommandLane.Main); + expect(resolveGlobalLane(" ")).toBe(CommandLane.Main); + }); + + it("maps cron lane to nested lane to prevent deadlocks", () => { + // When cron jobs trigger nested agent runs, the outer execution holds + // the cron lane slot. Inner work must use a separate lane to avoid + // deadlock. 
See: https://github.com/openclaw/openclaw/issues/44805 + expect(resolveGlobalLane("cron")).toBe(CommandLane.Nested); + expect(resolveGlobalLane(" cron ")).toBe(CommandLane.Nested); + }); + + it("preserves other lanes as-is", () => { + expect(resolveGlobalLane("main")).toBe(CommandLane.Main); + expect(resolveGlobalLane("subagent")).toBe(CommandLane.Subagent); + expect(resolveGlobalLane("nested")).toBe(CommandLane.Nested); + expect(resolveGlobalLane("custom-lane")).toBe("custom-lane"); + expect(resolveGlobalLane(" custom ")).toBe("custom"); + }); +}); + +describe("resolveSessionLane", () => { + it("defaults to main lane and prefixes with session:", () => { + expect(resolveSessionLane("")).toBe("session:main"); + expect(resolveSessionLane(" ")).toBe("session:main"); + }); + + it("adds session: prefix if not present", () => { + expect(resolveSessionLane("abc123")).toBe("session:abc123"); + expect(resolveSessionLane(" xyz ")).toBe("session:xyz"); + }); + + it("preserves existing session: prefix", () => { + expect(resolveSessionLane("session:abc")).toBe("session:abc"); + expect(resolveSessionLane("session:main")).toBe("session:main"); + }); +}); diff --git a/src/agents/pi-embedded-runner/lanes.ts b/src/agents/pi-embedded-runner/lanes.ts index 81b742ded9f..57ffd1b4255 100644 --- a/src/agents/pi-embedded-runner/lanes.ts +++ b/src/agents/pi-embedded-runner/lanes.ts @@ -7,6 +7,10 @@ export function resolveSessionLane(key: string) { export function resolveGlobalLane(lane?: string) { const cleaned = lane?.trim(); + // Cron jobs hold the cron lane slot; inner operations must use nested to avoid deadlock. + if (cleaned === CommandLane.Cron) { + return CommandLane.Nested; + } return cleaned ? 
cleaned : CommandLane.Main; } diff --git a/src/agents/pi-embedded-runner/model.forward-compat.test.ts b/src/agents/pi-embedded-runner/model.forward-compat.test.ts index bdee17f1e9a..5def8359c13 100644 --- a/src/agents/pi-embedded-runner/model.forward-compat.test.ts +++ b/src/agents/pi-embedded-runner/model.forward-compat.test.ts @@ -58,6 +58,16 @@ describe("pi embedded model e2e smoke", () => { expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.4")); }); + it("builds an openai-codex forward-compat fallback for gpt-5.3-codex-spark", () => { + mockOpenAICodexTemplateModel(); + + const result = resolveModel("openai-codex", "gpt-5.3-codex-spark", "/tmp/agent"); + expect(result.error).toBeUndefined(); + expect(result.model).toMatchObject( + buildOpenAICodexForwardCompatExpectation("gpt-5.3-codex-spark"), + ); + }); + it("keeps unknown-model errors for non-forward-compat IDs", () => { const result = resolveModel("openai-codex", "gpt-4.1-mini", "/tmp/agent"); expect(result.model).toBeUndefined(); diff --git a/src/agents/pi-embedded-runner/model.provider-normalization.ts b/src/agents/pi-embedded-runner/model.provider-normalization.ts index ecf1a25e7d3..82dabff7c1b 100644 --- a/src/agents/pi-embedded-runner/model.provider-normalization.ts +++ b/src/agents/pi-embedded-runner/model.provider-normalization.ts @@ -54,9 +54,33 @@ function normalizeOpenAICodexTransport(params: { } as Model; } +function normalizeOpenAITransport(params: { provider: string; model: Model }): Model { + if (normalizeProviderId(params.provider) !== "openai") { + return params.model; + } + + const useResponsesTransport = + params.model.api === "openai-completions" && + (!params.model.baseUrl || isOpenAIApiBaseUrl(params.model.baseUrl)); + + if (!useResponsesTransport) { + return params.model; + } + + return { + ...params.model, + api: "openai-responses", + } as Model; +} + export function normalizeResolvedProviderModel(params: { provider: string; model: Model; }): Model { - 
return normalizeModelCompat(normalizeOpenAICodexTransport(params)); + const normalizedOpenAI = normalizeOpenAITransport(params); + const normalizedCodex = normalizeOpenAICodexTransport({ + provider: params.provider, + model: normalizedOpenAI, + }); + return normalizeModelCompat(normalizedCodex); } diff --git a/src/agents/pi-embedded-runner/model.test-harness.ts b/src/agents/pi-embedded-runner/model.test-harness.ts index 58d724307de..21434557c79 100644 --- a/src/agents/pi-embedded-runner/model.test-harness.ts +++ b/src/agents/pi-embedded-runner/model.test-harness.ts @@ -35,15 +35,25 @@ export function mockOpenAICodexTemplateModel(): void { export function buildOpenAICodexForwardCompatExpectation( id: string = "gpt-5.3-codex", -): Partial & { provider: string; id: string } { +): Partial & { + provider: string; + id: string; + api: string; + baseUrl: string; +} { const isGpt54 = id === "gpt-5.4"; + const isSpark = id === "gpt-5.3-codex-spark"; return { provider: "openai-codex", id, api: "openai-codex-responses", baseUrl: "https://chatgpt.com/backend-api", reasoning: true, - contextWindow: isGpt54 ? 1_050_000 : 272000, + input: isSpark ? ["text"] : ["text", "image"], + cost: isSpark + ? { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 } + : OPENAI_CODEX_TEMPLATE_MODEL.cost, + contextWindow: isGpt54 ? 1_050_000 : isSpark ? 
128_000 : 272000, maxTokens: 128000, }; } diff --git a/src/agents/pi-embedded-runner/model.test.ts b/src/agents/pi-embedded-runner/model.test.ts index e67fb2c2898..c56064967e1 100644 --- a/src/agents/pi-embedded-runner/model.test.ts +++ b/src/agents/pi-embedded-runner/model.test.ts @@ -180,7 +180,7 @@ describe("buildInlineProviderModels", () => { expect(result[0].headers).toBeUndefined(); }); - it("preserves literal marker-shaped headers in inline provider models", () => { + it("drops SecretRef marker headers in inline provider models", () => { const providers: Parameters[0] = { custom: { headers: { @@ -196,14 +196,48 @@ describe("buildInlineProviderModels", () => { expect(result).toHaveLength(1); expect(result[0].headers).toEqual({ - Authorization: "secretref-env:OPENAI_HEADER_TOKEN", - "X-Managed": "secretref-managed", "X-Static": "tenant-a", }); }); }); describe("resolveModel", () => { + it("defaults model input to text when discovery omits input", () => { + mockDiscoveredModel({ + provider: "custom", + modelId: "missing-input", + templateModel: { + id: "missing-input", + name: "missing-input", + api: "openai-completions", + provider: "custom", + baseUrl: "http://localhost:9999", + reasoning: false, + // NOTE: deliberately omit input to simulate buggy/custom catalogs. + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 8192, + maxTokens: 1024, + }, + }); + + const result = resolveModel("custom", "missing-input", "/tmp/agent", { + models: { + providers: { + custom: { + baseUrl: "http://localhost:9999", + api: "openai-completions", + // Intentionally keep this minimal — the discovered model provides the rest. 
+ models: [{ id: "missing-input", name: "missing-input" }], + }, + }, + }, + } as unknown as OpenClawConfig); + + expect(result.error).toBeUndefined(); + expect(Array.isArray(result.model?.input)).toBe(true); + expect(result.model?.input).toEqual(["text"]); + }); + it("includes provider baseUrl in fallback model", () => { const cfg = { models: { @@ -245,7 +279,7 @@ describe("resolveModel", () => { }); }); - it("preserves literal marker-shaped provider headers in fallback models", () => { + it("drops SecretRef marker provider headers in fallback models", () => { const cfg = { models: { providers: { @@ -266,8 +300,6 @@ describe("resolveModel", () => { expect(result.error).toBeUndefined(); expect((result.model as unknown as { headers?: Record }).headers).toEqual({ - Authorization: "secretref-env:OPENAI_HEADER_TOKEN", - "X-Managed": "secretref-managed", "X-Custom-Auth": "token-123", }); }); @@ -350,6 +382,40 @@ describe("resolveModel", () => { expect(result.model?.reasoning).toBe(true); }); + it("matches prefixed OpenRouter native ids in configured fallback models", () => { + const cfg = { + models: { + providers: { + openrouter: { + baseUrl: "https://openrouter.ai/api/v1", + api: "openai-completions", + models: [ + { + ...makeModel("openrouter/healer-alpha"), + reasoning: true, + input: ["text", "image"], + contextWindow: 262144, + maxTokens: 65536, + }, + ], + }, + }, + }, + } as OpenClawConfig; + + const result = resolveModel("openrouter", "openrouter/healer-alpha", "/tmp/agent", cfg); + + expect(result.error).toBeUndefined(); + expect(result.model).toMatchObject({ + provider: "openrouter", + id: "openrouter/healer-alpha", + reasoning: true, + input: ["text", "image"], + contextWindow: 262144, + maxTokens: 65536, + }); + }); + it("prefers configured provider api metadata over discovered registry model", () => { mockDiscoveredModel({ provider: "onehub", @@ -480,6 +546,60 @@ describe("resolveModel", () => { 
expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.4")); }); + it("builds an openai-codex fallback for gpt-5.3-codex-spark", () => { + mockOpenAICodexTemplateModel(); + + const result = resolveModel("openai-codex", "gpt-5.3-codex-spark", "/tmp/agent"); + + expect(result.error).toBeUndefined(); + expect(result.model).toMatchObject( + buildOpenAICodexForwardCompatExpectation("gpt-5.3-codex-spark"), + ); + }); + + it("keeps openai-codex gpt-5.3-codex-spark when discovery provides it", () => { + mockDiscoveredModel({ + provider: "openai-codex", + modelId: "gpt-5.3-codex-spark", + templateModel: { + ...buildOpenAICodexForwardCompatExpectation("gpt-5.3-codex-spark"), + name: "GPT-5.3 Codex Spark", + input: ["text"], + }, + }); + + const result = resolveModel("openai-codex", "gpt-5.3-codex-spark", "/tmp/agent"); + + expect(result.error).toBeUndefined(); + expect(result.model).toMatchObject({ + provider: "openai-codex", + id: "gpt-5.3-codex-spark", + api: "openai-codex-responses", + baseUrl: "https://chatgpt.com/backend-api", + }); + }); + + it("rejects stale direct openai gpt-5.3-codex-spark discovery rows", () => { + mockDiscoveredModel({ + provider: "openai", + modelId: "gpt-5.3-codex-spark", + templateModel: buildForwardCompatTemplate({ + id: "gpt-5.3-codex-spark", + name: "GPT-5.3 Codex Spark", + provider: "openai", + api: "openai-responses", + baseUrl: "https://api.openai.com/v1", + }), + }); + + const result = resolveModel("openai", "gpt-5.3-codex-spark", "/tmp/agent"); + + expect(result.model).toBeUndefined(); + expect(result.error).toBe( + "Unknown model: openai/gpt-5.3-codex-spark. gpt-5.3-codex-spark is only supported via openai-codex OAuth. 
Use openai-codex/gpt-5.3-codex-spark.", + ); + }); + it("applies provider overrides to openai gpt-5.4 forward-compat models", () => { mockDiscoveredModel({ provider: "openai", @@ -518,6 +638,54 @@ describe("resolveModel", () => { }); }); + it("normalizes stale native openai gpt-5.4 completions transport to responses", () => { + mockDiscoveredModel({ + provider: "openai", + modelId: "gpt-5.4", + templateModel: buildForwardCompatTemplate({ + id: "gpt-5.4", + name: "GPT-5.4", + provider: "openai", + api: "openai-completions", + baseUrl: "https://api.openai.com/v1", + }), + }); + + const result = resolveModel("openai", "gpt-5.4", "/tmp/agent"); + + expect(result.error).toBeUndefined(); + expect(result.model).toMatchObject({ + provider: "openai", + id: "gpt-5.4", + api: "openai-responses", + baseUrl: "https://api.openai.com/v1", + }); + }); + + it("keeps proxied openai completions transport untouched", () => { + mockDiscoveredModel({ + provider: "openai", + modelId: "gpt-5.4", + templateModel: buildForwardCompatTemplate({ + id: "gpt-5.4", + name: "GPT-5.4", + provider: "openai", + api: "openai-completions", + baseUrl: "https://proxy.example.com/v1", + }), + }); + + const result = resolveModel("openai", "gpt-5.4", "/tmp/agent"); + + expect(result.error).toBeUndefined(); + expect(result.model).toMatchObject({ + provider: "openai", + id: "gpt-5.4", + api: "openai-completions", + baseUrl: "https://proxy.example.com/v1", + }); + }); + it("builds an anthropic forward-compat fallback for claude-opus-4-6", () => { mockDiscoveredModel({ provider: "anthropic", @@ -611,6 +779,24 @@ describe("resolveModel", () => { expectUnknownModelError("openai-codex", "gpt-4.1-mini"); }); + it("rejects direct openai gpt-5.3-codex-spark with a codex-only hint", () => { + const result = resolveModel("openai", "gpt-5.3-codex-spark", "/tmp/agent"); + + expect(result.model).toBeUndefined(); + expect(result.error).toBe( + "Unknown model: openai/gpt-5.3-codex-spark. 
gpt-5.3-codex-spark is only supported via openai-codex OAuth. Use openai-codex/gpt-5.3-codex-spark.", + ); + }); + + it("rejects azure openai gpt-5.3-codex-spark with a codex-only hint", () => { + const result = resolveModel("azure-openai-responses", "gpt-5.3-codex-spark", "/tmp/agent"); + + expect(result.model).toBeUndefined(); + expect(result.error).toBe( + "Unknown model: azure-openai-responses/gpt-5.3-codex-spark. gpt-5.3-codex-spark is only supported via openai-codex OAuth. Use openai-codex/gpt-5.3-codex-spark.", + ); + }); + it("uses codex fallback even when openai-codex provider is configured", () => { // This test verifies the ordering: codex fallback must fire BEFORE the generic providerCfg fallback. // If ordering is wrong, the generic fallback would use api: "openai-responses" (the default) @@ -801,6 +987,43 @@ describe("resolveModel", () => { }); }); + it("lets provider config override registry-found kimi user agent headers", () => { + mockDiscoveredModel({ + provider: "kimi-coding", + modelId: "k2p5", + templateModel: { + ...buildForwardCompatTemplate({ + id: "k2p5", + name: "Kimi for Coding", + provider: "kimi-coding", + api: "anthropic-messages", + baseUrl: "https://api.kimi.com/coding/", + }), + headers: { "User-Agent": "claude-code/0.1.0" }, + }, + }); + + const cfg = { + models: { + providers: { + "kimi-coding": { + headers: { + "User-Agent": "custom-kimi-client/1.0", + "X-Kimi-Tenant": "tenant-a", + }, + }, + }, + }, + } as unknown as OpenClawConfig; + + const result = resolveModel("kimi-coding", "k2p5", "/tmp/agent", cfg); + expect(result.error).toBeUndefined(); + expect((result.model as unknown as { headers?: Record }).headers).toEqual({ + "User-Agent": "custom-kimi-client/1.0", + "X-Kimi-Tenant": "tenant-a", + }); + }); + it("does not override when no provider config exists", () => { mockDiscoveredModel({ provider: "anthropic", diff --git a/src/agents/pi-embedded-runner/model.ts b/src/agents/pi-embedded-runner/model.ts index 
638d66f787f..751d22e4843 100644 --- a/src/agents/pi-embedded-runner/model.ts +++ b/src/agents/pi-embedded-runner/model.ts @@ -8,6 +8,10 @@ import { buildModelAliasLines } from "../model-alias-lines.js"; import { isSecretRefHeaderValueMarker } from "../model-auth-markers.js"; import { resolveForwardCompatModel } from "../model-forward-compat.js"; import { findNormalizedProviderValue, normalizeProviderId } from "../model-selection.js"; +import { + buildSuppressedBuiltInModelError, + shouldSuppressBuiltInModel, +} from "../model-suppression.js"; import { discoverAuthStorage, discoverModels } from "../pi-model-discovery.js"; import { normalizeResolvedProviderModel } from "./model.provider-normalization.js"; @@ -81,20 +85,30 @@ function applyConfiguredProviderOverrides(params: { const discoveredHeaders = sanitizeModelHeaders(discoveredModel.headers, { stripSecretRefMarkers: true, }); - const providerHeaders = sanitizeModelHeaders(providerConfig.headers); - const configuredHeaders = sanitizeModelHeaders(configuredModel?.headers); + const providerHeaders = sanitizeModelHeaders(providerConfig.headers, { + stripSecretRefMarkers: true, + }); + const configuredHeaders = sanitizeModelHeaders(configuredModel?.headers, { + stripSecretRefMarkers: true, + }); if (!configuredModel && !providerConfig.baseUrl && !providerConfig.api && !providerHeaders) { return { ...discoveredModel, headers: discoveredHeaders, }; } + const resolvedInput = configuredModel?.input ?? discoveredModel.input; + const normalizedInput = + Array.isArray(resolvedInput) && resolvedInput.length > 0 + ? resolvedInput.filter((item) => item === "text" || item === "image") + : (["text"] as Array<"text" | "image">); + return { ...discoveredModel, api: configuredModel?.api ?? providerConfig.api ?? discoveredModel.api, baseUrl: providerConfig.baseUrl ?? discoveredModel.baseUrl, reasoning: configuredModel?.reasoning ?? discoveredModel.reasoning, - input: configuredModel?.input ?? 
discoveredModel.input, + input: normalizedInput, cost: configuredModel?.cost ?? discoveredModel.cost, contextWindow: configuredModel?.contextWindow ?? discoveredModel.contextWindow, maxTokens: configuredModel?.maxTokens ?? discoveredModel.maxTokens, @@ -118,14 +132,18 @@ export function buildInlineProviderModels( if (!trimmed) { return []; } - const providerHeaders = sanitizeModelHeaders(entry?.headers); + const providerHeaders = sanitizeModelHeaders(entry?.headers, { + stripSecretRefMarkers: true, + }); return (entry?.models ?? []).map((model) => ({ ...model, provider: trimmed, baseUrl: entry?.baseUrl, api: model.api ?? entry?.api, headers: (() => { - const modelHeaders = sanitizeModelHeaders((model as InlineModelEntry).headers); + const modelHeaders = sanitizeModelHeaders((model as InlineModelEntry).headers, { + stripSecretRefMarkers: true, + }); if (!providerHeaders && !modelHeaders) { return undefined; } @@ -145,6 +163,9 @@ export function resolveModelWithRegistry(params: { cfg?: OpenClawConfig; }): Model | undefined { const { provider, modelId, modelRegistry, cfg } = params; + if (shouldSuppressBuiltInModel({ provider, id: modelId })) { + return undefined; + } const providerConfig = resolveConfiguredProviderConfig(cfg, provider); const model = modelRegistry.find(provider, modelId) as Model | null; @@ -205,8 +226,12 @@ export function resolveModelWithRegistry(params: { } const configuredModel = providerConfig?.models?.find((candidate) => candidate.id === modelId); - const providerHeaders = sanitizeModelHeaders(providerConfig?.headers); - const modelHeaders = sanitizeModelHeaders(configuredModel?.headers); + const providerHeaders = sanitizeModelHeaders(providerConfig?.headers, { + stripSecretRefMarkers: true, + }); + const modelHeaders = sanitizeModelHeaders(configuredModel?.headers, { + stripSecretRefMarkers: true, + }); if (providerConfig || modelId.startsWith("mock-")) { return normalizeResolvedModel({ provider, @@ -285,6 +310,10 @@ const 
LOCAL_PROVIDER_HINTS: Record<string, string> = { }; function buildUnknownModelError(provider: string, modelId: string): string { + const suppressed = buildSuppressedBuiltInModelError({ provider, id: modelId }); + if (suppressed) { + return suppressed; + } const base = `Unknown model: ${provider}/${modelId}`; const hint = LOCAL_PROVIDER_HINTS[provider.toLowerCase()]; return hint ? `${base}. ${hint}` : base; diff --git a/src/agents/pi-embedded-runner/moonshot-stream-wrappers.ts b/src/agents/pi-embedded-runner/moonshot-stream-wrappers.ts index 384402ea7fd..c066a168a0f 100644 --- a/src/agents/pi-embedded-runner/moonshot-stream-wrappers.ts +++ b/src/agents/pi-embedded-runner/moonshot-stream-wrappers.ts @@ -35,6 +35,14 @@ function isMoonshotToolChoiceCompatible(toolChoice: unknown): boolean { return false; } +function isPinnedToolChoice(toolChoice: unknown): boolean { + if (!toolChoice || typeof toolChoice !== "object" || Array.isArray(toolChoice)) { + return false; + } + const typeValue = (toolChoice as Record<string, unknown>).type; + return typeValue === "tool" || typeValue === "function"; +} + export function shouldApplySiliconFlowThinkingOffCompat(params: { provider: string; modelId: string; @@ -47,20 +55,41 @@ export function shouldApplySiliconFlowThinkingOffCompat(params: { ); } +export function shouldApplyMoonshotPayloadCompat(params: { + provider: string; + modelId: string; +}): boolean { + const normalizedProvider = params.provider.trim().toLowerCase(); + const normalizedModelId = params.modelId.trim().toLowerCase(); + + if (normalizedProvider === "moonshot") { + return true; + } + + // Ollama Cloud exposes Kimi variants through OpenAI-compatible model IDs such + // as `kimi-k2.5:cloud`, but they still need the same payload normalization as + // native Moonshot endpoints when thinking/tool_choice are enabled together. 
+ return ( + normalizedProvider === "ollama" && + normalizedModelId.startsWith("kimi-k") && + normalizedModelId.includes(":cloud") + ); +} + export function createSiliconFlowThinkingWrapper(baseStreamFn: StreamFn | undefined): StreamFn { const underlying = baseStreamFn ?? streamSimple; return (model, context, options) => { const originalOnPayload = options?.onPayload; return underlying(model, context, { ...options, - onPayload: (payload, payloadModel) => { + onPayload: (payload) => { if (payload && typeof payload === "object") { const payloadObj = payload as Record; if (payloadObj.thinking === "off") { payloadObj.thinking = null; } } - return originalOnPayload?.(payload, payloadModel); + return originalOnPayload?.(payload, model); }, }); }; @@ -89,7 +118,7 @@ export function createMoonshotThinkingWrapper( const originalOnPayload = options?.onPayload; return underlying(model, context, { ...options, - onPayload: (payload, payloadModel) => { + onPayload: (payload) => { if (payload && typeof payload === "object") { const payloadObj = payload as Record; let effectiveThinkingType = normalizeMoonshotThinkingType(payloadObj.thinking); @@ -103,10 +132,14 @@ export function createMoonshotThinkingWrapper( effectiveThinkingType === "enabled" && !isMoonshotToolChoiceCompatible(payloadObj.tool_choice) ) { - payloadObj.tool_choice = "auto"; + if (payloadObj.tool_choice === "required") { + payloadObj.tool_choice = "auto"; + } else if (isPinnedToolChoice(payloadObj.tool_choice)) { + payloadObj.thinking = { type: "disabled" }; + } } } - return originalOnPayload?.(payload, payloadModel); + return originalOnPayload?.(payload, model); }, }); }; diff --git a/src/agents/pi-embedded-runner/openai-stream-wrappers.ts b/src/agents/pi-embedded-runner/openai-stream-wrappers.ts index 63ac5134a46..8542f329cbe 100644 --- a/src/agents/pi-embedded-runner/openai-stream-wrappers.ts +++ b/src/agents/pi-embedded-runner/openai-stream-wrappers.ts @@ -2,11 +2,13 @@ import type { StreamFn } from 
"@mariozechner/pi-agent-core"; import type { SimpleStreamOptions } from "@mariozechner/pi-ai"; import { streamSimple } from "@mariozechner/pi-ai"; import { log } from "./logger.js"; +import { streamWithPayloadPatch } from "./stream-payload-utils.js"; type OpenAIServiceTier = "auto" | "default" | "flex" | "priority"; +type OpenAIReasoningEffort = "low" | "medium" | "high"; const OPENAI_RESPONSES_APIS = new Set(["openai-responses"]); -const OPENAI_RESPONSES_PROVIDERS = new Set(["openai", "azure-openai-responses"]); +const OPENAI_RESPONSES_PROVIDERS = new Set(["openai", "azure-openai", "azure-openai-responses"]); function isDirectOpenAIBaseUrl(baseUrl: unknown): boolean { if (typeof baseUrl !== "string" || !baseUrl.trim()) { @@ -168,6 +170,89 @@ export function resolveOpenAIServiceTier( return normalized; } +function normalizeOpenAIFastMode(value: unknown): boolean | undefined { + if (typeof value === "boolean") { + return value; + } + if (typeof value !== "string") { + return undefined; + } + const normalized = value.trim().toLowerCase(); + if ( + normalized === "on" || + normalized === "true" || + normalized === "yes" || + normalized === "1" || + normalized === "fast" + ) { + return true; + } + if ( + normalized === "off" || + normalized === "false" || + normalized === "no" || + normalized === "0" || + normalized === "normal" + ) { + return false; + } + return undefined; +} + +export function resolveOpenAIFastMode( + extraParams: Record | undefined, +): boolean | undefined { + const raw = extraParams?.fastMode ?? extraParams?.fast_mode; + const normalized = normalizeOpenAIFastMode(raw); + if (raw !== undefined && normalized === undefined) { + const rawSummary = typeof raw === "string" ? 
raw : typeof raw; + log.warn(`ignoring invalid OpenAI fast mode param: ${rawSummary}`); + } + return normalized; +} + +function resolveFastModeReasoningEffort(modelId: unknown): OpenAIReasoningEffort { + if (typeof modelId !== "string") { + return "low"; + } + const normalized = modelId.trim().toLowerCase(); + // Keep fast mode broadly compatible across GPT-5 family variants by using + // the lowest shared non-disabled effort that current transports accept. + if (normalized.startsWith("gpt-5")) { + return "low"; + } + return "low"; +} + +function applyOpenAIFastModePayloadOverrides(params: { + payloadObj: Record; + model: { provider?: unknown; id?: unknown; baseUrl?: unknown; api?: unknown }; +}): void { + if (params.payloadObj.reasoning === undefined) { + params.payloadObj.reasoning = { + effort: resolveFastModeReasoningEffort(params.model.id), + }; + } + + const existingText = params.payloadObj.text; + if (existingText === undefined) { + params.payloadObj.text = { verbosity: "low" }; + } else if (existingText && typeof existingText === "object" && !Array.isArray(existingText)) { + const textObj = existingText as Record; + if (textObj.verbosity === undefined) { + textObj.verbosity = "low"; + } + } + + if ( + params.model.provider === "openai" && + params.payloadObj.service_tier === undefined && + isOpenAIPublicApiBaseUrl(params.model.baseUrl) + ) { + params.payloadObj.service_tier = "priority"; + } +} + export function createOpenAIResponsesContextManagementWrapper( baseStreamFn: StreamFn | undefined, extraParams: Record | undefined, @@ -187,7 +272,7 @@ export function createOpenAIResponsesContextManagementWrapper( const originalOnPayload = options?.onPayload; return underlying(model, context, { ...options, - onPayload: (payload, payloadModel) => { + onPayload: (payload) => { if (payload && typeof payload === "object") { applyOpenAIResponsesPayloadOverrides({ payloadObj: payload as Record, @@ -197,7 +282,32 @@ export function 
createOpenAIResponsesContextManagementWrapper( compactThreshold, }); } - return originalOnPayload?.(payload, payloadModel); + return originalOnPayload?.(payload, model); + }, + }); + }; +} + +export function createOpenAIFastModeWrapper(baseStreamFn: StreamFn | undefined): StreamFn { + const underlying = baseStreamFn ?? streamSimple; + return (model, context, options) => { + if ( + (model.api !== "openai-responses" && model.api !== "openai-codex-responses") || + (model.provider !== "openai" && model.provider !== "openai-codex") + ) { + return underlying(model, context, options); + } + const originalOnPayload = options?.onPayload; + return underlying(model, context, { + ...options, + onPayload: (payload) => { + if (payload && typeof payload === "object") { + applyOpenAIFastModePayloadOverrides({ + payloadObj: payload as Record, + model, + }); + } + return originalOnPayload?.(payload, model); }, }); }; @@ -216,18 +326,10 @@ export function createOpenAIServiceTierWrapper( ) { return underlying(model, context, options); } - const originalOnPayload = options?.onPayload; - return underlying(model, context, { - ...options, - onPayload: (payload, payloadModel) => { - if (payload && typeof payload === "object") { - const payloadObj = payload as Record; - if (payloadObj.service_tier === undefined) { - payloadObj.service_tier = serviceTier; - } - } - return originalOnPayload?.(payload, payloadModel); - }, + return streamWithPayloadPatch(underlying, model, context, options, (payloadObj) => { + if (payloadObj.service_tier === undefined) { + payloadObj.service_tier = serviceTier; + } }); }; } @@ -250,7 +352,7 @@ export function createOpenAIDefaultTransportWrapper(baseStreamFn: StreamFn | und const mergedOptions = { ...options, transport: options?.transport ?? "auto", - openaiWsWarmup: typedOptions?.openaiWsWarmup ?? true, + openaiWsWarmup: typedOptions?.openaiWsWarmup ?? 
false, } as SimpleStreamOptions; return underlying(model, context, mergedOptions); }; diff --git a/src/agents/pi-embedded-runner/proxy-stream-wrappers.ts b/src/agents/pi-embedded-runner/proxy-stream-wrappers.ts index bae540a48c3..4f77c31cfdd 100644 --- a/src/agents/pi-embedded-runner/proxy-stream-wrappers.ts +++ b/src/agents/pi-embedded-runner/proxy-stream-wrappers.ts @@ -73,7 +73,7 @@ export function createOpenRouterSystemCacheWrapper(baseStreamFn: StreamFn | unde const originalOnPayload = options?.onPayload; return underlying(model, context, { ...options, - onPayload: (payload, payloadModel) => { + onPayload: (payload) => { const messages = (payload as Record)?.messages; if (Array.isArray(messages)) { for (const msg of messages as Array<{ role?: string; content?: unknown }>) { @@ -92,7 +92,7 @@ export function createOpenRouterSystemCacheWrapper(baseStreamFn: StreamFn | unde } } } - return originalOnPayload?.(payload, payloadModel); + return originalOnPayload?.(payload, model); }, }); }; @@ -111,9 +111,9 @@ export function createOpenRouterWrapper( ...OPENROUTER_APP_HEADERS, ...options?.headers, }, - onPayload: (payload, payloadModel) => { + onPayload: (payload) => { normalizeProxyReasoningPayload(payload, thinkingLevel); - return onPayload?.(payload, payloadModel); + return onPayload?.(payload, model); }, }); }; @@ -136,9 +136,9 @@ export function createKilocodeWrapper( ...options?.headers, ...resolveKilocodeAppHeaders(), }, - onPayload: (payload, payloadModel) => { + onPayload: (payload) => { normalizeProxyReasoningPayload(payload, thinkingLevel); - return onPayload?.(payload, payloadModel); + return onPayload?.(payload, model); }, }); }; diff --git a/src/agents/pi-embedded-runner/run.overflow-compaction.fixture.ts b/src/agents/pi-embedded-runner/run.overflow-compaction.fixture.ts index 8c7afc834d2..8c320f765be 100644 --- a/src/agents/pi-embedded-runner/run.overflow-compaction.fixture.ts +++ b/src/agents/pi-embedded-runner/run.overflow-compaction.fixture.ts @@ 
-9,16 +9,18 @@ export function makeOverflowError(message: string = DEFAULT_OVERFLOW_ERROR_MESSA export function makeCompactionSuccess(params: { summary: string; - firstKeptEntryId: string; - tokensBefore: number; + firstKeptEntryId?: string; + tokensBefore?: number; + tokensAfter?: number; }) { return { ok: true as const, compacted: true as const, result: { summary: params.summary, - firstKeptEntryId: params.firstKeptEntryId, - tokensBefore: params.tokensBefore, + ...(params.firstKeptEntryId ? { firstKeptEntryId: params.firstKeptEntryId } : {}), + ...(params.tokensBefore !== undefined ? { tokensBefore: params.tokensBefore } : {}), + ...(params.tokensAfter !== undefined ? { tokensAfter: params.tokensAfter } : {}), }, }; } @@ -55,8 +57,9 @@ type MockCompactDirect = { compacted: true; result: { summary: string; - firstKeptEntryId: string; - tokensBefore: number; + firstKeptEntryId?: string; + tokensBefore?: number; + tokensAfter?: number; }; }) => unknown; }; diff --git a/src/agents/pi-embedded-runner/run.overflow-compaction.loop.test.ts b/src/agents/pi-embedded-runner/run.overflow-compaction.loop.test.ts index 5980170be62..7a2550ba1e9 100644 --- a/src/agents/pi-embedded-runner/run.overflow-compaction.loop.test.ts +++ b/src/agents/pi-embedded-runner/run.overflow-compaction.loop.test.ts @@ -2,9 +2,13 @@ import "./run.overflow-compaction.mocks.shared.js"; import { beforeEach, describe, expect, it, vi } from "vitest"; import { isCompactionFailureError, isLikelyContextOverflowError } from "../pi-embedded-helpers.js"; -vi.mock("../../utils.js", () => ({ - resolveUserPath: vi.fn((p: string) => p), -})); +vi.mock(import("../../utils.js"), async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveUserPath: vi.fn((p: string) => p), + }; +}); import { log } from "./logger.js"; import { runEmbeddedPiAgent } from "./run.js"; @@ -16,6 +20,7 @@ import { queueOverflowAttemptWithOversizedToolOutput, } from 
"./run.overflow-compaction.fixture.js"; import { + mockedContextEngine, mockedCompactDirect, mockedRunEmbeddedAttempt, mockedSessionLikelyHasOversizedToolResults, @@ -30,6 +35,11 @@ const mockedIsLikelyContextOverflowError = vi.mocked(isLikelyContextOverflowErro describe("overflow compaction in run loop", () => { beforeEach(() => { vi.clearAllMocks(); + mockedRunEmbeddedAttempt.mockReset(); + mockedCompactDirect.mockReset(); + mockedSessionLikelyHasOversizedToolResults.mockReset(); + mockedTruncateOversizedToolResultsInSession.mockReset(); + mockedContextEngine.info.ownsCompaction = false; mockedIsCompactionFailureError.mockImplementation((msg?: string) => { if (!msg) { return false; @@ -72,7 +82,9 @@ describe("overflow compaction in run loop", () => { expect(mockedCompactDirect).toHaveBeenCalledTimes(1); expect(mockedCompactDirect).toHaveBeenCalledWith( - expect.objectContaining({ authProfileId: "test-profile" }), + expect.objectContaining({ + runtimeContext: expect.objectContaining({ authProfileId: "test-profile" }), + }), ); expect(mockedRunEmbeddedAttempt).toHaveBeenCalledTimes(2); expect(log.warn).toHaveBeenCalledWith( diff --git a/src/agents/pi-embedded-runner/run.overflow-compaction.mocks.shared.ts b/src/agents/pi-embedded-runner/run.overflow-compaction.mocks.shared.ts index 22dee7b49cd..53e73e6246d 100644 --- a/src/agents/pi-embedded-runner/run.overflow-compaction.mocks.shared.ts +++ b/src/agents/pi-embedded-runner/run.overflow-compaction.mocks.shared.ts @@ -6,6 +6,25 @@ import type { PluginHookBeforePromptBuildResult, } from "../../plugins/types.js"; +type MockCompactionResult = + | { + ok: true; + compacted: true; + result: { + summary: string; + firstKeptEntryId?: string; + tokensBefore?: number; + tokensAfter?: number; + }; + reason?: string; + } + | { + ok: false; + compacted: false; + reason: string; + result?: undefined; + }; + export const mockedGlobalHookRunner = { hasHooks: vi.fn((_hookName: string) => false), runBeforeAgentStart: vi.fn( @@ -26,12 
+45,35 @@ export const mockedGlobalHookRunner = { _ctx: PluginHookAgentContext, ): Promise => undefined, ), + runBeforeCompaction: vi.fn(async () => undefined), + runAfterCompaction: vi.fn(async () => undefined), }; +export const mockedContextEngine = { + info: { ownsCompaction: false as boolean }, + compact: vi.fn<(params: unknown) => Promise>(async () => ({ + ok: false as const, + compacted: false as const, + reason: "nothing to compact", + })), +}; + +export const mockedContextEngineCompact = vi.mocked(mockedContextEngine.compact); +export const mockedEnsureRuntimePluginsLoaded: (...args: unknown[]) => void = vi.fn(); + vi.mock("../../plugins/hook-runner-global.js", () => ({ getGlobalHookRunner: vi.fn(() => mockedGlobalHookRunner), })); +vi.mock("../../context-engine/index.js", () => ({ + ensureContextEnginesInitialized: vi.fn(), + resolveContextEngine: vi.fn(async () => mockedContextEngine), +})); + +vi.mock("../runtime-plugins.js", () => ({ + ensureRuntimePluginsLoaded: mockedEnsureRuntimePluginsLoaded, +})); + vi.mock("../auth-profiles.js", () => ({ isProfileInCooldown: vi.fn(() => false), markAuthProfileFailure: vi.fn(async () => {}), @@ -67,13 +109,21 @@ vi.mock("../workspace-run.js", () => ({ vi.mock("../pi-embedded-helpers.js", () => ({ formatBillingErrorMessage: vi.fn(() => ""), classifyFailoverReason: vi.fn(() => null), + extractObservedOverflowTokenCount: vi.fn((msg?: string) => { + const match = msg?.match(/prompt is too long:\s*([\d,]+)\s+tokens\s*>\s*[\d,]+\s+maximum/i); + return match?.[1] ? Number(match[1].replaceAll(",", "")) : undefined; + }), formatAssistantErrorText: vi.fn(() => ""), isAuthAssistantError: vi.fn(() => false), isBillingAssistantError: vi.fn(() => false), isCompactionFailureError: vi.fn(() => false), isLikelyContextOverflowError: vi.fn((msg?: string) => { const lower = (msg ?? 
"").toLowerCase(); - return lower.includes("request_too_large") || lower.includes("context window exceeded"); + return ( + lower.includes("request_too_large") || + lower.includes("context window exceeded") || + lower.includes("prompt is too long") + ); }), isFailoverAssistantError: vi.fn(() => false), isFailoverErrorMessage: vi.fn(() => false), @@ -141,9 +191,13 @@ vi.mock("../../process/command-queue.js", () => ({ enqueueCommandInLane: vi.fn((_lane: string, task: () => unknown) => task()), })); -vi.mock("../../utils/message-channel.js", () => ({ - isMarkdownCapableMessageChannel: vi.fn(() => true), -})); +vi.mock(import("../../utils/message-channel.js"), async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + isMarkdownCapableMessageChannel: vi.fn(() => true), + }; +}); vi.mock("../agent-paths.js", () => ({ resolveOpenClawAgentDir: vi.fn(() => "/tmp/agent-dir"), @@ -155,9 +209,36 @@ vi.mock("../defaults.js", () => ({ DEFAULT_PROVIDER: "anthropic", })); +type MockFailoverErrorDescription = { + message: string; + reason: string | undefined; + status: number | undefined; + code: string | undefined; +}; + +type MockCoerceToFailoverError = ( + err: unknown, + params?: { provider?: string; model?: string; profileId?: string }, +) => unknown; +type MockDescribeFailoverError = (err: unknown) => MockFailoverErrorDescription; +type MockResolveFailoverStatus = (reason: string) => number | undefined; + +export const mockedCoerceToFailoverError = vi.fn(); +export const mockedDescribeFailoverError = vi.fn( + (err: unknown): MockFailoverErrorDescription => ({ + message: err instanceof Error ? 
err.message : String(err), + reason: undefined, + status: undefined, + code: undefined, + }), +); +export const mockedResolveFailoverStatus = vi.fn(); + vi.mock("../failover-error.js", () => ({ FailoverError: class extends Error {}, - resolveFailoverStatus: vi.fn(), + coerceToFailoverError: mockedCoerceToFailoverError, + describeFailoverError: mockedDescribeFailoverError, + resolveFailoverStatus: mockedResolveFailoverStatus, })); vi.mock("./lanes.js", () => ({ diff --git a/src/agents/pi-embedded-runner/run.overflow-compaction.shared-test.ts b/src/agents/pi-embedded-runner/run.overflow-compaction.shared-test.ts index 45bab82e1b8..c697ac9526a 100644 --- a/src/agents/pi-embedded-runner/run.overflow-compaction.shared-test.ts +++ b/src/agents/pi-embedded-runner/run.overflow-compaction.shared-test.ts @@ -1,5 +1,8 @@ import { vi } from "vitest"; -import { compactEmbeddedPiSessionDirect } from "./compact.js"; +import { + mockedContextEngine, + mockedContextEngineCompact, +} from "./run.overflow-compaction.mocks.shared.js"; import { runEmbeddedAttempt } from "./run/attempt.js"; import { sessionLikelyHasOversizedToolResults, @@ -7,13 +10,14 @@ import { } from "./tool-result-truncation.js"; export const mockedRunEmbeddedAttempt = vi.mocked(runEmbeddedAttempt); -export const mockedCompactDirect = vi.mocked(compactEmbeddedPiSessionDirect); +export const mockedCompactDirect = mockedContextEngineCompact; export const mockedSessionLikelyHasOversizedToolResults = vi.mocked( sessionLikelyHasOversizedToolResults, ); export const mockedTruncateOversizedToolResultsInSession = vi.mocked( truncateOversizedToolResultsInSession, ); +export { mockedContextEngine }; export const overflowBaseRunParams = { sessionId: "test-session", diff --git a/src/agents/pi-embedded-runner/run.overflow-compaction.test.ts b/src/agents/pi-embedded-runner/run.overflow-compaction.test.ts index 19b4a81d279..d18123a4ae2 100644 --- a/src/agents/pi-embedded-runner/run.overflow-compaction.test.ts +++ 
b/src/agents/pi-embedded-runner/run.overflow-compaction.test.ts @@ -9,8 +9,14 @@ import { mockOverflowRetrySuccess, queueOverflowAttemptWithOversizedToolOutput, } from "./run.overflow-compaction.fixture.js"; -import { mockedGlobalHookRunner } from "./run.overflow-compaction.mocks.shared.js"; import { + mockedCoerceToFailoverError, + mockedDescribeFailoverError, + mockedGlobalHookRunner, + mockedResolveFailoverStatus, +} from "./run.overflow-compaction.mocks.shared.js"; +import { + mockedContextEngine, mockedCompactDirect, mockedRunEmbeddedAttempt, mockedSessionLikelyHasOversizedToolResults, @@ -22,6 +28,35 @@ const mockedPickFallbackThinkingLevel = vi.mocked(pickFallbackThinkingLevel); describe("runEmbeddedPiAgent overflow compaction trigger routing", () => { beforeEach(() => { vi.clearAllMocks(); + mockedRunEmbeddedAttempt.mockReset(); + mockedCompactDirect.mockReset(); + mockedCoerceToFailoverError.mockReset(); + mockedDescribeFailoverError.mockReset(); + mockedResolveFailoverStatus.mockReset(); + mockedSessionLikelyHasOversizedToolResults.mockReset(); + mockedTruncateOversizedToolResultsInSession.mockReset(); + mockedGlobalHookRunner.runBeforeAgentStart.mockReset(); + mockedGlobalHookRunner.runBeforeCompaction.mockReset(); + mockedGlobalHookRunner.runAfterCompaction.mockReset(); + mockedContextEngine.info.ownsCompaction = false; + mockedCompactDirect.mockResolvedValue({ + ok: false, + compacted: false, + reason: "nothing to compact", + }); + mockedCoerceToFailoverError.mockReturnValue(null); + mockedDescribeFailoverError.mockImplementation((err: unknown) => ({ + message: err instanceof Error ? 
err.message : String(err), + reason: undefined, + status: undefined, + code: undefined, + })); + mockedSessionLikelyHasOversizedToolResults.mockReturnValue(false); + mockedTruncateOversizedToolResultsInSession.mockResolvedValue({ + truncated: false, + truncatedCount: 0, + reason: "no oversized tool results", + }); mockedGlobalHookRunner.hasHooks.mockImplementation(() => false); }); @@ -81,12 +116,42 @@ describe("runEmbeddedPiAgent overflow compaction trigger routing", () => { expect(mockedCompactDirect).toHaveBeenCalledTimes(1); expect(mockedCompactDirect).toHaveBeenCalledWith( expect.objectContaining({ - trigger: "overflow", - authProfileId: "test-profile", + sessionId: "test-session", + sessionFile: "/tmp/session.json", + runtimeContext: expect.objectContaining({ + trigger: "overflow", + authProfileId: "test-profile", + }), }), ); }); + it("passes observed overflow token counts into compaction when providers report them", async () => { + const overflowError = new Error( + '400 {"type":"error","error":{"type":"invalid_request_error","message":"prompt is too long: 277403 tokens > 200000 maximum"}}', + ); + + mockedRunEmbeddedAttempt + .mockResolvedValueOnce(makeAttemptResult({ promptError: overflowError })) + .mockResolvedValueOnce(makeAttemptResult({ promptError: null })); + mockedCompactDirect.mockResolvedValueOnce( + makeCompactionSuccess({ + summary: "Compacted session", + firstKeptEntryId: "entry-8", + tokensBefore: 277403, + }), + ); + + const result = await runEmbeddedPiAgent(overflowBaseRunParams); + + expect(mockedCompactDirect).toHaveBeenCalledWith( + expect.objectContaining({ + currentTokenCount: 277403, + }), + ); + expect(result.meta.error).toBeUndefined(); + }); + it("does not reset compaction attempt budget after successful tool-result truncation", async () => { const overflowError = queueOverflowAttemptWithOversizedToolOutput( mockedRunEmbeddedAttempt, @@ -132,6 +197,63 @@ describe("runEmbeddedPiAgent overflow compaction trigger routing", () => { 
expect(result.meta.error?.kind).toBe("context_overflow"); }); + it("fires compaction hooks during overflow recovery for ownsCompaction engines", async () => { + mockedContextEngine.info.ownsCompaction = true; + mockedGlobalHookRunner.hasHooks.mockImplementation( + (hookName) => hookName === "before_compaction" || hookName === "after_compaction", + ); + mockedRunEmbeddedAttempt + .mockResolvedValueOnce(makeAttemptResult({ promptError: makeOverflowError() })) + .mockResolvedValueOnce(makeAttemptResult({ promptError: null })); + mockedCompactDirect.mockResolvedValueOnce({ + ok: true, + compacted: true, + result: { + summary: "engine-owned compaction", + tokensAfter: 50, + }, + }); + + await runEmbeddedPiAgent(overflowBaseRunParams); + + expect(mockedGlobalHookRunner.runBeforeCompaction).toHaveBeenCalledWith( + { messageCount: -1, sessionFile: "/tmp/session.json" }, + expect.objectContaining({ + sessionKey: "test-key", + }), + ); + expect(mockedGlobalHookRunner.runAfterCompaction).toHaveBeenCalledWith( + { + messageCount: -1, + compactedCount: -1, + tokenCount: 50, + sessionFile: "/tmp/session.json", + }, + expect.objectContaining({ + sessionKey: "test-key", + }), + ); + }); + + it("guards thrown engine-owned overflow compaction attempts", async () => { + mockedContextEngine.info.ownsCompaction = true; + mockedGlobalHookRunner.hasHooks.mockImplementation( + (hookName) => hookName === "before_compaction" || hookName === "after_compaction", + ); + mockedRunEmbeddedAttempt.mockResolvedValueOnce( + makeAttemptResult({ promptError: makeOverflowError() }), + ); + mockedCompactDirect.mockRejectedValueOnce(new Error("engine boom")); + + const result = await runEmbeddedPiAgent(overflowBaseRunParams); + + expect(mockedCompactDirect).toHaveBeenCalledTimes(1); + expect(mockedGlobalHookRunner.runBeforeCompaction).toHaveBeenCalledTimes(1); + expect(mockedGlobalHookRunner.runAfterCompaction).not.toHaveBeenCalled(); + expect(result.meta.error?.kind).toBe("context_overflow"); + 
expect(result.payloads?.[0]?.isError).toBe(true); + }); + it("returns retry_limit when repeated retries never converge", async () => { mockedRunEmbeddedAttempt.mockClear(); mockedCompactDirect.mockClear(); @@ -148,4 +270,57 @@ describe("runEmbeddedPiAgent overflow compaction trigger routing", () => { expect(result.meta.error?.kind).toBe("retry_limit"); expect(result.payloads?.[0]?.isError).toBe(true); }); + + it("normalizes abort-wrapped prompt errors before handing off to model fallback", async () => { + const promptError = Object.assign(new Error("request aborted"), { + name: "AbortError", + cause: { + error: { + code: 429, + message: "Resource has been exhausted (e.g. check quota).", + status: "RESOURCE_EXHAUSTED", + }, + }, + }); + const normalized = Object.assign(new Error("Resource has been exhausted (e.g. check quota)."), { + name: "FailoverError", + reason: "rate_limit", + status: 429, + }); + + mockedRunEmbeddedAttempt.mockResolvedValueOnce(makeAttemptResult({ promptError })); + mockedCoerceToFailoverError.mockReturnValueOnce(normalized); + mockedDescribeFailoverError.mockImplementation((err: unknown) => ({ + message: err instanceof Error ? err.message : String(err), + reason: err === normalized ? "rate_limit" : undefined, + status: err === normalized ? 
429 : undefined, + code: undefined, + })); + mockedResolveFailoverStatus.mockReturnValueOnce(429); + + await expect( + runEmbeddedPiAgent({ + ...overflowBaseRunParams, + config: { + agents: { + defaults: { + model: { + fallbacks: ["openai/gpt-5.2"], + }, + }, + }, + }, + }), + ).rejects.toBe(normalized); + + expect(mockedCoerceToFailoverError).toHaveBeenCalledWith( + promptError, + expect.objectContaining({ + provider: "anthropic", + model: "test-model", + profileId: "test-profile", + }), + ); + expect(mockedResolveFailoverStatus).toHaveBeenCalledWith("rate_limit"); + }); }); diff --git a/src/agents/pi-embedded-runner/run.ts b/src/agents/pi-embedded-runner/run.ts index 21b29fe2cb6..4ca6c0ea226 100644 --- a/src/agents/pi-embedded-runner/run.ts +++ b/src/agents/pi-embedded-runner/run.ts @@ -28,8 +28,14 @@ import { resolveContextWindowInfo, } from "../context-window-guard.js"; import { DEFAULT_CONTEXT_TOKENS, DEFAULT_MODEL, DEFAULT_PROVIDER } from "../defaults.js"; -import { FailoverError, resolveFailoverStatus } from "../failover-error.js"; import { + coerceToFailoverError, + describeFailoverError, + FailoverError, + resolveFailoverStatus, +} from "../failover-error.js"; +import { + applyLocalNoAuthHeaderOverride, ensureAuthProfileStore, getApiKeyForModel, resolveAuthProfileOrder, @@ -40,6 +46,7 @@ import { ensureOpenClawModelsJson } from "../models-config.js"; import { formatBillingErrorMessage, classifyFailoverReason, + extractObservedOverflowTokenCount, formatAssistantErrorText, isAuthAssistantError, isBillingAssistantError, @@ -61,6 +68,7 @@ import { resolveGlobalLane, resolveSessionLane } from "./lanes.js"; import { log } from "./logger.js"; import { resolveModel } from "./model.js"; import { runEmbeddedAttempt } from "./run/attempt.js"; +import { createFailoverDecisionLogger } from "./run/failover-observation.js"; import type { RunEmbeddedPiAgentParams } from "./run/params.js"; import { buildEmbeddedRunPayloads } from "./run/payloads.js"; import { @@ -552,7 
+560,7 @@ export async function runEmbeddedPiAgent( resolveProfilesUnavailableReason({ store: authStore, profileIds, - }) ?? "rate_limit" + }) ?? "unknown" ); } const classified = classifyFailoverReason(params.message); @@ -668,14 +676,15 @@ export async function runEmbeddedPiAgent( ? (resolveProfilesUnavailableReason({ store: authStore, profileIds: autoProfileCandidates, - }) ?? "rate_limit") + }) ?? "unknown") : null; const allowTransientCooldownProbe = params.allowTransientCooldownProbe === true && allAutoProfilesInCooldown && (unavailableReason === "rate_limit" || unavailableReason === "overloaded" || - unavailableReason === "billing"); + unavailableReason === "billing" || + unavailableReason === "unknown"); let didTransientCooldownProbe = false; while (profileIndex < profileCandidates.length) { @@ -762,6 +771,7 @@ export async function runEmbeddedPiAgent( reason, cfg: params.config, agentDir, + runId: params.runId, }); }; const resolveAuthProfileFailureReason = ( @@ -848,6 +858,7 @@ export async function runEmbeddedPiAgent( sessionId: params.sessionId, sessionKey: params.sessionKey, trigger: params.trigger, + memoryFlushWritePath: params.memoryFlushWritePath, messageChannel: params.messageChannel, messageProvider: params.messageProvider, agentAccountId: params.agentAccountId, @@ -879,7 +890,7 @@ export async function runEmbeddedPiAgent( disableTools: params.disableTools, provider, modelId, - model: effectiveModel, + model: applyLocalNoAuthHeaderOverride(effectiveModel, apiKeyInfo), authProfileId: lastProfileId, authProfileIdSource: lockedProfileId ? 
"user" : "auto", authStorage, @@ -887,6 +898,7 @@ export async function runEmbeddedPiAgent( agentId: workspaceResolution.agentId, legacyBeforeAgentStartResult, thinkLevel, + fastMode: params.fastMode, verboseLevel: params.verboseLevel, reasoningLevel: params.reasoningLevel, toolResultFormat: resolvedToolResultFormat, @@ -984,11 +996,13 @@ export async function runEmbeddedPiAgent( const overflowDiagId = createCompactionDiagId(); const errorText = contextOverflowError.text; const msgCount = attempt.messagesSnapshot?.length ?? 0; + const observedOverflowTokens = extractObservedOverflowTokenCount(errorText); log.warn( `[context-overflow-diag] sessionKey=${params.sessionKey ?? params.sessionId} ` + `provider=${provider}/${modelId} source=${contextOverflowError.source} ` + `messages=${msgCount} sessionFile=${params.sessionFile} ` + `diagId=${overflowDiagId} compactionAttempts=${overflowCompactionAttempts} ` + + `observedTokens=${observedOverflowTokens ?? "unknown"} ` + `error=${errorText.slice(0, 200)}`, ); const isCompactionFailure = isCompactionFailureError(errorText); @@ -1024,37 +1038,91 @@ export async function runEmbeddedPiAgent( log.warn( `context overflow detected (attempt ${overflowCompactionAttempts}/${MAX_OVERFLOW_COMPACTION_ATTEMPTS}); attempting auto-compaction for ${provider}/${modelId}`, ); - const compactResult = await contextEngine.compact({ - sessionId: params.sessionId, - sessionFile: params.sessionFile, - tokenBudget: ctxInfo.tokens, - force: true, - compactionTarget: "budget", - runtimeContext: { + let compactResult: Awaited>; + // When the engine owns compaction, hooks are not fired inside + // compactEmbeddedPiSessionDirect (which is bypassed). Fire them + // here so subscribers (memory extensions, usage trackers) are + // notified even on overflow-recovery compactions. + const overflowEngineOwnsCompaction = contextEngine.info.ownsCompaction === true; + const overflowHookRunner = overflowEngineOwnsCompaction ? 
hookRunner : null; + if (overflowHookRunner?.hasHooks("before_compaction")) { + try { + await overflowHookRunner.runBeforeCompaction( + { messageCount: -1, sessionFile: params.sessionFile }, + hookCtx, + ); + } catch (hookErr) { + log.warn( + `before_compaction hook failed during overflow recovery: ${String(hookErr)}`, + ); + } + } + try { + compactResult = await contextEngine.compact({ + sessionId: params.sessionId, sessionKey: params.sessionKey, - messageChannel: params.messageChannel, - messageProvider: params.messageProvider, - agentAccountId: params.agentAccountId, - authProfileId: lastProfileId, - workspaceDir: resolvedWorkspace, - agentDir, - config: params.config, - skillsSnapshot: params.skillsSnapshot, - senderIsOwner: params.senderIsOwner, - provider, - model: modelId, - runId: params.runId, - thinkLevel, - reasoningLevel: params.reasoningLevel, - bashElevated: params.bashElevated, - extraSystemPrompt: params.extraSystemPrompt, - ownerNumbers: params.ownerNumbers, - trigger: "overflow", - diagId: overflowDiagId, - attempt: overflowCompactionAttempts, - maxAttempts: MAX_OVERFLOW_COMPACTION_ATTEMPTS, - }, - }); + sessionFile: params.sessionFile, + tokenBudget: ctxInfo.tokens, + ...(observedOverflowTokens !== undefined + ? 
{ currentTokenCount: observedOverflowTokens } + : {}), + force: true, + compactionTarget: "budget", + runtimeContext: { + sessionKey: params.sessionKey, + messageChannel: params.messageChannel, + messageProvider: params.messageProvider, + agentAccountId: params.agentAccountId, + authProfileId: lastProfileId, + workspaceDir: resolvedWorkspace, + agentDir, + config: params.config, + skillsSnapshot: params.skillsSnapshot, + senderIsOwner: params.senderIsOwner, + provider, + model: modelId, + runId: params.runId, + thinkLevel, + reasoningLevel: params.reasoningLevel, + bashElevated: params.bashElevated, + extraSystemPrompt: params.extraSystemPrompt, + ownerNumbers: params.ownerNumbers, + trigger: "overflow", + ...(observedOverflowTokens !== undefined + ? { currentTokenCount: observedOverflowTokens } + : {}), + diagId: overflowDiagId, + attempt: overflowCompactionAttempts, + maxAttempts: MAX_OVERFLOW_COMPACTION_ATTEMPTS, + }, + }); + } catch (compactErr) { + log.warn( + `contextEngine.compact() threw during overflow recovery for ${provider}/${modelId}: ${String(compactErr)}`, + ); + compactResult = { ok: false, compacted: false, reason: String(compactErr) }; + } + if ( + compactResult.ok && + compactResult.compacted && + overflowHookRunner?.hasHooks("after_compaction") + ) { + try { + await overflowHookRunner.runAfterCompaction( + { + messageCount: -1, + compactedCount: -1, + tokenCount: compactResult.result?.tokensAfter, + sessionFile: params.sessionFile, + }, + hookCtx, + ); + } catch (hookErr) { + log.warn( + `after_compaction hook failed during overflow recovery: ${String(hookErr)}`, + ); + } + } if (compactResult.compacted) { autoCompactionCount += 1; log.info(`auto-compaction succeeded for ${provider}/${modelId}; retrying prompt`); @@ -1154,7 +1222,17 @@ export async function runEmbeddedPiAgent( } if (promptError && !aborted) { - const errorText = describeUnknownError(promptError); + // Normalize wrapped errors (e.g. 
abort-wrapped RESOURCE_EXHAUSTED) into + // FailoverError so rate-limit classification works even for nested shapes. + const normalizedPromptFailover = coerceToFailoverError(promptError, { + provider: activeErrorContext.provider, + model: activeErrorContext.model, + profileId: lastProfileId, + }); + const promptErrorDetails = normalizedPromptFailover + ? describeFailoverError(normalizedPromptFailover) + : describeFailoverError(promptError); + const errorText = promptErrorDetails.message || describeUnknownError(promptError); if (await maybeRefreshCopilotForAuthError(errorText, copilotAuthRetry)) { authRetryPending = true; continue; @@ -1218,19 +1296,36 @@ export async function runEmbeddedPiAgent( }, }; } - const promptFailoverReason = classifyFailoverReason(errorText); + const promptFailoverReason = + promptErrorDetails.reason ?? classifyFailoverReason(errorText); const promptProfileFailureReason = resolveAuthProfileFailureReason(promptFailoverReason); await maybeMarkAuthProfileFailure({ profileId: lastProfileId, reason: promptProfileFailureReason, }); - const promptFailoverFailure = isFailoverErrorMessage(errorText); + const promptFailoverFailure = + promptFailoverReason !== null || isFailoverErrorMessage(errorText); + // Capture the failing profile before auth-profile rotation mutates `lastProfileId`. 
+ const failedPromptProfileId = lastProfileId; + const logPromptFailoverDecision = createFailoverDecisionLogger({ + stage: "prompt", + runId: params.runId, + rawError: errorText, + failoverReason: promptFailoverReason, + profileFailureReason: promptProfileFailureReason, + provider, + model: modelId, + profileId: failedPromptProfileId, + fallbackConfigured, + aborted, + }); if ( promptFailoverFailure && promptFailoverReason !== "timeout" && (await advanceAuthProfile()) ) { + logPromptFailoverDecision("rotate_profile"); await maybeBackoffBeforeOverloadFailover(promptFailoverReason); continue; } @@ -1249,14 +1344,22 @@ export async function runEmbeddedPiAgent( // are configured so outer model fallback can continue on overload, // rate-limit, auth, or billing failures. if (fallbackConfigured && promptFailoverFailure) { + const status = resolveFailoverStatus(promptFailoverReason ?? "unknown"); + logPromptFailoverDecision("fallback_model", { status }); await maybeBackoffBeforeOverloadFailover(promptFailoverReason); - throw new FailoverError(errorText, { - reason: promptFailoverReason ?? "unknown", - provider, - model: modelId, - profileId: lastProfileId, - status: resolveFailoverStatus(promptFailoverReason ?? "unknown"), - }); + throw ( + normalizedPromptFailover ?? + new FailoverError(errorText, { + reason: promptFailoverReason ?? "unknown", + provider, + model: modelId, + profileId: lastProfileId, + status: resolveFailoverStatus(promptFailoverReason ?? "unknown"), + }) + ); + } + if (promptFailoverFailure || promptFailoverReason) { + logPromptFailoverDecision("surface_error"); } throw promptError; } @@ -1282,6 +1385,21 @@ export async function runEmbeddedPiAgent( resolveAuthProfileFailureReason(assistantFailoverReason); const cloudCodeAssistFormatError = attempt.cloudCodeAssistFormatError; const imageDimensionError = parseImageDimensionError(lastAssistant?.errorMessage ?? ""); + // Capture the failing profile before auth-profile rotation mutates `lastProfileId`. 
+ const failedAssistantProfileId = lastProfileId; + const logAssistantFailoverDecision = createFailoverDecisionLogger({ + stage: "assistant", + runId: params.runId, + rawError: lastAssistant?.errorMessage?.trim(), + failoverReason: assistantFailoverReason, + profileFailureReason: assistantProfileFailureReason, + provider: activeErrorContext.provider, + model: activeErrorContext.model, + profileId: failedAssistantProfileId, + fallbackConfigured, + timedOut, + aborted, + }); if ( authFailure && @@ -1339,6 +1457,7 @@ export async function runEmbeddedPiAgent( const rotated = await advanceAuthProfile(); if (rotated) { + logAssistantFailoverDecision("rotate_profile"); await maybeBackoffBeforeOverloadFailover(assistantFailoverReason); continue; } @@ -1371,6 +1490,7 @@ export async function runEmbeddedPiAgent( const status = resolveFailoverStatus(assistantFailoverReason ?? "unknown") ?? (isTimeoutErrorMessage(message) ? 408 : undefined); + logAssistantFailoverDecision("fallback_model", { status }); throw new FailoverError(message, { reason: assistantFailoverReason ?? "unknown", provider: activeErrorContext.provider, @@ -1379,6 +1499,7 @@ export async function runEmbeddedPiAgent( status, }); } + logAssistantFailoverDecision("surface_error"); } const usage = toNormalizedUsage(usageAccumulator); @@ -1417,6 +1538,7 @@ export async function runEmbeddedPiAgent( suppressToolErrorWarnings: params.suppressToolErrorWarnings, inlineToolResultsAllowed: false, didSendViaMessagingTool: attempt.didSendViaMessagingTool, + didSendDeterministicApprovalPrompt: attempt.didSendDeterministicApprovalPrompt, }); // Timeout aborts can leave the run without any assistant payloads. 
@@ -1439,6 +1561,7 @@ export async function runEmbeddedPiAgent( systemPromptReport: attempt.systemPromptReport, }, didSendViaMessagingTool: attempt.didSendViaMessagingTool, + didSendDeterministicApprovalPrompt: attempt.didSendDeterministicApprovalPrompt, messagingToolSentTexts: attempt.messagingToolSentTexts, messagingToolSentMediaUrls: attempt.messagingToolSentMediaUrls, messagingToolSentTargets: attempt.messagingToolSentTargets, @@ -1474,7 +1597,9 @@ export async function runEmbeddedPiAgent( // ACP bridge) can distinguish end_turn from max_tokens. stopReason: attempt.clientToolCall ? "tool_calls" - : (lastAssistant?.stopReason as string | undefined), + : attempt.yieldDetected + ? "end_turn" + : (lastAssistant?.stopReason as string | undefined), pendingToolCalls: attempt.clientToolCall ? [ { @@ -1486,6 +1611,7 @@ export async function runEmbeddedPiAgent( : undefined, }, didSendViaMessagingTool: attempt.didSendViaMessagingTool, + didSendDeterministicApprovalPrompt: attempt.didSendDeterministicApprovalPrompt, messagingToolSentTexts: attempt.messagingToolSentTexts, messagingToolSentMediaUrls: attempt.messagingToolSentMediaUrls, messagingToolSentTargets: attempt.messagingToolSentTargets, diff --git a/src/agents/pi-embedded-runner/run/attempt.spawn-workspace.test.ts b/src/agents/pi-embedded-runner/run/attempt.spawn-workspace.test.ts new file mode 100644 index 00000000000..e67bb20d88d --- /dev/null +++ b/src/agents/pi-embedded-runner/run/attempt.spawn-workspace.test.ts @@ -0,0 +1,717 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import type { AgentMessage } from "@mariozechner/pi-agent-core"; +import type { Api, Model } from "@mariozechner/pi-ai"; +import type { + AuthStorage, + ExtensionContext, + ModelRegistry, + ToolDefinition, +} from "@mariozechner/pi-coding-agent"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import type { + AssembleResult, + BootstrapResult, + CompactResult, + 
ContextEngineInfo, + IngestBatchResult, + IngestResult, +} from "../../../context-engine/types.js"; +import { createHostSandboxFsBridge } from "../../test-helpers/host-sandbox-fs-bridge.js"; +import { createPiToolsSandboxContext } from "../../test-helpers/pi-tools-sandbox-context.js"; + +const hoisted = vi.hoisted(() => { + const spawnSubagentDirectMock = vi.fn(); + const createAgentSessionMock = vi.fn(); + const sessionManagerOpenMock = vi.fn(); + const resolveSandboxContextMock = vi.fn(); + const subscribeEmbeddedPiSessionMock = vi.fn(); + const acquireSessionWriteLockMock = vi.fn(); + const sessionManager = { + getLeafEntry: vi.fn(() => null), + branch: vi.fn(), + resetLeaf: vi.fn(), + buildSessionContext: vi.fn<() => { messages: AgentMessage[] }>(() => ({ messages: [] })), + appendCustomEntry: vi.fn(), + }; + return { + spawnSubagentDirectMock, + createAgentSessionMock, + sessionManagerOpenMock, + resolveSandboxContextMock, + subscribeEmbeddedPiSessionMock, + acquireSessionWriteLockMock, + sessionManager, + }; +}); + +vi.mock("@mariozechner/pi-coding-agent", async (importOriginal) => { + const actual = await importOriginal(); + + return { + ...actual, + createAgentSession: (...args: unknown[]) => hoisted.createAgentSessionMock(...args), + DefaultResourceLoader: class { + async reload() {} + }, + SessionManager: { + open: (...args: unknown[]) => hoisted.sessionManagerOpenMock(...args), + } as unknown as typeof actual.SessionManager, + }; +}); + +vi.mock("../../subagent-spawn.js", () => ({ + SUBAGENT_SPAWN_MODES: ["run", "session"], + spawnSubagentDirect: (...args: unknown[]) => hoisted.spawnSubagentDirectMock(...args), +})); + +vi.mock("../../sandbox.js", () => ({ + resolveSandboxContext: (...args: unknown[]) => hoisted.resolveSandboxContextMock(...args), +})); + +vi.mock("../../session-tool-result-guard-wrapper.js", () => ({ + guardSessionManager: () => hoisted.sessionManager, +})); + +vi.mock("../../pi-embedded-subscribe.js", () => ({ + 
subscribeEmbeddedPiSession: (...args: unknown[]) => + hoisted.subscribeEmbeddedPiSessionMock(...args), +})); + +vi.mock("../../../plugins/hook-runner-global.js", () => ({ + getGlobalHookRunner: () => undefined, +})); + +vi.mock("../../../infra/machine-name.js", () => ({ + getMachineDisplayName: async () => "test-host", +})); + +vi.mock("../../../infra/net/undici-global-dispatcher.js", () => ({ + ensureGlobalUndiciEnvProxyDispatcher: () => {}, + ensureGlobalUndiciStreamTimeouts: () => {}, +})); + +vi.mock("../../bootstrap-files.js", () => ({ + makeBootstrapWarn: () => () => {}, + resolveBootstrapContextForRun: async () => ({ bootstrapFiles: [], contextFiles: [] }), +})); + +vi.mock("../../skills.js", () => ({ + applySkillEnvOverrides: () => () => {}, + applySkillEnvOverridesFromSnapshot: () => () => {}, + resolveSkillsPromptForRun: () => "", +})); + +vi.mock("../skills-runtime.js", () => ({ + resolveEmbeddedRunSkillEntries: () => ({ + shouldLoadSkillEntries: false, + skillEntries: undefined, + }), +})); + +vi.mock("../../docs-path.js", () => ({ + resolveOpenClawDocsPath: async () => undefined, +})); + +vi.mock("../../pi-project-settings.js", () => ({ + createPreparedEmbeddedPiSettingsManager: () => ({}), +})); + +vi.mock("../../pi-settings.js", () => ({ + applyPiAutoCompactionGuard: () => {}, +})); + +vi.mock("../extensions.js", () => ({ + buildEmbeddedExtensionFactories: () => [], +})); + +vi.mock("../google.js", () => ({ + logToolSchemasForGoogle: () => {}, + sanitizeSessionHistory: async ({ messages }: { messages: unknown[] }) => messages, + sanitizeToolsForGoogle: ({ tools }: { tools: unknown[] }) => tools, +})); + +vi.mock("../../session-file-repair.js", () => ({ + repairSessionFileIfNeeded: async () => {}, +})); + +vi.mock("../session-manager-cache.js", () => ({ + prewarmSessionFile: async () => {}, + trackSessionManagerAccess: () => {}, +})); + +vi.mock("../session-manager-init.js", () => ({ + prepareSessionManagerForRun: async () => {}, +})); + 
+vi.mock("../../session-write-lock.js", () => ({ + acquireSessionWriteLock: (...args: unknown[]) => hoisted.acquireSessionWriteLockMock(...args), + resolveSessionLockMaxHoldFromTimeout: () => 1, +})); + +vi.mock("../tool-result-context-guard.js", () => ({ + installToolResultContextGuard: () => () => {}, +})); + +vi.mock("../wait-for-idle-before-flush.js", () => ({ + flushPendingToolResultsAfterIdle: async () => {}, +})); + +vi.mock("../runs.js", () => ({ + setActiveEmbeddedRun: () => {}, + clearActiveEmbeddedRun: () => {}, +})); + +vi.mock("./images.js", () => ({ + detectAndLoadPromptImages: async () => ({ images: [] }), +})); + +vi.mock("../../system-prompt-params.js", () => ({ + buildSystemPromptParams: () => ({ + runtimeInfo: {}, + userTimezone: "UTC", + userTime: "00:00", + userTimeFormat: "24h", + }), +})); + +vi.mock("../../system-prompt-report.js", () => ({ + buildSystemPromptReport: () => undefined, +})); + +vi.mock("../system-prompt.js", () => ({ + applySystemPromptOverrideToSession: () => {}, + buildEmbeddedSystemPrompt: () => "system prompt", + createSystemPromptOverride: (prompt: string) => () => prompt, +})); + +vi.mock("../extra-params.js", () => ({ + applyExtraParamsToAgent: () => {}, +})); + +vi.mock("../../openai-ws-stream.js", () => ({ + createOpenAIWebSocketStreamFn: vi.fn(), + releaseWsSession: () => {}, +})); + +vi.mock("../../anthropic-payload-log.js", () => ({ + createAnthropicPayloadLogger: () => undefined, +})); + +vi.mock("../../cache-trace.js", () => ({ + createCacheTrace: () => undefined, +})); + +vi.mock("../../model-selection.js", async (importOriginal) => { + const actual = await importOriginal(); + + return { + ...actual, + normalizeProviderId: (providerId?: string) => providerId?.trim().toLowerCase() ?? 
"", + resolveDefaultModelForAgent: () => ({ provider: "openai", model: "gpt-test" }), + }; +}); + +const { runEmbeddedAttempt } = await import("./attempt.js"); + +type MutableSession = { + sessionId: string; + messages: unknown[]; + isCompacting: boolean; + isStreaming: boolean; + agent: { + streamFn?: unknown; + replaceMessages: (messages: unknown[]) => void; + }; + prompt: (prompt: string, options?: { images?: unknown[] }) => Promise; + abort: () => Promise; + dispose: () => void; + steer: (text: string) => Promise; +}; + +function createSubscriptionMock() { + return { + assistantTexts: [] as string[], + toolMetas: [] as Array<{ toolName: string; meta?: string }>, + unsubscribe: () => {}, + waitForCompactionRetry: async () => {}, + getMessagingToolSentTexts: () => [] as string[], + getMessagingToolSentMediaUrls: () => [] as string[], + getMessagingToolSentTargets: () => [] as unknown[], + getSuccessfulCronAdds: () => 0, + didSendViaMessagingTool: () => false, + didSendDeterministicApprovalPrompt: () => false, + getLastToolError: () => undefined, + getUsageTotals: () => undefined, + getCompactionCount: () => 0, + isCompacting: () => false, + }; +} + +function resetEmbeddedAttemptHarness( + params: { + includeSpawnSubagent?: boolean; + subscribeImpl?: () => ReturnType; + sessionMessages?: AgentMessage[]; + } = {}, +) { + if (params.includeSpawnSubagent) { + hoisted.spawnSubagentDirectMock.mockReset().mockResolvedValue({ + status: "accepted", + childSessionKey: "agent:main:subagent:child", + runId: "run-child", + }); + } + hoisted.createAgentSessionMock.mockReset(); + hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager); + hoisted.resolveSandboxContextMock.mockReset(); + hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({ + release: async () => {}, + }); + hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null); + hoisted.sessionManager.branch.mockReset(); + hoisted.sessionManager.resetLeaf.mockReset(); + 
hoisted.sessionManager.buildSessionContext
+    .mockReset()
+    .mockReturnValue({ messages: params.sessionMessages ?? [] });
+  hoisted.sessionManager.appendCustomEntry.mockReset();
+  if (params.subscribeImpl) {
+    hoisted.subscribeEmbeddedPiSessionMock.mockReset().mockImplementation(params.subscribeImpl);
+  }
+}
+
+async function cleanupTempPaths(tempPaths: string[]) {
+  while (tempPaths.length > 0) {
+    const target = tempPaths.pop();
+    if (target) {
+      await fs.rm(target, { recursive: true, force: true });
+    }
+  }
+}
+
+function createDefaultEmbeddedSession(params?: {
+  prompt?: (session: MutableSession) => Promise<void>;
+}): MutableSession {
+  const session: MutableSession = {
+    sessionId: "embedded-session",
+    messages: [],
+    isCompacting: false,
+    isStreaming: false,
+    agent: {
+      replaceMessages: (messages: unknown[]) => {
+        session.messages = [...messages];
+      },
+    },
+    prompt: async () => {
+      if (params?.prompt) {
+        await params.prompt(session);
+        return;
+      }
+      session.messages = [
+        ...session.messages,
+        { role: "assistant", content: "done", timestamp: 2 },
+      ];
+    },
+    abort: async () => {},
+    dispose: () => {},
+    steer: async () => {},
+  };
+
+  return session;
+}
+
+function createContextEngineBootstrapAndAssemble() {
+  return {
+    bootstrap: vi.fn(async (_params: { sessionKey?: string }) => ({ bootstrapped: true })),
+    assemble: vi.fn(async ({ messages }: { messages: AgentMessage[]; sessionKey?: string }) => ({
+      messages,
+      estimatedTokens: 1,
+    })),
+  };
+}
+
+function expectCalledWithSessionKey(mock: ReturnType<typeof vi.fn>, sessionKey: string) {
+  expect(mock).toHaveBeenCalledWith(
+    expect.objectContaining({
+      sessionKey,
+    }),
+  );
+}
+
+const testModel = {
+  api: "openai-completions",
+  provider: "openai",
+  compat: {},
+  contextWindow: 8192,
+  input: ["text"],
+} as unknown as Model;
+
+const cacheTtlEligibleModel = {
+  api: "anthropic",
+  provider: "anthropic",
+  compat: {},
+  contextWindow: 8192,
+  input: ["text"],
+} as unknown as Model;
+
+describe("runEmbeddedAttempt 
sessions_spawn workspace inheritance", () => { + const tempPaths: string[] = []; + + beforeEach(() => { + resetEmbeddedAttemptHarness({ + includeSpawnSubagent: true, + subscribeImpl: createSubscriptionMock, + }); + }); + + afterEach(async () => { + await cleanupTempPaths(tempPaths); + }); + + it("passes the real workspace to sessions_spawn when workspaceAccess is ro", async () => { + const realWorkspace = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-real-workspace-")); + const sandboxWorkspace = await fs.mkdtemp( + path.join(os.tmpdir(), "openclaw-sandbox-workspace-"), + ); + const agentDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-agent-dir-")); + tempPaths.push(realWorkspace, sandboxWorkspace, agentDir); + + hoisted.resolveSandboxContextMock.mockResolvedValue( + createPiToolsSandboxContext({ + workspaceDir: sandboxWorkspace, + agentWorkspaceDir: realWorkspace, + workspaceAccess: "ro", + fsBridge: createHostSandboxFsBridge(sandboxWorkspace), + tools: { allow: ["sessions_spawn"], deny: [] }, + sessionKey: "agent:main:main", + }), + ); + + hoisted.createAgentSessionMock.mockImplementation( + async (params: { customTools: ToolDefinition[] }) => { + const session = createDefaultEmbeddedSession({ + prompt: async () => { + const spawnTool = params.customTools.find((tool) => tool.name === "sessions_spawn"); + expect(spawnTool).toBeDefined(); + if (!spawnTool) { + throw new Error("missing sessions_spawn tool"); + } + await spawnTool.execute( + "call-sessions-spawn", + { task: "inspect workspace" }, + undefined, + undefined, + {} as unknown as ExtensionContext, + ); + }, + }); + + return { session }; + }, + ); + + const result = await runEmbeddedAttempt({ + sessionId: "embedded-session", + sessionKey: "agent:main:main", + sessionFile: path.join(realWorkspace, "session.jsonl"), + workspaceDir: realWorkspace, + agentDir, + config: {}, + prompt: "spawn a child session", + timeoutMs: 10_000, + runId: "run-1", + provider: "openai", + modelId: "gpt-test", + model: 
testModel, + authStorage: {} as AuthStorage, + modelRegistry: {} as ModelRegistry, + thinkLevel: "off", + senderIsOwner: true, + disableMessageTool: true, + }); + + expect(result.promptError).toBeNull(); + expect(hoisted.spawnSubagentDirectMock).toHaveBeenCalledTimes(1); + expect(hoisted.spawnSubagentDirectMock).toHaveBeenCalledWith( + expect.objectContaining({ + task: "inspect workspace", + }), + expect.objectContaining({ + workspaceDir: realWorkspace, + }), + ); + expect(hoisted.spawnSubagentDirectMock).not.toHaveBeenCalledWith( + expect.anything(), + expect.objectContaining({ + workspaceDir: sandboxWorkspace, + }), + ); + }); +}); + +describe("runEmbeddedAttempt cache-ttl tracking after compaction", () => { + const tempPaths: string[] = []; + + beforeEach(() => { + resetEmbeddedAttemptHarness(); + }); + + afterEach(async () => { + await cleanupTempPaths(tempPaths); + }); + + async function runAttemptWithCacheTtl(compactionCount: number) { + const workspaceDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-cache-ttl-workspace-")); + const agentDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-cache-ttl-agent-")); + const sessionFile = path.join(workspaceDir, "session.jsonl"); + tempPaths.push(workspaceDir, agentDir); + await fs.writeFile(sessionFile, "", "utf8"); + + hoisted.subscribeEmbeddedPiSessionMock.mockReset().mockImplementation(() => ({ + ...createSubscriptionMock(), + getCompactionCount: () => compactionCount, + })); + + hoisted.createAgentSessionMock.mockImplementation(async () => ({ + session: createDefaultEmbeddedSession(), + })); + + return await runEmbeddedAttempt({ + sessionId: "embedded-session", + sessionKey: "agent:main:test-cache-ttl", + sessionFile, + workspaceDir, + agentDir, + config: { + agents: { + defaults: { + contextPruning: { + mode: "cache-ttl", + }, + }, + }, + }, + prompt: "hello", + timeoutMs: 10_000, + runId: `run-cache-ttl-${compactionCount}`, + provider: "anthropic", + modelId: "claude-sonnet-4-20250514", + model: 
cacheTtlEligibleModel, + authStorage: {} as AuthStorage, + modelRegistry: {} as ModelRegistry, + thinkLevel: "off", + senderIsOwner: true, + disableMessageTool: true, + }); + } + + it("skips cache-ttl append when compaction completed during the attempt", async () => { + const result = await runAttemptWithCacheTtl(1); + + expect(result.promptError).toBeNull(); + expect(hoisted.sessionManager.appendCustomEntry).not.toHaveBeenCalledWith( + "openclaw.cache-ttl", + expect.anything(), + ); + }); + + it("appends cache-ttl when no compaction completed during the attempt", async () => { + const result = await runAttemptWithCacheTtl(0); + + expect(result.promptError).toBeNull(); + expect(hoisted.sessionManager.appendCustomEntry).toHaveBeenCalledWith( + "openclaw.cache-ttl", + expect.objectContaining({ + provider: "anthropic", + modelId: "claude-sonnet-4-20250514", + timestamp: expect.any(Number), + }), + ); + }); +}); + +describe("runEmbeddedAttempt context engine sessionKey forwarding", () => { + const tempPaths: string[] = []; + const sessionKey = "agent:main:discord:channel:test-ctx-engine"; + + beforeEach(() => { + hoisted.createAgentSessionMock.mockReset(); + hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager); + hoisted.resolveSandboxContextMock.mockReset(); + hoisted.subscribeEmbeddedPiSessionMock.mockReset().mockImplementation(createSubscriptionMock); + hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({ + release: async () => {}, + }); + hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null); + hoisted.sessionManager.branch.mockReset(); + hoisted.sessionManager.resetLeaf.mockReset(); + hoisted.sessionManager.appendCustomEntry.mockReset(); + }); + + afterEach(async () => { + while (tempPaths.length > 0) { + const target = tempPaths.pop(); + if (target) { + await fs.rm(target, { recursive: true, force: true }); + } + } + }); + + // Build a minimal real attempt harness so lifecycle hooks run against + // the 
actual runner flow instead of a hand-written wrapper.
+  async function runAttemptWithContextEngine(contextEngine: {
+    bootstrap?: (params: {
+      sessionId: string;
+      sessionKey?: string;
+      sessionFile: string;
+    }) => Promise<BootstrapResult>;
+    assemble: (params: {
+      sessionId: string;
+      sessionKey?: string;
+      messages: AgentMessage[];
+      tokenBudget?: number;
+    }) => Promise<AssembleResult>;
+    afterTurn?: (params: {
+      sessionId: string;
+      sessionKey?: string;
+      sessionFile: string;
+      messages: AgentMessage[];
+      prePromptMessageCount: number;
+      tokenBudget?: number;
+      runtimeContext?: Record<string, unknown>;
+    }) => Promise<void>;
+    ingestBatch?: (params: {
+      sessionId: string;
+      sessionKey?: string;
+      messages: AgentMessage[];
+    }) => Promise<IngestBatchResult>;
+    ingest?: (params: {
+      sessionId: string;
+      sessionKey?: string;
+      message: AgentMessage;
+    }) => Promise<IngestResult>;
+    compact?: (params: {
+      sessionId: string;
+      sessionKey?: string;
+      sessionFile: string;
+      tokenBudget?: number;
+    }) => Promise<CompactResult>;
+    info?: Partial<ContextEngineInfo>;
+  }) {
+    const workspaceDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-ctx-engine-workspace-"));
+    const agentDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-ctx-engine-agent-"));
+    const sessionFile = path.join(workspaceDir, "session.jsonl");
+    tempPaths.push(workspaceDir, agentDir);
+    await fs.writeFile(sessionFile, "", "utf8");
+    const seedMessages: AgentMessage[] = [
+      { role: "user", content: "seed", timestamp: 1 } as AgentMessage,
+    ];
+    const infoId = contextEngine.info?.id ?? "test-context-engine";
+    const infoName = contextEngine.info?.name ?? "Test Context Engine";
+    const infoVersion = contextEngine.info?.version ?? 
"0.0.1"; + + hoisted.sessionManager.buildSessionContext + .mockReset() + .mockReturnValue({ messages: seedMessages }); + + hoisted.createAgentSessionMock.mockImplementation(async () => ({ + session: createDefaultEmbeddedSession(), + })); + + return await runEmbeddedAttempt({ + sessionId: "embedded-session", + sessionKey, + sessionFile, + workspaceDir, + agentDir, + config: {}, + prompt: "hello", + timeoutMs: 10_000, + runId: "run-context-engine-forwarding", + provider: "openai", + modelId: "gpt-test", + model: testModel, + authStorage: {} as AuthStorage, + modelRegistry: {} as ModelRegistry, + thinkLevel: "off", + senderIsOwner: true, + disableMessageTool: true, + contextTokenBudget: 2048, + contextEngine: { + ...contextEngine, + ingest: + contextEngine.ingest ?? + (async () => ({ + ingested: true, + })), + compact: + contextEngine.compact ?? + (async () => ({ + ok: false, + compacted: false, + reason: "not used in this test", + })), + info: { + id: infoId, + name: infoName, + version: infoVersion, + }, + }, + }); + } + + it("forwards sessionKey to bootstrap, assemble, and afterTurn", async () => { + const { bootstrap, assemble } = createContextEngineBootstrapAndAssemble(); + const afterTurn = vi.fn(async (_params: { sessionKey?: string }) => {}); + + const result = await runAttemptWithContextEngine({ + bootstrap, + assemble, + afterTurn, + }); + + expect(result.promptError).toBeNull(); + expectCalledWithSessionKey(bootstrap, sessionKey); + expectCalledWithSessionKey(assemble, sessionKey); + expectCalledWithSessionKey(afterTurn, sessionKey); + }); + + it("forwards sessionKey to ingestBatch when afterTurn is absent", async () => { + const { bootstrap, assemble } = createContextEngineBootstrapAndAssemble(); + const ingestBatch = vi.fn( + async (_params: { sessionKey?: string; messages: AgentMessage[] }) => ({ ingestedCount: 1 }), + ); + + const result = await runAttemptWithContextEngine({ + bootstrap, + assemble, + ingestBatch, + }); + + 
expect(result.promptError).toBeNull(); + expectCalledWithSessionKey(ingestBatch, sessionKey); + }); + + it("forwards sessionKey to per-message ingest when ingestBatch is absent", async () => { + const { bootstrap, assemble } = createContextEngineBootstrapAndAssemble(); + const ingest = vi.fn(async (_params: { sessionKey?: string; message: AgentMessage }) => ({ + ingested: true, + })); + + const result = await runAttemptWithContextEngine({ + bootstrap, + assemble, + ingest, + }); + + expect(result.promptError).toBeNull(); + expect(ingest).toHaveBeenCalled(); + expect( + ingest.mock.calls.every((call) => { + const params = call[0]; + return params.sessionKey === sessionKey; + }), + ).toBe(true); + }); +}); diff --git a/src/agents/pi-embedded-runner/run/attempt.test.ts b/src/agents/pi-embedded-runner/run/attempt.test.ts index 9821adc0e0b..ef88e04ef46 100644 --- a/src/agents/pi-embedded-runner/run/attempt.test.ts +++ b/src/agents/pi-embedded-runner/run/attempt.test.ts @@ -13,6 +13,7 @@ import { shouldInjectOllamaCompatNumCtx, decodeHtmlEntitiesInObject, wrapOllamaCompatNumCtx, + wrapStreamFnRepairMalformedToolCallArguments, wrapStreamFnTrimToolCallNames, } from "./attempt.js"; @@ -357,6 +358,279 @@ describe("wrapStreamFnTrimToolCallNames", () => { expect(result).toBe(finalMessage); }); + it("infers tool names from malformed toolCallId variants when allowlist is present", async () => { + const partialToolCall = { type: "toolCall", id: "functions.read:0", name: "" }; + const finalToolCallA = { type: "toolCall", id: "functionsread3", name: "" }; + const finalToolCallB: { type: string; id: string; name?: string } = { + type: "toolCall", + id: "functionswrite4", + }; + const finalToolCallC = { type: "functionCall", id: "functions.exec2", name: "" }; + const event = { + type: "toolcall_delta", + partial: { role: "assistant", content: [partialToolCall] }, + }; + const finalMessage = { + role: "assistant", + content: [finalToolCallA, finalToolCallB, finalToolCallC], + }; + 
const baseFn = vi.fn(() => + createFakeStream({ + events: [event], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn, new Set(["read", "write", "exec"])); + for await (const _item of stream) { + // drain + } + const result = await stream.result(); + + expect(partialToolCall.name).toBe("read"); + expect(finalToolCallA.name).toBe("read"); + expect(finalToolCallB.name).toBe("write"); + expect(finalToolCallC.name).toBe("exec"); + expect(result).toBe(finalMessage); + }); + + it("does not infer names from malformed toolCallId when allowlist is absent", async () => { + const finalToolCall: { type: string; id: string; name?: string } = { + type: "toolCall", + id: "functionsread3", + }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn); + await stream.result(); + + expect(finalToolCall.name).toBeUndefined(); + }); + + it("infers malformed non-blank tool names before dispatch", async () => { + const partialToolCall = { type: "toolCall", id: "functionsread3", name: "functionsread3" }; + const finalToolCall = { type: "toolCall", id: "functionsread3", name: "functionsread3" }; + const event = { + type: "toolcall_delta", + partial: { role: "assistant", content: [partialToolCall] }, + }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [event], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn, new Set(["read", "write"])); + for await (const _item of stream) { + // drain + } + await stream.result(); + + expect(partialToolCall.name).toBe("read"); + expect(finalToolCall.name).toBe("read"); + }); + + it("recovers malformed non-blank names when id is missing", async () => { + const finalToolCall = { type: "toolCall", name: 
"functionsread3" }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn, new Set(["read", "write"])); + await stream.result(); + + expect(finalToolCall.name).toBe("read"); + }); + + it("recovers canonical tool names from canonical ids when name is empty", async () => { + const finalToolCall = { type: "toolCall", id: "read", name: "" }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn, new Set(["read", "write"])); + await stream.result(); + + expect(finalToolCall.name).toBe("read"); + }); + + it("recovers tool names from ids when name is whitespace-only", async () => { + const finalToolCall = { type: "toolCall", id: "functionswrite4", name: " " }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn, new Set(["read", "write"])); + await stream.result(); + + expect(finalToolCall.name).toBe("write"); + }); + + it("keeps blank names blank and assigns fallback ids when both name and id are blank", async () => { + const finalToolCall = { type: "toolCall", id: "", name: "" }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn, new Set(["read", "write"])); + await stream.result(); + + expect(finalToolCall.name).toBe(""); + expect(finalToolCall.id).toBe("call_auto_1"); + }); + + it("assigns fallback ids when both name and id are missing", async () => { + const 
finalToolCall: { type: string; name?: string; id?: string } = { type: "toolCall" }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn, new Set(["read", "write"])); + await stream.result(); + + expect(finalToolCall.name).toBeUndefined(); + expect(finalToolCall.id).toBe("call_auto_1"); + }); + + it("prefers explicit canonical names over conflicting canonical ids", async () => { + const finalToolCall = { type: "toolCall", id: "write", name: "read" }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn, new Set(["read", "write"])); + await stream.result(); + + expect(finalToolCall.name).toBe("read"); + expect(finalToolCall.id).toBe("write"); + }); + + it("prefers explicit trimmed canonical names over conflicting malformed ids", async () => { + const finalToolCall = { type: "toolCall", id: "functionswrite4", name: " read " }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn, new Set(["read", "write"])); + await stream.result(); + + expect(finalToolCall.name).toBe("read"); + }); + + it("does not rewrite composite names that mention multiple tools", async () => { + const finalToolCall = { type: "toolCall", id: "functionsread3", name: "read write" }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn, new Set(["read", "write"])); + await stream.result(); + + 
expect(finalToolCall.name).toBe("read write"); + }); + + it("fails closed for malformed non-blank names that are ambiguous", async () => { + const finalToolCall = { type: "toolCall", id: "functions.exec2", name: "functions.exec2" }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn, new Set(["exec", "exec2"])); + await stream.result(); + + expect(finalToolCall.name).toBe("functions.exec2"); + }); + + it("matches malformed ids case-insensitively across common separators", async () => { + const finalToolCall = { type: "toolCall", id: "Functions.Read_7", name: "" }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn, new Set(["read", "write"])); + await stream.result(); + + expect(finalToolCall.name).toBe("read"); + }); + it("does not override explicit non-blank tool names with inferred ids", async () => { + const finalToolCall = { type: "toolCall", id: "functionswrite4", name: "someOtherTool" }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn, new Set(["read", "write"])); + await stream.result(); + + expect(finalToolCall.name).toBe("someOtherTool"); + }); + + it("fails closed when malformed ids could map to multiple allowlisted tools", async () => { + const finalToolCall = { type: "toolCall", id: "functions.exec2", name: "" }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [], + resultMessage: finalMessage, + }), + ); + + const stream = await 
invokeWrappedStream(baseFn, new Set(["exec", "exec2"])); + await stream.result(); + + expect(finalToolCall.name).toBe(""); + }); it("does not collapse whitespace-only tool names to empty strings", async () => { const partialToolCall = { type: "toolCall", name: " " }; const finalToolCall = { type: "toolCall", name: "\t " }; @@ -430,6 +704,182 @@ describe("wrapStreamFnTrimToolCallNames", () => { }); }); +describe("wrapStreamFnRepairMalformedToolCallArguments", () => { + function createFakeStream(params: { events: unknown[]; resultMessage: unknown }): { + result: () => Promise; + [Symbol.asyncIterator]: () => AsyncIterator; + } { + return { + async result() { + return params.resultMessage; + }, + [Symbol.asyncIterator]() { + return (async function* () { + for (const event of params.events) { + yield event; + } + })(); + }, + }; + } + + async function invokeWrappedStream(baseFn: (...args: never[]) => unknown) { + const wrappedFn = wrapStreamFnRepairMalformedToolCallArguments(baseFn as never); + return await wrappedFn({} as never, {} as never, {} as never); + } + + it("repairs anthropic-compatible tool arguments when trailing junk follows valid JSON", async () => { + const partialToolCall = { type: "toolCall", name: "read", arguments: {} }; + const streamedToolCall = { type: "toolCall", name: "read", arguments: {} }; + const endMessageToolCall = { type: "toolCall", name: "read", arguments: {} }; + const finalToolCall = { type: "toolCall", name: "read", arguments: {} }; + const partialMessage = { role: "assistant", content: [partialToolCall] }; + const endMessage = { role: "assistant", content: [endMessageToolCall] }; + const finalMessage = { role: "assistant", content: [finalToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [ + { + type: "toolcall_delta", + contentIndex: 0, + delta: '{"path":"/tmp/report.txt"}', + partial: partialMessage, + }, + { + type: "toolcall_delta", + contentIndex: 0, + delta: "xx", + partial: partialMessage, + }, + { + 
type: "toolcall_end", + contentIndex: 0, + toolCall: streamedToolCall, + partial: partialMessage, + message: endMessage, + }, + ], + resultMessage: finalMessage, + }), + ); + + const stream = await invokeWrappedStream(baseFn); + for await (const _item of stream) { + // drain + } + const result = await stream.result(); + + expect(partialToolCall.arguments).toEqual({ path: "/tmp/report.txt" }); + expect(streamedToolCall.arguments).toEqual({ path: "/tmp/report.txt" }); + expect(endMessageToolCall.arguments).toEqual({ path: "/tmp/report.txt" }); + expect(finalToolCall.arguments).toEqual({ path: "/tmp/report.txt" }); + expect(result).toBe(finalMessage); + }); + + it("keeps incomplete partial JSON unchanged until a complete object exists", async () => { + const partialToolCall = { type: "toolCall", name: "read", arguments: {} }; + const partialMessage = { role: "assistant", content: [partialToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [ + { + type: "toolcall_delta", + contentIndex: 0, + delta: '{"path":"/tmp', + partial: partialMessage, + }, + ], + resultMessage: { role: "assistant", content: [partialToolCall] }, + }), + ); + + const stream = await invokeWrappedStream(baseFn); + for await (const _item of stream) { + // drain + } + + expect(partialToolCall.arguments).toEqual({}); + }); + + it("does not repair tool arguments when trailing junk exceeds the Kimi-specific allowance", async () => { + const partialToolCall = { type: "toolCall", name: "read", arguments: {} }; + const streamedToolCall = { type: "toolCall", name: "read", arguments: {} }; + const partialMessage = { role: "assistant", content: [partialToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [ + { + type: "toolcall_delta", + contentIndex: 0, + delta: '{"path":"/tmp/report.txt"}oops', + partial: partialMessage, + }, + { + type: "toolcall_end", + contentIndex: 0, + toolCall: streamedToolCall, + partial: partialMessage, + }, + ], + resultMessage: { role: 
"assistant", content: [partialToolCall] }, + }), + ); + + const stream = await invokeWrappedStream(baseFn); + for await (const _item of stream) { + // drain + } + + expect(partialToolCall.arguments).toEqual({}); + expect(streamedToolCall.arguments).toEqual({}); + }); + + it("clears a cached repair when later deltas make the trailing suffix invalid", async () => { + const partialToolCall = { type: "toolCall", name: "read", arguments: {} }; + const streamedToolCall = { type: "toolCall", name: "read", arguments: {} }; + const partialMessage = { role: "assistant", content: [partialToolCall] }; + const baseFn = vi.fn(() => + createFakeStream({ + events: [ + { + type: "toolcall_delta", + contentIndex: 0, + delta: '{"path":"/tmp/report.txt"}', + partial: partialMessage, + }, + { + type: "toolcall_delta", + contentIndex: 0, + delta: "x", + partial: partialMessage, + }, + { + type: "toolcall_delta", + contentIndex: 0, + delta: "yzq", + partial: partialMessage, + }, + { + type: "toolcall_end", + contentIndex: 0, + toolCall: streamedToolCall, + partial: partialMessage, + }, + ], + resultMessage: { role: "assistant", content: [partialToolCall] }, + }), + ); + + const stream = await invokeWrappedStream(baseFn); + for await (const _item of stream) { + // drain + } + + expect(partialToolCall.arguments).toEqual({}); + expect(streamedToolCall.arguments).toEqual({}); + }); +}); + describe("isOllamaCompatProvider", () => { it("detects native ollama provider id", () => { expect( diff --git a/src/agents/pi-embedded-runner/run/attempt.ts b/src/agents/pi-embedded-runner/run/attempt.ts index d7fa541c2be..274ef0ef865 100644 --- a/src/agents/pi-embedded-runner/run/attempt.ts +++ b/src/agents/pi-embedded-runner/run/attempt.ts @@ -11,7 +11,10 @@ import { resolveHeartbeatPrompt } from "../../../auto-reply/heartbeat.js"; import { resolveChannelCapabilities } from "../../../config/channel-capabilities.js"; import type { OpenClawConfig } from "../../../config/config.js"; import { 
getMachineDisplayName } from "../../../infra/machine-name.js"; -import { ensureGlobalUndiciStreamTimeouts } from "../../../infra/net/undici-global-dispatcher.js"; +import { + ensureGlobalUndiciEnvProxyDispatcher, + ensureGlobalUndiciStreamTimeouts, +} from "../../../infra/net/undici-global-dispatcher.js"; import { MAX_IMAGE_BYTES } from "../../../media/constants.js"; import { getGlobalHookRunner } from "../../../plugins/hook-runner-global.js"; import type { @@ -145,6 +148,186 @@ type PromptBuildHookRunner = { ) => Promise; }; +const SESSIONS_YIELD_INTERRUPT_CUSTOM_TYPE = "openclaw.sessions_yield_interrupt"; +const SESSIONS_YIELD_CONTEXT_CUSTOM_TYPE = "openclaw.sessions_yield"; + +// Persist a hidden context reminder so the next turn knows why the runner stopped. +function buildSessionsYieldContextMessage(message: string): string { + return `${message}\n\n[Context: The previous turn ended intentionally via sessions_yield while waiting for a follow-up event.]`; +} + +// Return a synthetic aborted response so pi-agent-core unwinds without a real provider call. +function createYieldAbortedResponse(model: { api?: string; provider?: string; id?: string }): { + [Symbol.asyncIterator]: () => AsyncGenerator; + result: () => Promise<{ + role: "assistant"; + content: Array<{ type: "text"; text: string }>; + stopReason: "aborted"; + api: string; + provider: string; + model: string; + usage: { + input: number; + output: number; + cacheRead: number; + cacheWrite: number; + totalTokens: number; + cost: { + input: number; + output: number; + cacheRead: number; + cacheWrite: number; + total: number; + }; + }; + timestamp: number; + }>; +} { + const message = { + role: "assistant" as const, + content: [{ type: "text" as const, text: "" }], + stopReason: "aborted" as const, + api: model.api ?? "", + provider: model.provider ?? "", + model: model.id ?? 
"", + usage: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + total: 0, + }, + }, + timestamp: Date.now(), + }; + return { + async *[Symbol.asyncIterator]() {}, + result: async () => message, + }; +} + +// Queue a hidden steering message so pi-agent-core skips any remaining tool calls. +function queueSessionsYieldInterruptMessage(activeSession: { + agent: { steer: (message: AgentMessage) => void }; +}) { + activeSession.agent.steer({ + role: "custom", + customType: SESSIONS_YIELD_INTERRUPT_CUSTOM_TYPE, + content: "[sessions_yield interrupt]", + display: false, + details: { source: "sessions_yield" }, + timestamp: Date.now(), + }); +} + +// Append the caller-provided yield payload as a hidden session message once the run is idle. +async function persistSessionsYieldContextMessage( + activeSession: { + sendCustomMessage: ( + message: { + customType: string; + content: string; + display: boolean; + details?: Record; + }, + options?: { triggerTurn?: boolean }, + ) => Promise; + }, + message: string, +) { + await activeSession.sendCustomMessage( + { + customType: SESSIONS_YIELD_CONTEXT_CUSTOM_TYPE, + content: buildSessionsYieldContextMessage(message), + display: false, + details: { source: "sessions_yield", message }, + }, + { triggerTurn: false }, + ); +} + +// Remove the synthetic yield interrupt + aborted assistant entry from the live transcript. 
+function stripSessionsYieldArtifacts(activeSession: { + messages: AgentMessage[]; + agent: { replaceMessages: (messages: AgentMessage[]) => void }; + sessionManager?: unknown; +}) { + const strippedMessages = activeSession.messages.slice(); + while (strippedMessages.length > 0) { + const last = strippedMessages.at(-1) as + | AgentMessage + | { role?: string; customType?: string; stopReason?: string }; + if (last?.role === "assistant" && "stopReason" in last && last.stopReason === "aborted") { + strippedMessages.pop(); + continue; + } + if ( + last?.role === "custom" && + "customType" in last && + last.customType === SESSIONS_YIELD_INTERRUPT_CUSTOM_TYPE + ) { + strippedMessages.pop(); + continue; + } + break; + } + if (strippedMessages.length !== activeSession.messages.length) { + activeSession.agent.replaceMessages(strippedMessages); + } + + const sessionManager = activeSession.sessionManager as + | { + fileEntries?: Array<{ + type?: string; + id?: string; + parentId?: string | null; + message?: { role?: string; stopReason?: string }; + customType?: string; + }>; + byId?: Map; + leafId?: string | null; + _rewriteFile?: () => void; + } + | undefined; + const fileEntries = sessionManager?.fileEntries; + const byId = sessionManager?.byId; + if (!fileEntries || !byId) { + return; + } + + let changed = false; + while (fileEntries.length > 1) { + const last = fileEntries.at(-1); + if (!last || last.type === "session") { + break; + } + const isYieldAbortAssistant = + last.type === "message" && + last.message?.role === "assistant" && + last.message?.stopReason === "aborted"; + const isYieldInterruptMessage = + last.type === "custom_message" && last.customType === SESSIONS_YIELD_INTERRUPT_CUSTOM_TYPE; + if (!isYieldAbortAssistant && !isYieldInterruptMessage) { + break; + } + fileEntries.pop(); + if (last.id) { + byId.delete(last.id); + } + sessionManager.leafId = last.parentId ?? 
null; + changed = true; + } + if (changed) { + sessionManager._rewriteFile?.(); + } +} + export function isOllamaCompatProvider(model: { provider?: string; baseUrl?: string; @@ -228,33 +411,85 @@ export function wrapOllamaCompatNumCtx(baseFn: StreamFn | undefined, numCtx: num return (model, context, options) => streamFn(model, context, { ...options, - onPayload: (payload: unknown, payloadModel) => { + onPayload: (payload: unknown) => { if (!payload || typeof payload !== "object") { - return options?.onPayload?.(payload, payloadModel); + return options?.onPayload?.(payload, model); } const payloadRecord = payload as Record; if (!payloadRecord.options || typeof payloadRecord.options !== "object") { payloadRecord.options = {}; } (payloadRecord.options as Record).num_ctx = numCtx; - return options?.onPayload?.(payload, payloadModel); + return options?.onPayload?.(payload, model); }, }); } -function normalizeToolCallNameForDispatch(rawName: string, allowedToolNames?: Set): string { - const trimmed = rawName.trim(); - if (!trimmed) { - // Keep whitespace-only placeholders unchanged so they do not collapse to - // empty names (which can later surface as toolName="" loops). 
+function resolveCaseInsensitiveAllowedToolName( + rawName: string, + allowedToolNames?: Set, +): string | null { + if (!allowedToolNames || allowedToolNames.size === 0) { + return null; + } + const folded = rawName.toLowerCase(); + let caseInsensitiveMatch: string | null = null; + for (const name of allowedToolNames) { + if (name.toLowerCase() !== folded) { + continue; + } + if (caseInsensitiveMatch && caseInsensitiveMatch !== name) { + return null; + } + caseInsensitiveMatch = name; + } + return caseInsensitiveMatch; +} + +function resolveExactAllowedToolName( + rawName: string, + allowedToolNames?: Set, +): string | null { + if (!allowedToolNames || allowedToolNames.size === 0) { + return null; + } + if (allowedToolNames.has(rawName)) { return rawName; } - if (!allowedToolNames || allowedToolNames.size === 0) { - return trimmed; + const normalized = normalizeToolName(rawName); + if (allowedToolNames.has(normalized)) { + return normalized; + } + return ( + resolveCaseInsensitiveAllowedToolName(rawName, allowedToolNames) ?? 
+ resolveCaseInsensitiveAllowedToolName(normalized, allowedToolNames) + ); +} + +function buildStructuredToolNameCandidates(rawName: string): string[] { + const trimmed = rawName.trim(); + if (!trimmed) { + return []; } - const candidateNames = new Set([trimmed, normalizeToolName(trimmed)]); + const candidates: string[] = []; + const seen = new Set(); + const addCandidate = (value: string) => { + const candidate = value.trim(); + if (!candidate || seen.has(candidate)) { + return; + } + seen.add(candidate); + candidates.push(candidate); + }; + + addCandidate(trimmed); + addCandidate(normalizeToolName(trimmed)); + const normalizedDelimiter = trimmed.replace(/\//g, "."); + addCandidate(normalizedDelimiter); + addCandidate(normalizeToolName(normalizedDelimiter)); + const segments = normalizedDelimiter .split(".") .map((segment) => segment.trim()) @@ -262,11 +497,23 @@ function normalizeToolCallNameForDispatch(rawName: string, allowedToolNames?: Se if (segments.length > 1) { for (let index = 1; index < segments.length; index += 1) { const suffix = segments.slice(index).join("."); - candidateNames.add(suffix); - candidateNames.add(normalizeToolName(suffix)); + addCandidate(suffix); + addCandidate(normalizeToolName(suffix)); } } + return candidates; +} + +function resolveStructuredAllowedToolName( + rawName: string, + allowedToolNames?: Set, +): string | null { + if (!allowedToolNames || allowedToolNames.size === 0) { + return null; + } + + const candidateNames = buildStructuredToolNameCandidates(rawName); for (const candidate of candidateNames) { if (allowedToolNames.has(candidate)) { return candidate; @@ -274,23 +521,116 @@ function normalizeToolCallNameForDispatch(rawName: string, allowedToolNames?: Se } for (const candidate of candidateNames) { - const folded = candidate.toLowerCase(); - let caseInsensitiveMatch: string | null = null; - for (const name of allowedToolNames) { - if (name.toLowerCase() !== folded) { - continue; - } - if (caseInsensitiveMatch && 
caseInsensitiveMatch !== name) { - return candidate; - } - caseInsensitiveMatch = name; - } + const caseInsensitiveMatch = resolveCaseInsensitiveAllowedToolName(candidate, allowedToolNames); if (caseInsensitiveMatch) { return caseInsensitiveMatch; } } - return trimmed; + return null; +} + +function inferToolNameFromToolCallId( + rawId: string | undefined, + allowedToolNames?: Set, +): string | null { + if (!rawId || !allowedToolNames || allowedToolNames.size === 0) { + return null; + } + const id = rawId.trim(); + if (!id) { + return null; + } + + const candidateTokens = new Set(); + const addToken = (value: string) => { + const trimmed = value.trim(); + if (!trimmed) { + return; + } + candidateTokens.add(trimmed); + candidateTokens.add(trimmed.replace(/[:._/-]\d+$/, "")); + candidateTokens.add(trimmed.replace(/\d+$/, "")); + + const normalizedDelimiter = trimmed.replace(/\//g, "."); + candidateTokens.add(normalizedDelimiter); + candidateTokens.add(normalizedDelimiter.replace(/[:._-]\d+$/, "")); + candidateTokens.add(normalizedDelimiter.replace(/\d+$/, "")); + + for (const prefixPattern of [/^functions?[._-]?/i, /^tools?[._-]?/i]) { + const stripped = normalizedDelimiter.replace(prefixPattern, ""); + if (stripped !== normalizedDelimiter) { + candidateTokens.add(stripped); + candidateTokens.add(stripped.replace(/[:._-]\d+$/, "")); + candidateTokens.add(stripped.replace(/\d+$/, "")); + } + } + }; + + const preColon = id.split(":")[0] ?? 
id; + for (const seed of [id, preColon]) { + addToken(seed); + } + + let singleMatch: string | null = null; + for (const candidate of candidateTokens) { + const matched = resolveStructuredAllowedToolName(candidate, allowedToolNames); + if (!matched) { + continue; + } + if (singleMatch && singleMatch !== matched) { + return null; + } + singleMatch = matched; + } + + return singleMatch; +} + +function looksLikeMalformedToolNameCounter(rawName: string): boolean { + const normalizedDelimiter = rawName.trim().replace(/\//g, "."); + return ( + /^(?:functions?|tools?)[._-]?/i.test(normalizedDelimiter) && + /(?:[:._-]\d+|\d+)$/.test(normalizedDelimiter) + ); +} + +function normalizeToolCallNameForDispatch( + rawName: string, + allowedToolNames?: Set, + rawToolCallId?: string, +): string { + const trimmed = rawName.trim(); + if (!trimmed) { + // Keep whitespace-only placeholders unchanged unless we can safely infer + // a canonical name from toolCallId and allowlist. + return inferToolNameFromToolCallId(rawToolCallId, allowedToolNames) ?? rawName; + } + if (!allowedToolNames || allowedToolNames.size === 0) { + return trimmed; + } + + const exact = resolveExactAllowedToolName(trimmed, allowedToolNames); + if (exact) { + return exact; + } + // Some providers put malformed toolCallId-like strings into `name` + // itself (for example `functionsread3`). Recover conservatively from the + // name token before consulting the separate id so explicit names like + // `someOtherTool` are preserved. + const inferredFromName = inferToolNameFromToolCallId(trimmed, allowedToolNames); + if (inferredFromName) { + return inferredFromName; + } + + // If the explicit name looks like a provider-mangled tool-call id with a + // numeric suffix, fail closed when inference is ambiguous instead of routing + // to whichever structured candidate happens to match. 
+ if (looksLikeMalformedToolNameCounter(trimmed)) { + return trimmed; + } + + return resolveStructuredAllowedToolName(trimmed, allowedToolNames) ?? trimmed; } function isToolCallBlockType(type: unknown): boolean { @@ -366,13 +706,21 @@ function trimWhitespaceFromToolCallNamesInMessage( if (!block || typeof block !== "object") { continue; } - const typedBlock = block as { type?: unknown; name?: unknown }; - if (!isToolCallBlockType(typedBlock.type) || typeof typedBlock.name !== "string") { + const typedBlock = block as { type?: unknown; name?: unknown; id?: unknown }; + if (!isToolCallBlockType(typedBlock.type)) { continue; } - const normalized = normalizeToolCallNameForDispatch(typedBlock.name, allowedToolNames); - if (normalized !== typedBlock.name) { - typedBlock.name = normalized; + const rawId = typeof typedBlock.id === "string" ? typedBlock.id : undefined; + if (typeof typedBlock.name === "string") { + const normalized = normalizeToolCallNameForDispatch(typedBlock.name, allowedToolNames, rawId); + if (normalized !== typedBlock.name) { + typedBlock.name = normalized; + } + continue; + } + const inferred = inferToolNameFromToolCallId(rawId, allowedToolNames); + if (inferred) { + typedBlock.name = inferred; } } normalizeToolCallIdsInMessage(message); @@ -433,6 +781,281 @@ export function wrapStreamFnTrimToolCallNames( }; } +function extractBalancedJsonPrefix(raw: string): string | null { + let start = 0; + while (start < raw.length && /\s/.test(raw[start] ?? 
"")) { + start += 1; + } + const startChar = raw[start]; + if (startChar !== "{" && startChar !== "[") { + return null; + } + + let depth = 0; + let inString = false; + let escaped = false; + for (let i = start; i < raw.length; i += 1) { + const char = raw[i]; + if (char === undefined) { + break; + } + if (inString) { + if (escaped) { + escaped = false; + } else if (char === "\\") { + escaped = true; + } else if (char === '"') { + inString = false; + } + continue; + } + if (char === '"') { + inString = true; + continue; + } + if (char === "{" || char === "[") { + depth += 1; + continue; + } + if (char === "}" || char === "]") { + depth -= 1; + if (depth === 0) { + return raw.slice(start, i + 1); + } + } + } + return null; +} + +const MAX_TOOLCALL_REPAIR_BUFFER_CHARS = 64_000; +const MAX_TOOLCALL_REPAIR_TRAILING_CHARS = 3; +const TOOLCALL_REPAIR_ALLOWED_TRAILING_RE = /^[^\s{}[\]":,\\]{1,3}$/; + +function shouldAttemptMalformedToolCallRepair(partialJson: string, delta: string): boolean { + if (/[}\]]/.test(delta)) { + return true; + } + const trimmedDelta = delta.trim(); + return ( + trimmedDelta.length > 0 && + trimmedDelta.length <= MAX_TOOLCALL_REPAIR_TRAILING_CHARS && + /[}\]]/.test(partialJson) + ); +} + +type ToolCallArgumentRepair = { + args: Record; + trailingSuffix: string; +}; + +function tryParseMalformedToolCallArguments(raw: string): ToolCallArgumentRepair | undefined { + if (!raw.trim()) { + return undefined; + } + try { + JSON.parse(raw); + return undefined; + } catch { + const jsonPrefix = extractBalancedJsonPrefix(raw); + if (!jsonPrefix) { + return undefined; + } + const suffix = raw.slice(raw.indexOf(jsonPrefix) + jsonPrefix.length).trim(); + if ( + suffix.length === 0 || + suffix.length > MAX_TOOLCALL_REPAIR_TRAILING_CHARS || + !TOOLCALL_REPAIR_ALLOWED_TRAILING_RE.test(suffix) + ) { + return undefined; + } + try { + const parsed = JSON.parse(jsonPrefix) as unknown; + return parsed && typeof parsed === "object" && !Array.isArray(parsed) + ? 
{ args: parsed as Record, trailingSuffix: suffix } + : undefined; + } catch { + return undefined; + } + } +} + +function repairToolCallArgumentsInMessage( + message: unknown, + contentIndex: number, + repairedArgs: Record, +): void { + if (!message || typeof message !== "object") { + return; + } + const content = (message as { content?: unknown }).content; + if (!Array.isArray(content)) { + return; + } + const block = content[contentIndex]; + if (!block || typeof block !== "object") { + return; + } + const typedBlock = block as { type?: unknown; arguments?: unknown }; + if (!isToolCallBlockType(typedBlock.type)) { + return; + } + typedBlock.arguments = repairedArgs; +} + +function clearToolCallArgumentsInMessage(message: unknown, contentIndex: number): void { + if (!message || typeof message !== "object") { + return; + } + const content = (message as { content?: unknown }).content; + if (!Array.isArray(content)) { + return; + } + const block = content[contentIndex]; + if (!block || typeof block !== "object") { + return; + } + const typedBlock = block as { type?: unknown; arguments?: unknown }; + if (!isToolCallBlockType(typedBlock.type)) { + return; + } + typedBlock.arguments = {}; +} + +function repairMalformedToolCallArgumentsInMessage( + message: unknown, + repairedArgsByIndex: Map>, +): void { + if (!message || typeof message !== "object") { + return; + } + const content = (message as { content?: unknown }).content; + if (!Array.isArray(content)) { + return; + } + for (const [index, repairedArgs] of repairedArgsByIndex.entries()) { + repairToolCallArgumentsInMessage(message, index, repairedArgs); + } +} + +function wrapStreamRepairMalformedToolCallArguments( + stream: ReturnType, +): ReturnType { + const partialJsonByIndex = new Map(); + const repairedArgsByIndex = new Map>(); + const disabledIndices = new Set(); + const loggedRepairIndices = new Set(); + const originalResult = stream.result.bind(stream); + stream.result = async () => { + const message = await 
originalResult(); + repairMalformedToolCallArgumentsInMessage(message, repairedArgsByIndex); + partialJsonByIndex.clear(); + repairedArgsByIndex.clear(); + disabledIndices.clear(); + loggedRepairIndices.clear(); + return message; + }; + + const originalAsyncIterator = stream[Symbol.asyncIterator].bind(stream); + (stream as { [Symbol.asyncIterator]: typeof originalAsyncIterator })[Symbol.asyncIterator] = + function () { + const iterator = originalAsyncIterator(); + return { + async next() { + const result = await iterator.next(); + if (!result.done && result.value && typeof result.value === "object") { + const event = result.value as { + type?: unknown; + contentIndex?: unknown; + delta?: unknown; + partial?: unknown; + message?: unknown; + toolCall?: unknown; + }; + if ( + typeof event.contentIndex === "number" && + Number.isInteger(event.contentIndex) && + event.type === "toolcall_delta" && + typeof event.delta === "string" + ) { + if (disabledIndices.has(event.contentIndex)) { + return result; + } + const nextPartialJson = + (partialJsonByIndex.get(event.contentIndex) ?? 
"") + event.delta; + if (nextPartialJson.length > MAX_TOOLCALL_REPAIR_BUFFER_CHARS) { + partialJsonByIndex.delete(event.contentIndex); + repairedArgsByIndex.delete(event.contentIndex); + disabledIndices.add(event.contentIndex); + return result; + } + partialJsonByIndex.set(event.contentIndex, nextPartialJson); + if (shouldAttemptMalformedToolCallRepair(nextPartialJson, event.delta)) { + const repair = tryParseMalformedToolCallArguments(nextPartialJson); + if (repair) { + repairedArgsByIndex.set(event.contentIndex, repair.args); + repairToolCallArgumentsInMessage(event.partial, event.contentIndex, repair.args); + repairToolCallArgumentsInMessage(event.message, event.contentIndex, repair.args); + if (!loggedRepairIndices.has(event.contentIndex)) { + loggedRepairIndices.add(event.contentIndex); + log.warn( + `repairing kimi-coding tool call arguments after ${repair.trailingSuffix.length} trailing chars`, + ); + } + } else { + repairedArgsByIndex.delete(event.contentIndex); + clearToolCallArgumentsInMessage(event.partial, event.contentIndex); + clearToolCallArgumentsInMessage(event.message, event.contentIndex); + } + } + } + if ( + typeof event.contentIndex === "number" && + Number.isInteger(event.contentIndex) && + event.type === "toolcall_end" + ) { + const repairedArgs = repairedArgsByIndex.get(event.contentIndex); + if (repairedArgs) { + if (event.toolCall && typeof event.toolCall === "object") { + (event.toolCall as { arguments?: unknown }).arguments = repairedArgs; + } + repairToolCallArgumentsInMessage(event.partial, event.contentIndex, repairedArgs); + repairToolCallArgumentsInMessage(event.message, event.contentIndex, repairedArgs); + } + partialJsonByIndex.delete(event.contentIndex); + disabledIndices.delete(event.contentIndex); + loggedRepairIndices.delete(event.contentIndex); + } + } + return result; + }, + async return(value?: unknown) { + return iterator.return?.(value) ?? 
{ done: true as const, value: undefined }; + }, + async throw(error?: unknown) { + return iterator.throw?.(error) ?? { done: true as const, value: undefined }; + }, + }; + }; + + return stream; +} + +export function wrapStreamFnRepairMalformedToolCallArguments(baseFn: StreamFn): StreamFn { + return (model, context, options) => { + const maybeStream = baseFn(model, context, options); + if (maybeStream && typeof maybeStream === "object" && "then" in maybeStream) { + return Promise.resolve(maybeStream).then((stream) => + wrapStreamRepairMalformedToolCallArguments(stream), + ); + } + return wrapStreamRepairMalformedToolCallArguments(maybeStream); + }; +} + +function shouldRepairMalformedAnthropicToolCallArguments(provider?: string): boolean { + return normalizeProviderId(provider ?? "") === "kimi-coding"; +} + // --------------------------------------------------------------------------- // xAI / Grok: decode HTML entities in tool call arguments // --------------------------------------------------------------------------- @@ -749,6 +1372,9 @@ export async function runEmbeddedAttempt( const resolvedWorkspace = resolveUserPath(params.workspaceDir); const prevCwd = process.cwd(); const runAbortController = new AbortController(); + // Proxy bootstrap must happen before timeout tuning so the timeouts wrap the + // active EnvHttpProxyAgent instead of being replaced by a bare proxy dispatcher. 
+ ensureGlobalUndiciEnvProxyDispatcher(); ensureGlobalUndiciStreamTimeouts(); log.debug( @@ -840,6 +1466,13 @@ export async function runEmbeddedAttempt( config: params.config, sessionAgentId, }); + // Track sessions_yield tool invocation (callback pattern, like clientToolCallDetected) + let yieldDetected = false; + let yieldMessage: string | null = null; + // Late-binding reference so onYield can abort the session (declared after tool creation) + let abortSessionForYield: (() => void) | null = null; + let queueYieldInterruptForSession: (() => void) | null = null; + let yieldAbortSettled: Promise | null = null; // Check if the model supports native image input const modelHasVision = params.model.input?.includes("image") ?? false; const toolsRaw = params.disableTools @@ -869,6 +1502,10 @@ export async function runEmbeddedAttempt( runId: params.runId, agentDir, workspaceDir: effectiveWorkspace, + // When sandboxing uses a copied workspace (`ro` or `none`), effectiveWorkspace points + // at the sandbox copy. Spawned subagents should inherit the real workspace instead. + spawnWorkspaceDir: + sandbox?.enabled && sandbox.workspaceAccess !== "rw" ? resolvedWorkspace : undefined, config: params.config, abortSignal: runAbortController.signal, modelProvider: params.model.provider, @@ -884,6 +1521,13 @@ export async function runEmbeddedAttempt( requireExplicitMessageTarget: params.requireExplicitMessageTarget ?? 
isSubagentSessionKey(params.sessionKey), disableMessageTool: params.disableMessageTool, + onYield: (message) => { + yieldDetected = true; + yieldMessage = message; + queueYieldInterruptForSession?.(); + runAbortController.abort("sessions_yield"); + abortSessionForYield?.(); + }, }); const toolsEnabled = supportsModelTools(params.model); const tools = sanitizeToolsForGoogle({ @@ -1097,6 +1741,7 @@ export async function runEmbeddedAttempt( try { await params.contextEngine.bootstrap({ sessionId: params.sessionId, + sessionKey: params.sessionKey, sessionFile: params.sessionFile, }); } catch (bootstrapErr) { @@ -1194,6 +1839,12 @@ export async function runEmbeddedAttempt( throw new Error("Embedded agent session missing"); } const activeSession = session; + abortSessionForYield = () => { + yieldAbortSettled = Promise.resolve(activeSession.abort()); + }; + queueYieldInterruptForSession = () => { + queueSessionsYieldInterruptMessage(activeSession); + }; removeToolResultContextGuard = installToolResultContextGuard({ agent: activeSession.agent, contextWindowTokens: Math.max( @@ -1279,7 +1930,10 @@ export async function runEmbeddedAttempt( params.config, params.provider, params.modelId, - params.streamParams, + { + ...params.streamParams, + fastMode: params.fastMode, + }, params.thinkLevel, sessionAgentId, ); @@ -1293,9 +1947,10 @@ export async function runEmbeddedAttempt( activeSession.agent.streamFn = cacheTrace.wrapStreamFn(activeSession.agent.streamFn); } - // Copilot/Claude can reject persisted `thinking` blocks (e.g. thinkingSignature:"reasoning_text") - // on *any* follow-up provider call (including tool continuations). Wrap the stream function - // so every outbound request sees sanitized messages. + // Anthropic Claude endpoints can reject replayed `thinking` blocks + // (e.g. thinkingSignature:"reasoning_text") on any follow-up provider + // call, including tool continuations. Wrap the stream function so every + // outbound request sees sanitized messages. 
if (transcriptPolicy.dropThinkingBlocks) { const inner = activeSession.agent.streamFn; activeSession.agent.streamFn = (model, context, options) => { @@ -1365,6 +2020,17 @@ export async function runEmbeddedAttempt( }; } + const innerStreamFn = activeSession.agent.streamFn; + activeSession.agent.streamFn = (model, context, options) => { + const signal = runAbortController.signal as AbortSignal & { reason?: unknown }; + if (yieldDetected && signal.aborted && signal.reason === "sessions_yield") { + return createYieldAbortedResponse(model) as unknown as Awaited< + ReturnType + >; + } + return innerStreamFn(model, context, options); + }; + // Some models emit tool names with surrounding whitespace (e.g. " read "). // pi-agent-core dispatches tool calls with exact string matching, so normalize // names on the live response stream before tool execution. @@ -1373,6 +2039,15 @@ export async function runEmbeddedAttempt( allowedToolNames, ); + if ( + params.model.api === "anthropic-messages" && + shouldRepairMalformedAnthropicToolCallArguments(params.provider) + ) { + activeSession.agent.streamFn = wrapStreamFnRepairMalformedToolCallArguments( + activeSession.agent.streamFn, + ); + } + if (isXaiProvider(params.provider, params.modelId)) { activeSession.agent.streamFn = wrapStreamFnDecodeXaiToolCallArguments( activeSession.agent.streamFn, @@ -1423,6 +2098,7 @@ export async function runEmbeddedAttempt( try { const assembled = await params.contextEngine.assemble({ sessionId: params.sessionId, + sessionKey: params.sessionKey, messages: activeSession.messages, tokenBudget: params.contextTokenBudget, }); @@ -1456,6 +2132,7 @@ export async function runEmbeddedAttempt( } let aborted = Boolean(params.abortSignal?.aborted); + let yieldAborted = false; let timedOut = false; let timedOutDuringCompaction = false; const getAbortReason = (signal: AbortSignal): unknown => @@ -1768,6 +2445,8 @@ export async function runEmbeddedAttempt( sessionId: params.sessionId, workspaceDir: 
params.workspaceDir, messageProvider: params.messageProvider ?? undefined, + trigger: params.trigger, + channelId: params.messageChannel ?? params.messageProvider ?? undefined, }, ) .catch((err) => { @@ -1783,8 +2462,29 @@ export async function runEmbeddedAttempt( await abortable(activeSession.prompt(effectivePrompt)); } } catch (err) { - promptError = err; - promptErrorSource = "prompt"; + // Yield-triggered abort is intentional — treat as clean stop, not error. + // Check the abort reason to distinguish from external aborts (timeout, user cancel) + // that may race after yieldDetected is set. + yieldAborted = + yieldDetected && + isRunnerAbortError(err) && + err instanceof Error && + err.cause === "sessions_yield"; + if (yieldAborted) { + aborted = false; + // Ensure the session abort has fully settled before proceeding. + if (yieldAbortSettled) { + // eslint-disable-next-line @typescript-eslint/await-thenable -- abort() returns Promise per AgentSession.d.ts + await yieldAbortSettled; + } + stripSessionsYieldArtifacts(activeSession); + if (yieldMessage) { + await persistSessionsYieldContextMessage(activeSession, yieldMessage); + } + } else { + promptError = err; + promptErrorSource = "prompt"; + } } finally { log.debug( `embedded run prompt end: runId=${params.runId} sessionId=${params.sessionId} durationMs=${Date.now() - promptStartedAt}`, @@ -1811,12 +2511,16 @@ export async function runEmbeddedAttempt( await params.onBlockReplyFlush(); } - const compactionRetryWait = await waitForCompactionRetryWithAggregateTimeout({ - waitForCompactionRetry, - abortable, - aggregateTimeoutMs: COMPACTION_RETRY_AGGREGATE_TIMEOUT_MS, - isCompactionStillInFlight: isCompactionInFlight, - }); + // Skip compaction wait when yield aborted the run — the signal is + // already tripped and abortable() would immediately reject. + const compactionRetryWait = yieldAborted + ? 
{ timedOut: false } + : await waitForCompactionRetryWithAggregateTimeout({ + waitForCompactionRetry, + abortable, + aggregateTimeoutMs: COMPACTION_RETRY_AGGREGATE_TIMEOUT_MS, + isCompactionStillInFlight: isCompactionInFlight, + }); if (compactionRetryWait.timedOut) { timedOutDuringCompaction = true; if (!isProbeSession) { @@ -1842,14 +2546,19 @@ export async function runEmbeddedAttempt( } } + // Check if ANY compaction occurred during the entire attempt (prompt + retry). + // Using a cumulative count (> 0) instead of a delta check avoids missing + // compactions that complete during activeSession.prompt() before the delta + // baseline is sampled. const compactionOccurredThisAttempt = getCompactionCount() > 0; - // Append cache-TTL timestamp AFTER prompt + compaction retry completes. // Previously this was before the prompt, which caused a custom entry to be // inserted between compaction and the next prompt — breaking the // prepareCompaction() guard that checks the last entry type, leading to // double-compaction. See: https://github.com/openclaw/openclaw/issues/9282 // Skip when timed out during compaction — session state may be inconsistent. + // Also skip when compaction ran this attempt — appending a custom entry + // after compaction would break the guard again. 
See: #28491 if (!timedOutDuringCompaction && !compactionOccurredThisAttempt) { const shouldTrackCacheTtl = params.config?.agents?.defaults?.contextPruning?.mode === "cache-ttl" && @@ -1910,6 +2619,7 @@ export async function runEmbeddedAttempt( try { await params.contextEngine.afterTurn({ sessionId: sessionIdUsed, + sessionKey: params.sessionKey, sessionFile: params.sessionFile, messages: messagesSnapshot, prePromptMessageCount, @@ -1927,6 +2637,7 @@ export async function runEmbeddedAttempt( try { await params.contextEngine.ingestBatch({ sessionId: sessionIdUsed, + sessionKey: params.sessionKey, messages: newMessages, }); } catch (ingestErr) { @@ -1937,6 +2648,7 @@ export async function runEmbeddedAttempt( try { await params.contextEngine.ingest({ sessionId: sessionIdUsed, + sessionKey: params.sessionKey, message: msg, }); } catch (ingestErr) { @@ -1976,6 +2688,8 @@ export async function runEmbeddedAttempt( sessionId: params.sessionId, workspaceDir: params.workspaceDir, messageProvider: params.messageProvider ?? undefined, + trigger: params.trigger, + channelId: params.messageChannel ?? params.messageProvider ?? undefined, }, ) .catch((err) => { @@ -2036,6 +2750,8 @@ export async function runEmbeddedAttempt( sessionId: params.sessionId, workspaceDir: params.workspaceDir, messageProvider: params.messageProvider ?? undefined, + trigger: params.trigger, + channelId: params.messageChannel ?? params.messageProvider ?? undefined, }, ) .catch((err) => { @@ -2069,6 +2785,7 @@ export async function runEmbeddedAttempt( compactionCount: getCompactionCount(), // Client tool call detected (OpenResponses hosted tools) clientToolCall: clientToolCallDetected ?? undefined, + yieldDetected: yieldDetected || undefined, }; } finally { // Always tear down the session (and release the lock) before we leave this attempt. 
diff --git a/src/agents/pi-embedded-runner/run/compaction-retry-aggregate-timeout.test.ts b/src/agents/pi-embedded-runner/run/compaction-retry-aggregate-timeout.test.ts index 9a38127c84a..5e1088c3155 100644 --- a/src/agents/pi-embedded-runner/run/compaction-retry-aggregate-timeout.test.ts +++ b/src/agents/pi-embedded-runner/run/compaction-retry-aggregate-timeout.test.ts @@ -1,10 +1,28 @@ import { describe, expect, it, vi } from "vitest"; import { waitForCompactionRetryWithAggregateTimeout } from "./compaction-retry-aggregate-timeout.js"; +async function withFakeTimers(run: () => Promise) { + vi.useFakeTimers(); + try { + await run(); + } finally { + await vi.runOnlyPendingTimersAsync(); + vi.useRealTimers(); + } +} + +function expectClearedTimeoutState(onTimeout: ReturnType, timedOut: boolean) { + if (timedOut) { + expect(onTimeout).toHaveBeenCalledTimes(1); + } else { + expect(onTimeout).not.toHaveBeenCalled(); + } + expect(vi.getTimerCount()).toBe(0); +} + describe("waitForCompactionRetryWithAggregateTimeout", () => { it("times out and fires callback when compaction retry never resolves", async () => { - vi.useFakeTimers(); - try { + await withFakeTimers(async () => { const onTimeout = vi.fn(); const waitForCompactionRetry = vi.fn(async () => await new Promise(() => {})); @@ -19,17 +37,12 @@ describe("waitForCompactionRetryWithAggregateTimeout", () => { const result = await resultPromise; expect(result.timedOut).toBe(true); - expect(onTimeout).toHaveBeenCalledTimes(1); - expect(vi.getTimerCount()).toBe(0); - } finally { - await vi.runOnlyPendingTimersAsync(); - vi.useRealTimers(); - } + expectClearedTimeoutState(onTimeout, true); + }); }); it("keeps waiting while compaction remains in flight", async () => { - vi.useFakeTimers(); - try { + await withFakeTimers(async () => { const onTimeout = vi.fn(); let compactionInFlight = true; const waitForCompactionRetry = vi.fn( @@ -54,17 +67,12 @@ describe("waitForCompactionRetryWithAggregateTimeout", () => { const result = 
await resultPromise; expect(result.timedOut).toBe(false); - expect(onTimeout).not.toHaveBeenCalled(); - expect(vi.getTimerCount()).toBe(0); - } finally { - await vi.runOnlyPendingTimersAsync(); - vi.useRealTimers(); - } + expectClearedTimeoutState(onTimeout, false); + }); }); it("times out after an idle timeout window", async () => { - vi.useFakeTimers(); - try { + await withFakeTimers(async () => { const onTimeout = vi.fn(); let compactionInFlight = true; const waitForCompactionRetry = vi.fn(async () => await new Promise(() => {})); @@ -84,17 +92,12 @@ describe("waitForCompactionRetryWithAggregateTimeout", () => { const result = await resultPromise; expect(result.timedOut).toBe(true); - expect(onTimeout).toHaveBeenCalledTimes(1); - expect(vi.getTimerCount()).toBe(0); - } finally { - await vi.runOnlyPendingTimersAsync(); - vi.useRealTimers(); - } + expectClearedTimeoutState(onTimeout, true); + }); }); it("does not time out when compaction retry resolves", async () => { - vi.useFakeTimers(); - try { + await withFakeTimers(async () => { const onTimeout = vi.fn(); const waitForCompactionRetry = vi.fn(async () => {}); @@ -106,17 +109,12 @@ describe("waitForCompactionRetryWithAggregateTimeout", () => { }); expect(result.timedOut).toBe(false); - expect(onTimeout).not.toHaveBeenCalled(); - expect(vi.getTimerCount()).toBe(0); - } finally { - await vi.runOnlyPendingTimersAsync(); - vi.useRealTimers(); - } + expectClearedTimeoutState(onTimeout, false); + }); }); it("propagates abort errors from abortable and clears timer", async () => { - vi.useFakeTimers(); - try { + await withFakeTimers(async () => { const abortError = new Error("aborted"); abortError.name = "AbortError"; const onTimeout = vi.fn(); @@ -133,11 +131,7 @@ describe("waitForCompactionRetryWithAggregateTimeout", () => { }), ).rejects.toThrow("aborted"); - expect(onTimeout).not.toHaveBeenCalled(); - expect(vi.getTimerCount()).toBe(0); - } finally { - await vi.runOnlyPendingTimersAsync(); - vi.useRealTimers(); - } 
+ expectClearedTimeoutState(onTimeout, false); + }); }); }); diff --git a/src/agents/pi-embedded-runner/run/failover-observation.test.ts b/src/agents/pi-embedded-runner/run/failover-observation.test.ts new file mode 100644 index 00000000000..763540f9ca7 --- /dev/null +++ b/src/agents/pi-embedded-runner/run/failover-observation.test.ts @@ -0,0 +1,48 @@ +import { describe, expect, it } from "vitest"; +import { normalizeFailoverDecisionObservationBase } from "./failover-observation.js"; + +describe("normalizeFailoverDecisionObservationBase", () => { + it("fills timeout observation reasons for deadline timeouts without provider error text", () => { + expect( + normalizeFailoverDecisionObservationBase({ + stage: "assistant", + runId: "run:timeout", + rawError: "", + failoverReason: null, + profileFailureReason: null, + provider: "openai", + model: "mock-1", + profileId: "openai:p1", + fallbackConfigured: false, + timedOut: true, + aborted: false, + }), + ).toMatchObject({ + failoverReason: "timeout", + profileFailureReason: "timeout", + timedOut: true, + }); + }); + + it("preserves explicit failover reasons", () => { + expect( + normalizeFailoverDecisionObservationBase({ + stage: "assistant", + runId: "run:overloaded", + rawError: '{"error":{"type":"overloaded_error"}}', + failoverReason: "overloaded", + profileFailureReason: "overloaded", + provider: "openai", + model: "mock-1", + profileId: "openai:p1", + fallbackConfigured: true, + timedOut: true, + aborted: false, + }), + ).toMatchObject({ + failoverReason: "overloaded", + profileFailureReason: "overloaded", + timedOut: true, + }); + }); +}); diff --git a/src/agents/pi-embedded-runner/run/failover-observation.ts b/src/agents/pi-embedded-runner/run/failover-observation.ts new file mode 100644 index 00000000000..9b915535314 --- /dev/null +++ b/src/agents/pi-embedded-runner/run/failover-observation.ts @@ -0,0 +1,76 @@ +import { redactIdentifier } from "../../../logging/redact-identifier.js"; +import type { 
AuthProfileFailureReason } from "../../auth-profiles.js"; +import { + buildApiErrorObservationFields, + sanitizeForConsole, +} from "../../pi-embedded-error-observation.js"; +import type { FailoverReason } from "../../pi-embedded-helpers.js"; +import { log } from "../logger.js"; + +export type FailoverDecisionLoggerInput = { + stage: "prompt" | "assistant"; + decision: "rotate_profile" | "fallback_model" | "surface_error"; + runId?: string; + rawError?: string; + failoverReason: FailoverReason | null; + profileFailureReason?: AuthProfileFailureReason | null; + provider: string; + model: string; + profileId?: string; + fallbackConfigured: boolean; + timedOut?: boolean; + aborted?: boolean; + status?: number; +}; + +export type FailoverDecisionLoggerBase = Omit; + +export function normalizeFailoverDecisionObservationBase( + base: FailoverDecisionLoggerBase, +): FailoverDecisionLoggerBase { + return { + ...base, + failoverReason: base.failoverReason ?? (base.timedOut ? "timeout" : null), + profileFailureReason: base.profileFailureReason ?? (base.timedOut ? "timeout" : null), + }; +} + +export function createFailoverDecisionLogger( + base: FailoverDecisionLoggerBase, +): ( + decision: FailoverDecisionLoggerInput["decision"], + extra?: Pick, +) => void { + const normalizedBase = normalizeFailoverDecisionObservationBase(base); + const safeProfileId = normalizedBase.profileId + ? redactIdentifier(normalizedBase.profileId, { len: 12 }) + : undefined; + const safeRunId = sanitizeForConsole(normalizedBase.runId) ?? "-"; + const safeProvider = sanitizeForConsole(normalizedBase.provider) ?? "-"; + const safeModel = sanitizeForConsole(normalizedBase.model) ?? "-"; + const profileText = safeProfileId ?? "-"; + const reasonText = normalizedBase.failoverReason ?? 
"none"; + return (decision, extra) => { + const observedError = buildApiErrorObservationFields(normalizedBase.rawError); + log.warn("embedded run failover decision", { + event: "embedded_run_failover_decision", + tags: ["error_handling", "failover", normalizedBase.stage, decision], + runId: normalizedBase.runId, + stage: normalizedBase.stage, + decision, + failoverReason: normalizedBase.failoverReason, + profileFailureReason: normalizedBase.profileFailureReason, + provider: normalizedBase.provider, + model: normalizedBase.model, + profileId: safeProfileId, + fallbackConfigured: normalizedBase.fallbackConfigured, + timedOut: normalizedBase.timedOut, + aborted: normalizedBase.aborted, + status: extra?.status, + ...observedError, + consoleMessage: + `embedded run failover decision: runId=${safeRunId} stage=${normalizedBase.stage} decision=${decision} ` + + `reason=${reasonText} provider=${safeProvider}/${safeModel} profile=${profileText}`, + }); + }; +} diff --git a/src/agents/pi-embedded-runner/run/history-image-prune.test.ts b/src/agents/pi-embedded-runner/run/history-image-prune.test.ts index bf4b27f5beb..dbed0335435 100644 --- a/src/agents/pi-embedded-runner/run/history-image-prune.test.ts +++ b/src/agents/pi-embedded-runner/run/history-image-prune.test.ts @@ -49,6 +49,30 @@ describe("pruneProcessedHistoryImages", () => { expect(first.content[1]).toMatchObject({ type: "image", data: "abc" }); }); + it("prunes image blocks from toolResult messages that already have assistant replies", () => { + const messages: AgentMessage[] = [ + castAgentMessage({ + role: "toolResult", + toolName: "read", + content: [{ type: "text", text: "screenshot bytes" }, { ...image }], + }), + castAgentMessage({ + role: "assistant", + content: "ack", + }), + ]; + + const didMutate = pruneProcessedHistoryImages(messages); + + expect(didMutate).toBe(true); + const firstTool = messages[0] as Extract | undefined; + if (!firstTool || !Array.isArray(firstTool.content)) { + throw new 
Error("expected toolResult array content"); + } + expect(firstTool.content).toHaveLength(2); + expect(firstTool.content[1]).toMatchObject({ type: "text", text: PRUNED_HISTORY_IMAGE_MARKER }); + }); + it("does not change messages when no assistant turn exists", () => { const messages: AgentMessage[] = [ castAgentMessage({ diff --git a/src/agents/pi-embedded-runner/run/history-image-prune.ts b/src/agents/pi-embedded-runner/run/history-image-prune.ts index d7dbea5de38..4e92bb08f01 100644 --- a/src/agents/pi-embedded-runner/run/history-image-prune.ts +++ b/src/agents/pi-embedded-runner/run/history-image-prune.ts @@ -21,7 +21,11 @@ export function pruneProcessedHistoryImages(messages: AgentMessage[]): boolean { let didMutate = false; for (let i = 0; i < lastAssistantIndex; i++) { const message = messages[i]; - if (!message || message.role !== "user" || !Array.isArray(message.content)) { + if ( + !message || + (message.role !== "user" && message.role !== "toolResult") || + !Array.isArray(message.content) + ) { continue; } for (let j = 0; j < message.content.length; j++) { diff --git a/src/agents/pi-embedded-runner/run/params.ts b/src/agents/pi-embedded-runner/run/params.ts index 6d067c910bf..ba69d991dd9 100644 --- a/src/agents/pi-embedded-runner/run/params.ts +++ b/src/agents/pi-embedded-runner/run/params.ts @@ -1,5 +1,6 @@ import type { ImageContent } from "@mariozechner/pi-ai"; import type { ReasoningLevel, ThinkLevel, VerboseLevel } from "../../../auto-reply/thinking.js"; +import type { ReplyPayload } from "../../../auto-reply/types.js"; import type { AgentStreamParams } from "../../../commands/agent/types.js"; import type { OpenClawConfig } from "../../../config/config.js"; import type { enqueueCommand } from "../../../process/command-queue.js"; @@ -28,6 +29,8 @@ export type RunEmbeddedPiAgentParams = { agentAccountId?: string; /** What initiated this agent run: "user", "heartbeat", "cron", or "memory". 
*/ trigger?: string; + /** Relative workspace path that memory-triggered writes are allowed to append to. */ + memoryFlushWritePath?: string; /** Delivery target (e.g. telegram:group:123:topic:456) for topic/thread routing. */ messageTo?: string; /** Thread/topic identifier for routing replies to the originating thread. */ @@ -76,6 +79,7 @@ export type RunEmbeddedPiAgentParams = { authProfileId?: string; authProfileIdSource?: "auto" | "user"; thinkLevel?: ThinkLevel; + fastMode?: boolean; verboseLevel?: VerboseLevel; reasoningLevel?: ReasoningLevel; toolResultFormat?: ToolResultFormat; @@ -104,7 +108,7 @@ export type RunEmbeddedPiAgentParams = { blockReplyChunking?: BlockReplyChunking; onReasoningStream?: (payload: { text?: string; mediaUrls?: string[] }) => void | Promise; onReasoningEnd?: () => void | Promise; - onToolResult?: (payload: { text?: string; mediaUrls?: string[] }) => void | Promise; + onToolResult?: (payload: ReplyPayload) => void | Promise; onAgentEvent?: (evt: { stream: string; data: Record }) => void; lane?: string; enqueue?: typeof enqueueCommand; diff --git a/src/agents/pi-embedded-runner/run/payloads.errors.test.ts b/src/agents/pi-embedded-runner/run/payloads.errors.test.ts index 4268e177dfc..a2e7873aedf 100644 --- a/src/agents/pi-embedded-runner/run/payloads.errors.test.ts +++ b/src/agents/pi-embedded-runner/run/payloads.errors.test.ts @@ -101,6 +101,18 @@ describe("buildEmbeddedRunPayloads", () => { expect(payloads[0]?.isError).toBe(true); }); + it("does not emit a synthetic billing error for successful turns with stale errorMessage", () => { + const payloads = buildPayloads({ + lastAssistant: makeAssistant({ + stopReason: "stop", + errorMessage: "insufficient credits for embedding model", + content: [{ type: "text", text: "Handle payment required errors in your API." 
}], + }), + }); + + expectSinglePayloadText(payloads, "Handle payment required errors in your API."); + }); + it("suppresses raw error JSON even when errorMessage is missing", () => { const payloads = buildPayloads({ assistantTexts: [errorJsonPretty], diff --git a/src/agents/pi-embedded-runner/run/payloads.test.ts b/src/agents/pi-embedded-runner/run/payloads.test.ts index ee8acd1d43e..6c81fb12150 100644 --- a/src/agents/pi-embedded-runner/run/payloads.test.ts +++ b/src/agents/pi-embedded-runner/run/payloads.test.ts @@ -82,4 +82,13 @@ describe("buildEmbeddedRunPayloads tool-error warnings", () => { expect(payloads).toHaveLength(0); }); + + it("suppresses assistant text when a deterministic exec approval prompt was already delivered", () => { + const payloads = buildPayloads({ + assistantTexts: ["Approval is needed. Please run /approve abc allow-once"], + didSendDeterministicApprovalPrompt: true, + }); + + expect(payloads).toHaveLength(0); + }); }); diff --git a/src/agents/pi-embedded-runner/run/payloads.ts b/src/agents/pi-embedded-runner/run/payloads.ts index c3c87845451..c0e0ded136e 100644 --- a/src/agents/pi-embedded-runner/run/payloads.ts +++ b/src/agents/pi-embedded-runner/run/payloads.ts @@ -102,6 +102,7 @@ export function buildEmbeddedRunPayloads(params: { suppressToolErrorWarnings?: boolean; inlineToolResultsAllowed: boolean; didSendViaMessagingTool?: boolean; + didSendDeterministicApprovalPrompt?: boolean; }): Array<{ text?: string; mediaUrl?: string; @@ -125,15 +126,19 @@ export function buildEmbeddedRunPayloads(params: { }> = []; const useMarkdown = params.toolResultFormat === "markdown"; + const suppressAssistantArtifacts = params.didSendDeterministicApprovalPrompt === true; const lastAssistantErrored = params.lastAssistant?.stopReason === "error"; - const errorText = params.lastAssistant - ? 
formatAssistantErrorText(params.lastAssistant, { - cfg: params.config, - sessionKey: params.sessionKey, - provider: params.provider, - model: params.model, - }) - : undefined; + const errorText = + params.lastAssistant && lastAssistantErrored + ? suppressAssistantArtifacts + ? undefined + : formatAssistantErrorText(params.lastAssistant, { + cfg: params.config, + sessionKey: params.sessionKey, + provider: params.provider, + model: params.model, + }) + : undefined; const rawErrorMessage = lastAssistantErrored ? params.lastAssistant?.errorMessage?.trim() || undefined : undefined; @@ -184,8 +189,9 @@ export function buildEmbeddedRunPayloads(params: { } } - const reasoningText = - params.lastAssistant && params.reasoningLevel === "on" + const reasoningText = suppressAssistantArtifacts + ? "" + : params.lastAssistant && params.reasoningLevel === "on" ? formatReasoningMessage(extractAssistantThinking(params.lastAssistant)) : ""; if (reasoningText) { @@ -243,13 +249,14 @@ export function buildEmbeddedRunPayloads(params: { } return isRawApiErrorPayload(trimmed); }; - const answerTexts = ( - params.assistantTexts.length - ? params.assistantTexts - : fallbackAnswerText - ? [fallbackAnswerText] - : [] - ).filter((text) => !shouldSuppressRawErrorText(text)); + const answerTexts = suppressAssistantArtifacts + ? [] + : (params.assistantTexts.length + ? params.assistantTexts + : fallbackAnswerText + ? 
[fallbackAnswerText] + : [] + ).filter((text) => !shouldSuppressRawErrorText(text)); let hasUserFacingAssistantReply = false; for (const text of answerTexts) { diff --git a/src/agents/pi-embedded-runner/run/types.ts b/src/agents/pi-embedded-runner/run/types.ts index dff5aa6f251..3bb2b49b131 100644 --- a/src/agents/pi-embedded-runner/run/types.ts +++ b/src/agents/pi-embedded-runner/run/types.ts @@ -54,6 +54,7 @@ export type EmbeddedRunAttemptResult = { actionFingerprint?: string; }; didSendViaMessagingTool: boolean; + didSendDeterministicApprovalPrompt?: boolean; messagingToolSentTexts: string[]; messagingToolSentMediaUrls: string[]; messagingToolSentTargets: MessagingToolSend[]; @@ -63,4 +64,6 @@ export type EmbeddedRunAttemptResult = { compactionCount?: number; /** Client tool call detected (OpenResponses hosted tools). */ clientToolCall?: { name: string; params: Record }; + /** True when sessions_yield tool was called during this attempt. */ + yieldDetected?: boolean; }; diff --git a/src/agents/pi-embedded-runner/runs.test.ts b/src/agents/pi-embedded-runner/runs.test.ts index 73201749317..d9bf90f961d 100644 --- a/src/agents/pi-embedded-runner/runs.test.ts +++ b/src/agents/pi-embedded-runner/runs.test.ts @@ -1,4 +1,5 @@ import { afterEach, describe, expect, it, vi } from "vitest"; +import { importFreshModule } from "../../../test/helpers/import-fresh.js"; import { __testing, abortEmbeddedPiRun, @@ -105,4 +106,35 @@ describe("pi-embedded runner run registry", () => { vi.useRealTimers(); } }); + + it("shares active run state across distinct module instances", async () => { + const runsA = await importFreshModule( + import.meta.url, + "./runs.js?scope=shared-a", + ); + const runsB = await importFreshModule( + import.meta.url, + "./runs.js?scope=shared-b", + ); + const handle = { + queueMessage: async () => {}, + isStreaming: () => true, + isCompacting: () => false, + abort: vi.fn(), + }; + + runsA.__testing.resetActiveEmbeddedRuns(); + 
runsB.__testing.resetActiveEmbeddedRuns(); + + try { + runsA.setActiveEmbeddedRun("session-shared", handle); + expect(runsB.isEmbeddedPiRunActive("session-shared")).toBe(true); + + runsB.clearActiveEmbeddedRun("session-shared", handle); + expect(runsA.isEmbeddedPiRunActive("session-shared")).toBe(false); + } finally { + runsA.__testing.resetActiveEmbeddedRuns(); + runsB.__testing.resetActiveEmbeddedRuns(); + } + }); }); diff --git a/src/agents/pi-embedded-runner/runs.ts b/src/agents/pi-embedded-runner/runs.ts index 6b62b9b59ed..0d4cecc8372 100644 --- a/src/agents/pi-embedded-runner/runs.ts +++ b/src/agents/pi-embedded-runner/runs.ts @@ -3,6 +3,7 @@ import { logMessageQueued, logSessionStateChange, } from "../../logging/diagnostic.js"; +import { resolveGlobalSingleton } from "../../shared/global-singleton.js"; type EmbeddedPiQueueHandle = { queueMessage: (text: string) => Promise; @@ -11,12 +12,23 @@ type EmbeddedPiQueueHandle = { abort: () => void; }; -const ACTIVE_EMBEDDED_RUNS = new Map(); type EmbeddedRunWaiter = { resolve: (ended: boolean) => void; timer: NodeJS.Timeout; }; -const EMBEDDED_RUN_WAITERS = new Map>(); + +/** + * Use global singleton state so busy/streaming checks stay consistent even + * when the bundler emits multiple copies of this module into separate chunks. 
+ */ +const EMBEDDED_RUN_STATE_KEY = Symbol.for("openclaw.embeddedRunState"); + +const embeddedRunState = resolveGlobalSingleton(EMBEDDED_RUN_STATE_KEY, () => ({ + activeRuns: new Map(), + waiters: new Map>(), +})); +const ACTIVE_EMBEDDED_RUNS = embeddedRunState.activeRuns; +const EMBEDDED_RUN_WAITERS = embeddedRunState.waiters; export function queueEmbeddedPiMessage(sessionId: string, text: string): boolean { const handle = ACTIVE_EMBEDDED_RUNS.get(sessionId); diff --git a/src/agents/pi-embedded-runner/sessions-yield.orchestration.test.ts b/src/agents/pi-embedded-runner/sessions-yield.orchestration.test.ts new file mode 100644 index 00000000000..e05ffd19cbf --- /dev/null +++ b/src/agents/pi-embedded-runner/sessions-yield.orchestration.test.ts @@ -0,0 +1,87 @@ +/** + * Integration test proving that sessions_yield produces a clean end_turn exit + * with no pending tool calls, so the parent session is idle when subagent + * results arrive. + */ +import "./run.overflow-compaction.mocks.shared.js"; +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { runEmbeddedPiAgent } from "./run.js"; +import { makeAttemptResult } from "./run.overflow-compaction.fixture.js"; +import { mockedGlobalHookRunner } from "./run.overflow-compaction.mocks.shared.js"; +import { + mockedRunEmbeddedAttempt, + overflowBaseRunParams, +} from "./run.overflow-compaction.shared-test.js"; +import { isEmbeddedPiRunActive, queueEmbeddedPiMessage } from "./runs.js"; + +describe("sessions_yield orchestration", () => { + beforeEach(() => { + vi.clearAllMocks(); + mockedGlobalHookRunner.hasHooks.mockImplementation(() => false); + }); + + it("parent session is idle after yield — end_turn, no pendingToolCalls", async () => { + const sessionId = "yield-parent-session"; + + // Simulate an attempt where sessions_yield was called + mockedRunEmbeddedAttempt.mockResolvedValueOnce( + makeAttemptResult({ + promptError: null, + sessionIdUsed: sessionId, + yieldDetected: true, + }), + ); + + const 
result = await runEmbeddedPiAgent({ + ...overflowBaseRunParams, + sessionId, + runId: "run-yield-orchestration", + }); + + // 1. Run completed with end_turn (yield causes clean exit) + expect(result.meta.stopReason).toBe("end_turn"); + + // 2. No pending tool calls (yield is NOT a client tool call) + expect(result.meta.pendingToolCalls).toBeUndefined(); + + // 3. Parent session is IDLE (not in ACTIVE_EMBEDDED_RUNS) + expect(isEmbeddedPiRunActive(sessionId)).toBe(false); + + // 4. Steer would fail (message delivery must take direct path, not steer) + expect(queueEmbeddedPiMessage(sessionId, "subagent result")).toBe(false); + }); + + it("clientToolCall takes precedence over yieldDetected", async () => { + // Edge case: both flags set (shouldn't happen, but clientToolCall wins) + mockedRunEmbeddedAttempt.mockResolvedValueOnce( + makeAttemptResult({ + promptError: null, + yieldDetected: true, + clientToolCall: { name: "hosted_tool", params: { arg: "value" } }, + }), + ); + + const result = await runEmbeddedPiAgent({ + ...overflowBaseRunParams, + runId: "run-yield-vs-client-tool", + }); + + // clientToolCall wins — tool_calls stopReason, pendingToolCalls populated + expect(result.meta.stopReason).toBe("tool_calls"); + expect(result.meta.pendingToolCalls).toHaveLength(1); + expect(result.meta.pendingToolCalls![0].name).toBe("hosted_tool"); + }); + + it("normal attempt without yield has no stopReason override", async () => { + mockedRunEmbeddedAttempt.mockResolvedValueOnce(makeAttemptResult({ promptError: null })); + + const result = await runEmbeddedPiAgent({ + ...overflowBaseRunParams, + runId: "run-no-yield", + }); + + // Neither clientToolCall nor yieldDetected → stopReason is undefined + expect(result.meta.stopReason).toBeUndefined(); + expect(result.meta.pendingToolCalls).toBeUndefined(); + }); +}); diff --git a/src/agents/pi-embedded-runner/stream-payload-utils.ts b/src/agents/pi-embedded-runner/stream-payload-utils.ts new file mode 100644 index 
00000000000..580bf5b1391 --- /dev/null +++ b/src/agents/pi-embedded-runner/stream-payload-utils.ts @@ -0,0 +1,20 @@ +import type { StreamFn } from "@mariozechner/pi-agent-core"; + +export function streamWithPayloadPatch( + underlying: StreamFn, + model: Parameters[0], + context: Parameters[1], + options: Parameters[2], + patchPayload: (payload: Record) => void, +) { + const originalOnPayload = options?.onPayload; + return underlying(model, context, { + ...options, + onPayload: (payload) => { + if (payload && typeof payload === "object") { + patchPayload(payload as Record); + } + return originalOnPayload?.(payload, model); + }, + }); +} diff --git a/src/agents/pi-embedded-runner/tool-result-char-estimator.ts b/src/agents/pi-embedded-runner/tool-result-char-estimator.ts index 16bdc5e43eb..6d022d62289 100644 --- a/src/agents/pi-embedded-runner/tool-result-char-estimator.ts +++ b/src/agents/pi-embedded-runner/tool-result-char-estimator.ts @@ -46,6 +46,20 @@ function getToolResultContent(msg: AgentMessage): unknown[] { return Array.isArray(content) ? 
content : []; } +function estimateContentBlockChars(content: unknown[]): number { + let chars = 0; + for (const block of content) { + if (isTextBlock(block)) { + chars += block.text.length; + } else if (isImageBlock(block)) { + chars += IMAGE_CHAR_ESTIMATE; + } else { + chars += estimateUnknownChars(block); + } + } + return chars; +} + export function getToolResultText(msg: AgentMessage): string { const content = getToolResultContent(msg); const chunks: string[] = []; @@ -67,19 +81,10 @@ function estimateMessageChars(msg: AgentMessage): number { if (typeof content === "string") { return content.length; } - let chars = 0; if (Array.isArray(content)) { - for (const block of content) { - if (isTextBlock(block)) { - chars += block.text.length; - } else if (isImageBlock(block)) { - chars += IMAGE_CHAR_ESTIMATE; - } else { - chars += estimateUnknownChars(block); - } - } + return estimateContentBlockChars(content); } - return chars; + return 0; } if (msg.role === "assistant") { @@ -115,17 +120,8 @@ function estimateMessageChars(msg: AgentMessage): number { } if (isToolResultMessage(msg)) { - let chars = 0; const content = getToolResultContent(msg); - for (const block of content) { - if (isTextBlock(block)) { - chars += block.text.length; - } else if (isImageBlock(block)) { - chars += IMAGE_CHAR_ESTIMATE; - } else { - chars += estimateUnknownChars(block); - } - } + let chars = estimateContentBlockChars(content); const details = (msg as { details?: unknown }).details; chars += estimateUnknownChars(details); const weightedChars = Math.ceil( diff --git a/src/agents/pi-embedded-runner/usage-reporting.test.ts b/src/agents/pi-embedded-runner/usage-reporting.test.ts index 48cb586e727..ebab56a841b 100644 --- a/src/agents/pi-embedded-runner/usage-reporting.test.ts +++ b/src/agents/pi-embedded-runner/usage-reporting.test.ts @@ -79,6 +79,36 @@ describe("runEmbeddedPiAgent usage reporting", () => { ); }); + it("forwards memory flush write paths into memory-triggered attempts", async 
() => { + mockedRunEmbeddedAttempt.mockResolvedValueOnce({ + aborted: false, + promptError: null, + timedOut: false, + sessionIdUsed: "test-session", + assistantTexts: [], + // eslint-disable-next-line @typescript-eslint/no-explicit-any + } as any); + + await runEmbeddedPiAgent({ + sessionId: "test-session", + sessionKey: "test-key", + sessionFile: "/tmp/session.json", + workspaceDir: "/tmp/workspace", + prompt: "flush", + timeoutMs: 30000, + runId: "run-memory-forwarding", + trigger: "memory", + memoryFlushWritePath: "memory/2026-03-10.md", + }); + + expect(mockedRunEmbeddedAttempt).toHaveBeenCalledWith( + expect.objectContaining({ + trigger: "memory", + memoryFlushWritePath: "memory/2026-03-10.md", + }), + ); + }); + it("reports total usage from the last turn instead of accumulated total", async () => { // Simulate a multi-turn run result. // Turn 1: Input 100, Output 50. Total 150. diff --git a/src/agents/pi-embedded-subscribe.handlers.lifecycle.test.ts b/src/agents/pi-embedded-subscribe.handlers.lifecycle.test.ts index 7a8b1e12e05..911b124113a 100644 --- a/src/agents/pi-embedded-subscribe.handlers.lifecycle.test.ts +++ b/src/agents/pi-embedded-subscribe.handlers.lifecycle.test.ts @@ -54,8 +54,13 @@ describe("handleAgentEnd", () => { const warn = vi.mocked(ctx.log.warn); expect(warn).toHaveBeenCalledTimes(1); - expect(warn.mock.calls[0]?.[0]).toContain("runId=run-1"); - expect(warn.mock.calls[0]?.[0]).toContain("error=connection refused"); + expect(warn.mock.calls[0]?.[0]).toBe("embedded run agent end"); + expect(warn.mock.calls[0]?.[1]).toMatchObject({ + event: "embedded_run_agent_end", + runId: "run-1", + error: "connection refused", + rawErrorPreview: "connection refused", + }); expect(onAgentEvent).toHaveBeenCalledWith({ stream: "lifecycle", data: { @@ -65,6 +70,85 @@ describe("handleAgentEnd", () => { }); }); + it("attaches raw provider error metadata and includes model/provider in console output", () => { + const ctx = createContext({ + role: "assistant", 
+ stopReason: "error", + provider: "anthropic", + model: "claude-test", + errorMessage: '{"type":"error","error":{"type":"overloaded_error","message":"Overloaded"}}', + content: [{ type: "text", text: "" }], + }); + + handleAgentEnd(ctx); + + const warn = vi.mocked(ctx.log.warn); + expect(warn).toHaveBeenCalledTimes(1); + expect(warn.mock.calls[0]?.[0]).toBe("embedded run agent end"); + expect(warn.mock.calls[0]?.[1]).toMatchObject({ + event: "embedded_run_agent_end", + runId: "run-1", + error: "The AI service is temporarily overloaded. Please try again in a moment.", + failoverReason: "overloaded", + providerErrorType: "overloaded_error", + consoleMessage: + "embedded run agent end: runId=run-1 isError=true model=claude-test provider=anthropic error=The AI service is temporarily overloaded. Please try again in a moment.", + }); + }); + + it("sanitizes model and provider before writing consoleMessage", () => { + const ctx = createContext({ + role: "assistant", + stopReason: "error", + provider: "anthropic\u001b]8;;https://evil.test\u0007", + model: "claude\tsonnet\n4", + errorMessage: "connection refused", + content: [{ type: "text", text: "" }], + }); + + handleAgentEnd(ctx); + + const warn = vi.mocked(ctx.log.warn); + const meta = warn.mock.calls[0]?.[1]; + expect(meta).toMatchObject({ + consoleMessage: + "embedded run agent end: runId=run-1 isError=true model=claude sonnet 4 provider=anthropic]8;;https://evil.test error=connection refused", + }); + expect(meta?.consoleMessage).not.toContain("\n"); + expect(meta?.consoleMessage).not.toContain("\r"); + expect(meta?.consoleMessage).not.toContain("\t"); + expect(meta?.consoleMessage).not.toContain("\u001b"); + }); + + it("redacts logged error text before emitting lifecycle events", () => { + const onAgentEvent = vi.fn(); + const ctx = createContext( + { + role: "assistant", + stopReason: "error", + errorMessage: "x-api-key: sk-abcdefghijklmnopqrstuvwxyz123456", + content: [{ type: "text", text: "" }], + }, + { 
onAgentEvent }, + ); + + handleAgentEnd(ctx); + + const warn = vi.mocked(ctx.log.warn); + expect(warn.mock.calls[0]?.[1]).toMatchObject({ + event: "embedded_run_agent_end", + error: "x-api-key: ***", + rawErrorPreview: "x-api-key: ***", + }); + expect(onAgentEvent).toHaveBeenCalledWith({ + stream: "lifecycle", + data: { + phase: "error", + error: "x-api-key: ***", + }, + }); + }); + it("keeps non-error run-end logging on debug only", () => { const ctx = createContext(undefined); diff --git a/src/agents/pi-embedded-subscribe.handlers.lifecycle.ts b/src/agents/pi-embedded-subscribe.handlers.lifecycle.ts index 4c6803e814c..973de1ebefc 100644 --- a/src/agents/pi-embedded-subscribe.handlers.lifecycle.ts +++ b/src/agents/pi-embedded-subscribe.handlers.lifecycle.ts @@ -1,6 +1,11 @@ import { emitAgentEvent } from "../infra/agent-events.js"; import { createInlineCodeState } from "../markdown/code-spans.js"; -import { formatAssistantErrorText } from "./pi-embedded-helpers.js"; +import { + buildApiErrorObservationFields, + buildTextObservationFields, + sanitizeForConsole, +} from "./pi-embedded-error-observation.js"; +import { classifyFailoverReason, formatAssistantErrorText } from "./pi-embedded-helpers.js"; import type { EmbeddedPiSubscribeContext } from "./pi-embedded-subscribe.handlers.types.js"; import { isAssistantMessage } from "./pi-embedded-utils.js"; @@ -36,16 +41,33 @@ export function handleAgentEnd(ctx: EmbeddedPiSubscribeContext) { provider: lastAssistant.provider, model: lastAssistant.model, }); + const rawError = lastAssistant.errorMessage?.trim(); + const failoverReason = classifyFailoverReason(rawError ?? ""); const errorText = (friendlyError || lastAssistant.errorMessage || "LLM request failed.").trim(); - ctx.log.warn( - `embedded run agent end: runId=${ctx.params.runId} isError=true error=${errorText}`, - ); + const observedError = buildApiErrorObservationFields(rawError); + const safeErrorText = + buildTextObservationFields(errorText).textPreview ?? 
"LLM request failed."; + const safeRunId = sanitizeForConsole(ctx.params.runId) ?? "-"; + const safeModel = sanitizeForConsole(lastAssistant.model) ?? "unknown"; + const safeProvider = sanitizeForConsole(lastAssistant.provider) ?? "unknown"; + ctx.log.warn("embedded run agent end", { + event: "embedded_run_agent_end", + tags: ["error_handling", "lifecycle", "agent_end", "assistant_error"], + runId: ctx.params.runId, + isError: true, + error: safeErrorText, + failoverReason, + model: lastAssistant.model, + provider: lastAssistant.provider, + ...observedError, + consoleMessage: `embedded run agent end: runId=${safeRunId} isError=true model=${safeModel} provider=${safeProvider} error=${safeErrorText}`, + }); emitAgentEvent({ runId: ctx.params.runId, stream: "lifecycle", data: { phase: "error", - error: errorText, + error: safeErrorText, endedAt: Date.now(), }, }); @@ -53,7 +75,7 @@ export function handleAgentEnd(ctx: EmbeddedPiSubscribeContext) { stream: "lifecycle", data: { phase: "error", - error: errorText, + error: safeErrorText, }, }); } else { diff --git a/src/agents/pi-embedded-subscribe.handlers.messages.ts b/src/agents/pi-embedded-subscribe.handlers.messages.ts index c89a4b71496..04f47e67cde 100644 --- a/src/agents/pi-embedded-subscribe.handlers.messages.ts +++ b/src/agents/pi-embedded-subscribe.handlers.messages.ts @@ -85,6 +85,9 @@ export function handleMessageUpdate( } ctx.noteLastAssistant(msg); + if (ctx.state.deterministicApprovalPromptSent) { + return; + } const assistantEvent = evt.assistantMessageEvent; const assistantRecord = @@ -261,6 +264,9 @@ export function handleMessageEnd( const assistantMessage = msg; ctx.noteLastAssistant(assistantMessage); ctx.recordAssistantUsage((assistantMessage as { usage?: unknown }).usage); + if (ctx.state.deterministicApprovalPromptSent) { + return; + } promoteThinkingTagsToBlocks(assistantMessage); const rawText = extractAssistantText(assistantMessage); diff --git 
a/src/agents/pi-embedded-subscribe.handlers.tools.media.test.ts b/src/agents/pi-embedded-subscribe.handlers.tools.media.test.ts index 741fa96c815..66685f04036 100644 --- a/src/agents/pi-embedded-subscribe.handlers.tools.media.test.ts +++ b/src/agents/pi-embedded-subscribe.handlers.tools.media.test.ts @@ -28,6 +28,7 @@ function createMockContext(overrides?: { messagingToolSentTextsNormalized: [], messagingToolSentMediaUrls: [], messagingToolSentTargets: [], + deterministicApprovalPromptSent: false, }, log: { debug: vi.fn(), warn: vi.fn() }, shouldEmitToolResult: vi.fn(() => false), diff --git a/src/agents/pi-embedded-subscribe.handlers.tools.test.ts b/src/agents/pi-embedded-subscribe.handlers.tools.test.ts index 96a988e5bc6..3cf7935a8a2 100644 --- a/src/agents/pi-embedded-subscribe.handlers.tools.test.ts +++ b/src/agents/pi-embedded-subscribe.handlers.tools.test.ts @@ -45,6 +45,7 @@ function createTestContext(): { messagingToolSentMediaUrls: [], messagingToolSentTargets: [], successfulCronAdds: 0, + deterministicApprovalPromptSent: false, }, shouldEmitToolResult: () => false, shouldEmitToolOutput: () => false, @@ -175,6 +176,161 @@ describe("handleToolExecutionEnd cron.add commitment tracking", () => { }); }); +describe("handleToolExecutionEnd exec approval prompts", () => { + it("emits a deterministic approval payload and marks assistant output suppressed", async () => { + const { ctx } = createTestContext(); + const onToolResult = vi.fn(); + ctx.params.onToolResult = onToolResult; + + await handleToolExecutionEnd( + ctx as never, + { + type: "tool_execution_end", + toolName: "exec", + toolCallId: "tool-exec-approval", + isError: false, + result: { + details: { + status: "approval-pending", + approvalId: "12345678-1234-1234-1234-123456789012", + approvalSlug: "12345678", + expiresAtMs: 1_800_000_000_000, + host: "gateway", + command: "npm view diver name version description", + cwd: "/tmp/work", + warningText: "Warning: heredoc execution requires explicit approval 
in allowlist mode.", + }, + }, + } as never, + ); + + expect(onToolResult).toHaveBeenCalledWith( + expect.objectContaining({ + text: expect.stringContaining("```txt\n/approve 12345678 allow-once\n```"), + channelData: { + execApproval: { + approvalId: "12345678-1234-1234-1234-123456789012", + approvalSlug: "12345678", + allowedDecisions: ["allow-once", "allow-always", "deny"], + }, + }, + }), + ); + expect(ctx.state.deterministicApprovalPromptSent).toBe(true); + }); + + it("emits a deterministic unavailable payload when the initiating surface cannot approve", async () => { + const { ctx } = createTestContext(); + const onToolResult = vi.fn(); + ctx.params.onToolResult = onToolResult; + + await handleToolExecutionEnd( + ctx as never, + { + type: "tool_execution_end", + toolName: "exec", + toolCallId: "tool-exec-unavailable", + isError: false, + result: { + details: { + status: "approval-unavailable", + reason: "initiating-platform-disabled", + channelLabel: "Discord", + }, + }, + } as never, + ); + + expect(onToolResult).toHaveBeenCalledWith( + expect.objectContaining({ + text: expect.stringContaining("chat exec approvals are not enabled on Discord"), + }), + ); + expect(onToolResult).toHaveBeenCalledWith( + expect.objectContaining({ + text: expect.not.stringContaining("/approve"), + }), + ); + expect(onToolResult).toHaveBeenCalledWith( + expect.objectContaining({ + text: expect.not.stringContaining("Pending command:"), + }), + ); + expect(onToolResult).toHaveBeenCalledWith( + expect.objectContaining({ + text: expect.not.stringContaining("Host:"), + }), + ); + expect(onToolResult).toHaveBeenCalledWith( + expect.objectContaining({ + text: expect.not.stringContaining("CWD:"), + }), + ); + expect(ctx.state.deterministicApprovalPromptSent).toBe(true); + }); + + it("emits the shared approver-DM notice when another approval client received the request", async () => { + const { ctx } = createTestContext(); + const onToolResult = vi.fn(); + ctx.params.onToolResult = 
onToolResult; + + await handleToolExecutionEnd( + ctx as never, + { + type: "tool_execution_end", + toolName: "exec", + toolCallId: "tool-exec-unavailable-dm-redirect", + isError: false, + result: { + details: { + status: "approval-unavailable", + reason: "initiating-platform-disabled", + channelLabel: "Telegram", + sentApproverDms: true, + }, + }, + } as never, + ); + + expect(onToolResult).toHaveBeenCalledWith( + expect.objectContaining({ + text: "Approval required. I sent the allowed approvers DMs.", + }), + ); + expect(ctx.state.deterministicApprovalPromptSent).toBe(true); + }); + + it("does not suppress assistant output when deterministic prompt delivery rejects", async () => { + const { ctx } = createTestContext(); + ctx.params.onToolResult = vi.fn(async () => { + throw new Error("delivery failed"); + }); + + await handleToolExecutionEnd( + ctx as never, + { + type: "tool_execution_end", + toolName: "exec", + toolCallId: "tool-exec-approval-reject", + isError: false, + result: { + details: { + status: "approval-pending", + approvalId: "12345678-1234-1234-1234-123456789012", + approvalSlug: "12345678", + expiresAtMs: 1_800_000_000_000, + host: "gateway", + command: "npm view diver name version description", + cwd: "/tmp/work", + }, + }, + } as never, + ); + + expect(ctx.state.deterministicApprovalPromptSent).toBe(false); + }); +}); + describe("messaging tool media URL tracking", () => { it("tracks media arg from messaging tool as pending", async () => { const { ctx } = createTestContext(); diff --git a/src/agents/pi-embedded-subscribe.handlers.tools.ts b/src/agents/pi-embedded-subscribe.handlers.tools.ts index 8abd9469bbc..70f6b54639c 100644 --- a/src/agents/pi-embedded-subscribe.handlers.tools.ts +++ b/src/agents/pi-embedded-subscribe.handlers.tools.ts @@ -1,5 +1,9 @@ import type { AgentEvent } from "@mariozechner/pi-agent-core"; import { emitAgentEvent } from "../infra/agent-events.js"; +import { + buildExecApprovalPendingReplyPayload, + 
buildExecApprovalUnavailableReplyPayload, +} from "../infra/exec-approval-reply.js"; import { getGlobalHookRunner } from "../plugins/hook-runner-global.js"; import type { PluginHookAfterToolCallEvent } from "../plugins/types.js"; import { normalizeTextForComparison } from "./pi-embedded-helpers.js"; @@ -139,7 +143,81 @@ function collectMessagingMediaUrlsFromToolResult(result: unknown): string[] { return urls; } -function emitToolResultOutput(params: { +function readExecApprovalPendingDetails(result: unknown): { + approvalId: string; + approvalSlug: string; + expiresAtMs?: number; + host: "gateway" | "node"; + command: string; + cwd?: string; + nodeId?: string; + warningText?: string; +} | null { + if (!result || typeof result !== "object") { + return null; + } + const outer = result as Record; + const details = + outer.details && typeof outer.details === "object" && !Array.isArray(outer.details) + ? (outer.details as Record) + : outer; + if (details.status !== "approval-pending") { + return null; + } + const approvalId = typeof details.approvalId === "string" ? details.approvalId.trim() : ""; + const approvalSlug = typeof details.approvalSlug === "string" ? details.approvalSlug.trim() : ""; + const command = typeof details.command === "string" ? details.command : ""; + const host = details.host === "node" ? "node" : details.host === "gateway" ? "gateway" : null; + if (!approvalId || !approvalSlug || !command || !host) { + return null; + } + return { + approvalId, + approvalSlug, + expiresAtMs: typeof details.expiresAtMs === "number" ? details.expiresAtMs : undefined, + host, + command, + cwd: typeof details.cwd === "string" ? details.cwd : undefined, + nodeId: typeof details.nodeId === "string" ? details.nodeId : undefined, + warningText: typeof details.warningText === "string" ? 
details.warningText : undefined, + }; +} + +function readExecApprovalUnavailableDetails(result: unknown): { + reason: "initiating-platform-disabled" | "initiating-platform-unsupported" | "no-approval-route"; + warningText?: string; + channelLabel?: string; + sentApproverDms?: boolean; +} | null { + if (!result || typeof result !== "object") { + return null; + } + const outer = result as Record; + const details = + outer.details && typeof outer.details === "object" && !Array.isArray(outer.details) + ? (outer.details as Record) + : outer; + if (details.status !== "approval-unavailable") { + return null; + } + const reason = + details.reason === "initiating-platform-disabled" || + details.reason === "initiating-platform-unsupported" || + details.reason === "no-approval-route" + ? details.reason + : null; + if (!reason) { + return null; + } + return { + reason, + warningText: typeof details.warningText === "string" ? details.warningText : undefined, + channelLabel: typeof details.channelLabel === "string" ? 
details.channelLabel : undefined, + sentApproverDms: details.sentApproverDms === true, + }; +} + +async function emitToolResultOutput(params: { ctx: ToolHandlerContext; toolName: string; meta?: string; @@ -152,6 +230,46 @@ function emitToolResultOutput(params: { return; } + const approvalPending = readExecApprovalPendingDetails(result); + if (!isToolError && approvalPending) { + try { + await ctx.params.onToolResult( + buildExecApprovalPendingReplyPayload({ + approvalId: approvalPending.approvalId, + approvalSlug: approvalPending.approvalSlug, + command: approvalPending.command, + cwd: approvalPending.cwd, + host: approvalPending.host, + nodeId: approvalPending.nodeId, + expiresAtMs: approvalPending.expiresAtMs, + warningText: approvalPending.warningText, + }), + ); + ctx.state.deterministicApprovalPromptSent = true; + } catch { + // ignore delivery failures + } + return; + } + + const approvalUnavailable = readExecApprovalUnavailableDetails(result); + if (!isToolError && approvalUnavailable) { + try { + await ctx.params.onToolResult?.( + buildExecApprovalUnavailableReplyPayload({ + reason: approvalUnavailable.reason, + warningText: approvalUnavailable.warningText, + channelLabel: approvalUnavailable.channelLabel, + sentApproverDms: approvalUnavailable.sentApproverDms, + }), + ); + ctx.state.deterministicApprovalPromptSent = true; + } catch { + // ignore delivery failures + } + return; + } + if (ctx.shouldEmitToolOutput()) { const outputText = extractToolResultText(sanitizedResult); if (outputText) { @@ -427,7 +545,7 @@ export async function handleToolExecutionEnd( `embedded run tool end: runId=${ctx.params.runId} tool=${toolName} toolCallId=${toolCallId}`, ); - emitToolResultOutput({ ctx, toolName, meta, isToolError, result, sanitizedResult }); + await emitToolResultOutput({ ctx, toolName, meta, isToolError, result, sanitizedResult }); // Run after_tool_call plugin hook (fire-and-forget) const hookRunnerAfter = ctx.hookRunner ?? 
getGlobalHookRunner(); diff --git a/src/agents/pi-embedded-subscribe.handlers.types.ts b/src/agents/pi-embedded-subscribe.handlers.types.ts index 1a9d48f46f0..4436e6f6aa3 100644 --- a/src/agents/pi-embedded-subscribe.handlers.types.ts +++ b/src/agents/pi-embedded-subscribe.handlers.types.ts @@ -12,8 +12,8 @@ import type { import type { NormalizedUsage } from "./usage.js"; export type EmbeddedSubscribeLogger = { - debug: (message: string) => void; - warn: (message: string) => void; + debug: (message: string, meta?: Record) => void; + warn: (message: string, meta?: Record) => void; }; export type ToolErrorSummary = { @@ -76,6 +76,7 @@ export type EmbeddedPiSubscribeState = { pendingMessagingTargets: Map; successfulCronAdds: number; pendingMessagingMediaUrls: Map; + deterministicApprovalPromptSent: boolean; lastAssistant?: AgentMessage; }; @@ -155,6 +156,7 @@ export type ToolHandlerState = Pick< | "messagingToolSentMediaUrls" | "messagingToolSentTargets" | "successfulCronAdds" + | "deterministicApprovalPromptSent" >; export type ToolHandlerContext = { diff --git a/src/agents/pi-embedded-subscribe.ts b/src/agents/pi-embedded-subscribe.ts index c5ffedbf14f..83592372e80 100644 --- a/src/agents/pi-embedded-subscribe.ts +++ b/src/agents/pi-embedded-subscribe.ts @@ -78,6 +78,7 @@ export function subscribeEmbeddedPiSession(params: SubscribeEmbeddedPiSessionPar pendingMessagingTargets: new Map(), successfulCronAdds: 0, pendingMessagingMediaUrls: new Map(), + deterministicApprovalPromptSent: false, }; const usageTotals = { input: 0, @@ -598,6 +599,7 @@ export function subscribeEmbeddedPiSession(params: SubscribeEmbeddedPiSessionPar pendingMessagingTargets.clear(); state.successfulCronAdds = 0; state.pendingMessagingMediaUrls.clear(); + state.deterministicApprovalPromptSent = false; resetAssistantMessageState(0); }; @@ -688,6 +690,7 @@ export function subscribeEmbeddedPiSession(params: SubscribeEmbeddedPiSessionPar // Used to suppress agent's confirmation text (e.g., "Respondi 
no Telegram!") // which is generated AFTER the tool sends the actual answer. didSendViaMessagingTool: () => messagingToolSentTexts.length > 0, + didSendDeterministicApprovalPrompt: () => state.deterministicApprovalPromptSent, getLastToolError: () => (state.lastToolError ? { ...state.lastToolError } : undefined), getUsageTotals, getCompactionCount: () => compactionCount, diff --git a/src/agents/pi-embedded-subscribe.types.ts b/src/agents/pi-embedded-subscribe.types.ts index 689cd49998e..bbb2d552d73 100644 --- a/src/agents/pi-embedded-subscribe.types.ts +++ b/src/agents/pi-embedded-subscribe.types.ts @@ -1,5 +1,6 @@ import type { AgentSession } from "@mariozechner/pi-coding-agent"; import type { ReasoningLevel, VerboseLevel } from "../auto-reply/thinking.js"; +import type { ReplyPayload } from "../auto-reply/types.js"; import type { OpenClawConfig } from "../config/types.openclaw.js"; import type { HookRunner } from "../plugins/hooks.js"; import type { BlockReplyChunking } from "./pi-embedded-block-chunker.js"; @@ -16,7 +17,7 @@ export type SubscribeEmbeddedPiSessionParams = { toolResultFormat?: ToolResultFormat; shouldEmitToolResult?: () => boolean; shouldEmitToolOutput?: () => boolean; - onToolResult?: (payload: { text?: string; mediaUrls?: string[] }) => void | Promise; + onToolResult?: (payload: ReplyPayload) => void | Promise; onReasoningStream?: (payload: { text?: string; mediaUrls?: string[] }) => void | Promise; /** Called when a thinking/reasoning block ends ( tag processed). 
*/ onReasoningEnd?: () => void | Promise; diff --git a/src/agents/pi-embedded-utils.strip-model-special-tokens.test.ts b/src/agents/pi-embedded-utils.strip-model-special-tokens.test.ts new file mode 100644 index 00000000000..ef0e2b32dec --- /dev/null +++ b/src/agents/pi-embedded-utils.strip-model-special-tokens.test.ts @@ -0,0 +1,25 @@ +import { describe, expect, it } from "vitest"; +import { stripModelSpecialTokens } from "./pi-embedded-utils.js"; + +/** + * @see https://github.com/openclaw/openclaw/issues/40020 + */ +describe("stripModelSpecialTokens", () => { + it("strips tokens and inserts space between adjacent words", () => { + expect(stripModelSpecialTokens("<|user|>Question<|assistant|>Answer")).toBe("Question Answer"); + }); + + it("strips full-width pipe variants (DeepSeek U+FF5C)", () => { + expect(stripModelSpecialTokens("<|begin▁of▁sentence|>Hello there")).toBe("Hello there"); + }); + + it("does not strip normal angle brackets or HTML", () => { + expect(stripModelSpecialTokens("a < b && c > d")).toBe("a < b && c > d"); + expect(stripModelSpecialTokens("
hello
")).toBe("
hello
"); + }); + + it("passes through text without tokens unchanged", () => { + const text = "Just a normal response."; + expect(stripModelSpecialTokens(text)).toBe(text); + }); +}); diff --git a/src/agents/pi-embedded-utils.test.ts b/src/agents/pi-embedded-utils.test.ts index 6a5ce710c85..ab84a375d94 100644 --- a/src/agents/pi-embedded-utils.test.ts +++ b/src/agents/pi-embedded-utils.test.ts @@ -134,6 +134,20 @@ describe("extractAssistantText", () => { ); }); + it("preserves response when errorMessage set from background failure (#13935)", () => { + const responseText = "Handle payment required errors in your API."; + const msg = makeAssistantMessage({ + role: "assistant", + errorMessage: "insufficient credits for embedding model", + stopReason: "stop", + content: [{ type: "text", text: responseText }], + timestamp: Date.now(), + }); + + const result = extractAssistantText(msg); + expect(result).toBe(responseText); + }); + it("strips Minimax tool invocations with extra attributes", () => { const msg = makeAssistantMessage({ role: "assistant", diff --git a/src/agents/pi-embedded-utils.ts b/src/agents/pi-embedded-utils.ts index 21a4eb39fd5..375df11654d 100644 --- a/src/agents/pi-embedded-utils.ts +++ b/src/agents/pi-embedded-utils.ts @@ -33,6 +33,32 @@ export function stripMinimaxToolCallXml(text: string): string { return cleaned; } +/** + * Strip model control tokens leaked into assistant text output. + * + * Models like GLM-5 and DeepSeek sometimes emit internal delimiter tokens + * (e.g. `<|assistant|>`, `<|tool_call_result_begin|>`, `<|begin▁of▁sentence|>`) + * in their responses. These use the universal `<|...|>` convention (ASCII or + * full-width pipe variants) and should never reach end users. + * + * This is a provider bug — no upstream fix tracked yet. + * Remove this function when upstream providers stop leaking tokens. 
+ * @see https://github.com/openclaw/openclaw/issues/40020 + */ +// Match both ASCII pipe <|...|> and full-width pipe <|...|> (U+FF5C) variants. +const MODEL_SPECIAL_TOKEN_RE = /<[||][^||]*[||]>/g; + +export function stripModelSpecialTokens(text: string): string { + if (!text) { + return text; + } + if (!MODEL_SPECIAL_TOKEN_RE.test(text)) { + return text; + } + MODEL_SPECIAL_TOKEN_RE.lastIndex = 0; + return text.replace(MODEL_SPECIAL_TOKEN_RE, " ").replace(/ +/g, " ").trim(); +} + /** * Strip downgraded tool call text representations that leak into text content. * When replaying history to Gemini, tool calls without `thought_signature` are @@ -212,14 +238,16 @@ export function extractAssistantText(msg: AssistantMessage): string { extractTextFromChatContent(msg.content, { sanitizeText: (text) => stripThinkingTagsFromText( - stripDowngradedToolCallText(stripMinimaxToolCallXml(text)), + stripDowngradedToolCallText(stripModelSpecialTokens(stripMinimaxToolCallXml(text))), ).trim(), joinWith: "\n", normalizeText: (text) => text.trim(), }) ?? ""; // Only apply keyword-based error rewrites when the assistant message is actually an error. // Otherwise normal prose that *mentions* errors (e.g. "context overflow") can get clobbered. - const errorContext = msg.stopReason === "error" || Boolean(msg.errorMessage?.trim()); + // Gate on stopReason only — a non-error response with an errorMessage set (e.g. from a + // background tool failure) should not have its content rewritten (#13935). 
+ const errorContext = msg.stopReason === "error"; return sanitizeUserFacingText(extracted, { errorContext }); } diff --git a/src/agents/pi-extensions/compaction-instructions.test.ts b/src/agents/pi-extensions/compaction-instructions.test.ts new file mode 100644 index 00000000000..a75112d07cb --- /dev/null +++ b/src/agents/pi-extensions/compaction-instructions.test.ts @@ -0,0 +1,237 @@ +import { describe, expect, it } from "vitest"; +import { + DEFAULT_COMPACTION_INSTRUCTIONS, + resolveCompactionInstructions, + composeSplitTurnInstructions, +} from "./compaction-instructions.js"; + +describe("DEFAULT_COMPACTION_INSTRUCTIONS", () => { + it("is a non-empty string", () => { + expect(typeof DEFAULT_COMPACTION_INSTRUCTIONS).toBe("string"); + expect(DEFAULT_COMPACTION_INSTRUCTIONS.trim().length).toBeGreaterThan(0); + }); + + it("contains language preservation directive", () => { + expect(DEFAULT_COMPACTION_INSTRUCTIONS).toContain("primary language"); + }); + + it("contains factual content directive", () => { + expect(DEFAULT_COMPACTION_INSTRUCTIONS).toContain("factual content"); + }); + + it("does not exceed MAX_INSTRUCTION_LENGTH (800 chars)", () => { + expect(DEFAULT_COMPACTION_INSTRUCTIONS.length).toBeLessThanOrEqual(800); + }); +}); + +describe("resolveCompactionInstructions", () => { + describe("null / undefined handling", () => { + it("returns DEFAULT when both args are undefined", () => { + expect(resolveCompactionInstructions(undefined, undefined)).toBe( + DEFAULT_COMPACTION_INSTRUCTIONS, + ); + }); + + it("returns DEFAULT when both args are explicitly null (untyped JS caller)", () => { + expect( + resolveCompactionInstructions(null as unknown as undefined, null as unknown as undefined), + ).toBe(DEFAULT_COMPACTION_INSTRUCTIONS); + }); + }); + + describe("empty and whitespace normalization", () => { + it("treats empty-string event as absent -- runtime wins", () => { + const result = resolveCompactionInstructions("", "runtime value"); + 
expect(result).toBe("runtime value"); + }); + + it("treats whitespace-only event as absent -- runtime wins", () => { + const result = resolveCompactionInstructions(" ", "runtime value"); + expect(result).toBe("runtime value"); + }); + + it("treats tab/newline-only event as absent -- runtime wins", () => { + const result = resolveCompactionInstructions("\t\n\r", "runtime value"); + expect(result).toBe("runtime value"); + }); + + it("treats empty-string runtime as absent -- DEFAULT wins", () => { + const result = resolveCompactionInstructions(undefined, ""); + expect(result).toBe(DEFAULT_COMPACTION_INSTRUCTIONS); + }); + + it("treats whitespace-only runtime as absent -- DEFAULT wins", () => { + const result = resolveCompactionInstructions(undefined, " "); + expect(result).toBe(DEFAULT_COMPACTION_INSTRUCTIONS); + }); + + it("falls through to DEFAULT when both are empty strings", () => { + expect(resolveCompactionInstructions("", "")).toBe(DEFAULT_COMPACTION_INSTRUCTIONS); + }); + + it("falls through to DEFAULT when both are whitespace-only", () => { + expect(resolveCompactionInstructions(" ", "\t\n")).toBe(DEFAULT_COMPACTION_INSTRUCTIONS); + }); + + it("non-breaking space (\\u00A0) IS trimmed by ES2015+ trim() -- falls through", () => { + const nbsp = "\u00A0"; + const result = resolveCompactionInstructions(nbsp, "runtime"); + expect(result).toBe("runtime"); + }); + + it("KNOWN_EDGE: zero-width space (\\u200B) survives normalization -- invisible string used as instructions", () => { + const zws = "\u200B"; + const result = resolveCompactionInstructions(zws, "runtime"); + expect(result).toBe(zws); + }); + }); + + describe("precedence", () => { + it("event wins over runtime when both are non-empty", () => { + const result = resolveCompactionInstructions("event value", "runtime value"); + expect(result).toBe("event value"); + }); + + it("runtime wins when event is undefined", () => { + const result = resolveCompactionInstructions(undefined, "runtime value"); + 
expect(result).toBe("runtime value"); + }); + + it("event is trimmed before use", () => { + const result = resolveCompactionInstructions(" event ", "runtime"); + expect(result).toBe("event"); + }); + + it("runtime is trimmed before use", () => { + const result = resolveCompactionInstructions(undefined, " runtime "); + expect(result).toBe("runtime"); + }); + }); + + describe("truncation at 800 chars", () => { + it("does NOT truncate string of exactly 800 chars", () => { + const exact800 = "A".repeat(800); + const result = resolveCompactionInstructions(exact800, undefined); + expect(result).toHaveLength(800); + expect(result).toBe(exact800); + }); + + it("truncates string of 801 chars to 800", () => { + const over = "B".repeat(801); + const result = resolveCompactionInstructions(over, undefined); + expect(result).toHaveLength(800); + expect(result).toBe("B".repeat(800)); + }); + + it("truncates very long string to exactly 800", () => { + const huge = "C".repeat(5000); + const result = resolveCompactionInstructions(huge, undefined); + expect(result).toHaveLength(800); + }); + + it("truncation applies AFTER trimming -- 810 raw chars with 10 leading spaces yields 800", () => { + const padded = " ".repeat(10) + "D".repeat(800); + const result = resolveCompactionInstructions(padded, undefined); + expect(result).toHaveLength(800); + expect(result).toBe("D".repeat(800)); + }); + + it("truncation applies to runtime fallback as well", () => { + const longRuntime = "R".repeat(1000); + const result = resolveCompactionInstructions(undefined, longRuntime); + expect(result).toHaveLength(800); + }); + + it("truncates by code points, not code units (emoji safe)", () => { + const emojis801 = "\u{1F600}".repeat(801); + const result = resolveCompactionInstructions(emojis801, undefined); + expect(Array.from(result)).toHaveLength(800); + }); + + it("does not split surrogate pair when cut lands inside a pair", () => { + const input = "X" + "\u{1F600}".repeat(800); + const result = 
resolveCompactionInstructions(input, undefined); + const codePoints = Array.from(result); + expect(codePoints).toHaveLength(800); + expect(codePoints[0]).toBe("X"); + // Every code point in the truncated result must be a complete character (no lone surrogates) + for (const cp of codePoints) { + const code = cp.codePointAt(0)!; + const isLoneSurrogate = code >= 0xd800 && code <= 0xdfff; + expect(isLoneSurrogate).toBe(false); + } + }); + }); + + describe("return type", () => { + it("always returns a string, never undefined or null", () => { + const cases: [string | undefined, string | undefined][] = [ + [undefined, undefined], + ["", ""], + [" ", " "], + [null as unknown as undefined, null as unknown as undefined], + ["valid", undefined], + [undefined, "valid"], + ]; + + for (const [event, runtime] of cases) { + const result = resolveCompactionInstructions(event, runtime); + expect(typeof result).toBe("string"); + expect(result.length).toBeGreaterThan(0); + } + }); + }); +}); + +describe("composeSplitTurnInstructions", () => { + it("joins turn prefix, separator, and resolved instructions with double newlines", () => { + const result = composeSplitTurnInstructions("Turn prefix here", "Resolved instructions here"); + expect(result).toBe( + "Turn prefix here\n\nAdditional requirements:\n\nResolved instructions here", + ); + }); + + it("output contains the turn prefix verbatim", () => { + const prefix = "Summarize the last 5 messages."; + const result = composeSplitTurnInstructions(prefix, "Keep it short."); + expect(result).toContain(prefix); + }); + + it("output contains the resolved instructions verbatim", () => { + const instructions = "Write in Korean. 
Preserve persona."; + const result = composeSplitTurnInstructions("prefix", instructions); + expect(result).toContain(instructions); + }); + + it("output contains 'Additional requirements:' separator", () => { + const result = composeSplitTurnInstructions("a", "b"); + expect(result).toContain("Additional requirements:"); + }); + + it("KNOWN_EDGE: empty turnPrefix produces leading blank line", () => { + const result = composeSplitTurnInstructions("", "instructions"); + expect(result).toBe("\n\nAdditional requirements:\n\ninstructions"); + expect(result.startsWith("\n")).toBe(true); + }); + + it("KNOWN_EDGE: empty resolvedInstructions produces trailing blank area", () => { + const result = composeSplitTurnInstructions("prefix", ""); + expect(result).toBe("prefix\n\nAdditional requirements:\n\n"); + expect(result.endsWith("\n\n")).toBe(true); + }); + + it("does not deduplicate if instructions already contain 'Additional requirements:'", () => { + const instructions = "Additional requirements: keep it short."; + const result = composeSplitTurnInstructions("prefix", instructions); + const count = (result.match(/Additional requirements:/g) || []).length; + expect(count).toBe(2); + }); + + it("preserves multiline content in both inputs", () => { + const prefix = "Line 1\nLine 2"; + const instructions = "Rule A\nRule B\nRule C"; + const result = composeSplitTurnInstructions(prefix, instructions); + expect(result).toContain("Line 1\nLine 2"); + expect(result).toContain("Rule A\nRule B\nRule C"); + }); +}); diff --git a/src/agents/pi-extensions/compaction-instructions.ts b/src/agents/pi-extensions/compaction-instructions.ts new file mode 100644 index 00000000000..104cf6cb90b --- /dev/null +++ b/src/agents/pi-extensions/compaction-instructions.ts @@ -0,0 +1,68 @@ +/** + * Compaction instruction utilities. + * + * Provides default language-preservation instructions and a precedence-based + * resolver for customInstructions used during context compaction summaries. 
+ */ + +/** + * Default instructions injected into every safeguard-mode compaction summary. + * Preserves conversation language and persona while keeping the SDK's required + * summary structure intact. + */ +export const DEFAULT_COMPACTION_INSTRUCTIONS = + "Write the summary body in the primary language used in the conversation.\n" + + "Focus on factual content: what was discussed, decisions made, and current state.\n" + + "Keep the required summary structure and section headers unchanged.\n" + + "Do not translate or alter code, file paths, identifiers, or error messages."; + +/** + * Upper bound on custom instruction length to prevent prompt bloat. + * ~800 chars ≈ ~200 tokens — keeps summarization quality stable. + */ +const MAX_INSTRUCTION_LENGTH = 800; + +function truncateUnicodeSafe(s: string, maxCodePoints: number): string { + const chars = Array.from(s); + if (chars.length <= maxCodePoints) { + return s; + } + return chars.slice(0, maxCodePoints).join(""); +} + +function normalize(s: string | undefined): string | undefined { + if (s == null) { + return undefined; + } + const trimmed = s.trim(); + return trimmed.length > 0 ? trimmed : undefined; +} + +/** + * Resolve compaction instructions with precedence: + * event (SDK) → runtime (config) → DEFAULT constant. + * + * Each input is normalized first (trim + empty→undefined) so that blank + * strings don't short-circuit the fallback chain. + */ +export function resolveCompactionInstructions( + eventInstructions: string | undefined, + runtimeInstructions: string | undefined, +): string { + const resolved = + normalize(eventInstructions) ?? + normalize(runtimeInstructions) ?? + DEFAULT_COMPACTION_INSTRUCTIONS; + return truncateUnicodeSafe(resolved, MAX_INSTRUCTION_LENGTH); +} + +/** + * Compose split-turn instructions by combining the SDK's turn-prefix + * instructions with the resolved compaction instructions. 
+ */ +export function composeSplitTurnInstructions( + turnPrefixInstructions: string, + resolvedInstructions: string, +): string { + return [turnPrefixInstructions, "Additional requirements:", resolvedInstructions].join("\n\n"); +} diff --git a/src/agents/pi-extensions/compaction-safeguard-runtime.ts b/src/agents/pi-extensions/compaction-safeguard-runtime.ts index 0180689f864..42ccb90aa49 100644 --- a/src/agents/pi-extensions/compaction-safeguard-runtime.ts +++ b/src/agents/pi-extensions/compaction-safeguard-runtime.ts @@ -7,6 +7,7 @@ export type CompactionSafeguardRuntimeValue = { contextWindowTokens?: number; identifierPolicy?: AgentCompactionIdentifierPolicy; identifierInstructions?: string; + customInstructions?: string; /** * Model to use for compaction summarization. * Passed through runtime because `ctx.model` is undefined in the compact.ts workflow diff --git a/src/agents/pi-extensions/compaction-safeguard.ts b/src/agents/pi-extensions/compaction-safeguard.ts index 7eb2cc29352..4461b97d3e0 100644 --- a/src/agents/pi-extensions/compaction-safeguard.ts +++ b/src/agents/pi-extensions/compaction-safeguard.ts @@ -23,6 +23,10 @@ import { collectTextContentBlocks } from "../content-blocks.js"; import { wrapUntrustedPromptDataBlock } from "../sanitize-for-prompt.js"; import { repairToolUseResultPairing } from "../session-transcript-repair.js"; import { extractToolCallsFromAssistant, extractToolResultId } from "../tool-call-id.js"; +import { + composeSplitTurnInstructions, + resolveCompactionInstructions, +} from "./compaction-instructions.js"; import { getCompactionSafeguardRuntime } from "./compaction-safeguard-runtime.js"; const log = createSubsystemLogger("compaction-safeguard"); @@ -697,7 +701,7 @@ async function readWorkspaceContextForSummary(): Promise { export default function compactionSafeguardExtension(api: ExtensionAPI): void { api.on("session_before_compact", async (event, ctx) => { - const { preparation, customInstructions, signal } = event; + const { 
preparation, customInstructions: eventInstructions, signal } = event; if (!preparation.messagesToSummarize.some(isRealConversationMessage)) { log.warn( "Compaction safeguard: cancelling compaction with no real conversation messages to summarize.", @@ -715,6 +719,10 @@ export default function compactionSafeguardExtension(api: ExtensionAPI): void { // Model resolution: ctx.model is undefined in compact.ts workflow (extensionRunner.initialize() is never called). // Fall back to runtime.model which is explicitly passed when building extension paths. const runtime = getCompactionSafeguardRuntime(ctx.sessionManager); + const customInstructions = resolveCompactionInstructions( + eventInstructions, + runtime?.customInstructions, + ); const summarizationInstructions = { identifierPolicy: runtime?.identifierPolicy, identifierInstructions: runtime?.identifierInstructions, @@ -726,7 +734,7 @@ export default function compactionSafeguardExtension(api: ExtensionAPI): void { // Use a WeakSet to track which session managers have already logged the warning. if (!ctx.model && !runtime?.model && !missedModelWarningSessions.has(ctx.sessionManager)) { missedModelWarningSessions.add(ctx.sessionManager); - console.warn( + log.warn( "[compaction-safeguard] Both ctx.model and runtime.model are undefined. " + "Compaction summarization will not run. 
This indicates extensionRunner.initialize() " + "was not called and model was not passed through runtime registry.", @@ -737,7 +745,7 @@ export default function compactionSafeguardExtension(api: ExtensionAPI): void { const apiKey = await ctx.modelRegistry.getApiKey(model); if (!apiKey) { - console.warn( + log.warn( "Compaction safeguard: no API key available; cancelling compaction to preserve history.", ); return { cancel: true }; @@ -892,7 +900,10 @@ export default function compactionSafeguardExtension(api: ExtensionAPI): void { reserveTokens, maxChunkTokens, contextWindow: contextWindowTokens, - customInstructions: `${TURN_PREFIX_INSTRUCTIONS}\n\n${currentInstructions}`, + customInstructions: composeSplitTurnInstructions( + TURN_PREFIX_INSTRUCTIONS, + currentInstructions, + ), summarizationInstructions, previousSummary: undefined, }); diff --git a/src/agents/pi-extensions/context-pruning.test.ts b/src/agents/pi-extensions/context-pruning.test.ts index 7812f5db00a..9dedff97def 100644 --- a/src/agents/pi-extensions/context-pruning.test.ts +++ b/src/agents/pi-extensions/context-pruning.test.ts @@ -358,21 +358,26 @@ describe("context-pruning", () => { expect(toolText(findToolResult(next, "t2"))).toContain("y".repeat(20_000)); }); - it("skips tool results that contain images (no soft trim, no hard clear)", () => { + it("replaces image blocks in tool results during soft trim", () => { const messages: AgentMessage[] = [ makeUser("u1"), makeImageToolResult({ toolCallId: "t1", toolName: "exec", - text: "x".repeat(20_000), + text: "visible tool text", }), ]; - const next = pruneWithAggressiveDefaults(messages); + const next = pruneWithAggressiveDefaults(messages, { + hardClearRatio: 10.0, + hardClear: { enabled: false, placeholder: "[cleared]" }, + softTrim: { maxChars: 200, headChars: 100, tailChars: 100 }, + }); const tool = findToolResult(next, "t1"); - expect(tool.content.some((b) => b.type === "image")).toBe(true); - 
expect(toolText(tool)).toContain("x".repeat(20_000)); + expect(tool.content.some((b) => b.type === "image")).toBe(false); + expect(toolText(tool)).toContain("[image removed during context pruning]"); + expect(toolText(tool)).toContain("visible tool text"); }); it("soft-trims across block boundaries", () => { diff --git a/src/agents/pi-extensions/context-pruning/pruner.test.ts b/src/agents/pi-extensions/context-pruning/pruner.test.ts index 3985bb2feb1..a847bff0e8c 100644 --- a/src/agents/pi-extensions/context-pruning/pruner.test.ts +++ b/src/agents/pi-extensions/context-pruning/pruner.test.ts @@ -45,6 +45,19 @@ function makeAssistant(content: AssistantMessage["content"]): AgentMessage { }; } +function makeToolResult( + content: Array< + { type: "text"; text: string } | { type: "image"; data: string; mimeType: string } + >, +): AgentMessage { + return { + role: "toolResult", + toolName: "read", + content, + timestamp: Date.now(), + } as AgentMessage; +} + describe("pruneContextMessages", () => { it("does not crash on assistant message with malformed thinking block (missing thinking string)", () => { const messages: AgentMessage[] = [ @@ -109,4 +122,119 @@ describe("pruneContextMessages", () => { }); expect(result).toHaveLength(2); }); + + it("soft-trims image-containing tool results by replacing image blocks with placeholders", () => { + const messages: AgentMessage[] = [ + makeUser("summarize this"), + makeToolResult([ + { type: "text", text: "A".repeat(120) }, + { type: "image", data: "img", mimeType: "image/png" }, + { type: "text", text: "B".repeat(120) }, + ]), + makeAssistant([{ type: "text", text: "done" }]), + ]; + + const result = pruneContextMessages({ + messages, + settings: { + ...DEFAULT_CONTEXT_PRUNING_SETTINGS, + keepLastAssistants: 1, + softTrimRatio: 0, + hardClear: { + ...DEFAULT_CONTEXT_PRUNING_SETTINGS.hardClear, + enabled: false, + }, + softTrim: { + maxChars: 200, + headChars: 170, + tailChars: 30, + }, + }, + ctx: CONTEXT_WINDOW_1M, + 
isToolPrunable: () => true, + contextWindowTokensOverride: 16, + }); + + const toolResult = result[1] as Extract; + expect(toolResult.content).toHaveLength(1); + expect(toolResult.content[0]).toMatchObject({ type: "text" }); + const textBlock = toolResult.content[0] as { type: "text"; text: string }; + expect(textBlock.text).toContain("[image removed during context pruning]"); + expect(textBlock.text).toContain( + "[Tool result trimmed: kept first 170 chars and last 30 chars", + ); + }); + + it("replaces image-only tool results with placeholders even when text trimming is not needed", () => { + const messages: AgentMessage[] = [ + makeUser("summarize this"), + makeToolResult([{ type: "image", data: "img", mimeType: "image/png" }]), + makeAssistant([{ type: "text", text: "done" }]), + ]; + + const result = pruneContextMessages({ + messages, + settings: { + ...DEFAULT_CONTEXT_PRUNING_SETTINGS, + keepLastAssistants: 1, + softTrimRatio: 0, + hardClearRatio: 10, + hardClear: { + ...DEFAULT_CONTEXT_PRUNING_SETTINGS.hardClear, + enabled: false, + }, + softTrim: { + maxChars: 5_000, + headChars: 2_000, + tailChars: 2_000, + }, + }, + ctx: CONTEXT_WINDOW_1M, + isToolPrunable: () => true, + contextWindowTokensOverride: 1, + }); + + const toolResult = result[1] as Extract; + expect(toolResult.content).toEqual([ + { type: "text", text: "[image removed during context pruning]" }, + ]); + }); + + it("hard-clears image-containing tool results once ratios require clearing", () => { + const messages: AgentMessage[] = [ + makeUser("summarize this"), + makeToolResult([ + { type: "text", text: "small text" }, + { type: "image", data: "img", mimeType: "image/png" }, + ]), + makeAssistant([{ type: "text", text: "done" }]), + ]; + + const placeholder = "[hard cleared test placeholder]"; + const result = pruneContextMessages({ + messages, + settings: { + ...DEFAULT_CONTEXT_PRUNING_SETTINGS, + keepLastAssistants: 1, + softTrimRatio: 0, + hardClearRatio: 0, + minPrunableToolChars: 1, + 
softTrim: { + maxChars: 5_000, + headChars: 2_000, + tailChars: 2_000, + }, + hardClear: { + enabled: true, + placeholder, + }, + }, + ctx: CONTEXT_WINDOW_1M, + isToolPrunable: () => true, + contextWindowTokensOverride: 8, + }); + + const toolResult = result[1] as Extract; + expect(toolResult.content).toEqual([{ type: "text", text: placeholder }]); + }); }); diff --git a/src/agents/pi-extensions/context-pruning/pruner.ts b/src/agents/pi-extensions/context-pruning/pruner.ts index c195fa79e09..a0f4458f6d4 100644 --- a/src/agents/pi-extensions/context-pruning/pruner.ts +++ b/src/agents/pi-extensions/context-pruning/pruner.ts @@ -5,9 +5,8 @@ import type { EffectiveContextPruningSettings } from "./settings.js"; import { makeToolPrunablePredicate } from "./tools.js"; const CHARS_PER_TOKEN_ESTIMATE = 4; -// We currently skip pruning tool results that contain images. Still, we count them (approx.) so -// we start trimming prunable tool results earlier when image-heavy context is consuming the window. const IMAGE_CHAR_ESTIMATE = 8_000; +const PRUNED_CONTEXT_IMAGE_MARKER = "[image removed during context pruning]"; function asText(text: string): TextContent { return { type: "text", text }; @@ -23,6 +22,22 @@ function collectTextSegments(content: ReadonlyArray) return parts; } +function collectPrunableToolResultSegments( + content: ReadonlyArray, +): string[] { + const parts: string[] = []; + for (const block of content) { + if (block.type === "text") { + parts.push(block.text); + continue; + } + if (block.type === "image") { + parts.push(PRUNED_CONTEXT_IMAGE_MARKER); + } + } + return parts; +} + function estimateJoinedTextLength(parts: string[]): number { if (parts.length === 0) { return 0; @@ -190,21 +205,25 @@ function softTrimToolResultMessage(params: { settings: EffectiveContextPruningSettings; }): ToolResultMessage | null { const { msg, settings } = params; - // Ignore image tool results for now: these are often directly relevant and hard to partially prune safely. 
- if (hasImageBlocks(msg.content)) { - return null; - } - - const parts = collectTextSegments(msg.content); + const hasImages = hasImageBlocks(msg.content); + const parts = hasImages + ? collectPrunableToolResultSegments(msg.content) + : collectTextSegments(msg.content); const rawLen = estimateJoinedTextLength(parts); if (rawLen <= settings.softTrim.maxChars) { - return null; + if (!hasImages) { + return null; + } + return { ...msg, content: [asText(parts.join("\n"))] }; } const headChars = Math.max(0, settings.softTrim.headChars); const tailChars = Math.max(0, settings.softTrim.tailChars); if (headChars + tailChars >= rawLen) { - return null; + if (!hasImages) { + return null; + } + return { ...msg, content: [asText(parts.join("\n"))] }; } const head = takeHeadFromJoinedText(parts, headChars); @@ -274,9 +293,6 @@ export function pruneContextMessages(params: { if (!isToolPrunable(msg.toolName)) { continue; } - if (hasImageBlocks(msg.content)) { - continue; - } prunableToolIndexes.push(i); const updated = softTrimToolResultMessage({ diff --git a/src/agents/pi-model-discovery-runtime.ts b/src/agents/pi-model-discovery-runtime.ts index 8f57cfab65b..d448f941d46 100644 --- a/src/agents/pi-model-discovery-runtime.ts +++ b/src/agents/pi-model-discovery-runtime.ts @@ -1 +1,6 @@ -export { discoverAuthStorage, discoverModels } from "./pi-model-discovery.js"; +export { + AuthStorage, + discoverAuthStorage, + discoverModels, + ModelRegistry, +} from "./pi-model-discovery.js"; diff --git a/src/agents/pi-tool-handler-state.test-helpers.ts b/src/agents/pi-tool-handler-state.test-helpers.ts index 0775299ab83..cfb559b9884 100644 --- a/src/agents/pi-tool-handler-state.test-helpers.ts +++ b/src/agents/pi-tool-handler-state.test-helpers.ts @@ -10,6 +10,7 @@ export function createBaseToolHandlerState() { messagingToolSentTextsNormalized: [] as string[], messagingToolSentMediaUrls: [] as string[], messagingToolSentTargets: [] as unknown[], + deterministicApprovalPromptSent: false, 
blockBuffer: "", }; } diff --git a/src/agents/pi-tools.create-openclaw-coding-tools.adds-claude-style-aliases-schemas-without-dropping.test.ts b/src/agents/pi-tools.create-openclaw-coding-tools.adds-claude-style-aliases-schemas-without-dropping.test.ts index 5a7cb72ccb7..ed705842ada 100644 --- a/src/agents/pi-tools.create-openclaw-coding-tools.adds-claude-style-aliases-schemas-without-dropping.test.ts +++ b/src/agents/pi-tools.create-openclaw-coding-tools.adds-claude-style-aliases-schemas-without-dropping.test.ts @@ -160,7 +160,8 @@ describe("createOpenClawCodingTools", () => { it("mentions Chrome extension relay in browser tool description", () => { const browser = createBrowserTool(); expect(browser.description).toMatch(/Chrome extension/i); - expect(browser.description).toMatch(/profile="chrome"/i); + expect(browser.description).toMatch(/profile="user"/i); + expect(browser.description).toMatch(/profile="chrome-relay"/i); }); it("keeps browser tool schema properties after normalization", () => { const browser = defaultTools.find((tool) => tool.name === "browser"); diff --git a/src/agents/pi-tools.policy.test.ts b/src/agents/pi-tools.policy.test.ts index 0cdc572c448..846044c41c0 100644 --- a/src/agents/pi-tools.policy.test.ts +++ b/src/agents/pi-tools.policy.test.ts @@ -1,3 +1,6 @@ +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; import { describe, expect, it } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; import { @@ -5,6 +8,7 @@ import { isToolAllowedByPolicyName, resolveEffectiveToolPolicy, resolveSubagentToolPolicy, + resolveSubagentToolPolicyForSession, } from "./pi-tools.policy.js"; import { createStubTool } from "./test-helpers/pi-tool-stubs.js"; @@ -144,9 +148,9 @@ describe("resolveSubagentToolPolicy depth awareness", () => { expect(isToolAllowedByPolicyName("sessions_spawn", policy)).toBe(false); }); - it("depth-2 leaf allows subagents (for visibility)", () => { + it("depth-2 leaf denies 
subagents", () => { const policy = resolveSubagentToolPolicy(baseCfg, 2); - expect(isToolAllowedByPolicyName("subagents", policy)).toBe(true); + expect(isToolAllowedByPolicyName("subagents", policy)).toBe(false); }); it("depth-2 leaf denies sessions_list and sessions_history", () => { @@ -165,6 +169,41 @@ describe("resolveSubagentToolPolicy depth awareness", () => { expect(isToolAllowedByPolicyName("sessions_list", policy)).toBe(false); }); + it("uses stored leaf role for flat depth-1 session keys", () => { + const storePath = path.join( + os.tmpdir(), + `openclaw-subagent-policy-${Date.now()}-${Math.random().toString(16).slice(2)}.json`, + ); + fs.mkdirSync(path.dirname(storePath), { recursive: true }); + fs.writeFileSync( + storePath, + JSON.stringify( + { + "agent:main:subagent:flat-leaf": { + sessionId: "flat-leaf", + updatedAt: Date.now(), + spawnDepth: 1, + subagentRole: "leaf", + subagentControlScope: "none", + }, + }, + null, + 2, + ), + "utf-8", + ); + const cfg = { + ...baseCfg, + session: { + store: storePath, + }, + } as unknown as OpenClawConfig; + + const policy = resolveSubagentToolPolicyForSession(cfg, "agent:main:subagent:flat-leaf"); + expect(isToolAllowedByPolicyName("sessions_spawn", policy)).toBe(false); + expect(isToolAllowedByPolicyName("subagents", policy)).toBe(false); + }); + it("defaults to leaf behavior when no depth is provided", () => { const policy = resolveSubagentToolPolicy(baseCfg); // Default depth=1, maxSpawnDepth=2 → orchestrator diff --git a/src/agents/pi-tools.policy.ts b/src/agents/pi-tools.policy.ts index 61d037dd9f3..0353c454865 100644 --- a/src/agents/pi-tools.policy.ts +++ b/src/agents/pi-tools.policy.ts @@ -11,6 +11,10 @@ import { compileGlobPatterns, matchesAnyGlobPattern } from "./glob-pattern.js"; import type { AnyAgentTool } from "./pi-tools.types.js"; import { pickSandboxToolPolicy } from "./sandbox-tool-policy.js"; import type { SandboxToolPolicy } from "./sandbox.js"; +import { + resolveStoredSubagentCapabilities, 
+ type SubagentSessionRole, +} from "./subagent-capabilities.js"; import { expandToolGroups, normalizeToolName } from "./tool-policy.js"; function makeToolPolicyMatcher(policy: SandboxToolPolicy) { @@ -64,15 +68,20 @@ const SUBAGENT_TOOL_DENY_ALWAYS = [ * Additional tools denied for leaf sub-agents (depth >= maxSpawnDepth). * These are tools that only make sense for orchestrator sub-agents that can spawn children. */ -const SUBAGENT_TOOL_DENY_LEAF = ["sessions_list", "sessions_history", "sessions_spawn"]; +const SUBAGENT_TOOL_DENY_LEAF = [ + "subagents", + "sessions_list", + "sessions_history", + "sessions_spawn", +]; /** * Build the deny list for a sub-agent at a given depth. * * - Depth 1 with maxSpawnDepth >= 2 (orchestrator): allowed to use sessions_spawn, * subagents, sessions_list, sessions_history so it can manage its children. - * - Depth >= maxSpawnDepth (leaf): denied sessions_spawn and - * session management tools. Still allowed subagents (for list/status visibility). + * - Depth >= maxSpawnDepth (leaf): denied subagents, sessions_spawn, and + * session management tools. 
*/ function resolveSubagentDenyList(depth: number, maxSpawnDepth: number): string[] { const isLeaf = depth >= Math.max(1, Math.floor(maxSpawnDepth)); @@ -84,6 +93,13 @@ function resolveSubagentDenyList(depth: number, maxSpawnDepth: number): string[] return [...SUBAGENT_TOOL_DENY_ALWAYS]; } +function resolveSubagentDenyListForRole(role: SubagentSessionRole): string[] { + if (role === "leaf") { + return [...SUBAGENT_TOOL_DENY_ALWAYS, ...SUBAGENT_TOOL_DENY_LEAF]; + } + return [...SUBAGENT_TOOL_DENY_ALWAYS]; +} + export function resolveSubagentToolPolicy(cfg?: OpenClawConfig, depth?: number): SandboxToolPolicy { const configured = cfg?.tools?.subagents?.tools; const maxSpawnDepth = @@ -103,6 +119,27 @@ export function resolveSubagentToolPolicy(cfg?: OpenClawConfig, depth?: number): return { allow: mergedAllow, deny }; } +export function resolveSubagentToolPolicyForSession( + cfg: OpenClawConfig | undefined, + sessionKey: string, +): SandboxToolPolicy { + const configured = cfg?.tools?.subagents?.tools; + const capabilities = resolveStoredSubagentCapabilities(sessionKey, { cfg }); + const allow = Array.isArray(configured?.allow) ? configured.allow : undefined; + const alsoAllow = Array.isArray(configured?.alsoAllow) ? configured.alsoAllow : undefined; + const explicitAllow = new Set( + [...(allow ?? []), ...(alsoAllow ?? [])].map((toolName) => normalizeToolName(toolName)), + ); + const deny = [ + ...resolveSubagentDenyListForRole(capabilities.role).filter( + (toolName) => !explicitAllow.has(normalizeToolName(toolName)), + ), + ...(Array.isArray(configured?.deny) ? configured.deny : []), + ]; + const mergedAllow = allow && alsoAllow ? 
Array.from(new Set([...allow, ...alsoAllow])) : allow; + return { allow: mergedAllow, deny }; +} + export function isToolAllowedByPolicyName(name: string, policy?: SandboxToolPolicy): boolean { if (!policy) { return true; diff --git a/src/agents/pi-tools.read.ts b/src/agents/pi-tools.read.ts index b01c7adff03..5ea48b01fa1 100644 --- a/src/agents/pi-tools.read.ts +++ b/src/agents/pi-tools.read.ts @@ -4,6 +4,7 @@ import { fileURLToPath } from "node:url"; import type { AgentToolResult } from "@mariozechner/pi-agent-core"; import { createEditTool, createReadTool, createWriteTool } from "@mariozechner/pi-coding-agent"; import { + appendFileWithinRoot, SafeOpenError, openFileWithinRoot, readFileWithinRoot, @@ -406,6 +407,161 @@ function mapContainerPathToWorkspaceRoot(params: { return path.resolve(params.root, ...relative.split("/").filter(Boolean)); } +export function resolveToolPathAgainstWorkspaceRoot(params: { + filePath: string; + root: string; + containerWorkdir?: string; +}): string { + const mapped = mapContainerPathToWorkspaceRoot(params); + const candidate = mapped.startsWith("@") ? mapped.slice(1) : mapped; + return path.isAbsolute(candidate) + ? 
path.resolve(candidate) + : path.resolve(params.root, candidate || "."); +} + +type MemoryFlushAppendOnlyWriteOptions = { + root: string; + relativePath: string; + containerWorkdir?: string; + sandbox?: { + root: string; + bridge: SandboxFsBridge; + }; +}; + +async function readOptionalUtf8File(params: { + absolutePath: string; + relativePath: string; + sandbox?: MemoryFlushAppendOnlyWriteOptions["sandbox"]; + signal?: AbortSignal; +}): Promise { + try { + if (params.sandbox) { + const stat = await params.sandbox.bridge.stat({ + filePath: params.relativePath, + cwd: params.sandbox.root, + signal: params.signal, + }); + if (!stat) { + return ""; + } + const buffer = await params.sandbox.bridge.readFile({ + filePath: params.relativePath, + cwd: params.sandbox.root, + signal: params.signal, + }); + return buffer.toString("utf-8"); + } + return await fs.readFile(params.absolutePath, "utf-8"); + } catch (error) { + if ((error as NodeJS.ErrnoException | undefined)?.code === "ENOENT") { + return ""; + } + throw error; + } +} + +async function appendMemoryFlushContent(params: { + absolutePath: string; + root: string; + relativePath: string; + content: string; + sandbox?: MemoryFlushAppendOnlyWriteOptions["sandbox"]; + signal?: AbortSignal; +}) { + if (!params.sandbox) { + await appendFileWithinRoot({ + rootDir: params.root, + relativePath: params.relativePath, + data: params.content, + mkdir: true, + prependNewlineIfNeeded: true, + }); + return; + } + + const existing = await readOptionalUtf8File({ + absolutePath: params.absolutePath, + relativePath: params.relativePath, + sandbox: params.sandbox, + signal: params.signal, + }); + const separator = + existing.length > 0 && !existing.endsWith("\n") && !params.content.startsWith("\n") ? 
"\n" : ""; + const next = `${existing}${separator}${params.content}`; + if (params.sandbox) { + const parent = path.posix.dirname(params.relativePath); + if (parent && parent !== ".") { + await params.sandbox.bridge.mkdirp({ + filePath: parent, + cwd: params.sandbox.root, + signal: params.signal, + }); + } + await params.sandbox.bridge.writeFile({ + filePath: params.relativePath, + cwd: params.sandbox.root, + data: next, + mkdir: true, + signal: params.signal, + }); + return; + } + await fs.mkdir(path.dirname(params.absolutePath), { recursive: true }); + await fs.writeFile(params.absolutePath, next, "utf-8"); +} + +export function wrapToolMemoryFlushAppendOnlyWrite( + tool: AnyAgentTool, + options: MemoryFlushAppendOnlyWriteOptions, +): AnyAgentTool { + const allowedAbsolutePath = path.resolve(options.root, options.relativePath); + return { + ...tool, + description: `${tool.description} During memory flush, this tool may only append to ${options.relativePath}.`, + execute: async (toolCallId, args, signal, onUpdate) => { + const normalized = normalizeToolParams(args); + const record = + normalized ?? + (args && typeof args === "object" ? (args as Record) : undefined); + assertRequiredParams(record, CLAUDE_PARAM_GROUPS.write, tool.name); + const filePath = + typeof record?.path === "string" && record.path.trim() ? record.path : undefined; + const content = typeof record?.content === "string" ? record.content : undefined; + if (!filePath || content === undefined) { + return tool.execute(toolCallId, normalized ?? 
args, signal, onUpdate); + } + + const resolvedPath = resolveToolPathAgainstWorkspaceRoot({ + filePath, + root: options.root, + containerWorkdir: options.containerWorkdir, + }); + if (resolvedPath !== allowedAbsolutePath) { + throw new Error( + `Memory flush writes are restricted to ${options.relativePath}; use that path only.`, + ); + } + + await appendMemoryFlushContent({ + absolutePath: allowedAbsolutePath, + root: options.root, + relativePath: options.relativePath, + content, + sandbox: options.sandbox, + signal, + }); + return { + content: [{ type: "text", text: `Appended content to ${options.relativePath}.` }], + details: { + path: options.relativePath, + appendOnly: true, + }, + }; + }, + }; +} + export function wrapToolWorkspaceRootGuardWithOptions( tool: AnyAgentTool, root: string, diff --git a/src/agents/pi-tools.ts b/src/agents/pi-tools.ts index 543a163ab0c..6536e9dfbb5 100644 --- a/src/agents/pi-tools.ts +++ b/src/agents/pi-tools.ts @@ -24,7 +24,7 @@ import { isToolAllowedByPolicies, resolveEffectiveToolPolicy, resolveGroupToolPolicy, - resolveSubagentToolPolicy, + resolveSubagentToolPolicyForSession, } from "./pi-tools.policy.js"; import { assertRequiredParams, @@ -36,6 +36,7 @@ import { createSandboxedWriteTool, normalizeToolParams, patchToolSchemaForClaudeCompatibility, + wrapToolMemoryFlushAppendOnlyWrite, wrapToolWorkspaceRootGuard, wrapToolWorkspaceRootGuardWithOptions, wrapToolParamNormalization, @@ -44,7 +45,6 @@ import { cleanToolSchemaForGemini, normalizeToolParameters } from "./pi-tools.sc import type { AnyAgentTool } from "./pi-tools.types.js"; import type { SandboxContext } from "./sandbox.js"; import { isXaiProvider } from "./schema/clean-for-xai.js"; -import { getSubagentDepthFromSessionStore } from "./subagent-depth.js"; import { createToolFsPolicy, resolveToolFsConfig } from "./tool-fs-policy.js"; import { applyToolPolicyPipeline, @@ -67,6 +67,7 @@ const TOOL_DENY_BY_MESSAGE_PROVIDER: Readonly> voice: ["tts"], }; const 
TOOL_DENY_FOR_XAI_PROVIDERS = new Set(["web_search"]); +const MEMORY_FLUSH_ALLOWED_TOOL_NAMES = new Set(["read", "write"]); function normalizeMessageProvider(messageProvider?: string): string | undefined { const normalized = messageProvider?.trim().toLowerCase(); @@ -207,8 +208,19 @@ export function createOpenClawCodingTools(options?: { sessionId?: string; /** Stable run identifier for this agent invocation. */ runId?: string; + /** What initiated this run (for trigger-specific tool restrictions). */ + trigger?: string; + /** Relative workspace path that memory-triggered writes may append to. */ + memoryFlushWritePath?: string; agentDir?: string; workspaceDir?: string; + /** + * Workspace directory that spawned subagents should inherit. + * When sandboxing uses a copied workspace (`ro` or `none`), workspaceDir is the + * sandbox copy but subagents should inherit the real agent workspace instead. + * Defaults to workspaceDir when not set. + */ + spawnWorkspaceDir?: string; config?: OpenClawConfig; abortSignal?: AbortSignal; /** @@ -255,9 +267,16 @@ export function createOpenClawCodingTools(options?: { disableMessageTool?: boolean; /** Whether the sender is an owner (required for owner-only tools). */ senderIsOwner?: boolean; + /** Callback invoked when sessions_yield tool is called. */ + onYield?: (message: string) => Promise | void; }): AnyAgentTool[] { const execToolName = "exec"; const sandbox = options?.sandbox?.enabled ? options.sandbox : undefined; + const isMemoryFlushRun = options?.trigger === "memory"; + if (isMemoryFlushRun && !options?.memoryFlushWritePath) { + throw new Error("memoryFlushWritePath required for memory-triggered tool runs"); + } + const memoryFlushWritePath = isMemoryFlushRun ? options.memoryFlushWritePath : undefined; const { agentId, globalPolicy, @@ -303,10 +322,7 @@ export function createOpenClawCodingTools(options?: { options?.exec?.scopeKey ?? options?.sessionKey ?? (agentId ? 
`agent:${agentId}` : undefined); const subagentPolicy = isSubagentSessionKey(options?.sessionKey) && options?.sessionKey - ? resolveSubagentToolPolicy( - options.config, - getSubagentDepthFromSessionStore(options.sessionKey, { cfg: options.config }), - ) + ? resolveSubagentToolPolicyForSession(options.config, options.sessionKey) : undefined; const allowBackground = isToolAllowedByPolicies("process", [ profilePolicyWithAlsoAllow, @@ -322,7 +338,7 @@ export function createOpenClawCodingTools(options?: { const execConfig = resolveExecConfig({ cfg: options?.config, agentId }); const fsConfig = resolveToolFsConfig({ cfg: options?.config, agentId }); const fsPolicy = createToolFsPolicy({ - workspaceOnly: fsConfig.workspaceOnly, + workspaceOnly: isMemoryFlushRun || fsConfig.workspaceOnly, }); const sandboxRoot = sandbox?.workspaceDir; const sandboxFsBridge = sandbox?.fsBridge; @@ -488,6 +504,9 @@ export function createOpenClawCodingTools(options?: { sandboxFsBridge, fsPolicy, workspaceDir: workspaceRoot, + spawnWorkspaceDir: options?.spawnWorkspaceDir + ? resolveWorkspaceRoot(options.spawnWorkspaceDir) + : undefined, sandboxed: !!sandbox, config: options?.config, pluginToolAllowlist: collectExplicitAllowlist([ @@ -513,9 +532,35 @@ export function createOpenClawCodingTools(options?: { requesterSenderId: options?.senderId, senderIsOwner: options?.senderIsOwner, sessionId: options?.sessionId, + onYield: options?.onYield, }), ]; - const toolsForMessageProvider = applyMessageProviderToolPolicy(tools, options?.messageProvider); + const toolsForMemoryFlush = + isMemoryFlushRun && memoryFlushWritePath + ? tools.flatMap((tool) => { + if (!MEMORY_FLUSH_ALLOWED_TOOL_NAMES.has(tool.name)) { + return []; + } + if (tool.name === "write") { + return [ + wrapToolMemoryFlushAppendOnlyWrite(tool, { + root: sandboxRoot ?? workspaceRoot, + relativePath: memoryFlushWritePath, + containerWorkdir: sandbox?.containerWorkdir, + sandbox: + sandboxRoot && sandboxFsBridge + ? 
{ root: sandboxRoot, bridge: sandboxFsBridge } + : undefined, + }), + ]; + } + return [tool]; + }) + : tools; + const toolsForMessageProvider = applyMessageProviderToolPolicy( + toolsForMemoryFlush, + options?.messageProvider, + ); const toolsForModelProvider = applyModelProviderToolPolicy(toolsForMessageProvider, { modelProvider: options?.modelProvider, modelId: options?.modelId, diff --git a/src/agents/pi-tools.whatsapp-login-gating.test.ts b/src/agents/pi-tools.whatsapp-login-gating.test.ts index 61f65fc0541..8dd6637becd 100644 --- a/src/agents/pi-tools.whatsapp-login-gating.test.ts +++ b/src/agents/pi-tools.whatsapp-login-gating.test.ts @@ -21,6 +21,7 @@ describe("owner-only tool gating", () => { expect(toolNames).not.toContain("whatsapp_login"); expect(toolNames).not.toContain("cron"); expect(toolNames).not.toContain("gateway"); + expect(toolNames).not.toContain("nodes"); }); it("keeps owner-only tools for authorized senders", () => { @@ -29,6 +30,13 @@ describe("owner-only tool gating", () => { expect(toolNames).toContain("whatsapp_login"); expect(toolNames).toContain("cron"); expect(toolNames).toContain("gateway"); + expect(toolNames).toContain("nodes"); + }); + + it("keeps canvas available to unauthorized senders by current trust model", () => { + const tools = createOpenClawCodingTools({ senderIsOwner: false }); + const toolNames = tools.map((tool) => tool.name); + expect(toolNames).toContain("canvas"); }); it("defaults to removing owner-only tools when owner status is unknown", () => { @@ -37,5 +45,7 @@ describe("owner-only tool gating", () => { expect(toolNames).not.toContain("whatsapp_login"); expect(toolNames).not.toContain("cron"); expect(toolNames).not.toContain("gateway"); + expect(toolNames).not.toContain("nodes"); + expect(toolNames).toContain("canvas"); }); }); diff --git a/src/agents/pi-tools.workspace-only-false.test.ts b/src/agents/pi-tools.workspace-only-false.test.ts index 713315de899..99d3a9e4b39 100644 --- 
a/src/agents/pi-tools.workspace-only-false.test.ts +++ b/src/agents/pi-tools.workspace-only-false.test.ts @@ -1,7 +1,20 @@ import fs from "node:fs/promises"; import os from "node:os"; import path from "node:path"; -import { afterEach, beforeEach, describe, expect, it } from "vitest"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; + +vi.mock("@mariozechner/pi-ai", async (importOriginal) => { + const original = await importOriginal(); + return { + ...original, + }; +}); + +vi.mock("@mariozechner/pi-ai/oauth", () => ({ + getOAuthApiKey: () => undefined, + getOAuthProviders: () => [], +})); + import { createOpenClawCodingTools } from "./pi-tools.js"; describe("FS tools with workspaceOnly=false", () => { @@ -181,4 +194,50 @@ describe("FS tools with workspaceOnly=false", () => { }), ).rejects.toThrow(/Path escapes (workspace|sandbox) root/); }); + + it("restricts memory-triggered writes to append-only canonical memory files", async () => { + const allowedRelativePath = "memory/2026-03-07.md"; + const allowedAbsolutePath = path.join(workspaceDir, allowedRelativePath); + await fs.mkdir(path.dirname(allowedAbsolutePath), { recursive: true }); + await fs.writeFile(allowedAbsolutePath, "seed"); + + const tools = createOpenClawCodingTools({ + workspaceDir, + trigger: "memory", + memoryFlushWritePath: allowedRelativePath, + config: { + tools: { + exec: { + applyPatch: { + enabled: true, + }, + }, + }, + }, + modelProvider: "openai", + modelId: "gpt-5", + }); + + const writeTool = tools.find((tool) => tool.name === "write"); + expect(writeTool).toBeDefined(); + expect(tools.map((tool) => tool.name).toSorted()).toEqual(["read", "write"]); + + await expect( + writeTool!.execute("test-call-memory-deny", { + path: outsideFile, + content: "should not write here", + }), + ).rejects.toThrow(/Memory flush writes are restricted to memory\/2026-03-07\.md/); + + const result = await writeTool!.execute("test-call-memory-append", { + path: allowedRelativePath, + 
content: "new note", + }); + expect(hasToolError(result)).toBe(false); + expect(result.content).toContainEqual({ + type: "text", + text: "Appended content to memory/2026-03-07.md.", + }); + await expect(fs.readFile(allowedAbsolutePath, "utf-8")).resolves.toBe("seed\nnew note"); + }); }); diff --git a/src/agents/provider-capabilities.test.ts b/src/agents/provider-capabilities.test.ts index 5e162c87794..ef59f025de8 100644 --- a/src/agents/provider-capabilities.test.ts +++ b/src/agents/provider-capabilities.test.ts @@ -22,7 +22,19 @@ describe("resolveProviderCapabilities", () => { transcriptToolCallIdMode: "default", transcriptToolCallIdModelHints: [], geminiThoughtSignatureModelHints: [], - dropThinkingBlockModelHints: [], + dropThinkingBlockModelHints: ["claude"], + }); + expect(resolveProviderCapabilities("amazon-bedrock")).toEqual({ + anthropicToolSchemaMode: "native", + anthropicToolChoiceMode: "native", + providerFamily: "anthropic", + preserveAnthropicThinkingSignatures: true, + openAiCompatTurnValidation: true, + geminiThoughtSignatureSanitization: false, + transcriptToolCallIdMode: "default", + transcriptToolCallIdModelHints: [], + geminiThoughtSignatureModelHints: [], + dropThinkingBlockModelHints: ["claude"], }); }); @@ -47,6 +59,7 @@ describe("resolveProviderCapabilities", () => { it("flags providers that opt out of OpenAI-compatible turn validation", () => { expect(supportsOpenAiCompatTurnValidation("openrouter")).toBe(false); expect(supportsOpenAiCompatTurnValidation("opencode")).toBe(false); + expect(supportsOpenAiCompatTurnValidation("opencode-go")).toBe(false); expect(supportsOpenAiCompatTurnValidation("moonshot")).toBe(true); }); @@ -63,6 +76,12 @@ describe("resolveProviderCapabilities", () => { modelId: "gemini-2.0-flash", }), ).toBe(true); + expect( + shouldSanitizeGeminiThoughtSignaturesForModel({ + provider: "opencode-go", + modelId: "google/gemini-2.5-pro-preview", + }), + ).toBe(true); expect(resolveTranscriptToolCallIdMode("mistral", 
"mistral-large-latest")).toBe("strict9"); }); @@ -75,6 +94,18 @@ describe("resolveProviderCapabilities", () => { it("tracks provider families and model-specific transcript quirks in the registry", () => { expect(isOpenAiProviderFamily("openai")).toBe(true); expect(isAnthropicProviderFamily("amazon-bedrock")).toBe(true); + expect( + shouldDropThinkingBlocksForModel({ + provider: "anthropic", + modelId: "claude-opus-4-6", + }), + ).toBe(true); + expect( + shouldDropThinkingBlocksForModel({ + provider: "amazon-bedrock", + modelId: "anthropic.claude-3-5-sonnet-20241022-v2:0", + }), + ).toBe(true); expect( shouldDropThinkingBlocksForModel({ provider: "github-copilot", diff --git a/src/agents/provider-capabilities.ts b/src/agents/provider-capabilities.ts index 62007b810f8..f443fac4d11 100644 --- a/src/agents/provider-capabilities.ts +++ b/src/agents/provider-capabilities.ts @@ -29,9 +29,11 @@ const DEFAULT_PROVIDER_CAPABILITIES: ProviderCapabilities = { const PROVIDER_CAPABILITIES: Record> = { anthropic: { providerFamily: "anthropic", + dropThinkingBlockModelHints: ["claude"], }, "amazon-bedrock": { providerFamily: "anthropic", + dropThinkingBlockModelHints: ["claude"], }, // kimi-coding natively supports Anthropic tool framing (input_schema); // converting to OpenAI format causes XML text fallback instead of tool_use blocks. 
@@ -66,6 +68,11 @@ const PROVIDER_CAPABILITIES: Record> = { geminiThoughtSignatureSanitization: true, geminiThoughtSignatureModelHints: ["gemini"], }, + "opencode-go": { + openAiCompatTurnValidation: false, + geminiThoughtSignatureSanitization: true, + geminiThoughtSignatureModelHints: ["gemini"], + }, kilocode: { geminiThoughtSignatureSanitization: true, geminiThoughtSignatureModelHints: ["gemini"], diff --git a/src/agents/sandbox-create-args.test.ts b/src/agents/sandbox-create-args.test.ts index 9bc00547143..60b6241f58a 100644 --- a/src/agents/sandbox-create-args.test.ts +++ b/src/agents/sandbox-create-args.test.ts @@ -1,4 +1,5 @@ import { describe, expect, it } from "vitest"; +import { OPENCLAW_CLI_ENV_VALUE } from "../infra/openclaw-exec-env.js"; import { buildSandboxCreateArgs } from "./sandbox/docker.js"; import type { SandboxDockerConfig } from "./sandbox/types.js"; @@ -113,7 +114,14 @@ describe("buildSandboxCreateArgs", () => { "1.5", ]), ); - expect(args).toEqual(expect.arrayContaining(["--env", "LANG=C.UTF-8"])); + expect(args).toEqual( + expect.arrayContaining([ + "--env", + "LANG=C.UTF-8", + "--env", + `OPENCLAW_CLI=${OPENCLAW_CLI_ENV_VALUE}`, + ]), + ); const ulimitValues: string[] = []; for (let i = 0; i < args.length; i += 1) { @@ -129,6 +137,33 @@ describe("buildSandboxCreateArgs", () => { ); }); + it("preserves the OpenClaw exec marker when strict env sanitization is enabled", () => { + const cfg = createSandboxConfig({ + env: { + NODE_ENV: "test", + }, + }); + + const args = buildSandboxCreateArgs({ + name: "openclaw-sbx-marker", + cfg, + scopeKey: "main", + createdAtMs: 1700000000000, + envSanitizationOptions: { + strictMode: true, + }, + }); + + expect(args).toEqual( + expect.arrayContaining([ + "--env", + "NODE_ENV=test", + "--env", + `OPENCLAW_CLI=${OPENCLAW_CLI_ENV_VALUE}`, + ]), + ); + }); + it("emits -v flags for safe custom binds", () => { const cfg: SandboxDockerConfig = { image: "openclaw-sandbox:bookworm-slim", diff --git 
a/src/agents/sandbox/constants.ts b/src/agents/sandbox/constants.ts index f2a562f26b6..8e906eb9432 100644 --- a/src/agents/sandbox/constants.ts +++ b/src/agents/sandbox/constants.ts @@ -1,6 +1,6 @@ import path from "node:path"; import { CHANNEL_IDS } from "../../channels/registry.js"; -import { STATE_DIR } from "../../config/config.js"; +import { STATE_DIR } from "../../config/paths.js"; export const DEFAULT_SANDBOX_WORKSPACE_ROOT = path.join(STATE_DIR, "sandboxes"); @@ -22,6 +22,7 @@ export const DEFAULT_TOOL_ALLOW = [ "sessions_history", "sessions_send", "sessions_spawn", + "sessions_yield", "subagents", "session_status", ] as const; diff --git a/src/agents/sandbox/docker.ts b/src/agents/sandbox/docker.ts index 2bd9dad12b5..aefceb08495 100644 --- a/src/agents/sandbox/docker.ts +++ b/src/agents/sandbox/docker.ts @@ -5,6 +5,7 @@ import { resolveWindowsSpawnProgram, } from "../../plugin-sdk/windows-spawn.js"; import { sanitizeEnvVars } from "./sanitize-env-vars.js"; +import type { EnvSanitizationOptions } from "./sanitize-env-vars.js"; type ExecDockerRawOptions = { allowFailure?: boolean; @@ -52,7 +53,7 @@ export function resolveDockerSpawnInvocation( env: runtime.env, execPath: runtime.execPath, packageName: "docker", - allowShellFallback: true, + allowShellFallback: false, }); const resolved = materializeWindowsSpawnProgram(program, args); return { @@ -162,6 +163,7 @@ export function execDockerRaw( } import { formatCliCommand } from "../../cli/command-format.js"; +import { markOpenClawExecEnv } from "../../infra/openclaw-exec-env.js"; import { defaultRuntime } from "../../runtime.js"; import { computeSandboxConfigHash } from "./config-hash.js"; import { DEFAULT_SANDBOX_IMAGE } from "./constants.js"; @@ -324,6 +326,7 @@ export function buildSandboxCreateArgs(params: { allowSourcesOutsideAllowedRoots?: boolean; allowReservedContainerTargets?: boolean; allowContainerNamespaceJoin?: boolean; + envSanitizationOptions?: EnvSanitizationOptions; }) { // Runtime security 
validation: blocks dangerous bind mounts, network modes, and profiles. validateSandboxSecurity({ @@ -365,14 +368,14 @@ export function buildSandboxCreateArgs(params: { if (params.cfg.user) { args.push("--user", params.cfg.user); } - const envSanitization = sanitizeEnvVars(params.cfg.env ?? {}); + const envSanitization = sanitizeEnvVars(params.cfg.env ?? {}, params.envSanitizationOptions); if (envSanitization.blocked.length > 0) { log.warn(`Blocked sensitive environment variables: ${envSanitization.blocked.join(", ")}`); } if (envSanitization.warnings.length > 0) { log.warn(`Suspicious environment variables: ${envSanitization.warnings.join(", ")}`); } - for (const [key, value] of Object.entries(envSanitization.allowed)) { + for (const [key, value] of Object.entries(markOpenClawExecEnv(envSanitization.allowed))) { args.push("--env", `${key}=${value}`); } for (const cap of params.cfg.capDrop) { diff --git a/src/agents/sandbox/docker.windows.test.ts b/src/agents/sandbox/docker.windows.test.ts index 3dd294e8360..7abebad98ab 100644 --- a/src/agents/sandbox/docker.windows.test.ts +++ b/src/agents/sandbox/docker.windows.test.ts @@ -47,22 +47,20 @@ describe("resolveDockerSpawnInvocation", () => { }); }); - it("falls back to shell mode when only unresolved docker.cmd wrapper exists", async () => { + it("rejects unresolved docker.cmd wrappers instead of shelling out", async () => { const dir = await createTempDir(); const cmdPath = path.join(dir, "docker.cmd"); await mkdir(path.dirname(cmdPath), { recursive: true }); await writeFile(cmdPath, "@ECHO off\r\necho docker\r\n", "utf8"); - const resolved = resolveDockerSpawnInvocation(["ps"], { - platform: "win32", - env: { PATH: dir, PATHEXT: ".CMD;.EXE;.BAT" }, - execPath: "C:\\node\\node.exe", - }); - expect(path.normalize(resolved.command).toLowerCase()).toBe( - path.normalize(cmdPath).toLowerCase(), + expect(() => + resolveDockerSpawnInvocation(["ps"], { + platform: "win32", + env: { PATH: dir, PATHEXT: ".CMD;.EXE;.BAT" }, + 
execPath: "C:\\node\\node.exe", + }), + ).toThrow( + /wrapper resolved, but no executable\/Node entrypoint could be resolved without shell execution\./i, ); - expect(resolved.args).toEqual(["ps"]); - expect(resolved.shell).toBe(true); - expect(resolved.windowsHide).toBeUndefined(); }); }); diff --git a/src/agents/sandbox/fs-bridge-mutation-helper.test.ts b/src/agents/sandbox/fs-bridge-mutation-helper.test.ts new file mode 100644 index 00000000000..973c81341d1 --- /dev/null +++ b/src/agents/sandbox/fs-bridge-mutation-helper.test.ts @@ -0,0 +1,186 @@ +import { spawnSync } from "node:child_process"; +import fs from "node:fs/promises"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { withTempDir } from "../../test-helpers/temp-dir.js"; +import { + buildPinnedWritePlan, + SANDBOX_PINNED_MUTATION_PYTHON, +} from "./fs-bridge-mutation-helper.js"; + +function runMutation(args: string[], input?: string) { + return spawnSync("python3", ["-c", SANDBOX_PINNED_MUTATION_PYTHON, ...args], { + input, + encoding: "utf8", + stdio: ["pipe", "pipe", "pipe"], + }); +} + +function runWritePlan(args: string[], input?: string) { + const plan = buildPinnedWritePlan({ + check: { + target: { + hostPath: args[1] ?? "", + containerPath: args[1] ?? "", + relativePath: path.posix.join(args[2] ?? "", args[3] ?? ""), + writable: true, + }, + options: { + action: "write files", + requireWritable: true, + }, + }, + pinned: { + mountRootPath: args[1] ?? "", + relativeParentPath: args[2] ?? "", + basename: args[3] ?? "", + }, + mkdir: args[4] === "1", + }); + + return spawnSync("sh", ["-c", plan.script, "moltbot-sandbox-fs", ...(plan.args ?? 
[])], { + input, + encoding: "utf8", + stdio: ["pipe", "pipe", "pipe"], + }); +} + +describe("sandbox pinned mutation helper", () => { + it("writes through a pinned directory fd", async () => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { + const workspace = path.join(root, "workspace"); + await fs.mkdir(workspace, { recursive: true }); + + const result = runMutation(["write", workspace, "nested/deeper", "note.txt", "1"], "hello"); + + expect(result.status).toBe(0); + await expect( + fs.readFile(path.join(workspace, "nested", "deeper", "note.txt"), "utf8"), + ).resolves.toBe("hello"); + }); + }); + + it.runIf(process.platform !== "win32")( + "preserves stdin payload bytes when the pinned write plan runs through sh", + async () => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { + const workspace = path.join(root, "workspace"); + await fs.mkdir(workspace, { recursive: true }); + + const result = runWritePlan( + ["write", workspace, "nested/deeper", "note.txt", "1"], + "hello", + ); + + expect(result.status).toBe(0); + await expect( + fs.readFile(path.join(workspace, "nested", "deeper", "note.txt"), "utf8"), + ).resolves.toBe("hello"); + }); + }, + ); + + it.runIf(process.platform !== "win32")( + "rejects symlink-parent writes instead of materializing a temp file outside the mount", + async () => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { + const workspace = path.join(root, "workspace"); + const outside = path.join(root, "outside"); + await fs.mkdir(workspace, { recursive: true }); + await fs.mkdir(outside, { recursive: true }); + await fs.symlink(outside, path.join(workspace, "alias")); + + const result = runMutation(["write", workspace, "alias", "escape.txt", "0"], "owned"); + + expect(result.status).not.toBe(0); + await expect(fs.readFile(path.join(outside, "escape.txt"), "utf8")).rejects.toThrow(); + }); + }, + ); + + it.runIf(process.platform !== 
"win32")("rejects symlink segments during mkdirp", async () => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { + const workspace = path.join(root, "workspace"); + const outside = path.join(root, "outside"); + await fs.mkdir(workspace, { recursive: true }); + await fs.mkdir(outside, { recursive: true }); + await fs.symlink(outside, path.join(workspace, "alias")); + + const result = runMutation(["mkdirp", workspace, "alias/nested"]); + + expect(result.status).not.toBe(0); + await expect(fs.readFile(path.join(outside, "nested"), "utf8")).rejects.toThrow(); + }); + }); + + it.runIf(process.platform !== "win32")("remove unlinks the symlink itself", async () => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { + const workspace = path.join(root, "workspace"); + const outside = path.join(root, "outside"); + await fs.mkdir(workspace, { recursive: true }); + await fs.mkdir(outside, { recursive: true }); + await fs.writeFile(path.join(outside, "secret.txt"), "classified", "utf8"); + await fs.symlink(path.join(outside, "secret.txt"), path.join(workspace, "link.txt")); + + const result = runMutation(["remove", workspace, "", "link.txt", "0", "0"]); + + expect(result.status).toBe(0); + await expect(fs.readlink(path.join(workspace, "link.txt"))).rejects.toThrow(); + await expect(fs.readFile(path.join(outside, "secret.txt"), "utf8")).resolves.toBe( + "classified", + ); + }); + }); + + it.runIf(process.platform !== "win32")( + "rejects symlink destination parents during rename", + async () => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { + const workspace = path.join(root, "workspace"); + const outside = path.join(root, "outside"); + await fs.mkdir(workspace, { recursive: true }); + await fs.mkdir(outside, { recursive: true }); + await fs.writeFile(path.join(workspace, "from.txt"), "payload", "utf8"); + await fs.symlink(outside, path.join(workspace, "alias")); + + const result = 
runMutation([ + "rename", + workspace, + "", + "from.txt", + workspace, + "alias", + "escape.txt", + "1", + ]); + + expect(result.status).not.toBe(0); + await expect(fs.readFile(path.join(workspace, "from.txt"), "utf8")).resolves.toBe( + "payload", + ); + await expect(fs.readFile(path.join(outside, "escape.txt"), "utf8")).rejects.toThrow(); + }); + }, + ); + + it.runIf(process.platform !== "win32")( + "copies directories across different mount roots during rename fallback", + async () => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { + const sourceRoot = path.join(root, "source"); + const destRoot = path.join(root, "dest"); + await fs.mkdir(path.join(sourceRoot, "dir", "nested"), { recursive: true }); + await fs.mkdir(destRoot, { recursive: true }); + await fs.writeFile(path.join(sourceRoot, "dir", "nested", "file.txt"), "payload", "utf8"); + + const result = runMutation(["rename", sourceRoot, "", "dir", destRoot, "", "moved", "1"]); + + expect(result.status).toBe(0); + await expect( + fs.readFile(path.join(destRoot, "moved", "nested", "file.txt"), "utf8"), + ).resolves.toBe("payload"); + await expect(fs.stat(path.join(sourceRoot, "dir"))).rejects.toThrow(); + }); + }, + ); +}); diff --git a/src/agents/sandbox/fs-bridge-mutation-helper.ts b/src/agents/sandbox/fs-bridge-mutation-helper.ts new file mode 100644 index 00000000000..3c6edb2c2cb --- /dev/null +++ b/src/agents/sandbox/fs-bridge-mutation-helper.ts @@ -0,0 +1,353 @@ +import { PATH_ALIAS_POLICIES } from "../../infra/path-alias-guards.js"; +import type { + PathSafetyCheck, + PinnedSandboxDirectoryEntry, + PinnedSandboxEntry, +} from "./fs-bridge-path-safety.js"; +import type { SandboxFsCommandPlan } from "./fs-bridge-shell-command-plans.js"; + +export const SANDBOX_PINNED_MUTATION_PYTHON = [ + "import errno", + "import os", + "import secrets", + "import stat", + "import sys", + "", + "operation = sys.argv[1]", + "", + "DIR_FLAGS = os.O_RDONLY", + "if hasattr(os, 
'O_DIRECTORY'):", + " DIR_FLAGS |= os.O_DIRECTORY", + "if hasattr(os, 'O_NOFOLLOW'):", + " DIR_FLAGS |= os.O_NOFOLLOW", + "", + "READ_FLAGS = os.O_RDONLY", + "if hasattr(os, 'O_NOFOLLOW'):", + " READ_FLAGS |= os.O_NOFOLLOW", + "", + "WRITE_FLAGS = os.O_WRONLY | os.O_CREAT | os.O_EXCL", + "if hasattr(os, 'O_NOFOLLOW'):", + " WRITE_FLAGS |= os.O_NOFOLLOW", + "", + "def split_relative(path_value):", + " segments = []", + " for segment in path_value.split('/'):", + " if not segment or segment == '.':", + " continue", + " if segment == '..':", + " raise OSError(errno.EPERM, 'path traversal is not allowed', segment)", + " segments.append(segment)", + " return segments", + "", + "def open_dir(path_value, dir_fd=None):", + " return os.open(path_value, DIR_FLAGS, dir_fd=dir_fd)", + "", + "def walk_dir(root_fd, rel_path, mkdir_enabled):", + " current_fd = os.dup(root_fd)", + " try:", + " for segment in split_relative(rel_path):", + " try:", + " next_fd = open_dir(segment, dir_fd=current_fd)", + " except FileNotFoundError:", + " if not mkdir_enabled:", + " raise", + " os.mkdir(segment, 0o777, dir_fd=current_fd)", + " next_fd = open_dir(segment, dir_fd=current_fd)", + " os.close(current_fd)", + " current_fd = next_fd", + " return current_fd", + " except Exception:", + " os.close(current_fd)", + " raise", + "", + "def create_temp_file(parent_fd, basename):", + " prefix = '.openclaw-write-' + basename + '.'", + " for _ in range(128):", + " candidate = prefix + secrets.token_hex(6)", + " try:", + " fd = os.open(candidate, WRITE_FLAGS, 0o600, dir_fd=parent_fd)", + " return candidate, fd", + " except FileExistsError:", + " continue", + " raise RuntimeError('failed to allocate sandbox temp file')", + "", + "def create_temp_dir(parent_fd, basename, mode):", + " prefix = '.openclaw-move-' + basename + '.'", + " for _ in range(128):", + " candidate = prefix + secrets.token_hex(6)", + " try:", + " os.mkdir(candidate, mode, dir_fd=parent_fd)", + " return candidate", + " except 
FileExistsError:", + " continue", + " raise RuntimeError('failed to allocate sandbox temp directory')", + "", + "def write_atomic(parent_fd, basename, stdin_buffer):", + " temp_fd = None", + " temp_name = None", + " try:", + " temp_name, temp_fd = create_temp_file(parent_fd, basename)", + " while True:", + " chunk = stdin_buffer.read(65536)", + " if not chunk:", + " break", + " os.write(temp_fd, chunk)", + " os.fsync(temp_fd)", + " os.close(temp_fd)", + " temp_fd = None", + " os.replace(temp_name, basename, src_dir_fd=parent_fd, dst_dir_fd=parent_fd)", + " temp_name = None", + " os.fsync(parent_fd)", + " finally:", + " if temp_fd is not None:", + " os.close(temp_fd)", + " if temp_name is not None:", + " try:", + " os.unlink(temp_name, dir_fd=parent_fd)", + " except FileNotFoundError:", + " pass", + "", + "def remove_tree(parent_fd, basename):", + " entry_stat = os.lstat(basename, dir_fd=parent_fd)", + " if not stat.S_ISDIR(entry_stat.st_mode) or stat.S_ISLNK(entry_stat.st_mode):", + " os.unlink(basename, dir_fd=parent_fd)", + " return", + " dir_fd = open_dir(basename, dir_fd=parent_fd)", + " try:", + " for child in os.listdir(dir_fd):", + " remove_tree(dir_fd, child)", + " finally:", + " os.close(dir_fd)", + " os.rmdir(basename, dir_fd=parent_fd)", + "", + "def move_entry(src_parent_fd, src_basename, dst_parent_fd, dst_basename):", + " try:", + " os.rename(src_basename, dst_basename, src_dir_fd=src_parent_fd, dst_dir_fd=dst_parent_fd)", + " os.fsync(dst_parent_fd)", + " os.fsync(src_parent_fd)", + " return", + " except OSError as err:", + " if err.errno != errno.EXDEV:", + " raise", + " src_stat = os.lstat(src_basename, dir_fd=src_parent_fd)", + " if stat.S_ISDIR(src_stat.st_mode) and not stat.S_ISLNK(src_stat.st_mode):", + " temp_dir_name = create_temp_dir(dst_parent_fd, dst_basename, stat.S_IMODE(src_stat.st_mode) or 0o755)", + " temp_dir_fd = open_dir(temp_dir_name, dir_fd=dst_parent_fd)", + " src_dir_fd = open_dir(src_basename, dir_fd=src_parent_fd)", + " 
try:", + " for child in os.listdir(src_dir_fd):", + " move_entry(src_dir_fd, child, temp_dir_fd, child)", + " finally:", + " os.close(src_dir_fd)", + " os.close(temp_dir_fd)", + " os.rename(temp_dir_name, dst_basename, src_dir_fd=dst_parent_fd, dst_dir_fd=dst_parent_fd)", + " os.rmdir(src_basename, dir_fd=src_parent_fd)", + " os.fsync(dst_parent_fd)", + " os.fsync(src_parent_fd)", + " return", + " if stat.S_ISLNK(src_stat.st_mode):", + " link_target = os.readlink(src_basename, dir_fd=src_parent_fd)", + " try:", + " os.unlink(dst_basename, dir_fd=dst_parent_fd)", + " except FileNotFoundError:", + " pass", + " os.symlink(link_target, dst_basename, dir_fd=dst_parent_fd)", + " os.unlink(src_basename, dir_fd=src_parent_fd)", + " os.fsync(dst_parent_fd)", + " os.fsync(src_parent_fd)", + " return", + " src_fd = os.open(src_basename, READ_FLAGS, dir_fd=src_parent_fd)", + " temp_fd = None", + " temp_name = None", + " try:", + " temp_name, temp_fd = create_temp_file(dst_parent_fd, dst_basename)", + " while True:", + " chunk = os.read(src_fd, 65536)", + " if not chunk:", + " break", + " os.write(temp_fd, chunk)", + " try:", + " os.fchmod(temp_fd, stat.S_IMODE(src_stat.st_mode))", + " except AttributeError:", + " pass", + " os.fsync(temp_fd)", + " os.close(temp_fd)", + " temp_fd = None", + " os.replace(temp_name, dst_basename, src_dir_fd=dst_parent_fd, dst_dir_fd=dst_parent_fd)", + " temp_name = None", + " os.unlink(src_basename, dir_fd=src_parent_fd)", + " os.fsync(dst_parent_fd)", + " os.fsync(src_parent_fd)", + " finally:", + " if temp_fd is not None:", + " os.close(temp_fd)", + " if temp_name is not None:", + " try:", + " os.unlink(temp_name, dir_fd=dst_parent_fd)", + " except FileNotFoundError:", + " pass", + " os.close(src_fd)", + "", + "if operation == 'write':", + " root_fd = open_dir(sys.argv[2])", + " parent_fd = None", + " try:", + " parent_fd = walk_dir(root_fd, sys.argv[3], sys.argv[5] == '1')", + " write_atomic(parent_fd, sys.argv[4], sys.stdin.buffer)", + " 
finally:", + " if parent_fd is not None:", + " os.close(parent_fd)", + " os.close(root_fd)", + "elif operation == 'mkdirp':", + " root_fd = open_dir(sys.argv[2])", + " target_fd = None", + " try:", + " target_fd = walk_dir(root_fd, sys.argv[3], True)", + " os.fsync(target_fd)", + " finally:", + " if target_fd is not None:", + " os.close(target_fd)", + " os.close(root_fd)", + "elif operation == 'remove':", + " root_fd = open_dir(sys.argv[2])", + " parent_fd = None", + " try:", + " parent_fd = walk_dir(root_fd, sys.argv[3], False)", + " try:", + " if sys.argv[5] == '1':", + " remove_tree(parent_fd, sys.argv[4])", + " else:", + " entry_stat = os.lstat(sys.argv[4], dir_fd=parent_fd)", + " if stat.S_ISDIR(entry_stat.st_mode) and not stat.S_ISLNK(entry_stat.st_mode):", + " os.rmdir(sys.argv[4], dir_fd=parent_fd)", + " else:", + " os.unlink(sys.argv[4], dir_fd=parent_fd)", + " os.fsync(parent_fd)", + " except FileNotFoundError:", + " if sys.argv[6] != '1':", + " raise", + " finally:", + " if parent_fd is not None:", + " os.close(parent_fd)", + " os.close(root_fd)", + "elif operation == 'rename':", + " src_root_fd = open_dir(sys.argv[2])", + " dst_root_fd = open_dir(sys.argv[5])", + " src_parent_fd = None", + " dst_parent_fd = None", + " try:", + " src_parent_fd = walk_dir(src_root_fd, sys.argv[3], False)", + " dst_parent_fd = walk_dir(dst_root_fd, sys.argv[6], sys.argv[8] == '1')", + " move_entry(src_parent_fd, sys.argv[4], dst_parent_fd, sys.argv[7])", + " finally:", + " if src_parent_fd is not None:", + " os.close(src_parent_fd)", + " if dst_parent_fd is not None:", + " os.close(dst_parent_fd)", + " os.close(src_root_fd)", + " os.close(dst_root_fd)", + "else:", + " raise RuntimeError('unknown sandbox mutation operation: ' + operation)", +].join("\n"); + +function buildPinnedMutationPlan(params: { + args: string[]; + checks: PathSafetyCheck[]; +}): SandboxFsCommandPlan { + return { + checks: params.checks, + recheckBeforeCommand: true, + // Feed the helper source over fd 
3 so stdin stays available for write payload bytes. + script: [ + "set -eu", + "python3 /dev/fd/3 \"$@\" 3<<'PY'", + SANDBOX_PINNED_MUTATION_PYTHON, + "PY", + ].join("\n"), + args: params.args, + }; +} + +export function buildPinnedWritePlan(params: { + check: PathSafetyCheck; + pinned: PinnedSandboxEntry; + mkdir: boolean; +}): SandboxFsCommandPlan { + return buildPinnedMutationPlan({ + checks: [params.check], + args: [ + "write", + params.pinned.mountRootPath, + params.pinned.relativeParentPath, + params.pinned.basename, + params.mkdir ? "1" : "0", + ], + }); +} + +export function buildPinnedMkdirpPlan(params: { + check: PathSafetyCheck; + pinned: PinnedSandboxDirectoryEntry; +}): SandboxFsCommandPlan { + return buildPinnedMutationPlan({ + checks: [params.check], + args: ["mkdirp", params.pinned.mountRootPath, params.pinned.relativePath], + }); +} + +export function buildPinnedRemovePlan(params: { + check: PathSafetyCheck; + pinned: PinnedSandboxEntry; + recursive?: boolean; + force?: boolean; +}): SandboxFsCommandPlan { + return buildPinnedMutationPlan({ + checks: [ + { + target: params.check.target, + options: { + ...params.check.options, + aliasPolicy: PATH_ALIAS_POLICIES.unlinkTarget, + }, + }, + ], + args: [ + "remove", + params.pinned.mountRootPath, + params.pinned.relativeParentPath, + params.pinned.basename, + params.recursive ? "1" : "0", + params.force === false ? 
"0" : "1", + ], + }); +} + +export function buildPinnedRenamePlan(params: { + fromCheck: PathSafetyCheck; + toCheck: PathSafetyCheck; + from: PinnedSandboxEntry; + to: PinnedSandboxEntry; +}): SandboxFsCommandPlan { + return buildPinnedMutationPlan({ + checks: [ + { + target: params.fromCheck.target, + options: { + ...params.fromCheck.options, + aliasPolicy: PATH_ALIAS_POLICIES.unlinkTarget, + }, + }, + params.toCheck, + ], + args: [ + "rename", + params.from.mountRootPath, + params.from.relativeParentPath, + params.from.basename, + params.to.mountRootPath, + params.to.relativeParentPath, + params.to.basename, + "1", + ], + }); +} diff --git a/src/agents/sandbox/fs-bridge-mutation-python-source.ts b/src/agents/sandbox/fs-bridge-mutation-python-source.ts new file mode 100644 index 00000000000..d0653e6ae41 --- /dev/null +++ b/src/agents/sandbox/fs-bridge-mutation-python-source.ts @@ -0,0 +1,190 @@ +// language=python +export const SANDBOX_PINNED_FS_MUTATION_PYTHON = String.raw`import os +import secrets +import subprocess +import sys + +operation = sys.argv[1] + +DIR_FLAGS = os.O_RDONLY +if hasattr(os, "O_DIRECTORY"): + DIR_FLAGS |= os.O_DIRECTORY +if hasattr(os, "O_NOFOLLOW"): + DIR_FLAGS |= os.O_NOFOLLOW + +WRITE_FLAGS = os.O_WRONLY | os.O_CREAT | os.O_EXCL +if hasattr(os, "O_NOFOLLOW"): + WRITE_FLAGS |= os.O_NOFOLLOW + + +def open_dir(path, dir_fd=None): + return os.open(path, DIR_FLAGS, dir_fd=dir_fd) + + +def walk_parent(root_fd, rel_parent, mkdir_enabled): + current_fd = os.dup(root_fd) + try: + segments = [segment for segment in rel_parent.split("/") if segment and segment != "."] + for segment in segments: + if segment == "..": + raise OSError("path traversal is not allowed") + try: + next_fd = open_dir(segment, dir_fd=current_fd) + except FileNotFoundError: + if not mkdir_enabled: + raise + os.mkdir(segment, 0o777, dir_fd=current_fd) + next_fd = open_dir(segment, dir_fd=current_fd) + os.close(current_fd) + current_fd = next_fd + return current_fd + except 
Exception: + os.close(current_fd) + raise + + +def create_temp_file(parent_fd, basename): + prefix = ".openclaw-write-" + basename + "." + for _ in range(128): + candidate = prefix + secrets.token_hex(6) + try: + fd = os.open(candidate, WRITE_FLAGS, 0o600, dir_fd=parent_fd) + return candidate, fd + except FileExistsError: + continue + raise RuntimeError("failed to allocate sandbox temp file") + + +def fd_path(fd, basename=None): + base = f"/proc/self/fd/{fd}" + if basename is None: + return base + return f"{base}/{basename}" + + +def run_command(argv, pass_fds): + subprocess.run(argv, check=True, pass_fds=tuple(pass_fds)) + + +def write_stdin_to_fd(fd): + while True: + chunk = sys.stdin.buffer.read(65536) + if not chunk: + break + os.write(fd, chunk) + + +def run_write(args): + mount_root, relative_parent, basename, mkdir_enabled_raw = args + mkdir_enabled = mkdir_enabled_raw == "1" + root_fd = open_dir(mount_root) + parent_fd = None + temp_fd = None + temp_name = None + try: + parent_fd = walk_parent(root_fd, relative_parent, mkdir_enabled) + temp_name, temp_fd = create_temp_file(parent_fd, basename) + write_stdin_to_fd(temp_fd) + os.fsync(temp_fd) + os.close(temp_fd) + temp_fd = None + os.replace(temp_name, basename, src_dir_fd=parent_fd, dst_dir_fd=parent_fd) + os.fsync(parent_fd) + except Exception: + if temp_fd is not None: + os.close(temp_fd) + temp_fd = None + if temp_name is not None and parent_fd is not None: + try: + os.unlink(temp_name, dir_fd=parent_fd) + except FileNotFoundError: + pass + raise + finally: + if parent_fd is not None: + os.close(parent_fd) + os.close(root_fd) + + +def run_mkdirp(args): + mount_root, relative_parent, basename = args + root_fd = open_dir(mount_root) + parent_fd = None + try: + parent_fd = walk_parent(root_fd, relative_parent, True) + run_command(["mkdir", "-p", "--", fd_path(parent_fd, basename)], [parent_fd]) + os.fsync(parent_fd) + finally: + if parent_fd is not None: + os.close(parent_fd) + os.close(root_fd) + + +def 
run_remove(args): + mount_root, relative_parent, basename, recursive_raw, force_raw = args + root_fd = open_dir(mount_root) + parent_fd = None + try: + parent_fd = walk_parent(root_fd, relative_parent, False) + argv = ["rm"] + if force_raw == "1": + argv.append("-f") + if recursive_raw == "1": + argv.append("-r") + argv.extend(["--", fd_path(parent_fd, basename)]) + run_command(argv, [parent_fd]) + os.fsync(parent_fd) + finally: + if parent_fd is not None: + os.close(parent_fd) + os.close(root_fd) + + +def run_rename(args): + ( + from_mount_root, + from_relative_parent, + from_basename, + to_mount_root, + to_relative_parent, + to_basename, + ) = args + from_root_fd = open_dir(from_mount_root) + to_root_fd = open_dir(to_mount_root) + from_parent_fd = None + to_parent_fd = None + try: + from_parent_fd = walk_parent(from_root_fd, from_relative_parent, False) + to_parent_fd = walk_parent(to_root_fd, to_relative_parent, True) + run_command( + [ + "mv", + "--", + fd_path(from_parent_fd, from_basename), + fd_path(to_parent_fd, to_basename), + ], + [from_parent_fd, to_parent_fd], + ) + os.fsync(from_parent_fd) + if to_parent_fd != from_parent_fd: + os.fsync(to_parent_fd) + finally: + if from_parent_fd is not None: + os.close(from_parent_fd) + if to_parent_fd is not None: + os.close(to_parent_fd) + os.close(from_root_fd) + os.close(to_root_fd) + + +OPERATIONS = { + "write": run_write, + "mkdirp": run_mkdirp, + "remove": run_remove, + "rename": run_rename, +} + +if operation not in OPERATIONS: + raise RuntimeError(f"unknown sandbox fs mutation: {operation}") + +OPERATIONS[operation](sys.argv[2:])`; diff --git a/src/agents/sandbox/fs-bridge-path-safety.ts b/src/agents/sandbox/fs-bridge-path-safety.ts index a18ed500287..83fa4149974 100644 --- a/src/agents/sandbox/fs-bridge-path-safety.ts +++ b/src/agents/sandbox/fs-bridge-path-safety.ts @@ -18,11 +18,22 @@ export type PathSafetyCheck = { options: PathSafetyOptions; }; +export type PinnedSandboxEntry = { + mountRootPath: 
string; + relativeParentPath: string; + basename: string; +}; + export type AnchoredSandboxEntry = { canonicalParentPath: string; basename: string; }; +export type PinnedSandboxDirectoryEntry = { + mountRootPath: string; + relativePath: string; +}; + type RunCommand = ( script: string, options?: { @@ -76,6 +87,26 @@ export class SandboxFsPathGuard { return lexicalMount; } + private finalizePinnedEntry(params: { + mount: SandboxFsMount; + parentPath: string; + basename: string; + targetPath: string; + action: string; + }): PinnedSandboxEntry { + const relativeParentPath = path.posix.relative(params.mount.containerRoot, params.parentPath); + if (relativeParentPath.startsWith("..") || path.posix.isAbsolute(relativeParentPath)) { + throw new Error( + `Sandbox path escapes allowed mounts; cannot ${params.action}: ${params.targetPath}`, + ); + } + return { + mountRootPath: params.mount.containerRoot, + relativeParentPath: relativeParentPath === "." ? "" : relativeParentPath, + basename: params.basename, + }; + } + private async assertGuardedPathSafety( target: SandboxResolvedFsPath, options: PathSafetyOptions, @@ -128,7 +159,26 @@ export class SandboxFsPathGuard { return guarded; } - async resolveAnchoredSandboxEntry(target: SandboxResolvedFsPath): Promise { + resolvePinnedEntry(target: SandboxResolvedFsPath, action: string): PinnedSandboxEntry { + const basename = path.posix.basename(target.containerPath); + if (!basename || basename === "." 
|| basename === "/") { + throw new Error(`Invalid sandbox entry target: ${target.containerPath}`); + } + const parentPath = normalizeContainerPath(path.posix.dirname(target.containerPath)); + const mount = this.resolveRequiredMount(parentPath, action); + return this.finalizePinnedEntry({ + mount, + parentPath, + basename, + targetPath: target.containerPath, + action, + }); + } + + async resolveAnchoredSandboxEntry( + target: SandboxResolvedFsPath, + action: string, + ): Promise { const basename = path.posix.basename(target.containerPath); if (!basename || basename === "." || basename === "/") { throw new Error(`Invalid sandbox entry target: ${target.containerPath}`); @@ -138,12 +188,45 @@ export class SandboxFsPathGuard { containerPath: parentPath, allowFinalSymlinkForUnlink: false, }); + this.resolveRequiredMount(canonicalParentPath, action); return { canonicalParentPath, basename, }; } + async resolveAnchoredPinnedEntry( + target: SandboxResolvedFsPath, + action: string, + ): Promise { + const anchoredTarget = await this.resolveAnchoredSandboxEntry(target, action); + const mount = this.resolveRequiredMount(anchoredTarget.canonicalParentPath, action); + return this.finalizePinnedEntry({ + mount, + parentPath: anchoredTarget.canonicalParentPath, + basename: anchoredTarget.basename, + targetPath: target.containerPath, + action, + }); + } + + resolvePinnedDirectoryEntry( + target: SandboxResolvedFsPath, + action: string, + ): PinnedSandboxDirectoryEntry { + const mount = this.resolveRequiredMount(target.containerPath, action); + const relativePath = path.posix.relative(mount.containerRoot, target.containerPath); + if (relativePath.startsWith("..") || path.posix.isAbsolute(relativePath)) { + throw new Error( + `Sandbox path escapes allowed mounts; cannot ${action}: ${target.containerPath}`, + ); + } + return { + mountRootPath: mount.containerRoot, + relativePath: relativePath === "." ? 
"" : relativePath, + }; + } + private pathIsExistingDirectory(hostPath: string): boolean { try { return fs.statSync(hostPath).isDirectory(); diff --git a/src/agents/sandbox/fs-bridge-shell-command-plans.ts b/src/agents/sandbox/fs-bridge-shell-command-plans.ts index 4c1a9b8d64f..4bcd1ae04de 100644 --- a/src/agents/sandbox/fs-bridge-shell-command-plans.ts +++ b/src/agents/sandbox/fs-bridge-shell-command-plans.ts @@ -1,4 +1,3 @@ -import { PATH_ALIAS_POLICIES } from "../../infra/path-alias-guards.js"; import type { AnchoredSandboxEntry, PathSafetyCheck } from "./fs-bridge-path-safety.js"; import type { SandboxResolvedFsPath } from "./fs-paths.js"; @@ -6,107 +5,19 @@ export type SandboxFsCommandPlan = { checks: PathSafetyCheck[]; script: string; args?: string[]; + stdin?: Buffer | string; recheckBeforeCommand?: boolean; allowFailure?: boolean; }; -export function buildWriteCommitPlan( - target: SandboxResolvedFsPath, - tempPath: string, -): SandboxFsCommandPlan { - return { - checks: [{ target, options: { action: "write files", requireWritable: true } }], - recheckBeforeCommand: true, - script: 'set -eu; mv -f -- "$1" "$2"', - args: [tempPath, target.containerPath], - }; -} - -export function buildMkdirpPlan( +export function buildStatPlan( target: SandboxResolvedFsPath, anchoredTarget: AnchoredSandboxEntry, ): SandboxFsCommandPlan { - return { - checks: [ - { - target, - options: { - action: "create directories", - requireWritable: true, - allowedType: "directory", - }, - }, - ], - script: 'set -eu\ncd -- "$1"\nmkdir -p -- "$2"', - args: [anchoredTarget.canonicalParentPath, anchoredTarget.basename], - }; -} - -export function buildRemovePlan(params: { - target: SandboxResolvedFsPath; - anchoredTarget: AnchoredSandboxEntry; - recursive?: boolean; - force?: boolean; -}): SandboxFsCommandPlan { - const flags = [params.force === false ? "" : "-f", params.recursive ? "-r" : ""].filter(Boolean); - const rmCommand = flags.length > 0 ? 
`rm ${flags.join(" ")}` : "rm"; - return { - checks: [ - { - target: params.target, - options: { - action: "remove files", - requireWritable: true, - aliasPolicy: PATH_ALIAS_POLICIES.unlinkTarget, - }, - }, - ], - recheckBeforeCommand: true, - script: `set -eu\ncd -- "$1"\n${rmCommand} -- "$2"`, - args: [params.anchoredTarget.canonicalParentPath, params.anchoredTarget.basename], - }; -} - -export function buildRenamePlan(params: { - from: SandboxResolvedFsPath; - to: SandboxResolvedFsPath; - anchoredFrom: AnchoredSandboxEntry; - anchoredTo: AnchoredSandboxEntry; -}): SandboxFsCommandPlan { - return { - checks: [ - { - target: params.from, - options: { - action: "rename files", - requireWritable: true, - aliasPolicy: PATH_ALIAS_POLICIES.unlinkTarget, - }, - }, - { - target: params.to, - options: { - action: "rename files", - requireWritable: true, - }, - }, - ], - recheckBeforeCommand: true, - script: ["set -eu", 'mkdir -p -- "$2"', 'cd -- "$1"', 'mv -- "$3" "$2/$4"'].join("\n"), - args: [ - params.anchoredFrom.canonicalParentPath, - params.anchoredTo.canonicalParentPath, - params.anchoredFrom.basename, - params.anchoredTo.basename, - ], - }; -} - -export function buildStatPlan(target: SandboxResolvedFsPath): SandboxFsCommandPlan { return { checks: [{ target, options: { action: "stat files" } }], - script: 'set -eu; stat -c "%F|%s|%Y" -- "$1"', - args: [target.containerPath], + script: 'set -eu\ncd -- "$1"\nstat -c "%F|%s|%Y" -- "$2"', + args: [anchoredTarget.canonicalParentPath, anchoredTarget.basename], allowFailure: true, }; } diff --git a/src/agents/sandbox/fs-bridge.anchored-ops.test.ts b/src/agents/sandbox/fs-bridge.anchored-ops.test.ts index 79bc5a55f3c..f92e99cc3c6 100644 --- a/src/agents/sandbox/fs-bridge.anchored-ops.test.ts +++ b/src/agents/sandbox/fs-bridge.anchored-ops.test.ts @@ -4,9 +4,13 @@ import { describe, expect, it } from "vitest"; import { createSandbox, createSandboxFsBridge, - findCallByScriptFragment, + createSeededSandboxFsBridge, + 
dockerExecResult, findCallsByScriptFragment, + findCallByDockerArg, + findCallByScriptFragment, getDockerArg, + getDockerScript, installFsBridgeTestHarness, mockedExecDockerRaw, withTempDir, @@ -67,54 +71,127 @@ describe("sandbox fs bridge anchored ops", () => { }); }); - const anchoredCases = [ + const pinnedCases = [ { - name: "mkdirp anchors parent + basename", + name: "write pins canonical parent + basename", + invoke: (bridge: ReturnType) => + bridge.writeFile({ filePath: "nested/file.txt", data: "updated" }), + expectedArgs: ["write", "/workspace", "nested", "file.txt", "1"], + forbiddenArgs: ["/workspace/nested/file.txt"], + }, + { + name: "mkdirp pins mount root + relative path", invoke: (bridge: ReturnType) => bridge.mkdirp({ filePath: "nested/leaf" }), - scriptFragment: 'mkdir -p -- "$2"', - expectedArgs: ["/workspace/nested", "leaf"], + expectedArgs: ["mkdirp", "/workspace", "nested/leaf"], forbiddenArgs: ["/workspace/nested/leaf"], - canonicalProbe: "/workspace/nested", }, { - name: "remove anchors parent + basename", + name: "remove pins mount root + parent/basename", invoke: (bridge: ReturnType) => bridge.remove({ filePath: "nested/file.txt" }), - scriptFragment: 'rm -f -- "$2"', - expectedArgs: ["/workspace/nested", "file.txt"], + expectedArgs: ["remove", "/workspace", "nested", "file.txt", "0", "1"], forbiddenArgs: ["/workspace/nested/file.txt"], - canonicalProbe: "/workspace/nested", }, { - name: "rename anchors both parents + basenames", + name: "rename pins both parents + basenames", invoke: (bridge: ReturnType) => bridge.rename({ from: "from.txt", to: "nested/to.txt" }), - scriptFragment: 'mv -- "$3" "$2/$4"', - expectedArgs: ["/workspace", "/workspace/nested", "from.txt", "to.txt"], + expectedArgs: ["rename", "/workspace", "", "from.txt", "/workspace", "nested", "to.txt", "1"], forbiddenArgs: ["/workspace/from.txt", "/workspace/nested/to.txt"], - canonicalProbe: "/workspace/nested", }, ] as const; - it.each(anchoredCases)("$name", async 
(testCase) => { - const bridge = createSandboxFsBridge({ sandbox: createSandbox() }); + it.each(pinnedCases)("$name", async (testCase) => { + await withTempDir("openclaw-fs-bridge-contract-write-", async (stateDir) => { + const { bridge } = await createSeededSandboxFsBridge(stateDir); - await testCase.invoke(bridge); + await testCase.invoke(bridge); - const opCall = findCallByScriptFragment(testCase.scriptFragment); - expect(opCall).toBeDefined(); - const args = opCall?.[0] ?? []; - testCase.expectedArgs.forEach((value, index) => { - expect(getDockerArg(args, index + 1)).toBe(value); - }); - testCase.forbiddenArgs.forEach((value) => { - expect(args).not.toContain(value); + const opCall = mockedExecDockerRaw.mock.calls.find( + ([args]) => + typeof args[5] === "string" && + args[5].includes("python3 /dev/fd/3 \"$@\" 3<<'PY'") && + getDockerArg(args, 1) === testCase.expectedArgs[0], + ); + expect(opCall).toBeDefined(); + const args = opCall?.[0] ?? []; + testCase.expectedArgs.forEach((value, index) => { + expect(getDockerArg(args, index + 1)).toBe(value); + }); + testCase.forbiddenArgs.forEach((value) => { + expect(args).not.toContain(value); + }); }); + }); - const canonicalCalls = findCallsByScriptFragment('readlink -f -- "$cursor"'); - expect( - canonicalCalls.some(([callArgs]) => getDockerArg(callArgs, 1) === testCase.canonicalProbe), - ).toBe(true); + it.runIf(process.platform !== "win32")( + "write resolves symlink parents to canonical pinned paths", + async () => { + await withTempDir("openclaw-fs-bridge-contract-write-", async (stateDir) => { + const workspaceDir = path.join(stateDir, "workspace"); + const realDir = path.join(workspaceDir, "real"); + await fs.mkdir(realDir, { recursive: true }); + await fs.symlink(realDir, path.join(workspaceDir, "alias")); + + mockedExecDockerRaw.mockImplementation(async (args) => { + const script = getDockerScript(args); + if (script.includes('readlink -f -- "$cursor"')) { + const target = getDockerArg(args, 1); + return 
dockerExecResult(`${target.replace("/workspace/alias", "/workspace/real")}\n`); + } + if (script.includes('stat -c "%F|%s|%Y"')) { + return dockerExecResult("regular file|1|2"); + } + return dockerExecResult(""); + }); + + const bridge = createSandboxFsBridge({ + sandbox: createSandbox({ + workspaceDir, + agentWorkspaceDir: workspaceDir, + }), + }); + + await bridge.writeFile({ filePath: "alias/note.txt", data: "updated" }); + + const writeCall = findCallByDockerArg(1, "write"); + expect(writeCall).toBeDefined(); + const args = writeCall?.[0] ?? []; + expect(getDockerArg(args, 2)).toBe("/workspace"); + expect(getDockerArg(args, 3)).toBe("real"); + expect(getDockerArg(args, 4)).toBe("note.txt"); + expect(args).not.toContain("alias"); + + const canonicalCalls = findCallsByScriptFragment('readlink -f -- "$cursor"'); + expect( + canonicalCalls.some(([callArgs]) => getDockerArg(callArgs, 1) === "/workspace/alias"), + ).toBe(true); + }); + }, + ); + + it("stat anchors parent + basename", async () => { + await withTempDir("openclaw-fs-bridge-contract-stat-", async (stateDir) => { + const workspaceDir = path.join(stateDir, "workspace"); + await fs.mkdir(path.join(workspaceDir, "nested"), { recursive: true }); + await fs.writeFile(path.join(workspaceDir, "nested", "file.txt"), "bye", "utf8"); + + const bridge = createSandboxFsBridge({ + sandbox: createSandbox({ + workspaceDir, + agentWorkspaceDir: workspaceDir, + }), + }); + + await bridge.stat({ filePath: "nested/file.txt" }); + + const statCall = findCallByScriptFragment('stat -c "%F|%s|%Y" -- "$2"'); + expect(statCall).toBeDefined(); + const args = statCall?.[0] ?? 
[]; + expect(getDockerArg(args, 1)).toBe("/workspace/nested"); + expect(getDockerArg(args, 2)).toBe("file.txt"); + expect(args).not.toContain("/workspace/nested/file.txt"); + }); }); }); diff --git a/src/agents/sandbox/fs-bridge.boundary.test.ts b/src/agents/sandbox/fs-bridge.boundary.test.ts index 3b86496fac6..574a698db4c 100644 --- a/src/agents/sandbox/fs-bridge.boundary.test.ts +++ b/src/agents/sandbox/fs-bridge.boundary.test.ts @@ -6,7 +6,7 @@ import { createSandbox, createSandboxFsBridge, expectMkdirpAllowsExistingDirectory, - getScriptsFromCalls, + findCallByDockerArg, installFsBridgeTestHarness, mockedExecDockerRaw, withTempDir, @@ -55,8 +55,7 @@ describe("sandbox fs bridge boundary validation", () => { await expect(bridge.mkdirp({ filePath: "memory/kemik" })).rejects.toThrow( /cannot create directories/i, ); - const scripts = getScriptsFromCalls(); - expect(scripts.some((script) => script.includes('mkdir -p -- "$2"'))).toBe(false); + expect(findCallByDockerArg(1, "mkdirp")).toBeUndefined(); }); }); @@ -111,7 +110,6 @@ describe("sandbox fs bridge boundary validation", () => { it("rejects missing files before any docker read command runs", async () => { const bridge = createSandboxFsBridge({ sandbox: createSandbox() }); await expect(bridge.readFile({ filePath: "a.txt" })).rejects.toThrow(/ENOENT|no such file/i); - const scripts = getScriptsFromCalls(); - expect(scripts.some((script) => script.includes('cat -- "$1"'))).toBe(false); + expect(mockedExecDockerRaw).not.toHaveBeenCalled(); }); }); diff --git a/src/agents/sandbox/fs-bridge.e2e-docker.test.ts b/src/agents/sandbox/fs-bridge.e2e-docker.test.ts new file mode 100644 index 00000000000..62a064b49f5 --- /dev/null +++ b/src/agents/sandbox/fs-bridge.e2e-docker.test.ts @@ -0,0 +1,89 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { DEFAULT_SANDBOX_IMAGE } from "./constants.js"; +import { 
buildSandboxCreateArgs, execDocker, execDockerRaw } from "./docker.js"; +import { createSandboxFsBridge } from "./fs-bridge.js"; +import { createSandboxTestContext } from "./test-fixtures.js"; +import { appendWorkspaceMountArgs } from "./workspace-mounts.js"; + +async function sandboxImageReady(): Promise { + try { + const dockerVersion = await execDockerRaw(["version"], { allowFailure: true }); + if (dockerVersion.code !== 0) { + return false; + } + const pythonCheck = await execDockerRaw( + ["run", "--rm", "--entrypoint", "python3", DEFAULT_SANDBOX_IMAGE, "--version"], + { allowFailure: true }, + ); + return pythonCheck.code === 0; + } catch { + return false; + } +} + +describe("sandbox fs bridge docker e2e", () => { + it.runIf(process.platform !== "win32")( + "writes through docker exec using the pinned mutation helper", + async () => { + if (!(await sandboxImageReady())) { + return; + } + + const stateDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-fsbridge-e2e-")); + const workspaceDir = path.join(stateDir, "workspace"); + await fs.mkdir(workspaceDir, { recursive: true }); + + const suffix = `${process.pid}-${Date.now()}`; + const containerName = `openclaw-fsbridge-${suffix}`.slice(0, 63); + + try { + const sandbox = createSandboxTestContext({ + overrides: { + workspaceDir, + agentWorkspaceDir: workspaceDir, + containerName, + containerWorkdir: "/workspace", + }, + dockerOverrides: { + image: DEFAULT_SANDBOX_IMAGE, + containerPrefix: "openclaw-fsbridge-", + user: "", + }, + }); + + const createArgs = buildSandboxCreateArgs({ + name: containerName, + cfg: sandbox.docker, + scopeKey: sandbox.sessionKey, + includeBinds: false, + bindSourceRoots: [workspaceDir], + }); + createArgs.push("--workdir", sandbox.containerWorkdir); + appendWorkspaceMountArgs({ + args: createArgs, + workspaceDir, + agentWorkspaceDir: workspaceDir, + workdir: sandbox.containerWorkdir, + workspaceAccess: sandbox.workspaceAccess, + }); + createArgs.push(sandbox.docker.image, "sleep", 
"infinity"); + + await execDocker(createArgs); + await execDocker(["start", containerName]); + + const bridge = createSandboxFsBridge({ sandbox }); + await bridge.writeFile({ filePath: "nested/hello.txt", data: "from-docker" }); + + await expect( + fs.readFile(path.join(workspaceDir, "nested", "hello.txt"), "utf8"), + ).resolves.toBe("from-docker"); + } finally { + await execDocker(["rm", "-f", containerName], { allowFailure: true }); + await fs.rm(stateDir, { recursive: true, force: true }); + } + }, + ); +}); diff --git a/src/agents/sandbox/fs-bridge.shell.test.ts b/src/agents/sandbox/fs-bridge.shell.test.ts index d8b29c0f5d5..1e870ef0268 100644 --- a/src/agents/sandbox/fs-bridge.shell.test.ts +++ b/src/agents/sandbox/fs-bridge.shell.test.ts @@ -4,6 +4,7 @@ import { describe, expect, it } from "vitest"; import { createSandbox, createSandboxFsBridge, + createSeededSandboxFsBridge, getScriptsFromCalls, installFsBridgeTestHarness, mockedExecDockerRaw, @@ -45,10 +46,10 @@ describe("sandbox fs bridge shell compatibility", () => { }); }); - it("resolveCanonicalContainerPath script is valid POSIX sh (no do; token)", async () => { + it("path canonicalization recheck script is valid POSIX sh", async () => { const bridge = createSandboxFsBridge({ sandbox: createSandbox() }); - await bridge.mkdirp({ filePath: "nested" }); + await bridge.writeFile({ filePath: "b.txt", data: "hello" }); const scripts = getScriptsFromCalls(); const canonicalScript = scripts.find((script) => script.includes("allow_final")); @@ -129,12 +130,34 @@ describe("sandbox fs bridge shell compatibility", () => { await bridge.writeFile({ filePath: "b.txt", data: "hello" }); const scripts = getScriptsFromCalls(); + expect(scripts.some((script) => script.includes("python3 - \"$@\" <<'PY'"))).toBe(false); + expect(scripts.some((script) => script.includes("python3 /dev/fd/3 \"$@\" 3<<'PY'"))).toBe( + true, + ); expect(scripts.some((script) => script.includes('cat >"$1"'))).toBe(false); - 
expect(scripts.some((script) => script.includes('cat >"$tmp"'))).toBe(true); - expect(scripts.some((script) => script.includes('mv -f -- "$1" "$2"'))).toBe(true); + expect(scripts.some((script) => script.includes('cat >"$tmp"'))).toBe(false); + expect(scripts.some((script) => script.includes("os.replace("))).toBe(true); }); - it("re-validates target before final rename and cleans temp file on failure", async () => { + it("routes mkdirp, remove, and rename through the pinned mutation helper", async () => { + await withTempDir("openclaw-fs-bridge-shell-write-", async (stateDir) => { + const { bridge } = await createSeededSandboxFsBridge(stateDir, { + rootFileName: "a.txt", + }); + + await bridge.mkdirp({ filePath: "nested" }); + await bridge.remove({ filePath: "nested/file.txt" }); + await bridge.rename({ from: "a.txt", to: "nested/b.txt" }); + + const scripts = getScriptsFromCalls(); + expect(scripts.filter((script) => script.includes("operation = sys.argv[1]")).length).toBe(3); + expect(scripts.some((script) => script.includes('mkdir -p -- "$2"'))).toBe(false); + expect(scripts.some((script) => script.includes('rm -f -- "$2"'))).toBe(false); + expect(scripts.some((script) => script.includes('mv -- "$3" "$2/$4"'))).toBe(false); + }); + }); + + it("re-validates target before the pinned write helper runs", async () => { const { mockedOpenBoundaryFile } = await import("./fs-bridge.test-helpers.js"); mockedOpenBoundaryFile .mockImplementationOnce(async () => ({ ok: false, reason: "path" })) @@ -150,8 +173,6 @@ describe("sandbox fs bridge shell compatibility", () => { ); const scripts = getScriptsFromCalls(); - expect(scripts.some((script) => script.includes("mktemp"))).toBe(true); - expect(scripts.some((script) => script.includes('mv -f -- "$1" "$2"'))).toBe(false); - expect(scripts.some((script) => script.includes('rm -f -- "$1"'))).toBe(true); + expect(scripts.some((script) => script.includes("os.replace("))).toBe(false); }); }); diff --git 
a/src/agents/sandbox/fs-bridge.test-helpers.ts b/src/agents/sandbox/fs-bridge.test-helpers.ts index e81bb65a4e0..0747371478d 100644 --- a/src/agents/sandbox/fs-bridge.test-helpers.ts +++ b/src/agents/sandbox/fs-bridge.test-helpers.ts @@ -48,6 +48,10 @@ export function findCallByScriptFragment(fragment: string) { return mockedExecDockerRaw.mock.calls.find(([args]) => getDockerScript(args).includes(fragment)); } +export function findCallByDockerArg(position: number, value: string) { + return mockedExecDockerRaw.mock.calls.find(([args]) => getDockerArg(args, position) === value); +} + export function findCallsByScriptFragment(fragment: string) { return mockedExecDockerRaw.mock.calls.filter(([args]) => getDockerScript(args).includes(fragment), @@ -75,6 +79,36 @@ export function createSandbox(overrides?: Partial): SandboxConte }); } +export async function createSeededSandboxFsBridge( + stateDir: string, + params?: { + rootFileName?: string; + rootContents?: string; + nestedFileName?: string; + nestedContents?: string; + }, +) { + const workspaceDir = path.join(stateDir, "workspace"); + await fs.mkdir(path.join(workspaceDir, "nested"), { recursive: true }); + await fs.writeFile( + path.join(workspaceDir, params?.rootFileName ?? "from.txt"), + params?.rootContents ?? "hello", + "utf8", + ); + await fs.writeFile( + path.join(workspaceDir, "nested", params?.nestedFileName ?? "file.txt"), + params?.nestedContents ?? 
"bye", + "utf8", + ); + const bridge = createSandboxFsBridge({ + sandbox: createSandbox({ + workspaceDir, + agentWorkspaceDir: workspaceDir, + }), + }); + return { workspaceDir, bridge }; +} + export async function withTempDir( prefix: string, run: (stateDir: string) => Promise, @@ -142,12 +176,16 @@ export async function expectMkdirpAllowsExistingDirectory(params?: { await expect(bridge.mkdirp({ filePath: "memory/kemik" })).resolves.toBeUndefined(); - const mkdirCall = findCallByScriptFragment('mkdir -p -- "$2"'); + const mkdirCall = mockedExecDockerRaw.mock.calls.find( + ([args]) => + getDockerScript(args).includes("operation = sys.argv[1]") && + getDockerArg(args, 1) === "mkdirp", + ); expect(mkdirCall).toBeDefined(); - const mkdirParent = mkdirCall ? getDockerArg(mkdirCall[0], 1) : ""; - const mkdirBase = mkdirCall ? getDockerArg(mkdirCall[0], 2) : ""; - expect(mkdirParent).toBe("/workspace/memory"); - expect(mkdirBase).toBe("kemik"); + const mountRoot = mkdirCall ? getDockerArg(mkdirCall[0], 2) : ""; + const relativePath = mkdirCall ? 
getDockerArg(mkdirCall[0], 3) : ""; + expect(mountRoot).toBe("/workspace"); + expect(relativePath).toBe("memory/kemik"); }); } diff --git a/src/agents/sandbox/fs-bridge.ts b/src/agents/sandbox/fs-bridge.ts index f937ad2c702..7a9a22d4459 100644 --- a/src/agents/sandbox/fs-bridge.ts +++ b/src/agents/sandbox/fs-bridge.ts @@ -1,20 +1,18 @@ import fs from "node:fs"; import { execDockerRaw, type ExecDockerRawResult } from "./docker.js"; -import { SandboxFsPathGuard } from "./fs-bridge-path-safety.js"; import { - buildMkdirpPlan, - buildRemovePlan, - buildRenamePlan, - buildStatPlan, - buildWriteCommitPlan, - type SandboxFsCommandPlan, -} from "./fs-bridge-shell-command-plans.js"; + buildPinnedMkdirpPlan, + buildPinnedRemovePlan, + buildPinnedRenamePlan, + buildPinnedWritePlan, +} from "./fs-bridge-mutation-helper.js"; +import { SandboxFsPathGuard } from "./fs-bridge-path-safety.js"; +import { buildStatPlan, type SandboxFsCommandPlan } from "./fs-bridge-shell-command-plans.js"; import { buildSandboxFsMounts, resolveSandboxFsPathWithMounts, type SandboxResolvedFsPath, } from "./fs-paths.js"; -import { normalizeContainerPath } from "./path-utils.js"; import type { SandboxContext, SandboxWorkspaceAccess } from "./types.js"; type RunCommandOptions = { @@ -112,33 +110,47 @@ class SandboxFsBridgeImpl implements SandboxFsBridge { }): Promise { const target = this.resolveResolvedPath(params); this.ensureWriteAccess(target, "write files"); - await this.pathGuard.assertPathSafety(target, { action: "write files", requireWritable: true }); + const writeCheck = { + target, + options: { action: "write files", requireWritable: true } as const, + }; + await this.pathGuard.assertPathSafety(target, writeCheck.options); const buffer = Buffer.isBuffer(params.data) ? params.data : Buffer.from(params.data, params.encoding ?? 
"utf8"); - const tempPath = await this.writeFileToTempPath({ - targetContainerPath: target.containerPath, - mkdir: params.mkdir !== false, - data: buffer, + const pinnedWriteTarget = await this.pathGuard.resolveAnchoredPinnedEntry( + target, + "write files", + ); + await this.runCheckedCommand({ + ...buildPinnedWritePlan({ + check: writeCheck, + pinned: pinnedWriteTarget, + mkdir: params.mkdir !== false, + }), + stdin: buffer, signal: params.signal, }); - - try { - await this.runCheckedCommand({ - ...buildWriteCommitPlan(target, tempPath), - signal: params.signal, - }); - } catch (error) { - await this.cleanupTempPath(tempPath, params.signal); - throw error; - } } async mkdirp(params: { filePath: string; cwd?: string; signal?: AbortSignal }): Promise { const target = this.resolveResolvedPath(params); this.ensureWriteAccess(target, "create directories"); - const anchoredTarget = await this.pathGuard.resolveAnchoredSandboxEntry(target); - await this.runPlannedCommand(buildMkdirpPlan(target, anchoredTarget), params.signal); + const mkdirCheck = { + target, + options: { + action: "create directories", + requireWritable: true, + allowedType: "directory", + } as const, + }; + await this.runCheckedCommand({ + ...buildPinnedMkdirpPlan({ + check: mkdirCheck, + pinned: this.pathGuard.resolvePinnedDirectoryEntry(target, "create directories"), + }), + signal: params.signal, + }); } async remove(params: { @@ -150,16 +162,22 @@ class SandboxFsBridgeImpl implements SandboxFsBridge { }): Promise { const target = this.resolveResolvedPath(params); this.ensureWriteAccess(target, "remove files"); - const anchoredTarget = await this.pathGuard.resolveAnchoredSandboxEntry(target); - await this.runPlannedCommand( - buildRemovePlan({ - target, - anchoredTarget, + const removeCheck = { + target, + options: { + action: "remove files", + requireWritable: true, + } as const, + }; + await this.runCheckedCommand({ + ...buildPinnedRemovePlan({ + check: removeCheck, + pinned: 
this.pathGuard.resolvePinnedEntry(target, "remove files"), recursive: params.recursive, force: params.force, }), - params.signal, - ); + signal: params.signal, + }); } async rename(params: { @@ -172,17 +190,29 @@ class SandboxFsBridgeImpl implements SandboxFsBridge { const to = this.resolveResolvedPath({ filePath: params.to, cwd: params.cwd }); this.ensureWriteAccess(from, "rename files"); this.ensureWriteAccess(to, "rename files"); - const anchoredFrom = await this.pathGuard.resolveAnchoredSandboxEntry(from); - const anchoredTo = await this.pathGuard.resolveAnchoredSandboxEntry(to); - await this.runPlannedCommand( - buildRenamePlan({ - from, - to, - anchoredFrom, - anchoredTo, + const fromCheck = { + target: from, + options: { + action: "rename files", + requireWritable: true, + } as const, + }; + const toCheck = { + target: to, + options: { + action: "rename files", + requireWritable: true, + } as const, + }; + await this.runCheckedCommand({ + ...buildPinnedRenamePlan({ + fromCheck, + toCheck, + from: this.pathGuard.resolvePinnedEntry(from, "rename files"), + to: this.pathGuard.resolvePinnedEntry(to, "rename files"), }), - params.signal, - ); + signal: params.signal, + }); } async stat(params: { @@ -191,7 +221,11 @@ class SandboxFsBridgeImpl implements SandboxFsBridge { signal?: AbortSignal; }): Promise { const target = this.resolveResolvedPath(params); - const result = await this.runPlannedCommand(buildStatPlan(target), params.signal); + const anchoredTarget = await this.pathGuard.resolveAnchoredSandboxEntry(target, "stat files"); + const result = await this.runPlannedCommand( + buildStatPlan(target, anchoredTarget), + params.signal, + ); if (result.code !== 0) { const stderr = result.stderr.toString("utf8"); if (stderr.includes("No such file or directory")) { @@ -265,58 +299,6 @@ class SandboxFsBridgeImpl implements SandboxFsBridge { return await this.runCheckedCommand({ ...plan, signal }); } - private async writeFileToTempPath(params: { - targetContainerPath: 
string; - mkdir: boolean; - data: Buffer; - signal?: AbortSignal; - }): Promise { - const script = params.mkdir - ? [ - "set -eu", - 'target="$1"', - 'dir=$(dirname -- "$target")', - 'if [ "$dir" != "." ]; then mkdir -p -- "$dir"; fi', - 'base=$(basename -- "$target")', - 'tmp=$(mktemp "$dir/.openclaw-write-$base.XXXXXX")', - 'cat >"$tmp"', - 'printf "%s\\n" "$tmp"', - ].join("\n") - : [ - "set -eu", - 'target="$1"', - 'dir=$(dirname -- "$target")', - 'base=$(basename -- "$target")', - 'tmp=$(mktemp "$dir/.openclaw-write-$base.XXXXXX")', - 'cat >"$tmp"', - 'printf "%s\\n" "$tmp"', - ].join("\n"); - const result = await this.runCommand(script, { - args: [params.targetContainerPath], - stdin: params.data, - signal: params.signal, - }); - const tempPath = result.stdout.toString("utf8").trim().split(/\r?\n/).at(-1)?.trim(); - if (!tempPath || !tempPath.startsWith("/")) { - throw new Error( - `Failed to create temporary sandbox write path for ${params.targetContainerPath}`, - ); - } - return normalizeContainerPath(tempPath); - } - - private async cleanupTempPath(tempPath: string, signal?: AbortSignal): Promise { - try { - await this.runCommand('set -eu; rm -f -- "$1"', { - args: [tempPath], - signal, - allowFailure: true, - }); - } catch { - // Best-effort cleanup only. 
- } - } - private ensureWriteAccess(target: SandboxResolvedFsPath, action: string) { if (!allowsWrites(this.sandbox.workspaceAccess) || !target.writable) { throw new Error(`Sandbox path is read-only; cannot ${action}: ${target.containerPath}`); diff --git a/src/agents/session-dirs.ts b/src/agents/session-dirs.ts index 1985dcf608a..90f42cdebb9 100644 --- a/src/agents/session-dirs.ts +++ b/src/agents/session-dirs.ts @@ -1,9 +1,15 @@ -import type { Dirent } from "node:fs"; +import fsSync, { type Dirent } from "node:fs"; import fs from "node:fs/promises"; import path from "node:path"; -export async function resolveAgentSessionDirs(stateDir: string): Promise { - const agentsDir = path.join(stateDir, "agents"); +function mapAgentSessionDirs(agentsDir: string, entries: Dirent[]): string[] { + return entries + .filter((entry) => entry.isDirectory()) + .map((entry) => path.join(agentsDir, entry.name, "sessions")) + .toSorted((a, b) => a.localeCompare(b)); +} + +export async function resolveAgentSessionDirsFromAgentsDir(agentsDir: string): Promise { let entries: Dirent[] = []; try { entries = await fs.readdir(agentsDir, { withFileTypes: true }); @@ -15,8 +21,24 @@ export async function resolveAgentSessionDirs(stateDir: string): Promise entry.isDirectory()) - .map((entry) => path.join(agentsDir, entry.name, "sessions")) - .toSorted((a, b) => a.localeCompare(b)); + return mapAgentSessionDirs(agentsDir, entries); +} + +export function resolveAgentSessionDirsFromAgentsDirSync(agentsDir: string): string[] { + let entries: Dirent[] = []; + try { + entries = fsSync.readdirSync(agentsDir, { withFileTypes: true }); + } catch (err) { + const code = (err as { code?: string }).code; + if (code === "ENOENT") { + return []; + } + throw err; + } + + return mapAgentSessionDirs(agentsDir, entries); +} + +export async function resolveAgentSessionDirs(stateDir: string): Promise { + return await resolveAgentSessionDirsFromAgentsDir(path.join(stateDir, "agents")); } diff --git 
a/src/agents/sessions-spawn-hooks.test.ts b/src/agents/sessions-spawn-hooks.test.ts index e7abc2dba9f..89004289369 100644 --- a/src/agents/sessions-spawn-hooks.test.ts +++ b/src/agents/sessions-spawn-hooks.test.ts @@ -380,4 +380,36 @@ describe("sessions_spawn subagent lifecycle hooks", () => { emitLifecycleHooks: true, }); }); + + it("cleans up the provisional session when lineage patching fails after thread binding", async () => { + const callGatewayMock = getCallGatewayMock(); + callGatewayMock.mockImplementation(async (opts: unknown) => { + const request = opts as { method?: string; params?: Record }; + if (request.method === "sessions.patch" && typeof request.params?.spawnedBy === "string") { + throw new Error("lineage patch failed"); + } + if (request.method === "sessions.delete") { + return { ok: true }; + } + return {}; + }); + + const result = await executeDiscordThreadSessionSpawn("call9"); + + expect(result.details).toMatchObject({ + status: "error", + error: "lineage patch failed", + }); + expect(hookRunnerMocks.runSubagentSpawned).not.toHaveBeenCalled(); + expect(hookRunnerMocks.runSubagentEnded).not.toHaveBeenCalled(); + const methods = getGatewayMethods(); + expect(methods).toContain("sessions.delete"); + expect(methods).not.toContain("agent"); + const deleteCall = findGatewayRequest("sessions.delete"); + expect(deleteCall?.params).toMatchObject({ + key: (result.details as { childSessionKey?: string }).childSessionKey, + deleteTranscript: true, + emitLifecycleHooks: true, + }); + }); }); diff --git a/src/agents/skills-install-extract.ts b/src/agents/skills-install-extract.ts index 4578935378f..02a5b22c3d5 100644 --- a/src/agents/skills-install-extract.ts +++ b/src/agents/skills-install-extract.ts @@ -1,14 +1,21 @@ import { createHash } from "node:crypto"; import fs from "node:fs"; import { - createTarEntrySafetyChecker, + createTarEntryPreflightChecker, extractArchive as extractArchiveSafe, + mergeExtractedTreeIntoDestination, + 
prepareArchiveDestinationDir, + withStagedArchiveDestination, } from "../infra/archive.js"; import { runCommandWithTimeout } from "../process/exec.js"; import { parseTarVerboseMetadata } from "./skills-install-tar-verbose.js"; import { hasBinary } from "./skills.js"; export type ArchiveExtractResult = { stdout: string; stderr: string; code: number | null }; +type TarPreflightResult = { + entries: string[]; + metadata: ReturnType; +}; async function hashFileSha256(filePath: string): Promise { const hash = createHash("sha256"); @@ -24,6 +31,112 @@ async function hashFileSha256(filePath: string): Promise { }); } +function commandFailureResult( + result: { stdout: string; stderr: string; code: number | null }, + fallbackStderr: string, +): ArchiveExtractResult { + return { + stdout: result.stdout, + stderr: result.stderr || fallbackStderr, + code: result.code, + }; +} + +function buildTarExtractArgv(params: { + archivePath: string; + targetDir: string; + stripComponents: number; +}): string[] { + const argv = ["tar", "xf", params.archivePath, "-C", params.targetDir]; + if (params.stripComponents > 0) { + argv.push("--strip-components", String(params.stripComponents)); + } + return argv; +} + +async function readTarPreflight(params: { + archivePath: string; + timeoutMs: number; +}): Promise { + const listResult = await runCommandWithTimeout(["tar", "tf", params.archivePath], { + timeoutMs: params.timeoutMs, + }); + if (listResult.code !== 0) { + return commandFailureResult(listResult, "tar list failed"); + } + const entries = listResult.stdout + .split("\n") + .map((line) => line.trim()) + .filter(Boolean); + + const verboseResult = await runCommandWithTimeout(["tar", "tvf", params.archivePath], { + timeoutMs: params.timeoutMs, + }); + if (verboseResult.code !== 0) { + return commandFailureResult(verboseResult, "tar verbose list failed"); + } + const metadata = parseTarVerboseMetadata(verboseResult.stdout); + if (metadata.length !== entries.length) { + return { + 
stdout: verboseResult.stdout, + stderr: `tar verbose/list entry count mismatch (${metadata.length} vs ${entries.length})`, + code: 1, + }; + } + return { entries, metadata }; +} + +function isArchiveExtractFailure( + value: TarPreflightResult | ArchiveExtractResult, +): value is ArchiveExtractResult { + return "code" in value; +} + +async function verifyArchiveHashStable(params: { + archivePath: string; + expectedHash: string; +}): Promise { + const postPreflightHash = await hashFileSha256(params.archivePath); + if (postPreflightHash === params.expectedHash) { + return null; + } + return { + stdout: "", + stderr: "tar archive changed during safety preflight; refusing to extract", + code: 1, + }; +} + +async function extractTarBz2WithStaging(params: { + archivePath: string; + destinationRealDir: string; + stripComponents: number; + timeoutMs: number; +}): Promise { + return await withStagedArchiveDestination({ + destinationRealDir: params.destinationRealDir, + run: async (stagingDir) => { + const extractResult = await runCommandWithTimeout( + buildTarExtractArgv({ + archivePath: params.archivePath, + targetDir: stagingDir, + stripComponents: params.stripComponents, + }), + { timeoutMs: params.timeoutMs }, + ); + if (extractResult.code !== 0) { + return extractResult; + } + await mergeExtractedTreeIntoDestination({ + sourceDir: stagingDir, + destinationDir: params.destinationRealDir, + destinationRealDir: params.destinationRealDir, + }); + return extractResult; + }, + }); +} + export async function extractArchive(params: { archivePath: string; archiveType: string; @@ -66,49 +179,25 @@ export async function extractArchive(params: { return { stdout: "", stderr: "tar not found on PATH", code: null }; } + const destinationRealDir = await prepareArchiveDestinationDir(targetDir); const preflightHash = await hashFileSha256(archivePath); // Preflight list to prevent zip-slip style traversal before extraction. 
- const listResult = await runCommandWithTimeout(["tar", "tf", archivePath], { timeoutMs }); - if (listResult.code !== 0) { - return { - stdout: listResult.stdout, - stderr: listResult.stderr || "tar list failed", - code: listResult.code, - }; + const preflight = await readTarPreflight({ archivePath, timeoutMs }); + if (isArchiveExtractFailure(preflight)) { + return preflight; } - const entries = listResult.stdout - .split("\n") - .map((line) => line.trim()) - .filter(Boolean); - - const verboseResult = await runCommandWithTimeout(["tar", "tvf", archivePath], { timeoutMs }); - if (verboseResult.code !== 0) { - return { - stdout: verboseResult.stdout, - stderr: verboseResult.stderr || "tar verbose list failed", - code: verboseResult.code, - }; - } - const metadata = parseTarVerboseMetadata(verboseResult.stdout); - if (metadata.length !== entries.length) { - return { - stdout: verboseResult.stdout, - stderr: `tar verbose/list entry count mismatch (${metadata.length} vs ${entries.length})`, - code: 1, - }; - } - const checkTarEntrySafety = createTarEntrySafetyChecker({ - rootDir: targetDir, + const checkTarEntrySafety = createTarEntryPreflightChecker({ + rootDir: destinationRealDir, stripComponents: strip, escapeLabel: "targetDir", }); - for (let i = 0; i < entries.length; i += 1) { - const entryPath = entries[i]; - const entryMeta = metadata[i]; + for (let i = 0; i < preflight.entries.length; i += 1) { + const entryPath = preflight.entries[i]; + const entryMeta = preflight.metadata[i]; if (!entryPath || !entryMeta) { return { - stdout: verboseResult.stdout, + stdout: "", stderr: "tar metadata parse failure", code: 1, }; @@ -120,20 +209,20 @@ export async function extractArchive(params: { }); } - const postPreflightHash = await hashFileSha256(archivePath); - if (postPreflightHash !== preflightHash) { - return { - stdout: "", - stderr: "tar archive changed during safety preflight; refusing to extract", - code: 1, - }; + const hashFailure = await 
verifyArchiveHashStable({ + archivePath, + expectedHash: preflightHash, + }); + if (hashFailure) { + return hashFailure; } - const argv = ["tar", "xf", archivePath, "-C", targetDir]; - if (strip > 0) { - argv.push("--strip-components", String(strip)); - } - return await runCommandWithTimeout(argv, { timeoutMs }); + return await extractTarBz2WithStaging({ + archivePath, + destinationRealDir, + stripComponents: strip, + timeoutMs, + }); } return { stdout: "", stderr: `unsupported archive type: ${archiveType}`, code: null }; diff --git a/src/agents/skills-install.download.test.ts b/src/agents/skills-install.download.test.ts index 0c357089678..cee0d37b876 100644 --- a/src/agents/skills-install.download.test.ts +++ b/src/agents/skills-install.download.test.ts @@ -425,4 +425,47 @@ describe("installDownloadSpec extraction safety (tar.bz2)", () => { .some((call) => (call[0] as string[])[1] === "xf"); expect(extractionAttempted).toBe(false); }); + + it("rejects tar.bz2 entries that traverse pre-existing targetDir symlinks", async () => { + const entry = buildEntry("tbz2-targetdir-symlink"); + const targetDir = path.join(resolveSkillToolsRootDir(entry), "target"); + const outsideDir = path.join(workspaceDir, "tbz2-targetdir-outside"); + await fs.mkdir(targetDir, { recursive: true }); + await fs.mkdir(outsideDir, { recursive: true }); + await fs.symlink( + outsideDir, + path.join(targetDir, "escape"), + process.platform === "win32" ? "junction" : undefined, + ); + + mockArchiveResponse(new Uint8Array([1, 2, 3])); + + runCommandWithTimeoutMock.mockImplementation(async (...argv: unknown[]) => { + const cmd = (argv[0] ?? 
[]) as string[]; + if (cmd[0] === "tar" && cmd[1] === "tf") { + return runCommandResult({ stdout: "escape/pwn.txt\n" }); + } + if (cmd[0] === "tar" && cmd[1] === "tvf") { + return runCommandResult({ stdout: "-rw-r--r-- 0 0 0 0 Jan 1 00:00 escape/pwn.txt\n" }); + } + if (cmd[0] === "tar" && cmd[1] === "xf") { + const stagingDir = String(cmd[cmd.indexOf("-C") + 1] ?? ""); + await fs.mkdir(path.join(stagingDir, "escape"), { recursive: true }); + await fs.writeFile(path.join(stagingDir, "escape", "pwn.txt"), "owned"); + return runCommandResult({ stdout: "ok" }); + } + return runCommandResult(); + }); + + const result = await installDownloadSkill({ + name: "tbz2-targetdir-symlink", + url: "https://example.invalid/evil.tbz2", + archive: "tar.bz2", + targetDir, + }); + + expect(result.ok).toBe(false); + expect(result.stderr.toLowerCase()).toContain("archive entry traverses symlink in destination"); + expect(await fileExists(path.join(outsideDir, "pwn.txt"))).toBe(false); + }); }); diff --git a/src/agents/skills.build-workspace-skills-prompt.syncs-merged-skills-into-target-workspace.test.ts b/src/agents/skills.build-workspace-skills-prompt.syncs-merged-skills-into-target-workspace.test.ts index 0ee8a39a0b0..1f4da5163e1 100644 --- a/src/agents/skills.build-workspace-skills-prompt.syncs-merged-skills-into-target-workspace.test.ts +++ b/src/agents/skills.build-workspace-skills-prompt.syncs-merged-skills-into-target-workspace.test.ts @@ -25,6 +25,33 @@ async function createCaseDir(prefix: string): Promise { return dir; } +async function syncSourceSkillsToTarget(sourceWorkspace: string, targetWorkspace: string) { + await withEnv({ HOME: sourceWorkspace, PATH: "" }, () => + syncSkillsToWorkspace({ + sourceWorkspaceDir: sourceWorkspace, + targetWorkspaceDir: targetWorkspace, + bundledSkillsDir: path.join(sourceWorkspace, ".bundled"), + managedSkillsDir: path.join(sourceWorkspace, ".managed"), + }), + ); +} + +async function expectSyncedSkillConfinement(params: { + 
sourceWorkspace: string; + targetWorkspace: string; + safeSkillDirName: string; + escapedDest: string; +}) { + expect(await pathExists(params.escapedDest)).toBe(false); + await syncSourceSkillsToTarget(params.sourceWorkspace, params.targetWorkspace); + expect( + await pathExists( + path.join(params.targetWorkspace, "skills", params.safeSkillDirName, "SKILL.md"), + ), + ).toBe(true); + expect(await pathExists(params.escapedDest)).toBe(false); +} + beforeAll(async () => { fixtureRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-skills-sync-suite-")); syncSourceTemplateDir = await createCaseDir("source-template"); @@ -115,14 +142,7 @@ describe("buildWorkspaceSkillsPrompt", () => { "dir", ); - await withEnv({ HOME: sourceWorkspace, PATH: "" }, () => - syncSkillsToWorkspace({ - sourceWorkspaceDir: sourceWorkspace, - targetWorkspaceDir: targetWorkspace, - bundledSkillsDir: path.join(sourceWorkspace, ".bundled"), - managedSkillsDir: path.join(sourceWorkspace, ".managed"), - }), - ); + await syncSourceSkillsToTarget(sourceWorkspace, targetWorkspace); const prompt = buildPrompt(targetWorkspace, { bundledSkillsDir: path.join(targetWorkspace, ".bundled"), @@ -151,21 +171,12 @@ describe("buildWorkspaceSkillsPrompt", () => { expect(path.relative(path.join(targetWorkspace, "skills"), escapedDest).startsWith("..")).toBe( true, ); - expect(await pathExists(escapedDest)).toBe(false); - - await withEnv({ HOME: sourceWorkspace, PATH: "" }, () => - syncSkillsToWorkspace({ - sourceWorkspaceDir: sourceWorkspace, - targetWorkspaceDir: targetWorkspace, - bundledSkillsDir: path.join(sourceWorkspace, ".bundled"), - managedSkillsDir: path.join(sourceWorkspace, ".managed"), - }), - ); - - expect( - await pathExists(path.join(targetWorkspace, "skills", "safe-traversal-skill", "SKILL.md")), - ).toBe(true); - expect(await pathExists(escapedDest)).toBe(false); + await expectSyncedSkillConfinement({ + sourceWorkspace, + targetWorkspace, + safeSkillDirName: "safe-traversal-skill", + 
escapedDest, + }); }); it("keeps synced skills confined under target workspace when frontmatter name is absolute", async () => { const sourceWorkspace = await createCaseDir("source"); @@ -180,21 +191,12 @@ describe("buildWorkspaceSkillsPrompt", () => { description: "Absolute skill", }); - expect(await pathExists(absoluteDest)).toBe(false); - - await withEnv({ HOME: sourceWorkspace, PATH: "" }, () => - syncSkillsToWorkspace({ - sourceWorkspaceDir: sourceWorkspace, - targetWorkspaceDir: targetWorkspace, - bundledSkillsDir: path.join(sourceWorkspace, ".bundled"), - managedSkillsDir: path.join(sourceWorkspace, ".managed"), - }), - ); - - expect( - await pathExists(path.join(targetWorkspace, "skills", "safe-absolute-skill", "SKILL.md")), - ).toBe(true); - expect(await pathExists(absoluteDest)).toBe(false); + await expectSyncedSkillConfinement({ + sourceWorkspace, + targetWorkspace, + safeSkillDirName: "safe-absolute-skill", + escapedDest: absoluteDest, + }); }); it("filters skills based on env/config gates", async () => { const workspaceDir = await createCaseDir("workspace"); diff --git a/src/agents/skills.buildworkspaceskillsnapshot.test.ts b/src/agents/skills.buildworkspaceskillsnapshot.test.ts index aec0da8b49a..1292841ed13 100644 --- a/src/agents/skills.buildworkspaceskillsnapshot.test.ts +++ b/src/agents/skills.buildworkspaceskillsnapshot.test.ts @@ -43,22 +43,44 @@ function withWorkspaceHome(workspaceDir: string, cb: () => T): T { return withEnv({ HOME: workspaceDir, PATH: "" }, cb); } +function buildSnapshot( + workspaceDir: string, + options?: Parameters[1], +) { + return withWorkspaceHome(workspaceDir, () => + buildWorkspaceSkillSnapshot(workspaceDir, { + managedSkillsDir: path.join(workspaceDir, ".managed"), + bundledSkillsDir: path.join(workspaceDir, ".bundled"), + ...options, + }), + ); +} + async function cloneTemplateDir(templateDir: string, prefix: string): Promise { const cloned = await fixtureSuite.createCaseDir(prefix); await fs.cp(templateDir, cloned, 
{ recursive: true }); return cloned; } +function expectSnapshotNamesAndPrompt( + snapshot: ReturnType, + params: { contains?: string[]; omits?: string[] }, +) { + for (const name of params.contains ?? []) { + expect(snapshot.skills.map((skill) => skill.name)).toContain(name); + expect(snapshot.prompt).toContain(name); + } + for (const name of params.omits ?? []) { + expect(snapshot.skills.map((skill) => skill.name)).not.toContain(name); + expect(snapshot.prompt).not.toContain(name); + } +} + describe("buildWorkspaceSkillSnapshot", () => { it("returns an empty snapshot when skills dirs are missing", async () => { const workspaceDir = await fixtureSuite.createCaseDir("workspace"); - const snapshot = withWorkspaceHome(workspaceDir, () => - buildWorkspaceSkillSnapshot(workspaceDir, { - managedSkillsDir: path.join(workspaceDir, ".managed"), - bundledSkillsDir: path.join(workspaceDir, ".bundled"), - }), - ); + const snapshot = buildSnapshot(workspaceDir); expect(snapshot.prompt).toBe(""); expect(snapshot.skills).toEqual([]); @@ -78,12 +100,7 @@ describe("buildWorkspaceSkillSnapshot", () => { frontmatterExtra: "disable-model-invocation: true", }); - const snapshot = withWorkspaceHome(workspaceDir, () => - buildWorkspaceSkillSnapshot(workspaceDir, { - managedSkillsDir: path.join(workspaceDir, ".managed"), - bundledSkillsDir: path.join(workspaceDir, ".bundled"), - }), - ); + const snapshot = buildSnapshot(workspaceDir); expect(snapshot.prompt).toContain("visible-skill"); expect(snapshot.prompt).not.toContain("hidden-skill"); @@ -204,24 +221,20 @@ describe("buildWorkspaceSkillSnapshot", () => { body: "x".repeat(5_000), }); - const snapshot = withWorkspaceHome(workspaceDir, () => - buildWorkspaceSkillSnapshot(workspaceDir, { - config: { - skills: { - limits: { - maxSkillFileBytes: 1000, - }, + const snapshot = buildSnapshot(workspaceDir, { + config: { + skills: { + limits: { + maxSkillFileBytes: 1000, }, }, - managedSkillsDir: path.join(workspaceDir, ".managed"), - 
bundledSkillsDir: path.join(workspaceDir, ".bundled"), - }), - ); + }, + }); - expect(snapshot.skills.map((s) => s.name)).toContain("small-skill"); - expect(snapshot.skills.map((s) => s.name)).not.toContain("big-skill"); - expect(snapshot.prompt).toContain("small-skill"); - expect(snapshot.prompt).not.toContain("big-skill"); + expectSnapshotNamesAndPrompt(snapshot, { + contains: ["small-skill"], + omits: ["big-skill"], + }); }); it("detects nested skills roots beyond the first 25 entries", async () => { @@ -241,26 +254,23 @@ describe("buildWorkspaceSkillSnapshot", () => { description: "Nested skill discovered late", }); - const snapshot = withWorkspaceHome(workspaceDir, () => - buildWorkspaceSkillSnapshot(workspaceDir, { - config: { - skills: { - load: { - extraDirs: [repoDir], - }, - limits: { - maxCandidatesPerRoot: 30, - maxSkillsLoadedPerSource: 30, - }, + const snapshot = buildSnapshot(workspaceDir, { + config: { + skills: { + load: { + extraDirs: [repoDir], + }, + limits: { + maxCandidatesPerRoot: 30, + maxSkillsLoadedPerSource: 30, }, }, - managedSkillsDir: path.join(workspaceDir, ".managed"), - bundledSkillsDir: path.join(workspaceDir, ".bundled"), - }), - ); + }, + }); - expect(snapshot.skills.map((s) => s.name)).toContain("late-skill"); - expect(snapshot.prompt).toContain("late-skill"); + expectSnapshotNamesAndPrompt(snapshot, { + contains: ["late-skill"], + }); }); it("enforces maxSkillFileBytes for root-level SKILL.md", async () => { @@ -274,24 +284,21 @@ describe("buildWorkspaceSkillSnapshot", () => { body: "x".repeat(5_000), }); - const snapshot = withWorkspaceHome(workspaceDir, () => - buildWorkspaceSkillSnapshot(workspaceDir, { - config: { - skills: { - load: { - extraDirs: [rootSkillDir], - }, - limits: { - maxSkillFileBytes: 1000, - }, + const snapshot = buildSnapshot(workspaceDir, { + config: { + skills: { + load: { + extraDirs: [rootSkillDir], + }, + limits: { + maxSkillFileBytes: 1000, }, }, - managedSkillsDir: path.join(workspaceDir, 
".managed"), - bundledSkillsDir: path.join(workspaceDir, ".bundled"), - }), - ); + }, + }); - expect(snapshot.skills.map((s) => s.name)).not.toContain("root-big-skill"); - expect(snapshot.prompt).not.toContain("root-big-skill"); + expectSnapshotNamesAndPrompt(snapshot, { + omits: ["root-big-skill"], + }); }); }); diff --git a/src/agents/skills.test.ts b/src/agents/skills.test.ts index 394f476ffa8..c5c8c2077d9 100644 --- a/src/agents/skills.test.ts +++ b/src/agents/skills.test.ts @@ -49,6 +49,16 @@ const withClearedEnv = ( } }; +async function writeEnvSkill(workspaceDir: string) { + const skillDir = path.join(workspaceDir, "skills", "env-skill"); + await writeSkill({ + dir: skillDir, + name: "env-skill", + description: "Needs env", + metadata: '{"openclaw":{"requires":{"env":["ENV_KEY"]},"primaryEnv":"ENV_KEY"}}', + }); +} + beforeAll(async () => { await fixtureSuite.setup(); tempHome = await createTempHomeEnv("openclaw-skills-home-"); @@ -240,13 +250,7 @@ describe("buildWorkspaceSkillsPrompt", () => { describe("applySkillEnvOverrides", () => { it("sets and restores env vars", async () => { const workspaceDir = await makeWorkspace(); - const skillDir = path.join(workspaceDir, "skills", "env-skill"); - await writeSkill({ - dir: skillDir, - name: "env-skill", - description: "Needs env", - metadata: '{"openclaw":{"requires":{"env":["ENV_KEY"]},"primaryEnv":"ENV_KEY"}}', - }); + await writeEnvSkill(workspaceDir); const entries = loadWorkspaceSkillEntries(workspaceDir, resolveTestSkillDirs(workspaceDir)); @@ -269,13 +273,7 @@ describe("applySkillEnvOverrides", () => { it("keeps env keys tracked until all overlapping overrides restore", async () => { const workspaceDir = await makeWorkspace(); - const skillDir = path.join(workspaceDir, "skills", "env-skill"); - await writeSkill({ - dir: skillDir, - name: "env-skill", - description: "Needs env", - metadata: '{"openclaw":{"requires":{"env":["ENV_KEY"]},"primaryEnv":"ENV_KEY"}}', - }); + await writeEnvSkill(workspaceDir); 
const entries = loadWorkspaceSkillEntries(workspaceDir, resolveTestSkillDirs(workspaceDir)); @@ -301,13 +299,7 @@ describe("applySkillEnvOverrides", () => { it("applies env overrides from snapshots", async () => { const workspaceDir = await makeWorkspace(); - const skillDir = path.join(workspaceDir, "skills", "env-skill"); - await writeSkill({ - dir: skillDir, - name: "env-skill", - description: "Needs env", - metadata: '{"openclaw":{"requires":{"env":["ENV_KEY"]},"primaryEnv":"ENV_KEY"}}', - }); + await writeEnvSkill(workspaceDir); const snapshot = buildWorkspaceSkillSnapshot(workspaceDir, { ...resolveTestSkillDirs(workspaceDir), diff --git a/src/agents/spawned-context.test.ts b/src/agents/spawned-context.test.ts index 964bf47a789..3f163eb3030 100644 --- a/src/agents/spawned-context.test.ts +++ b/src/agents/spawned-context.test.ts @@ -44,18 +44,44 @@ describe("mapToolContextToSpawnedRunMetadata", () => { }); describe("resolveSpawnedWorkspaceInheritance", () => { + const config = { + agents: { + list: [ + { id: "main", workspace: "/tmp/workspace-main" }, + { id: "ops", workspace: "/tmp/workspace-ops" }, + ], + }, + }; + it("prefers explicit workspaceDir when provided", () => { const resolved = resolveSpawnedWorkspaceInheritance({ - config: {}, + config, requesterSessionKey: "agent:main:subagent:parent", explicitWorkspaceDir: " /tmp/explicit ", }); expect(resolved).toBe("/tmp/explicit"); }); + it("prefers targetAgentId over requester session agent for cross-agent spawns", () => { + const resolved = resolveSpawnedWorkspaceInheritance({ + config, + targetAgentId: "ops", + requesterSessionKey: "agent:main:subagent:parent", + }); + expect(resolved).toBe("/tmp/workspace-ops"); + }); + + it("falls back to requester session agent when targetAgentId is missing", () => { + const resolved = resolveSpawnedWorkspaceInheritance({ + config, + requesterSessionKey: "agent:main:subagent:parent", + }); + expect(resolved).toBe("/tmp/workspace-main"); + }); + it("returns undefined for 
missing requester context", () => { const resolved = resolveSpawnedWorkspaceInheritance({ - config: {}, + config, requesterSessionKey: undefined, explicitWorkspaceDir: undefined, }); diff --git a/src/agents/spawned-context.ts b/src/agents/spawned-context.ts index 32a4d299e74..d0919c86baa 100644 --- a/src/agents/spawned-context.ts +++ b/src/agents/spawned-context.ts @@ -58,6 +58,7 @@ export function mapToolContextToSpawnedRunMetadata( export function resolveSpawnedWorkspaceInheritance(params: { config: OpenClawConfig; + targetAgentId?: string; requesterSessionKey?: string; explicitWorkspaceDir?: string | null; }): string | undefined { @@ -65,12 +66,13 @@ export function resolveSpawnedWorkspaceInheritance(params: { if (explicit) { return explicit; } - const requesterAgentId = params.requesterSessionKey - ? parseAgentSessionKey(params.requesterSessionKey)?.agentId - : undefined; - return requesterAgentId - ? resolveAgentWorkspaceDir(params.config, normalizeAgentId(requesterAgentId)) - : undefined; + // For cross-agent spawns, use the target agent's workspace instead of the requester's. + const agentId = + params.targetAgentId ?? + (params.requesterSessionKey + ? parseAgentSessionKey(params.requesterSessionKey)?.agentId + : undefined); + return agentId ? 
resolveAgentWorkspaceDir(params.config, normalizeAgentId(agentId)) : undefined; } export function resolveIngressWorkspaceOverrideForSpawnedRun( diff --git a/src/agents/subagent-announce.timeout.test.ts b/src/agents/subagent-announce.timeout.test.ts index 1c4925d9272..5fae988fe73 100644 --- a/src/agents/subagent-announce.timeout.test.ts +++ b/src/agents/subagent-announce.timeout.test.ts @@ -8,6 +8,12 @@ type GatewayCall = { }; const gatewayCalls: GatewayCall[] = []; +let callGatewayImpl: (request: GatewayCall) => Promise = async (request) => { + if (request.method === "chat.history") { + return { messages: [] }; + } + return {}; +}; let sessionStore: Record> = {}; let configOverride: ReturnType<(typeof import("../config/config.js"))["loadConfig"]> = { session: { @@ -27,10 +33,7 @@ let fallbackRequesterResolution: { vi.mock("../gateway/call.js", () => ({ callGateway: vi.fn(async (request: GatewayCall) => { gatewayCalls.push(request); - if (request.method === "chat.history") { - return { messages: [] }; - } - return {}; + return await callGatewayImpl(request); }), })); @@ -117,9 +120,30 @@ function findGatewayCall(predicate: (call: GatewayCall) => boolean): GatewayCall return gatewayCalls.find(predicate); } +function findFinalDirectAgentCall(): GatewayCall | undefined { + return findGatewayCall((call) => call.method === "agent" && call.expectFinal === true); +} + +function setupParentSessionFallback(parentSessionKey: string): void { + requesterDepthResolver = (sessionKey?: string) => + sessionKey === parentSessionKey ? 1 : sessionKey?.includes(":subagent:") ? 
1 : 0; + subagentSessionRunActive = false; + shouldIgnorePostCompletion = false; + fallbackRequesterResolution = { + requesterSessionKey: "agent:main:main", + requesterOrigin: { channel: "discord", to: "chan-main", accountId: "acct-main" }, + }; +} + describe("subagent announce timeout config", () => { beforeEach(() => { gatewayCalls.length = 0; + callGatewayImpl = async (request) => { + if (request.method === "chat.history") { + return { messages: [] }; + } + return {}; + }; sessionStore = {}; configOverride = { session: defaultSessionConfig, @@ -131,13 +155,13 @@ describe("subagent announce timeout config", () => { fallbackRequesterResolution = null; }); - it("uses 60s timeout by default for direct announce agent call", async () => { + it("uses 90s timeout by default for direct announce agent call", async () => { await runAnnounceFlowForTest("run-default-timeout"); const directAgentCall = findGatewayCall( (call) => call.method === "agent" && call.expectFinal === true, ); - expect(directAgentCall?.timeoutMs).toBe(60_000); + expect(directAgentCall?.timeoutMs).toBe(90_000); }); it("honors configured announce timeout for direct announce agent call", async () => { @@ -166,6 +190,35 @@ describe("subagent announce timeout config", () => { expect(completionDirectAgentCall?.timeoutMs).toBe(90_000); }); + it("does not retry gateway timeout for externally delivered completion announces", async () => { + vi.useFakeTimers(); + try { + callGatewayImpl = async (request) => { + if (request.method === "chat.history") { + return { messages: [] }; + } + throw new Error("gateway timeout after 90000ms"); + }; + + await expect( + runAnnounceFlowForTest("run-completion-timeout-no-retry", { + requesterOrigin: { + channel: "telegram", + to: "12345", + }, + expectsCompletionMessage: true, + }), + ).resolves.toBe(false); + + const directAgentCalls = gatewayCalls.filter( + (call) => call.method === "agent" && call.expectFinal === true, + ); + expect(directAgentCalls).toHaveLength(1); + } 
finally { + vi.useRealTimers(); + } + }); + it("regression, skips parent announce while descendants are still pending", async () => { requesterDepthResolver = () => 1; pendingDescendantRuns = 2; @@ -206,9 +259,7 @@ describe("subagent announce timeout config", () => { requesterOrigin: { channel: "discord", to: "channel:cron-results", accountId: "acct-1" }, }); - const directAgentCall = findGatewayCall( - (call) => call.method === "agent" && call.expectFinal === true, - ); + const directAgentCall = findFinalDirectAgentCall(); expect(directAgentCall?.params?.sessionKey).toBe(cronSessionKey); expect(directAgentCall?.params?.deliver).toBe(false); expect(directAgentCall?.params?.channel).toBeUndefined(); @@ -218,14 +269,7 @@ describe("subagent announce timeout config", () => { it("regression, routes child announce to parent session instead of grandparent when parent session still exists", async () => { const parentSessionKey = "agent:main:subagent:parent"; - requesterDepthResolver = (sessionKey?: string) => - sessionKey === parentSessionKey ? 1 : sessionKey?.includes(":subagent:") ? 1 : 0; - subagentSessionRunActive = false; - shouldIgnorePostCompletion = false; - fallbackRequesterResolution = { - requesterSessionKey: "agent:main:main", - requesterOrigin: { channel: "discord", to: "chan-main", accountId: "acct-main" }, - }; + setupParentSessionFallback(parentSessionKey); // No sessionId on purpose: existence in store should still count as alive. 
sessionStore[parentSessionKey] = { updatedAt: Date.now() }; @@ -235,23 +279,14 @@ describe("subagent announce timeout config", () => { childSessionKey: `${parentSessionKey}:subagent:child`, }); - const directAgentCall = findGatewayCall( - (call) => call.method === "agent" && call.expectFinal === true, - ); + const directAgentCall = findFinalDirectAgentCall(); expect(directAgentCall?.params?.sessionKey).toBe(parentSessionKey); expect(directAgentCall?.params?.deliver).toBe(false); }); it("regression, falls back to grandparent only when parent subagent session is missing", async () => { const parentSessionKey = "agent:main:subagent:parent-missing"; - requesterDepthResolver = (sessionKey?: string) => - sessionKey === parentSessionKey ? 1 : sessionKey?.includes(":subagent:") ? 1 : 0; - subagentSessionRunActive = false; - shouldIgnorePostCompletion = false; - fallbackRequesterResolution = { - requesterSessionKey: "agent:main:main", - requesterOrigin: { channel: "discord", to: "chan-main", accountId: "acct-main" }, - }; + setupParentSessionFallback(parentSessionKey); await runAnnounceFlowForTest("run-parent-fallback", { requesterSessionKey: parentSessionKey, @@ -259,9 +294,7 @@ describe("subagent announce timeout config", () => { childSessionKey: `${parentSessionKey}:subagent:child`, }); - const directAgentCall = findGatewayCall( - (call) => call.method === "agent" && call.expectFinal === true, - ); + const directAgentCall = findFinalDirectAgentCall(); expect(directAgentCall?.params?.sessionKey).toBe("agent:main:main"); expect(directAgentCall?.params?.deliver).toBe(true); expect(directAgentCall?.params?.channel).toBe("discord"); diff --git a/src/agents/subagent-announce.ts b/src/agents/subagent-announce.ts index 62b2cc6f0d3..5070b204392 100644 --- a/src/agents/subagent-announce.ts +++ b/src/agents/subagent-announce.ts @@ -51,8 +51,9 @@ import { isAnnounceSkip } from "./tools/sessions-send-helpers.js"; const FAST_TEST_MODE = process.env.OPENCLAW_TEST_FAST === "1"; const 
FAST_TEST_RETRY_INTERVAL_MS = 8; -const DEFAULT_SUBAGENT_ANNOUNCE_TIMEOUT_MS = 60_000; +const DEFAULT_SUBAGENT_ANNOUNCE_TIMEOUT_MS = 90_000; const MAX_TIMER_SAFE_TIMEOUT_MS = 2_147_000_000; +const GATEWAY_TIMEOUT_PATTERN = /gateway timeout/i; let subagentRegistryRuntimePromise: Promise< typeof import("./subagent-registry-runtime.js") > | null = null; @@ -107,7 +108,7 @@ const TRANSIENT_ANNOUNCE_DELIVERY_ERROR_PATTERNS: readonly RegExp[] = [ /no active .* listener/i, /gateway not connected/i, /gateway closed \(1006/i, - /gateway timeout/i, + GATEWAY_TIMEOUT_PATTERN, /\b(econnreset|econnrefused|etimedout|enotfound|ehostunreach|network error)\b/i, ]; @@ -133,6 +134,11 @@ function isTransientAnnounceDeliveryError(error: unknown): boolean { return TRANSIENT_ANNOUNCE_DELIVERY_ERROR_PATTERNS.some((re) => re.test(message)); } +function isGatewayTimeoutError(error: unknown): boolean { + const message = summarizeDeliveryError(error); + return Boolean(message) && GATEWAY_TIMEOUT_PATTERN.test(message); +} + async function waitForAnnounceRetryDelay(ms: number, signal?: AbortSignal): Promise { if (ms <= 0) { return; @@ -160,6 +166,7 @@ async function waitForAnnounceRetryDelay(ms: number, signal?: AbortSignal): Prom async function runAnnounceDeliveryWithRetry(params: { operation: string; + noRetryOnGatewayTimeout?: boolean; signal?: AbortSignal; run: () => Promise; }): Promise { @@ -171,6 +178,9 @@ async function runAnnounceDeliveryWithRetry(params: { try { return await params.run(); } catch (err) { + if (params.noRetryOnGatewayTimeout && isGatewayTimeoutError(err)) { + throw err; + } const delayMs = DIRECT_ANNOUNCE_TRANSIENT_RETRY_DELAYS_MS[retryIndex]; if (delayMs == null || !isTransientAnnounceDeliveryError(err) || params.signal?.aborted) { throw err; @@ -789,6 +799,7 @@ async function sendSubagentAnnounceDirectly(params: { operation: params.expectsCompletionMessage ? 
"completion direct announce agent call" : "direct announce agent call", + noRetryOnGatewayTimeout: params.expectsCompletionMessage && shouldDeliverExternally, signal: params.signal, run: async () => await callGateway({ diff --git a/src/agents/subagent-capabilities.ts b/src/agents/subagent-capabilities.ts new file mode 100644 index 00000000000..5350b4f6321 --- /dev/null +++ b/src/agents/subagent-capabilities.ts @@ -0,0 +1,156 @@ +import { DEFAULT_SUBAGENT_MAX_SPAWN_DEPTH } from "../config/agent-limits.js"; +import type { OpenClawConfig } from "../config/config.js"; +import { loadSessionStore, resolveStorePath } from "../config/sessions.js"; +import { isSubagentSessionKey, parseAgentSessionKey } from "../routing/session-key.js"; +import { getSubagentDepthFromSessionStore } from "./subagent-depth.js"; + +export const SUBAGENT_SESSION_ROLES = ["main", "orchestrator", "leaf"] as const; +export type SubagentSessionRole = (typeof SUBAGENT_SESSION_ROLES)[number]; + +export const SUBAGENT_CONTROL_SCOPES = ["children", "none"] as const; +export type SubagentControlScope = (typeof SUBAGENT_CONTROL_SCOPES)[number]; + +type SessionCapabilityEntry = { + sessionId?: unknown; + spawnDepth?: unknown; + subagentRole?: unknown; + subagentControlScope?: unknown; +}; + +function normalizeSessionKey(value: unknown): string | undefined { + if (typeof value !== "string") { + return undefined; + } + const trimmed = value.trim(); + return trimmed || undefined; +} + +function normalizeSubagentRole(value: unknown): SubagentSessionRole | undefined { + if (typeof value !== "string") { + return undefined; + } + const trimmed = value.trim().toLowerCase(); + return SUBAGENT_SESSION_ROLES.find((entry) => entry === trimmed); +} + +function normalizeSubagentControlScope(value: unknown): SubagentControlScope | undefined { + if (typeof value !== "string") { + return undefined; + } + const trimmed = value.trim().toLowerCase(); + return SUBAGENT_CONTROL_SCOPES.find((entry) => entry === trimmed); +} + 
+function readSessionStore(storePath: string): Record { + try { + return loadSessionStore(storePath); + } catch { + return {}; + } +} + +function findEntryBySessionId( + store: Record, + sessionId: string, +): SessionCapabilityEntry | undefined { + const normalizedSessionId = normalizeSessionKey(sessionId); + if (!normalizedSessionId) { + return undefined; + } + for (const entry of Object.values(store)) { + const candidateSessionId = normalizeSessionKey(entry?.sessionId); + if (candidateSessionId === normalizedSessionId) { + return entry; + } + } + return undefined; +} + +function resolveSessionCapabilityEntry(params: { + sessionKey: string; + cfg?: OpenClawConfig; + store?: Record; +}): SessionCapabilityEntry | undefined { + if (params.store) { + return params.store[params.sessionKey] ?? findEntryBySessionId(params.store, params.sessionKey); + } + if (!params.cfg) { + return undefined; + } + const parsed = parseAgentSessionKey(params.sessionKey); + if (!parsed?.agentId) { + return undefined; + } + const storePath = resolveStorePath(params.cfg.session?.store, { agentId: parsed.agentId }); + const store = readSessionStore(storePath); + return store[params.sessionKey] ?? findEntryBySessionId(store, params.sessionKey); +} + +export function resolveSubagentRoleForDepth(params: { + depth: number; + maxSpawnDepth?: number; +}): SubagentSessionRole { + const depth = Number.isInteger(params.depth) ? Math.max(0, params.depth) : 0; + const maxSpawnDepth = + typeof params.maxSpawnDepth === "number" && Number.isFinite(params.maxSpawnDepth) + ? Math.max(1, Math.floor(params.maxSpawnDepth)) + : DEFAULT_SUBAGENT_MAX_SPAWN_DEPTH; + if (depth <= 0) { + return "main"; + } + return depth < maxSpawnDepth ? "orchestrator" : "leaf"; +} + +export function resolveSubagentControlScopeForRole( + role: SubagentSessionRole, +): SubagentControlScope { + return role === "leaf" ? 
"none" : "children"; +} + +export function resolveSubagentCapabilities(params: { depth: number; maxSpawnDepth?: number }) { + const role = resolveSubagentRoleForDepth(params); + const controlScope = resolveSubagentControlScopeForRole(role); + return { + depth: Math.max(0, Math.floor(params.depth)), + role, + controlScope, + canSpawn: role === "main" || role === "orchestrator", + canControlChildren: controlScope === "children", + }; +} + +export function resolveStoredSubagentCapabilities( + sessionKey: string | undefined | null, + opts?: { + cfg?: OpenClawConfig; + store?: Record; + }, +) { + const normalizedSessionKey = normalizeSessionKey(sessionKey); + const maxSpawnDepth = + opts?.cfg?.agents?.defaults?.subagents?.maxSpawnDepth ?? DEFAULT_SUBAGENT_MAX_SPAWN_DEPTH; + const depth = getSubagentDepthFromSessionStore(normalizedSessionKey, { + cfg: opts?.cfg, + store: opts?.store, + }); + if (!normalizedSessionKey || !isSubagentSessionKey(normalizedSessionKey)) { + return resolveSubagentCapabilities({ depth, maxSpawnDepth }); + } + const entry = resolveSessionCapabilityEntry({ + sessionKey: normalizedSessionKey, + cfg: opts?.cfg, + store: opts?.store, + }); + const storedRole = normalizeSubagentRole(entry?.subagentRole); + const storedControlScope = normalizeSubagentControlScope(entry?.subagentControlScope); + const fallback = resolveSubagentCapabilities({ depth, maxSpawnDepth }); + const role = storedRole ?? fallback.role; + const controlScope = storedControlScope ?? 
resolveSubagentControlScopeForRole(role); + return { + depth, + role, + controlScope, + canSpawn: role === "main" || role === "orchestrator", + canControlChildren: controlScope === "children", + }; +} diff --git a/src/agents/subagent-control.ts b/src/agents/subagent-control.ts new file mode 100644 index 00000000000..528a84eebd3 --- /dev/null +++ b/src/agents/subagent-control.ts @@ -0,0 +1,768 @@ +import crypto from "node:crypto"; +import { clearSessionQueues } from "../auto-reply/reply/queue.js"; +import { + resolveSubagentLabel, + resolveSubagentTargetFromRuns, + sortSubagentRuns, + type SubagentTargetResolution, +} from "../auto-reply/reply/subagents-utils.js"; +import type { OpenClawConfig } from "../config/config.js"; +import type { SessionEntry } from "../config/sessions.js"; +import { loadSessionStore, resolveStorePath, updateSessionStore } from "../config/sessions.js"; +import { callGateway } from "../gateway/call.js"; +import { logVerbose } from "../globals.js"; +import { + isSubagentSessionKey, + parseAgentSessionKey, + type ParsedAgentSessionKey, +} from "../routing/session-key.js"; +import { + formatDurationCompact, + formatTokenUsageDisplay, + resolveTotalTokens, + truncateLine, +} from "../shared/subagents-format.js"; +import { INTERNAL_MESSAGE_CHANNEL } from "../utils/message-channel.js"; +import { AGENT_LANE_SUBAGENT } from "./lanes.js"; +import { abortEmbeddedPiRun } from "./pi-embedded.js"; +import { resolveStoredSubagentCapabilities } from "./subagent-capabilities.js"; +import { + clearSubagentRunSteerRestart, + countPendingDescendantRuns, + listSubagentRunsForController, + markSubagentRunTerminated, + markSubagentRunForSteerRestart, + replaceSubagentRunAfterSteer, + type SubagentRunRecord, +} from "./subagent-registry.js"; +import { + extractAssistantText, + resolveInternalSessionKey, + resolveMainSessionAlias, + stripToolMessages, +} from "./tools/sessions-helpers.js"; + +export const DEFAULT_RECENT_MINUTES = 30; +export const MAX_RECENT_MINUTES 
= 24 * 60; +export const MAX_STEER_MESSAGE_CHARS = 4_000; +export const STEER_RATE_LIMIT_MS = 2_000; +export const STEER_ABORT_SETTLE_TIMEOUT_MS = 5_000; + +const steerRateLimit = new Map(); + +export type SessionEntryResolution = { + storePath: string; + entry: SessionEntry | undefined; +}; + +export type ResolvedSubagentController = { + controllerSessionKey: string; + callerSessionKey: string; + callerIsSubagent: boolean; + controlScope: "children" | "none"; +}; + +export type SubagentListItem = { + index: number; + line: string; + runId: string; + sessionKey: string; + label: string; + task: string; + status: string; + pendingDescendants: number; + runtime: string; + runtimeMs: number; + model?: string; + totalTokens?: number; + startedAt?: number; + endedAt?: number; +}; + +export type BuiltSubagentList = { + total: number; + active: SubagentListItem[]; + recent: SubagentListItem[]; + text: string; +}; + +function resolveStorePathForKey( + cfg: OpenClawConfig, + key: string, + parsed?: ParsedAgentSessionKey | null, +) { + return resolveStorePath(cfg.session?.store, { + agentId: parsed?.agentId, + }); +} + +export function resolveSessionEntryForKey(params: { + cfg: OpenClawConfig; + key: string; + cache: Map>; +}): SessionEntryResolution { + const parsed = parseAgentSessionKey(params.key); + const storePath = resolveStorePathForKey(params.cfg, params.key, parsed); + let store = params.cache.get(storePath); + if (!store) { + store = loadSessionStore(storePath); + params.cache.set(storePath, store); + } + return { + storePath, + entry: store[params.key], + }; +} + +export function resolveSubagentController(params: { + cfg: OpenClawConfig; + agentSessionKey?: string; +}): ResolvedSubagentController { + const { mainKey, alias } = resolveMainSessionAlias(params.cfg); + const callerRaw = params.agentSessionKey?.trim() || alias; + const callerSessionKey = resolveInternalSessionKey({ + key: callerRaw, + alias, + mainKey, + }); + if 
(!isSubagentSessionKey(callerSessionKey)) { + return { + controllerSessionKey: callerSessionKey, + callerSessionKey, + callerIsSubagent: false, + controlScope: "children", + }; + } + const capabilities = resolveStoredSubagentCapabilities(callerSessionKey, { + cfg: params.cfg, + }); + return { + controllerSessionKey: callerSessionKey, + callerSessionKey, + callerIsSubagent: true, + controlScope: capabilities.controlScope, + }; +} + +export function listControlledSubagentRuns(controllerSessionKey: string): SubagentRunRecord[] { + return sortSubagentRuns(listSubagentRunsForController(controllerSessionKey)); +} + +export function createPendingDescendantCounter() { + const pendingDescendantCache = new Map(); + return (sessionKey: string) => { + if (pendingDescendantCache.has(sessionKey)) { + return pendingDescendantCache.get(sessionKey) ?? 0; + } + const pending = Math.max(0, countPendingDescendantRuns(sessionKey)); + pendingDescendantCache.set(sessionKey, pending); + return pending; + }; +} + +export function isActiveSubagentRun( + entry: SubagentRunRecord, + pendingDescendantCount: (sessionKey: string) => number, +) { + return !entry.endedAt || pendingDescendantCount(entry.childSessionKey) > 0; +} + +function resolveRunStatus(entry: SubagentRunRecord, options?: { pendingDescendants?: number }) { + const pendingDescendants = Math.max(0, options?.pendingDescendants ?? 0); + if (pendingDescendants > 0) { + const childLabel = pendingDescendants === 1 ? "child" : "children"; + return `active (waiting on ${pendingDescendants} ${childLabel})`; + } + if (!entry.endedAt) { + return "running"; + } + const status = entry.outcome?.status ?? "done"; + if (status === "ok") { + return "done"; + } + if (status === "error") { + return "failed"; + } + return status; +} + +function resolveModelRef(entry?: SessionEntry) { + const model = typeof entry?.model === "string" ? entry.model.trim() : ""; + const provider = typeof entry?.modelProvider === "string" ? 
entry.modelProvider.trim() : ""; + if (model.includes("/")) { + return model; + } + if (model && provider) { + return `${provider}/${model}`; + } + if (model) { + return model; + } + if (provider) { + return provider; + } + const overrideModel = typeof entry?.modelOverride === "string" ? entry.modelOverride.trim() : ""; + const overrideProvider = + typeof entry?.providerOverride === "string" ? entry.providerOverride.trim() : ""; + if (overrideModel.includes("/")) { + return overrideModel; + } + if (overrideModel && overrideProvider) { + return `${overrideProvider}/${overrideModel}`; + } + if (overrideModel) { + return overrideModel; + } + return overrideProvider || undefined; +} + +function resolveModelDisplay(entry?: SessionEntry, fallbackModel?: string) { + const modelRef = resolveModelRef(entry) || fallbackModel || undefined; + if (!modelRef) { + return "model n/a"; + } + const slash = modelRef.lastIndexOf("/"); + if (slash >= 0 && slash < modelRef.length - 1) { + return modelRef.slice(slash + 1); + } + return modelRef; +} + +function buildListText(params: { + active: Array<{ line: string }>; + recent: Array<{ line: string }>; + recentMinutes: number; +}) { + const lines: string[] = []; + lines.push("active subagents:"); + if (params.active.length === 0) { + lines.push("(none)"); + } else { + lines.push(...params.active.map((entry) => entry.line)); + } + lines.push(""); + lines.push(`recent (last ${params.recentMinutes}m):`); + if (params.recent.length === 0) { + lines.push("(none)"); + } else { + lines.push(...params.recent.map((entry) => entry.line)); + } + return lines.join("\n"); +} + +export function buildSubagentList(params: { + cfg: OpenClawConfig; + runs: SubagentRunRecord[]; + recentMinutes: number; + taskMaxChars?: number; +}): BuiltSubagentList { + const now = Date.now(); + const recentCutoff = now - params.recentMinutes * 60_000; + const cache = new Map>(); + const pendingDescendantCount = createPendingDescendantCounter(); + let index = 1; + const 
buildListEntry = (entry: SubagentRunRecord, runtimeMs: number) => { + const sessionEntry = resolveSessionEntryForKey({ + cfg: params.cfg, + key: entry.childSessionKey, + cache, + }).entry; + const totalTokens = resolveTotalTokens(sessionEntry); + const usageText = formatTokenUsageDisplay(sessionEntry); + const pendingDescendants = pendingDescendantCount(entry.childSessionKey); + const status = resolveRunStatus(entry, { + pendingDescendants, + }); + const runtime = formatDurationCompact(runtimeMs); + const label = truncateLine(resolveSubagentLabel(entry), 48); + const task = truncateLine(entry.task.trim(), params.taskMaxChars ?? 72); + const line = `${index}. ${label} (${resolveModelDisplay(sessionEntry, entry.model)}, ${runtime}${usageText ? `, ${usageText}` : ""}) ${status}${task.toLowerCase() !== label.toLowerCase() ? ` - ${task}` : ""}`; + const view: SubagentListItem = { + index, + line, + runId: entry.runId, + sessionKey: entry.childSessionKey, + label, + task, + status, + pendingDescendants, + runtime, + runtimeMs, + model: resolveModelRef(sessionEntry) || entry.model, + totalTokens, + startedAt: entry.startedAt, + ...(entry.endedAt ? { endedAt: entry.endedAt } : {}), + }; + index += 1; + return view; + }; + const active = params.runs + .filter((entry) => isActiveSubagentRun(entry, pendingDescendantCount)) + .map((entry) => buildListEntry(entry, now - (entry.startedAt ?? entry.createdAt))); + const recent = params.runs + .filter( + (entry) => + !isActiveSubagentRun(entry, pendingDescendantCount) && + !!entry.endedAt && + (entry.endedAt ?? 0) >= recentCutoff, + ) + .map((entry) => + buildListEntry(entry, (entry.endedAt ?? now) - (entry.startedAt ?? 
entry.createdAt)), + ); + return { + total: params.runs.length, + active, + recent, + text: buildListText({ active, recent, recentMinutes: params.recentMinutes }), + }; +} + +function ensureControllerOwnsRun(params: { + controller: ResolvedSubagentController; + entry: SubagentRunRecord; +}) { + const owner = params.entry.controllerSessionKey?.trim() || params.entry.requesterSessionKey; + if (owner === params.controller.controllerSessionKey) { + return undefined; + } + return "Subagents can only control runs spawned from their own session."; +} + +async function killSubagentRun(params: { + cfg: OpenClawConfig; + entry: SubagentRunRecord; + cache: Map>; +}): Promise<{ killed: boolean; sessionId?: string }> { + if (params.entry.endedAt) { + return { killed: false }; + } + const childSessionKey = params.entry.childSessionKey; + const resolved = resolveSessionEntryForKey({ + cfg: params.cfg, + key: childSessionKey, + cache: params.cache, + }); + const sessionId = resolved.entry?.sessionId; + const aborted = sessionId ? 
abortEmbeddedPiRun(sessionId) : false; + const cleared = clearSessionQueues([childSessionKey, sessionId]); + if (cleared.followupCleared > 0 || cleared.laneCleared > 0) { + logVerbose( + `subagents control kill: cleared followups=${cleared.followupCleared} lane=${cleared.laneCleared} keys=${cleared.keys.join(",")}`, + ); + } + if (resolved.entry) { + await updateSessionStore(resolved.storePath, (store) => { + const current = store[childSessionKey]; + if (!current) { + return; + } + current.abortedLastRun = true; + current.updatedAt = Date.now(); + store[childSessionKey] = current; + }); + } + const marked = markSubagentRunTerminated({ + runId: params.entry.runId, + childSessionKey, + reason: "killed", + }); + const killed = marked > 0 || aborted || cleared.followupCleared > 0 || cleared.laneCleared > 0; + return { killed, sessionId }; +} + +async function cascadeKillChildren(params: { + cfg: OpenClawConfig; + parentChildSessionKey: string; + cache: Map>; + seenChildSessionKeys?: Set; +}): Promise<{ killed: number; labels: string[] }> { + const childRuns = listSubagentRunsForController(params.parentChildSessionKey); + const seenChildSessionKeys = params.seenChildSessionKeys ?? 
new Set(); + let killed = 0; + const labels: string[] = []; + + for (const run of childRuns) { + const childKey = run.childSessionKey?.trim(); + if (!childKey || seenChildSessionKeys.has(childKey)) { + continue; + } + seenChildSessionKeys.add(childKey); + + if (!run.endedAt) { + const stopResult = await killSubagentRun({ + cfg: params.cfg, + entry: run, + cache: params.cache, + }); + if (stopResult.killed) { + killed += 1; + labels.push(resolveSubagentLabel(run)); + } + } + + const cascade = await cascadeKillChildren({ + cfg: params.cfg, + parentChildSessionKey: childKey, + cache: params.cache, + seenChildSessionKeys, + }); + killed += cascade.killed; + labels.push(...cascade.labels); + } + + return { killed, labels }; +} + +export async function killAllControlledSubagentRuns(params: { + cfg: OpenClawConfig; + controller: ResolvedSubagentController; + runs: SubagentRunRecord[]; +}) { + if (params.controller.controlScope !== "children") { + return { + status: "forbidden" as const, + error: "Leaf subagents cannot control other sessions.", + killed: 0, + labels: [], + }; + } + const cache = new Map>(); + const seenChildSessionKeys = new Set(); + const killedLabels: string[] = []; + let killed = 0; + for (const entry of params.runs) { + const childKey = entry.childSessionKey?.trim(); + if (!childKey || seenChildSessionKeys.has(childKey)) { + continue; + } + seenChildSessionKeys.add(childKey); + + if (!entry.endedAt) { + const stopResult = await killSubagentRun({ cfg: params.cfg, entry, cache }); + if (stopResult.killed) { + killed += 1; + killedLabels.push(resolveSubagentLabel(entry)); + } + } + + const cascade = await cascadeKillChildren({ + cfg: params.cfg, + parentChildSessionKey: childKey, + cache, + seenChildSessionKeys, + }); + killed += cascade.killed; + killedLabels.push(...cascade.labels); + } + return { status: "ok" as const, killed, labels: killedLabels }; +} + +export async function killControlledSubagentRun(params: { + cfg: OpenClawConfig; + controller: 
ResolvedSubagentController; + entry: SubagentRunRecord; +}) { + const ownershipError = ensureControllerOwnsRun({ + controller: params.controller, + entry: params.entry, + }); + if (ownershipError) { + return { + status: "forbidden" as const, + runId: params.entry.runId, + sessionKey: params.entry.childSessionKey, + error: ownershipError, + }; + } + if (params.controller.controlScope !== "children") { + return { + status: "forbidden" as const, + runId: params.entry.runId, + sessionKey: params.entry.childSessionKey, + error: "Leaf subagents cannot control other sessions.", + }; + } + const killCache = new Map>(); + const stopResult = await killSubagentRun({ + cfg: params.cfg, + entry: params.entry, + cache: killCache, + }); + const seenChildSessionKeys = new Set(); + const targetChildKey = params.entry.childSessionKey?.trim(); + if (targetChildKey) { + seenChildSessionKeys.add(targetChildKey); + } + const cascade = await cascadeKillChildren({ + cfg: params.cfg, + parentChildSessionKey: params.entry.childSessionKey, + cache: killCache, + seenChildSessionKeys, + }); + if (!stopResult.killed && cascade.killed === 0) { + return { + status: "done" as const, + runId: params.entry.runId, + sessionKey: params.entry.childSessionKey, + label: resolveSubagentLabel(params.entry), + text: `${resolveSubagentLabel(params.entry)} is already finished.`, + }; + } + const cascadeText = + cascade.killed > 0 ? ` (+ ${cascade.killed} descendant${cascade.killed === 1 ? "" : "s"})` : ""; + return { + status: "ok" as const, + runId: params.entry.runId, + sessionKey: params.entry.childSessionKey, + label: resolveSubagentLabel(params.entry), + cascadeKilled: cascade.killed, + cascadeLabels: cascade.killed > 0 ? cascade.labels : undefined, + text: stopResult.killed + ? `killed ${resolveSubagentLabel(params.entry)}${cascadeText}.` + : `killed ${cascade.killed} descendant${cascade.killed === 1 ? 
"" : "s"} of ${resolveSubagentLabel(params.entry)}.`, + }; +} + +export async function steerControlledSubagentRun(params: { + cfg: OpenClawConfig; + controller: ResolvedSubagentController; + entry: SubagentRunRecord; + message: string; +}): Promise< + | { + status: "forbidden" | "done" | "rate_limited" | "error"; + runId?: string; + sessionKey: string; + sessionId?: string; + error?: string; + text?: string; + } + | { + status: "accepted"; + runId: string; + sessionKey: string; + sessionId?: string; + mode: "restart"; + label: string; + text: string; + } +> { + const ownershipError = ensureControllerOwnsRun({ + controller: params.controller, + entry: params.entry, + }); + if (ownershipError) { + return { + status: "forbidden", + runId: params.entry.runId, + sessionKey: params.entry.childSessionKey, + error: ownershipError, + }; + } + if (params.controller.controlScope !== "children") { + return { + status: "forbidden", + runId: params.entry.runId, + sessionKey: params.entry.childSessionKey, + error: "Leaf subagents cannot control other sessions.", + }; + } + if (params.entry.endedAt) { + return { + status: "done", + runId: params.entry.runId, + sessionKey: params.entry.childSessionKey, + text: `${resolveSubagentLabel(params.entry)} is already finished.`, + }; + } + if (params.controller.callerSessionKey === params.entry.childSessionKey) { + return { + status: "forbidden", + runId: params.entry.runId, + sessionKey: params.entry.childSessionKey, + error: "Subagents cannot steer themselves.", + }; + } + + const rateKey = `${params.controller.callerSessionKey}:${params.entry.childSessionKey}`; + if (process.env.VITEST !== "true") { + const now = Date.now(); + const lastSentAt = steerRateLimit.get(rateKey) ?? 0; + if (now - lastSentAt < STEER_RATE_LIMIT_MS) { + return { + status: "rate_limited", + runId: params.entry.runId, + sessionKey: params.entry.childSessionKey, + error: "Steer rate limit exceeded. 
Wait a moment before sending another steer.", + }; + } + steerRateLimit.set(rateKey, now); + } + + markSubagentRunForSteerRestart(params.entry.runId); + + const targetSession = resolveSessionEntryForKey({ + cfg: params.cfg, + key: params.entry.childSessionKey, + cache: new Map>(), + }); + const sessionId = + typeof targetSession.entry?.sessionId === "string" && targetSession.entry.sessionId.trim() + ? targetSession.entry.sessionId.trim() + : undefined; + + if (sessionId) { + abortEmbeddedPiRun(sessionId); + } + const cleared = clearSessionQueues([params.entry.childSessionKey, sessionId]); + if (cleared.followupCleared > 0 || cleared.laneCleared > 0) { + logVerbose( + `subagents control steer: cleared followups=${cleared.followupCleared} lane=${cleared.laneCleared} keys=${cleared.keys.join(",")}`, + ); + } + + try { + await callGateway({ + method: "agent.wait", + params: { + runId: params.entry.runId, + timeoutMs: STEER_ABORT_SETTLE_TIMEOUT_MS, + }, + timeoutMs: STEER_ABORT_SETTLE_TIMEOUT_MS + 2_000, + }); + } catch { + // Continue even if wait fails; steer should still be attempted. + } + + const idempotencyKey = crypto.randomUUID(); + let runId: string = idempotencyKey; + try { + const response = await callGateway<{ runId: string }>({ + method: "agent", + params: { + message: params.message, + sessionKey: params.entry.childSessionKey, + sessionId, + idempotencyKey, + deliver: false, + channel: INTERNAL_MESSAGE_CHANNEL, + lane: AGENT_LANE_SUBAGENT, + timeout: 0, + }, + timeoutMs: 10_000, + }); + if (typeof response?.runId === "string" && response.runId) { + runId = response.runId; + } + } catch (err) { + clearSubagentRunSteerRestart(params.entry.runId); + const error = err instanceof Error ? 
err.message : String(err); + return { + status: "error", + runId, + sessionKey: params.entry.childSessionKey, + sessionId, + error, + }; + } + + replaceSubagentRunAfterSteer({ + previousRunId: params.entry.runId, + nextRunId: runId, + fallback: params.entry, + runTimeoutSeconds: params.entry.runTimeoutSeconds ?? 0, + }); + + return { + status: "accepted", + runId, + sessionKey: params.entry.childSessionKey, + sessionId, + mode: "restart", + label: resolveSubagentLabel(params.entry), + text: `steered ${resolveSubagentLabel(params.entry)}.`, + }; +} + +export async function sendControlledSubagentMessage(params: { + cfg: OpenClawConfig; + entry: SubagentRunRecord; + message: string; +}) { + const targetSessionKey = params.entry.childSessionKey; + const parsed = parseAgentSessionKey(targetSessionKey); + const storePath = resolveStorePath(params.cfg.session?.store, { agentId: parsed?.agentId }); + const store = loadSessionStore(storePath); + const targetSessionEntry = store[targetSessionKey]; + const targetSessionId = + typeof targetSessionEntry?.sessionId === "string" && targetSessionEntry.sessionId.trim() + ? targetSessionEntry.sessionId.trim() + : undefined; + + const idempotencyKey = crypto.randomUUID(); + let runId: string = idempotencyKey; + const response = await callGateway<{ runId: string }>({ + method: "agent", + params: { + message: params.message, + sessionKey: targetSessionKey, + sessionId: targetSessionId, + idempotencyKey, + deliver: false, + channel: INTERNAL_MESSAGE_CHANNEL, + lane: AGENT_LANE_SUBAGENT, + timeout: 0, + }, + timeoutMs: 10_000, + }); + const responseRunId = typeof response?.runId === "string" ? 
response.runId : undefined; + if (responseRunId) { + runId = responseRunId; + } + + const waitMs = 30_000; + const wait = await callGateway<{ status?: string; error?: string }>({ + method: "agent.wait", + params: { runId, timeoutMs: waitMs }, + timeoutMs: waitMs + 2_000, + }); + if (wait?.status === "timeout") { + return { status: "timeout" as const, runId }; + } + if (wait?.status === "error") { + const waitError = typeof wait.error === "string" ? wait.error : "unknown error"; + return { status: "error" as const, runId, error: waitError }; + } + + const history = await callGateway<{ messages: Array }>({ + method: "chat.history", + params: { sessionKey: targetSessionKey, limit: 50 }, + }); + const filtered = stripToolMessages(Array.isArray(history?.messages) ? history.messages : []); + const last = filtered.length > 0 ? filtered[filtered.length - 1] : undefined; + const replyText = last ? extractAssistantText(last) : undefined; + return { status: "ok" as const, runId, replyText }; +} + +export function resolveControlledSubagentTarget( + runs: SubagentRunRecord[], + token: string | undefined, + options?: { recentMinutes?: number; isActive?: (entry: SubagentRunRecord) => boolean }, +): SubagentTargetResolution { + return resolveSubagentTargetFromRuns({ + runs, + token, + recentWindowMinutes: options?.recentMinutes ?? 
DEFAULT_RECENT_MINUTES, + label: (entry) => resolveSubagentLabel(entry), + isActive: options?.isActive, + errors: { + missingTarget: "Missing subagent target.", + invalidIndex: (value) => `Invalid subagent index: ${value}`, + unknownSession: (value) => `Unknown subagent session: ${value}`, + ambiguousLabel: (value) => `Ambiguous subagent label: ${value}`, + ambiguousLabelPrefix: (value) => `Ambiguous subagent label prefix: ${value}`, + ambiguousRunIdPrefix: (value) => `Ambiguous subagent run id prefix: ${value}`, + unknownTarget: (value) => `Unknown subagent target: ${value}`, + }, + }); +} diff --git a/src/agents/subagent-registry-queries.ts b/src/agents/subagent-registry-queries.ts index 7c40444d6f1..4ddf23bf2db 100644 --- a/src/agents/subagent-registry-queries.ts +++ b/src/agents/subagent-registry-queries.ts @@ -1,6 +1,10 @@ import type { DeliveryContext } from "../utils/delivery-context.js"; import type { SubagentRunRecord } from "./subagent-registry.types.js"; +function resolveControllerSessionKey(entry: SubagentRunRecord): string { + return entry.controllerSessionKey?.trim() || entry.requesterSessionKey; +} + export function findRunIdsByChildSessionKeyFromRuns( runs: Map, childSessionKey: string, @@ -51,6 +55,17 @@ export function listRunsForRequesterFromRuns( }); } +export function listRunsForControllerFromRuns( + runs: Map, + controllerSessionKey: string, +): SubagentRunRecord[] { + const key = controllerSessionKey.trim(); + if (!key) { + return []; + } + return [...runs.values()].filter((entry) => resolveControllerSessionKey(entry) === key); +} + function findLatestRunForChildSession( runs: Map, childSessionKey: string, @@ -104,9 +119,9 @@ export function shouldIgnorePostCompletionAnnounceForSessionFromRuns( export function countActiveRunsForSessionFromRuns( runs: Map, - requesterSessionKey: string, + controllerSessionKey: string, ): number { - const key = requesterSessionKey.trim(); + const key = controllerSessionKey.trim(); if (!key) { return 0; } @@ 
-123,7 +138,7 @@ export function countActiveRunsForSessionFromRuns( let count = 0; for (const entry of runs.values()) { - if (entry.requesterSessionKey !== key) { + if (resolveControllerSessionKey(entry) !== key) { continue; } if (typeof entry.endedAt !== "number") { diff --git a/src/agents/subagent-registry.persistence.test.ts b/src/agents/subagent-registry.persistence.test.ts index 468de55953c..32f2e06311e 100644 --- a/src/agents/subagent-registry.persistence.test.ts +++ b/src/agents/subagent-registry.persistence.test.ts @@ -343,6 +343,35 @@ describe("subagent registry persistence", () => { expect(afterSecond.runs["run-3"].cleanupCompletedAt).toBeDefined(); }); + it("retries cleanup announce after announce flow rejects", async () => { + const persisted = createPersistedEndedRun({ + runId: "run-reject", + childSessionKey: "agent:main:subagent:reject", + task: "reject announce", + cleanup: "keep", + }); + const registryPath = await writePersistedRegistry(persisted); + + announceSpy.mockRejectedValueOnce(new Error("announce boom")); + await restartRegistryAndFlush(); + + expect(announceSpy).toHaveBeenCalledTimes(1); + const afterFirst = JSON.parse(await fs.readFile(registryPath, "utf8")) as { + runs: Record; + }; + expect(afterFirst.runs["run-reject"].cleanupHandled).toBe(false); + expect(afterFirst.runs["run-reject"].cleanupCompletedAt).toBeUndefined(); + + announceSpy.mockResolvedValueOnce(true); + await restartRegistryAndFlush(); + + expect(announceSpy).toHaveBeenCalledTimes(2); + const afterSecond = JSON.parse(await fs.readFile(registryPath, "utf8")) as { + runs: Record; + }; + expect(afterSecond.runs["run-reject"].cleanupCompletedAt).toBeDefined(); + }); + it("keeps delete-mode runs retryable when announce is deferred", async () => { const persisted = createPersistedEndedRun({ runId: "run-4", diff --git a/src/agents/subagent-registry.ts b/src/agents/subagent-registry.ts index 9ef58933f35..d9c593c3e84 100644 --- a/src/agents/subagent-registry.ts +++ 
b/src/agents/subagent-registry.ts @@ -45,6 +45,7 @@ import { countPendingDescendantRunsExcludingRunFromRuns, countPendingDescendantRunsFromRuns, findRunIdsByChildSessionKeyFromRuns, + listRunsForControllerFromRuns, listDescendantRunsForRequesterFromRuns, listRunsForRequesterFromRuns, resolveRequesterForChildSessionFromRuns, @@ -533,6 +534,18 @@ function startSubagentAnnounceCleanupFlow(runId: string, entry: SubagentRunRecor return false; } const requesterOrigin = normalizeDeliveryContext(entry.requesterOrigin); + const finalizeAnnounceCleanup = (didAnnounce: boolean) => { + void finalizeSubagentCleanup(runId, entry.cleanup, didAnnounce).catch((err) => { + defaultRuntime.log(`[warn] subagent cleanup finalize failed (${runId}): ${String(err)}`); + const current = subagentRuns.get(runId); + if (!current || current.cleanupCompletedAt) { + return; + } + current.cleanupHandled = false; + persistSubagentRuns(); + }); + }; + void runSubagentAnnounceFlow({ childSessionKey: entry.childSessionKey, childRunId: entry.runId, @@ -554,13 +567,13 @@ function startSubagentAnnounceCleanupFlow(runId: string, entry: SubagentRunRecor wakeOnDescendantSettle: entry.wakeOnDescendantSettle === true, }) .then((didAnnounce) => { - void finalizeSubagentCleanup(runId, entry.cleanup, didAnnounce); + finalizeAnnounceCleanup(didAnnounce); }) .catch((error) => { defaultRuntime.log( `[warn] Subagent announce flow failed during cleanup for run ${runId}: ${String(error)}`, ); - void finalizeSubagentCleanup(runId, entry.cleanup, false); + finalizeAnnounceCleanup(false); }); return true; } @@ -1146,6 +1159,7 @@ export function replaceSubagentRunAfterSteer(params: { export function registerSubagentRun(params: { runId: string; childSessionKey: string; + controllerSessionKey?: string; requesterSessionKey: string; requesterOrigin?: DeliveryContext; requesterDisplayKey: string; @@ -1173,6 +1187,7 @@ export function registerSubagentRun(params: { subagentRuns.set(params.runId, { runId: params.runId, 
childSessionKey: params.childSessionKey, + controllerSessionKey: params.controllerSessionKey ?? params.requesterSessionKey, requesterSessionKey: params.requesterSessionKey, requesterOrigin, requesterDisplayKey: params.requesterDisplayKey, @@ -1419,6 +1434,13 @@ export function listSubagentRunsForRequester( return listRunsForRequesterFromRuns(subagentRuns, requesterSessionKey, options); } +export function listSubagentRunsForController(controllerSessionKey: string): SubagentRunRecord[] { + return listRunsForControllerFromRuns( + getSubagentRunsSnapshotForRead(subagentRuns), + controllerSessionKey, + ); +} + export function countActiveRunsForSession(requesterSessionKey: string): number { return countActiveRunsForSessionFromRuns( getSubagentRunsSnapshotForRead(subagentRuns), diff --git a/src/agents/subagent-registry.types.ts b/src/agents/subagent-registry.types.ts index a153ddbadd7..f5dc56775ae 100644 --- a/src/agents/subagent-registry.types.ts +++ b/src/agents/subagent-registry.types.ts @@ -6,6 +6,7 @@ import type { SpawnSubagentMode } from "./subagent-spawn.js"; export type SubagentRunRecord = { runId: string; childSessionKey: string; + controllerSessionKey?: string; requesterSessionKey: string; requesterOrigin?: DeliveryContext; requesterDisplayKey: string; diff --git a/src/agents/subagent-spawn.attachments.test.ts b/src/agents/subagent-spawn.attachments.test.ts index b564e77a906..9fe774fa284 100644 --- a/src/agents/subagent-spawn.attachments.test.ts +++ b/src/agents/subagent-spawn.attachments.test.ts @@ -1,6 +1,7 @@ +import fs from "node:fs"; import os from "node:os"; import path from "node:path"; -import { beforeEach, describe, expect, it, vi } from "vitest"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { resetSubagentRegistryForTests } from "./subagent-registry.js"; import { decodeStrictBase64, spawnSubagentDirect } from "./subagent-spawn.js"; @@ -31,6 +32,7 @@ let configOverride: Record = { }, }, }; +let workspaceDirOverride 
= ""; vi.mock("../config/config.js", async (importOriginal) => { const actual = await importOriginal(); @@ -61,7 +63,7 @@ vi.mock("./agent-scope.js", async (importOriginal) => { const actual = await importOriginal(); return { ...actual, - resolveAgentWorkspaceDir: () => path.join(os.tmpdir(), "agent-workspace"), + resolveAgentWorkspaceDir: () => workspaceDirOverride, }; }); @@ -145,6 +147,16 @@ describe("spawnSubagentDirect filename validation", () => { resetSubagentRegistryForTests(); callGatewayMock.mockClear(); setupGatewayMock(); + workspaceDirOverride = fs.mkdtempSync( + path.join(os.tmpdir(), `openclaw-subagent-attachments-${process.pid}-${Date.now()}-`), + ); + }); + + afterEach(() => { + if (workspaceDirOverride) { + fs.rmSync(workspaceDirOverride, { recursive: true, force: true }); + workspaceDirOverride = ""; + } }); const ctx = { @@ -210,4 +222,43 @@ describe("spawnSubagentDirect filename validation", () => { expect(result.status).toBe("error"); expect(result.error).toMatch(/attachments_invalid_name/); }); + + it("removes materialized attachments when lineage patching fails", async () => { + const calls: Array<{ method?: string; params?: Record }> = []; + callGatewayMock.mockImplementation(async (opts: unknown) => { + const request = opts as { method?: string; params?: Record }; + calls.push(request); + if (request.method === "sessions.patch" && typeof request.params?.spawnedBy === "string") { + throw new Error("lineage patch failed"); + } + if (request.method === "sessions.delete") { + return { ok: true }; + } + return {}; + }); + + const result = await spawnSubagentDirect( + { + task: "test", + attachments: [{ name: "file.txt", content: validContent, encoding: "base64" }], + }, + ctx, + ); + + expect(result).toMatchObject({ + status: "error", + error: "lineage patch failed", + }); + const attachmentsRoot = path.join(workspaceDirOverride, ".openclaw", "attachments"); + const retainedDirs = fs.existsSync(attachmentsRoot) + ? 
fs.readdirSync(attachmentsRoot).filter((entry) => !entry.startsWith(".")) + : []; + expect(retainedDirs).toHaveLength(0); + const deleteCall = calls.find((entry) => entry.method === "sessions.delete"); + expect(deleteCall?.params).toMatchObject({ + key: expect.stringMatching(/^agent:main:subagent:/), + deleteTranscript: true, + emitLifecycleHooks: false, + }); + }); }); diff --git a/src/agents/subagent-spawn.ts b/src/agents/subagent-spawn.ts index f2a63552189..1750d948e6c 100644 --- a/src/agents/subagent-spawn.ts +++ b/src/agents/subagent-spawn.ts @@ -27,6 +27,7 @@ import { materializeSubagentAttachments, type SubagentAttachmentReceiptFile, } from "./subagent-attachments.js"; +import { resolveSubagentCapabilities } from "./subagent-capabilities.js"; import { getSubagentDepthFromSessionStore } from "./subagent-depth.js"; import { countActiveRunsForSession, registerSubagentRun } from "./subagent-registry.js"; import { readStringParam } from "./tools/common.js"; @@ -152,6 +153,25 @@ async function cleanupProvisionalSession( } } +async function cleanupFailedSpawnBeforeAgentStart(params: { + childSessionKey: string; + attachmentAbsDir?: string; + emitLifecycleHooks?: boolean; + deleteTranscript?: boolean; +}): Promise { + if (params.attachmentAbsDir) { + try { + await fs.rm(params.attachmentAbsDir, { recursive: true, force: true }); + } catch { + // Best-effort cleanup only. 
+ } + } + await cleanupProvisionalSession(params.childSessionKey, { + emitLifecycleHooks: params.emitLifecycleHooks, + deleteTranscript: params.deleteTranscript, + }); +} + function resolveSpawnMode(params: { requestedMode?: SpawnSubagentMode; threadRequested: boolean; @@ -376,6 +396,10 @@ export async function spawnSubagentDirect( } const childDepth = callerDepth + 1; const spawnedByKey = requesterInternalKey; + const childCapabilities = resolveSubagentCapabilities({ + depth: childDepth, + maxSpawnDepth, + }); const targetAgentConfig = resolveAgentConfig(cfg, targetAgentId); const resolvedModel = resolveSubagentSpawnModelSelection({ cfg, @@ -414,7 +438,11 @@ export async function spawnSubagentDirect( } }; - const spawnDepthPatchError = await patchChildSession({ spawnDepth: childDepth }); + const spawnDepthPatchError = await patchChildSession({ + spawnDepth: childDepth, + subagentRole: childCapabilities.role === "main" ? null : childCapabilities.role, + subagentControlScope: childCapabilities.controlScope, + }); if (spawnDepthPatchError) { return { status: "error", @@ -548,14 +576,39 @@ export async function spawnSubagentDirect( ...toolSpawnMetadata, workspaceDir: resolveSpawnedWorkspaceInheritance({ config: cfg, - requesterSessionKey: requesterInternalKey, - explicitWorkspaceDir: toolSpawnMetadata.workspaceDir, + targetAgentId, + // For cross-agent spawns, ignore the caller's inherited workspace; + // let targetAgentId resolve the correct workspace instead. + explicitWorkspaceDir: + targetAgentId !== requesterAgentId ? undefined : toolSpawnMetadata.workspaceDir, }), }); + const spawnLineagePatchError = await patchChildSession({ + spawnedBy: spawnedByKey, + ...(spawnedMetadata.workspaceDir ? 
{ spawnedWorkspaceDir: spawnedMetadata.workspaceDir } : {}), + }); + if (spawnLineagePatchError) { + await cleanupFailedSpawnBeforeAgentStart({ + childSessionKey, + attachmentAbsDir, + emitLifecycleHooks: threadBindingReady, + deleteTranscript: true, + }); + return { + status: "error", + error: spawnLineagePatchError, + childSessionKey, + }; + } const childIdem = crypto.randomUUID(); let childRunId: string = childIdem; try { + const { + spawnedBy: _spawnedBy, + workspaceDir: _workspaceDir, + ...publicSpawnedMetadata + } = spawnedMetadata; const response = await callGateway<{ runId: string }>({ method: "agent", params: { @@ -572,7 +625,7 @@ export async function spawnSubagentDirect( thinking: thinkingOverride, timeout: runTimeoutSeconds, label: label || undefined, - ...spawnedMetadata, + ...publicSpawnedMetadata, }, timeoutMs: 10_000, }); @@ -643,6 +696,7 @@ export async function spawnSubagentDirect( registerSubagentRun({ runId: childRunId, childSessionKey, + controllerSessionKey: requesterInternalKey, requesterSessionKey: requesterInternalKey, requesterOrigin, requesterDisplayKey, diff --git a/src/agents/subagent-spawn.workspace.test.ts b/src/agents/subagent-spawn.workspace.test.ts new file mode 100644 index 00000000000..9955e587c89 --- /dev/null +++ b/src/agents/subagent-spawn.workspace.test.ts @@ -0,0 +1,173 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { spawnSubagentDirect } from "./subagent-spawn.js"; +import { installAcceptedSubagentGatewayMock } from "./test-helpers/subagent-gateway.js"; + +type TestAgentConfig = { + id?: string; + workspace?: string; + subagents?: { + allowAgents?: string[]; + }; +}; + +type TestConfig = { + agents?: { + list?: TestAgentConfig[]; + }; +}; + +const hoisted = vi.hoisted(() => ({ + callGatewayMock: vi.fn(), + configOverride: {} as Record, + registerSubagentRunMock: vi.fn(), +})); + +vi.mock("../gateway/call.js", () => ({ + callGateway: (opts: unknown) => hoisted.callGatewayMock(opts), +})); + 
+vi.mock("../config/config.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + loadConfig: () => hoisted.configOverride, + }; +}); + +vi.mock("@mariozechner/pi-ai/oauth", () => ({ + getOAuthApiKey: () => "", + getOAuthProviders: () => [], +})); + +vi.mock("./subagent-registry.js", () => ({ + countActiveRunsForSession: () => 0, + registerSubagentRun: (args: unknown) => hoisted.registerSubagentRunMock(args), +})); + +vi.mock("./subagent-announce.js", () => ({ + buildSubagentSystemPrompt: () => "system-prompt", +})); + +vi.mock("./subagent-depth.js", () => ({ + getSubagentDepthFromSessionStore: () => 0, +})); + +vi.mock("./model-selection.js", () => ({ + resolveSubagentSpawnModelSelection: () => undefined, +})); + +vi.mock("./sandbox/runtime-status.js", () => ({ + resolveSandboxRuntimeStatus: () => ({ sandboxed: false }), +})); + +vi.mock("../plugins/hook-runner-global.js", () => ({ + getGlobalHookRunner: () => ({ hasHooks: () => false }), +})); + +vi.mock("../utils/delivery-context.js", () => ({ + normalizeDeliveryContext: (value: unknown) => value, +})); + +vi.mock("./tools/sessions-helpers.js", () => ({ + resolveMainSessionAlias: () => ({ mainKey: "main", alias: "main" }), + resolveInternalSessionKey: ({ key }: { key?: string }) => key ?? "agent:main:main", + resolveDisplaySessionKey: ({ key }: { key?: string }) => key ?? "agent:main:main", +})); + +vi.mock("./agent-scope.js", () => ({ + resolveAgentConfig: (cfg: TestConfig, agentId: string) => + cfg.agents?.list?.find((entry) => entry.id === agentId), + resolveAgentWorkspaceDir: (cfg: TestConfig, agentId: string) => + cfg.agents?.list?.find((entry) => entry.id === agentId)?.workspace ?? 
+ `/tmp/workspace-${agentId}`, +})); + +function createConfigOverride(overrides?: Record) { + return { + session: { + mainKey: "main", + scope: "per-sender", + }, + agents: { + list: [ + { + id: "main", + workspace: "/tmp/workspace-main", + }, + ], + }, + ...overrides, + }; +} + +function setupGatewayMock() { + installAcceptedSubagentGatewayMock(hoisted.callGatewayMock); +} + +function getRegisteredRun() { + return hoisted.registerSubagentRunMock.mock.calls.at(0)?.[0] as + | Record + | undefined; +} + +async function expectAcceptedWorkspace(params: { agentId: string; expectedWorkspaceDir: string }) { + const result = await spawnSubagentDirect( + { + task: "inspect workspace", + agentId: params.agentId, + }, + { + agentSessionKey: "agent:main:main", + agentChannel: "telegram", + agentAccountId: "123", + agentTo: "456", + workspaceDir: "/tmp/requester-workspace", + }, + ); + + expect(result.status).toBe("accepted"); + expect(getRegisteredRun()).toMatchObject({ + workspaceDir: params.expectedWorkspaceDir, + }); +} + +describe("spawnSubagentDirect workspace inheritance", () => { + beforeEach(() => { + hoisted.callGatewayMock.mockClear(); + hoisted.registerSubagentRunMock.mockClear(); + hoisted.configOverride = createConfigOverride(); + setupGatewayMock(); + }); + + it("uses the target agent workspace for cross-agent spawns", async () => { + hoisted.configOverride = createConfigOverride({ + agents: { + list: [ + { + id: "main", + workspace: "/tmp/workspace-main", + subagents: { + allowAgents: ["ops"], + }, + }, + { + id: "ops", + workspace: "/tmp/workspace-ops", + }, + ], + }, + }); + + await expectAcceptedWorkspace({ + agentId: "ops", + expectedWorkspaceDir: "/tmp/workspace-ops", + }); + }); + + it("preserves the inherited workspace for same-agent spawns", async () => { + await expectAcceptedWorkspace({ + agentId: "main", + expectedWorkspaceDir: "/tmp/requester-workspace", + }); + }); +}); diff --git a/src/agents/system-prompt.ts b/src/agents/system-prompt.ts index 
a3d593ab6b8..848222b7880 100644 --- a/src/agents/system-prompt.ts +++ b/src/agents/system-prompt.ts @@ -464,6 +464,9 @@ export function buildAgentSystemPrompt(params: { "Keep narration brief and value-dense; avoid repeating obvious steps.", "Use plain human language for narration unless in a technical context.", "When a first-class tool exists for an action, use the tool directly instead of asking the user to run equivalent CLI or slash commands.", + "When exec returns approval-pending, include the concrete /approve command from tool output (with allow-once|allow-always|deny) and do not ask for a different or rotated code.", + "Treat allow-once as single-command only: if another elevated command needs approval, request a fresh /approve and do not claim prior approval covered it.", + "When approvals are required, preserve and show the full command/script exactly as provided (including chained operators like &&, ||, |, ;, or multiline shells) so the user can approve what will actually run.", "", ...safetySection, "## OpenClaw CLI Quick Reference", diff --git a/src/agents/test-helpers/pi-embedded-runner-e2e-fixtures.ts b/src/agents/test-helpers/pi-embedded-runner-e2e-fixtures.ts new file mode 100644 index 00000000000..1d987c44d1a --- /dev/null +++ b/src/agents/test-helpers/pi-embedded-runner-e2e-fixtures.ts @@ -0,0 +1,57 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import type { OpenClawConfig } from "../../config/config.js"; + +export type EmbeddedPiRunnerTestWorkspace = { + tempRoot: string; + agentDir: string; + workspaceDir: string; +}; + +export async function createEmbeddedPiRunnerTestWorkspace( + prefix: string, +): Promise { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), prefix)); + const agentDir = path.join(tempRoot, "agent"); + const workspaceDir = path.join(tempRoot, "workspace"); + await fs.mkdir(agentDir, { recursive: true }); + await fs.mkdir(workspaceDir, { recursive: true }); + return { 
tempRoot, agentDir, workspaceDir }; +} + +export async function cleanupEmbeddedPiRunnerTestWorkspace( + workspace: EmbeddedPiRunnerTestWorkspace | undefined, +): Promise { + if (!workspace) { + return; + } + await fs.rm(workspace.tempRoot, { recursive: true, force: true }); +} + +export function createEmbeddedPiRunnerOpenAiConfig(modelIds: string[]): OpenClawConfig { + return { + models: { + providers: { + openai: { + api: "openai-responses", + apiKey: "sk-test", + baseUrl: "https://example.com", + models: modelIds.map((id) => ({ + id, + name: `Mock ${id}`, + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 16_000, + maxTokens: 2048, + })), + }, + }, + }, + }; +} + +export async function immediateEnqueue(task: () => Promise): Promise { + return await task(); +} diff --git a/src/agents/test-helpers/subagent-gateway.ts b/src/agents/test-helpers/subagent-gateway.ts new file mode 100644 index 00000000000..9491d971c33 --- /dev/null +++ b/src/agents/test-helpers/subagent-gateway.ts @@ -0,0 +1,9 @@ +export function installAcceptedSubagentGatewayMock(mock: { + mockImplementation: ( + impl: (opts: { method?: string; params?: unknown }) => Promise, + ) => unknown; +}) { + mock.mockImplementation(async ({ method }) => + method === "agent" ? { runId: "run-1" } : method?.startsWith("sessions.") ? 
{ ok: true } : {}, + ); +} diff --git a/src/agents/tool-catalog.test.ts b/src/agents/tool-catalog.test.ts new file mode 100644 index 00000000000..120a744432c --- /dev/null +++ b/src/agents/tool-catalog.test.ts @@ -0,0 +1,11 @@ +import { describe, expect, it } from "vitest"; +import { resolveCoreToolProfilePolicy } from "./tool-catalog.js"; + +describe("tool-catalog", () => { + it("includes web_search and web_fetch in the coding profile policy", () => { + const policy = resolveCoreToolProfilePolicy("coding"); + expect(policy).toBeDefined(); + expect(policy!.allow).toContain("web_search"); + expect(policy!.allow).toContain("web_fetch"); + }); +}); diff --git a/src/agents/tool-catalog.ts b/src/agents/tool-catalog.ts index bbada8e7bc9..445cdc5f10b 100644 --- a/src/agents/tool-catalog.ts +++ b/src/agents/tool-catalog.ts @@ -86,7 +86,7 @@ const CORE_TOOL_DEFINITIONS: CoreToolDefinition[] = [ label: "web_search", description: "Search the web", sectionId: "web", - profiles: [], + profiles: ["coding"], includeInOpenClawGroup: true, }, { @@ -94,7 +94,7 @@ const CORE_TOOL_DEFINITIONS: CoreToolDefinition[] = [ label: "web_fetch", description: "Fetch web content", sectionId: "web", - profiles: [], + profiles: ["coding"], includeInOpenClawGroup: true, }, { @@ -145,6 +145,14 @@ const CORE_TOOL_DEFINITIONS: CoreToolDefinition[] = [ profiles: ["coding"], includeInOpenClawGroup: true, }, + { + id: "sessions_yield", + label: "sessions_yield", + description: "End turn to receive sub-agent results", + sectionId: "sessions", + profiles: ["coding"], + includeInOpenClawGroup: true, + }, { id: "subagents", label: "subagents", diff --git a/src/agents/tool-display-common.ts b/src/agents/tool-display-common.ts index a7564c98052..f5d231fd898 100644 --- a/src/agents/tool-display-common.ts +++ b/src/agents/tool-display-common.ts @@ -1081,9 +1081,10 @@ export function resolveExecDetail(args: unknown): string | undefined { const displaySummary = cwd ? 
`${summary} (in ${cwd})` : summary; - // Append the raw command when the summary differs meaningfully from the command itself. + // Keep the raw command inline so chat surfaces do not break "Exec:" onto a + // separate paragraph/code block. if (compact && compact !== displaySummary && compact !== summary) { - return `${displaySummary}\n\n\`${compact}\``; + return `${displaySummary} · \`${compact}\``; } return displaySummary; diff --git a/src/agents/tool-display.test.ts b/src/agents/tool-display.test.ts index b41db4d0552..19ef7652ffb 100644 --- a/src/agents/tool-display.test.ts +++ b/src/agents/tool-display.test.ts @@ -112,9 +112,7 @@ describe("tool display details", () => { }), ); - expect(detail).toBe( - "install dependencies (in ~/my-project)\n\n`cd ~/my-project && npm install`", - ); + expect(detail).toBe("install dependencies (in ~/my-project), `cd ~/my-project && npm install`"); }); it("moves cd path to context suffix with multiple stages and raw command", () => { @@ -126,7 +124,7 @@ describe("tool display details", () => { ); expect(detail).toBe( - "install dependencies → run tests (in ~/my-project)\n\n`cd ~/my-project && npm install && npm test`", + "install dependencies → run tests (in ~/my-project), `cd ~/my-project && npm install && npm test`", ); }); @@ -138,7 +136,7 @@ describe("tool display details", () => { }), ); - expect(detail).toBe("check git status (in /tmp)\n\n`pushd /tmp && git status`"); + expect(detail).toBe("check git status (in /tmp), `pushd /tmp && git status`"); }); it("clears inferred cwd when popd is stripped from preamble", () => { @@ -149,7 +147,7 @@ describe("tool display details", () => { }), ); - expect(detail).toBe("install dependencies\n\n`pushd /tmp && popd && npm install`"); + expect(detail).toBe("install dependencies, `pushd /tmp && popd && npm install`"); }); it("moves cd path to context suffix with || separator", () => { @@ -173,7 +171,7 @@ describe("tool display details", () => { }), ); - expect(detail).toBe("install 
dependencies (in /app)\n\n`cd /tmp && npm install`"); + expect(detail).toBe("install dependencies (in /app), `cd /tmp && npm install`"); }); it("summarizes all stages and appends raw command", () => { @@ -185,7 +183,7 @@ describe("tool display details", () => { ); expect(detail).toBe( - "fetch git changes → rebase git branch\n\n`git fetch && git rebase origin/main`", + "fetch git changes → rebase git branch, `git fetch && git rebase origin/main`", ); }); diff --git a/src/agents/tool-policy-pipeline.test.ts b/src/agents/tool-policy-pipeline.test.ts index 9d0a9d5846f..70d4301d42a 100644 --- a/src/agents/tool-policy-pipeline.test.ts +++ b/src/agents/tool-policy-pipeline.test.ts @@ -45,6 +45,31 @@ describe("tool-policy-pipeline", () => { expect(warnings[0]).toContain("unknown entries (wat)"); }); + test("warns gated core tools as unavailable instead of plugin-only unknowns", () => { + const warnings: string[] = []; + const tools = [{ name: "exec" }] as unknown as DummyTool[]; + applyToolPolicyPipeline({ + // oxlint-disable-next-line typescript/no-explicit-any + tools: tools as any, + // oxlint-disable-next-line typescript/no-explicit-any + toolMeta: () => undefined, + warn: (msg) => warnings.push(msg), + steps: [ + { + policy: { allow: ["apply_patch"] }, + label: "tools.profile (coding)", + stripPluginOnlyAllowlist: true, + }, + ], + }); + expect(warnings.length).toBe(1); + expect(warnings[0]).toContain("unknown entries (apply_patch)"); + expect(warnings[0]).toContain( + "shipped core tools but unavailable in the current runtime/provider/model/config", + ); + expect(warnings[0]).not.toContain("unless the plugin is enabled"); + }); + test("applies allowlist filtering when core tools are explicitly listed", () => { const tools = [{ name: "exec" }, { name: "process" }] as unknown as DummyTool[]; const filtered = applyToolPolicyPipeline({ diff --git a/src/agents/tool-policy-pipeline.ts b/src/agents/tool-policy-pipeline.ts index d3304a020d6..70a7bddaf29 100644 --- 
a/src/agents/tool-policy-pipeline.ts +++ b/src/agents/tool-policy-pipeline.ts @@ -1,5 +1,6 @@ import { filterToolsByPolicy } from "./pi-tools.policy.js"; import type { AnyAgentTool } from "./pi-tools.types.js"; +import { isKnownCoreToolId } from "./tool-catalog.js"; import { buildPluginToolGroups, expandPolicyWithPluginGroups, @@ -91,9 +92,15 @@ export function applyToolPolicyPipeline(params: { const resolved = stripPluginOnlyAllowlist(policy, pluginGroups, coreToolNames); if (resolved.unknownAllowlist.length > 0) { const entries = resolved.unknownAllowlist.join(", "); - const suffix = resolved.strippedAllowlist - ? "Ignoring allowlist so core tools remain available. Use tools.alsoAllow for additive plugin tool enablement." - : "These entries won't match any tool unless the plugin is enabled."; + const gatedCoreEntries = resolved.unknownAllowlist.filter((entry) => + isKnownCoreToolId(entry), + ); + const otherEntries = resolved.unknownAllowlist.filter((entry) => !isKnownCoreToolId(entry)); + const suffix = describeUnknownAllowlistSuffix({ + strippedAllowlist: resolved.strippedAllowlist, + hasGatedCoreEntries: gatedCoreEntries.length > 0, + hasOtherEntries: otherEntries.length > 0, + }); params.warn( `tools: ${step.label} allowlist contains unknown entries (${entries}). ${suffix}`, ); @@ -106,3 +113,20 @@ export function applyToolPolicyPipeline(params: { } return filtered; } + +function describeUnknownAllowlistSuffix(params: { + strippedAllowlist: boolean; + hasGatedCoreEntries: boolean; + hasOtherEntries: boolean; +}): string { + const preface = params.strippedAllowlist + ? "Ignoring allowlist so core tools remain available." + : ""; + const detail = + params.hasGatedCoreEntries && params.hasOtherEntries + ? "Some entries are shipped core tools but unavailable in the current runtime/provider/model/config; other entries won't match any tool unless the plugin is enabled." + : params.hasGatedCoreEntries + ? 
"These entries are shipped core tools but unavailable in the current runtime/provider/model/config." + : "These entries won't match any tool unless the plugin is enabled."; + return preface ? `${preface} ${detail}` : detail; +} diff --git a/src/agents/tool-policy.test.ts b/src/agents/tool-policy.test.ts index 9a9f512189b..963c703a409 100644 --- a/src/agents/tool-policy.test.ts +++ b/src/agents/tool-policy.test.ts @@ -80,6 +80,7 @@ describe("tool-policy", () => { expect(isOwnerOnlyToolName("whatsapp_login")).toBe(true); expect(isOwnerOnlyToolName("cron")).toBe(true); expect(isOwnerOnlyToolName("gateway")).toBe(true); + expect(isOwnerOnlyToolName("nodes")).toBe(true); expect(isOwnerOnlyToolName("read")).toBe(false); }); @@ -107,6 +108,27 @@ describe("tool-policy", () => { expect(applyOwnerOnlyToolPolicy(tools, false)).toEqual([]); expect(applyOwnerOnlyToolPolicy(tools, true)).toHaveLength(1); }); + + it("strips nodes for non-owner senders via fallback policy", () => { + const tools = [ + { + name: "read", + // oxlint-disable-next-line typescript/no-explicit-any + execute: async () => ({ content: [], details: {} }) as any, + }, + { + name: "nodes", + // oxlint-disable-next-line typescript/no-explicit-any + execute: async () => ({ content: [], details: {} }) as any, + }, + ] as unknown as AnyAgentTool[]; + + expect(applyOwnerOnlyToolPolicy(tools, false).map((tool) => tool.name)).toEqual(["read"]); + expect(applyOwnerOnlyToolPolicy(tools, true).map((tool) => tool.name)).toEqual([ + "read", + "nodes", + ]); + }); }); describe("TOOL_POLICY_CONFORMANCE", () => { diff --git a/src/agents/tool-policy.ts b/src/agents/tool-policy.ts index 188a9c3361c..5538fb765ce 100644 --- a/src/agents/tool-policy.ts +++ b/src/agents/tool-policy.ts @@ -28,7 +28,12 @@ function wrapOwnerOnlyToolExecution(tool: AnyAgentTool, senderIsOwner: boolean): }; } -const OWNER_ONLY_TOOL_NAME_FALLBACKS = new Set(["whatsapp_login", "cron", "gateway"]); +const OWNER_ONLY_TOOL_NAME_FALLBACKS = new Set([ + 
"whatsapp_login", + "cron", + "gateway", + "nodes", +]); export function isOwnerOnlyToolName(name: string) { return OWNER_ONLY_TOOL_NAME_FALLBACKS.has(normalizeToolName(name)); diff --git a/src/agents/tools/browser-tool.actions.ts b/src/agents/tools/browser-tool.actions.ts index 673585d16b3..a4b6cb456af 100644 --- a/src/agents/tools/browser-tool.actions.ts +++ b/src/agents/tools/browser-tool.actions.ts @@ -54,8 +54,27 @@ function formatTabsToolResult(tabs: unknown[]): AgentToolResult { }; } +function formatConsoleToolResult(result: { + targetId?: string; + messages?: unknown[]; +}): AgentToolResult { + const wrapped = wrapBrowserExternalJson({ + kind: "console", + payload: result, + includeWarning: false, + }); + return { + content: [{ type: "text" as const, text: wrapped.wrappedText }], + details: { + ...wrapped.safeDetails, + targetId: typeof result.targetId === "string" ? result.targetId : undefined, + messageCount: Array.isArray(result.messages) ? result.messages.length : undefined, + }, + }; +} + function isChromeStaleTargetError(profile: string | undefined, err: unknown): boolean { - if (profile !== "chrome") { + if (profile !== "chrome-relay" && profile !== "chrome") { return false; } const msg = String(err); @@ -258,34 +277,10 @@ export async function executeConsoleAction(params: { targetId, }, })) as { ok?: boolean; targetId?: string; messages?: unknown[] }; - const wrapped = wrapBrowserExternalJson({ - kind: "console", - payload: result, - includeWarning: false, - }); - return { - content: [{ type: "text" as const, text: wrapped.wrappedText }], - details: { - ...wrapped.safeDetails, - targetId: typeof result.targetId === "string" ? result.targetId : undefined, - messageCount: Array.isArray(result.messages) ? 
result.messages.length : undefined, - }, - }; + return formatConsoleToolResult(result); } const result = await browserConsoleMessages(baseUrl, { level, targetId, profile }); - const wrapped = wrapBrowserExternalJson({ - kind: "console", - payload: result, - includeWarning: false, - }); - return { - content: [{ type: "text" as const, text: wrapped.wrappedText }], - details: { - ...wrapped.safeDetails, - targetId: result.targetId, - messageCount: result.messages.length, - }, - }; + return formatConsoleToolResult(result); } export async function executeActAction(params: { @@ -345,7 +340,7 @@ export async function executeActAction(params: { ); } throw new Error( - `Chrome tab not found (stale targetId?). Run action=tabs profile="chrome" and use one of the returned targetIds.`, + `Chrome tab not found (stale targetId?). Run action=tabs profile="chrome-relay" and use one of the returned targetIds.`, { cause: err }, ); } diff --git a/src/agents/tools/browser-tool.test.ts b/src/agents/tools/browser-tool.test.ts index 81996afb419..adaaea78221 100644 --- a/src/agents/tools/browser-tool.test.ts +++ b/src/agents/tools/browser-tool.test.ts @@ -54,7 +54,45 @@ const browserConfigMocks = vi.hoisted(() => ({ resolveBrowserConfig: vi.fn(() => ({ enabled: true, controlPort: 18791, + profiles: {}, + defaultProfile: "openclaw", })), + resolveProfile: vi.fn((resolved: Record, name: string) => { + const profile = (resolved.profiles as Record> | undefined)?.[ + name + ]; + if (!profile) { + return null; + } + const driver = + profile.driver === "extension" + ? "extension" + : profile.driver === "existing-session" + ? "existing-session" + : "openclaw"; + if (driver === "existing-session") { + return { + name, + driver, + cdpPort: 0, + cdpUrl: "", + cdpHost: "", + cdpIsLoopback: true, + color: typeof profile.color === "string" ? profile.color : "#FF4500", + attachOnly: true, + }; + } + return { + name, + driver, + cdpPort: typeof profile.cdpPort === "number" ? 
profile.cdpPort : 18792, + cdpUrl: typeof profile.cdpUrl === "string" ? profile.cdpUrl : "http://127.0.0.1:18792", + cdpHost: "127.0.0.1", + cdpIsLoopback: true, + color: typeof profile.color === "string" ? profile.color : "#FF4500", + attachOnly: profile.attachOnly === true, + }; + }), })); vi.mock("../../browser/config.js", () => browserConfigMocks); @@ -117,9 +155,27 @@ function mockSingleBrowserProxyNode() { function resetBrowserToolMocks() { vi.clearAllMocks(); configMocks.loadConfig.mockReturnValue({ browser: {} }); + browserConfigMocks.resolveBrowserConfig.mockReturnValue({ + enabled: true, + controlPort: 18791, + profiles: {}, + defaultProfile: "openclaw", + }); nodesUtilsMocks.listNodes.mockResolvedValue([]); } +function setResolvedBrowserProfiles( + profiles: Record>, + defaultProfile = "openclaw", +) { + browserConfigMocks.resolveBrowserConfig.mockReturnValue({ + enabled: true, + controlPort: 18791, + profiles, + defaultProfile, + }); +} + function registerBrowserToolAfterEachReset() { afterEach(() => { resetBrowserToolMocks(); @@ -231,26 +287,91 @@ describe("browser tool snapshot maxChars", () => { expect(opts?.mode).toBeUndefined(); }); - it("defaults to host when using profile=chrome (even in sandboxed sessions)", async () => { + it("defaults to host when using profile=chrome-relay (even in sandboxed sessions)", async () => { + setResolvedBrowserProfiles({ + "chrome-relay": { + driver: "extension", + cdpUrl: "http://127.0.0.1:18792", + color: "#0066CC", + }, + }); const tool = createBrowserTool({ sandboxBridgeUrl: "http://127.0.0.1:9999" }); - await tool.execute?.("call-1", { action: "snapshot", profile: "chrome", snapshotFormat: "ai" }); + await tool.execute?.("call-1", { + action: "snapshot", + profile: "chrome-relay", + snapshotFormat: "ai", + }); expect(browserClientMocks.browserSnapshot).toHaveBeenCalledWith( undefined, expect.objectContaining({ - profile: "chrome", + profile: "chrome-relay", }), ); }); - it("lets the server choose snapshot 
format when the user does not request one", async () => { - const tool = createBrowserTool(); - await tool.execute?.("call-1", { action: "snapshot", profile: "chrome" }); + it("defaults to host when using profile=user (even in sandboxed sessions)", async () => { + setResolvedBrowserProfiles({ + user: { driver: "existing-session", attachOnly: true, color: "#00AA00" }, + }); + const tool = createBrowserTool({ sandboxBridgeUrl: "http://127.0.0.1:9999" }); + await tool.execute?.("call-1", { + action: "snapshot", + profile: "user", + snapshotFormat: "ai", + }); expect(browserClientMocks.browserSnapshot).toHaveBeenCalledWith( undefined, expect.objectContaining({ - profile: "chrome", + profile: "user", + }), + ); + }); + + it("defaults to host for custom existing-session profiles too", async () => { + setResolvedBrowserProfiles({ + "chrome-live": { driver: "existing-session", attachOnly: true, color: "#00AA00" }, + }); + const tool = createBrowserTool({ sandboxBridgeUrl: "http://127.0.0.1:9999" }); + await tool.execute?.("call-1", { + action: "snapshot", + profile: "chrome-live", + snapshotFormat: "ai", + }); + + expect(browserClientMocks.browserSnapshot).toHaveBeenCalledWith( + undefined, + expect.objectContaining({ + profile: "chrome-live", + }), + ); + }); + + it('rejects profile="user" with target="sandbox"', async () => { + setResolvedBrowserProfiles({ + user: { driver: "existing-session", attachOnly: true, color: "#00AA00" }, + }); + const tool = createBrowserTool({ sandboxBridgeUrl: "http://127.0.0.1:9999" }); + + await expect( + tool.execute?.("call-1", { + action: "snapshot", + profile: "user", + target: "sandbox", + snapshotFormat: "ai", + }), + ).rejects.toThrow(/profile="user" cannot use the sandbox browser/i); + }); + + it("lets the server choose snapshot format when the user does not request one", async () => { + const tool = createBrowserTool(); + await tool.execute?.("call-1", { action: "snapshot", profile: "chrome-relay" }); + + 
expect(browserClientMocks.browserSnapshot).toHaveBeenCalledWith( + undefined, + expect.objectContaining({ + profile: "chrome-relay", }), ); const opts = browserClientMocks.browserSnapshot.mock.calls.at(-1)?.[1] as @@ -317,14 +438,21 @@ describe("browser tool snapshot maxChars", () => { expect(gatewayMocks.callGatewayTool).not.toHaveBeenCalled(); }); - it("keeps chrome profile on host when node proxy is available", async () => { + it("keeps chrome-relay profile on host when node proxy is available", async () => { mockSingleBrowserProxyNode(); + setResolvedBrowserProfiles({ + "chrome-relay": { + driver: "extension", + cdpUrl: "http://127.0.0.1:18792", + color: "#0066CC", + }, + }); const tool = createBrowserTool(); - await tool.execute?.("call-1", { action: "status", profile: "chrome" }); + await tool.execute?.("call-1", { action: "status", profile: "chrome-relay" }); expect(browserClientMocks.browserStatus).toHaveBeenCalledWith( undefined, - expect.objectContaining({ profile: "chrome" }), + expect.objectContaining({ profile: "chrome-relay" }), ); expect(gatewayMocks.callGatewayTool).not.toHaveBeenCalled(); }); @@ -617,7 +745,7 @@ describe("browser tool external content wrapping", () => { describe("browser tool act stale target recovery", () => { registerBrowserToolAfterEachReset(); - it("retries safe chrome act once without targetId when exactly one tab remains", async () => { + it("retries safe chrome-relay act once without targetId when exactly one tab remains", async () => { browserActionsMocks.browserAct .mockRejectedValueOnce(new Error("404: tab not found")) .mockResolvedValueOnce({ ok: true }); @@ -626,7 +754,7 @@ describe("browser tool act stale target recovery", () => { const tool = createBrowserTool(); const result = await tool.execute?.("call-1", { action: "act", - profile: "chrome", + profile: "chrome-relay", request: { kind: "hover", targetId: "stale-tab", @@ -639,18 +767,18 @@ describe("browser tool act stale target recovery", () => { 1, undefined, 
expect.objectContaining({ targetId: "stale-tab", kind: "hover", ref: "btn-1" }), - expect.objectContaining({ profile: "chrome" }), + expect.objectContaining({ profile: "chrome-relay" }), ); expect(browserActionsMocks.browserAct).toHaveBeenNthCalledWith( 2, undefined, expect.not.objectContaining({ targetId: expect.anything() }), - expect.objectContaining({ profile: "chrome" }), + expect.objectContaining({ profile: "chrome-relay" }), ); expect(result?.details).toMatchObject({ ok: true }); }); - it("does not retry mutating chrome act requests without targetId", async () => { + it("does not retry mutating chrome-relay act requests without targetId", async () => { browserActionsMocks.browserAct.mockRejectedValueOnce(new Error("404: tab not found")); browserClientMocks.browserTabs.mockResolvedValueOnce([{ targetId: "only-tab" }]); @@ -658,14 +786,14 @@ describe("browser tool act stale target recovery", () => { await expect( tool.execute?.("call-1", { action: "act", - profile: "chrome", + profile: "chrome-relay", request: { kind: "click", targetId: "stale-tab", ref: "btn-1", }, }), - ).rejects.toThrow(/Run action=tabs profile="chrome"/i); + ).rejects.toThrow(/Run action=tabs profile="chrome-relay"/i); expect(browserActionsMocks.browserAct).toHaveBeenCalledTimes(1); }); diff --git a/src/agents/tools/browser-tool.ts b/src/agents/tools/browser-tool.ts index 200013ff1a7..8cb57435100 100644 --- a/src/agents/tools/browser-tool.ts +++ b/src/agents/tools/browser-tool.ts @@ -16,8 +16,9 @@ import { browserStatus, browserStop, } from "../../browser/client.js"; -import { resolveBrowserConfig } from "../../browser/config.js"; +import { resolveBrowserConfig, resolveProfile } from "../../browser/config.js"; import { DEFAULT_UPLOAD_DIR, resolveExistingPathsWithinRoot } from "../../browser/paths.js"; +import { getBrowserProfileCapabilities } from "../../browser/profile-capabilities.js"; import { applyBrowserProxyPaths, persistBrowserProxyFiles } from "../../browser/proxy-files.js"; import 
{ trackSessionBrowserTab, @@ -278,6 +279,24 @@ function resolveBrowserBaseUrl(params: { return undefined; } +function shouldPreferHostForProfile(profileName: string | undefined) { + if (!profileName) { + return false; + } + const cfg = loadConfig(); + const resolved = resolveBrowserConfig(cfg.browser, cfg); + const profile = resolveProfile(resolved, profileName); + if (!profile) { + return false; + } + const capabilities = getBrowserProfileCapabilities(profile); + return capabilities.requiresRelay || capabilities.usesChromeMcp; +} + +function isHostOnlyProfileName(profileName: string | undefined) { + return profileName === "user" || profileName === "chrome-relay"; +} + export function createBrowserTool(opts?: { sandboxBridgeUrl?: string; allowHostControl?: boolean; @@ -291,10 +310,12 @@ export function createBrowserTool(opts?: { name: "browser", description: [ "Control the browser via OpenClaw's browser control server (status/start/stop/profiles/tabs/open/snapshot/screenshot/actions).", - 'Profiles: use profile="chrome" for Chrome extension relay takeover (your existing Chrome tabs). Use profile="openclaw" for the isolated openclaw-managed browser.', - 'If the user mentions the Chrome extension / Browser Relay / toolbar button / “attach tab”, ALWAYS use profile="chrome" (do not ask which profile).', + "Browser choice: omit profile by default for the isolated OpenClaw-managed browser (`openclaw`).", + 'For the logged-in user browser on the local host, prefer profile="user". Use it only when existing logins/cookies matter and the user is present to click/approve any browser attach prompt.', + 'Use profile="chrome-relay" only for the Chrome extension / Browser Relay / toolbar-button attach-tab flow, or when the user explicitly asks for the extension relay.', + 'If the user mentions the Chrome extension / Browser Relay / toolbar button / “attach tab”, ALWAYS prefer profile="chrome-relay". 
Otherwise prefer profile="user" over the extension relay for user-browser work.', 'When a node-hosted browser proxy is available, the tool may auto-route to it. Pin a node with node= or target="node".', - "Chrome extension relay needs an attached tab: user must click the OpenClaw Browser Relay toolbar icon on the tab (badge ON). If no tab is connected, ask them to attach it.", + 'User-browser flows need user interaction: profile="user" may require approving a browser attach prompt; profile="chrome-relay" needs the user to click the OpenClaw Browser Relay toolbar icon on the tab (badge ON). If user presence is unclear, ask first.', "When using refs from snapshot (e.g. e12), keep the same tab: prefer passing targetId from the snapshot response into subsequent actions (act/click/type/etc).", 'For stable, self-resolving refs across calls, use snapshot with refs="aria" (Playwright aria-ref ids). Default refs="role" are role+name-based.', "Use snapshot+act for UI automation. Avoid act:wait by default; use only in exceptional cases when no reliable UI state exists.", @@ -312,9 +333,18 @@ export function createBrowserTool(opts?: { if (requestedNode && target && target !== "node") { throw new Error('node is only supported with target="node".'); } - - if (!target && !requestedNode && profile === "chrome") { - // Chrome extension relay takeover is a host Chrome feature; prefer host unless explicitly targeting a node. + if (isHostOnlyProfileName(profile)) { + if (requestedNode || target === "node") { + throw new Error(`profile="${profile}" only supports the local host browser.`); + } + if (target === "sandbox") { + throw new Error( + `profile="${profile}" cannot use the sandbox browser; use target="host" or omit target.`, + ); + } + } + if (!target && !requestedNode && shouldPreferHostForProfile(profile)) { + // Local host user-browser profiles should not silently bind to sandbox/node browsers. 
target = "host"; } diff --git a/src/agents/tools/common.ts b/src/agents/tools/common.ts index 19cca2d7927..81d3f4efc00 100644 --- a/src/agents/tools/common.ts +++ b/src/agents/tools/common.ts @@ -1,6 +1,7 @@ import fs from "node:fs/promises"; import type { AgentTool, AgentToolResult } from "@mariozechner/pi-agent-core"; import { detectMime } from "../../media/mime.js"; +import { readSnakeCaseParamRaw } from "../../param-key.js"; import type { ImageSanitizationLimits } from "../image-sanitization.js"; import { sanitizeToolResultImages } from "../tool-images.js"; @@ -53,22 +54,8 @@ export function createActionGate>( }; } -function toSnakeCaseKey(key: string): string { - return key - .replace(/([A-Z]+)([A-Z][a-z])/g, "$1_$2") - .replace(/([a-z0-9])([A-Z])/g, "$1_$2") - .toLowerCase(); -} - function readParamRaw(params: Record, key: string): unknown { - if (Object.hasOwn(params, key)) { - return params[key]; - } - const snakeKey = toSnakeCaseKey(key); - if (snakeKey !== key && Object.hasOwn(params, snakeKey)) { - return params[snakeKey]; - } - return undefined; + return readSnakeCaseParamRaw(params, key); } export function readStringParam( diff --git a/src/agents/tools/cron-tool.ts b/src/agents/tools/cron-tool.ts index 14df6901024..2976dee3924 100644 --- a/src/agents/tools/cron-tool.ts +++ b/src/agents/tools/cron-tool.ts @@ -230,11 +230,22 @@ JOB SCHEMA (for add action): "name": "string (optional)", "schedule": { ... }, // Required: when to run "payload": { ... }, // Required: what to execute - "delivery": { ... }, // Optional: announce summary or webhook POST - "sessionTarget": "main" | "isolated", // Required + "delivery": { ... 
}, // Optional: announce summary (isolated/current/session:xxx only) or webhook POST + "sessionTarget": "main" | "isolated" | "current" | "session:", // Optional, defaults based on context "enabled": true | false // Optional, default true } +SESSION TARGET OPTIONS: +- "main": Run in the main session (requires payload.kind="systemEvent") +- "isolated": Run in an ephemeral isolated session (requires payload.kind="agentTurn") +- "current": Bind to the current session where the cron is created (resolved at creation time) +- "session:": Run in a persistent named session (e.g., "session:project-alpha-daily") + +DEFAULT BEHAVIOR (unchanged for backward compatibility): +- payload.kind="systemEvent" → defaults to "main" +- payload.kind="agentTurn" → defaults to "isolated" +To use current session binding, explicitly set sessionTarget="current". + SCHEDULE TYPES (schedule.kind): - "at": One-shot at absolute time { "kind": "at", "at": "" } @@ -260,9 +271,9 @@ DELIVERY (top-level): CRITICAL CONSTRAINTS: - sessionTarget="main" REQUIRES payload.kind="systemEvent" -- sessionTarget="isolated" REQUIRES payload.kind="agentTurn" +- sessionTarget="isolated" | "current" | "session:xxx" REQUIRES payload.kind="agentTurn" - For webhook callbacks, use delivery.mode="webhook" with delivery.to set to a URL. -Default: prefer isolated agentTurn jobs unless the user explicitly wants a main-session system event. +Default: prefer isolated agentTurn jobs unless the user explicitly wants current-session binding. WAKE MODES (for wake action): - "next-heartbeat" (default): Wake on next heartbeat @@ -346,7 +357,10 @@ Use jobId as the canonical identifier; id is accepted for compatibility. Use con if (!params.job || typeof params.job !== "object") { throw new Error("job required"); } - const job = normalizeCronJobCreate(params.job) ?? params.job; + const job = + normalizeCronJobCreate(params.job, { + sessionContext: { sessionKey: opts?.agentSessionKey }, + }) ?? 
params.job; if (job && typeof job === "object") { const cfg = loadConfig(); const { mainKey, alias } = resolveMainSessionAlias(cfg); diff --git a/src/agents/tools/discord-actions-guild.ts b/src/agents/tools/discord-actions-guild.ts index 5fb10c87820..ba0ba300985 100644 --- a/src/agents/tools/discord-actions-guild.ts +++ b/src/agents/tools/discord-actions-guild.ts @@ -60,6 +60,13 @@ async function runRoleMutation(params: { await params.mutate({ guildId, userId, roleId }); } +function readChannelPermissionTarget(params: Record) { + return { + channelId: readStringParam(params, "channelId", { required: true }), + targetId: readStringParam(params, "targetId", { required: true }), + }; +} + export async function handleDiscordGuildAction( action: string, params: Record, @@ -453,10 +460,7 @@ export async function handleDiscordGuildAction( if (!isActionEnabled("channels")) { throw new Error("Discord channel management is disabled."); } - const channelId = readStringParam(params, "channelId", { - required: true, - }); - const targetId = readStringParam(params, "targetId", { required: true }); + const { channelId, targetId } = readChannelPermissionTarget(params); const targetTypeRaw = readStringParam(params, "targetType", { required: true, }); @@ -489,10 +493,7 @@ export async function handleDiscordGuildAction( if (!isActionEnabled("channels")) { throw new Error("Discord channel management is disabled."); } - const channelId = readStringParam(params, "channelId", { - required: true, - }); - const targetId = readStringParam(params, "targetId", { required: true }); + const { channelId, targetId } = readChannelPermissionTarget(params); if (accountId) { await removeChannelPermissionDiscord(channelId, targetId, { accountId }); } else { diff --git a/src/agents/tools/image-tool.test.ts b/src/agents/tools/image-tool.test.ts index 78a7754e84a..bcec7f32de7 100644 --- a/src/agents/tools/image-tool.test.ts +++ b/src/agents/tools/image-tool.test.ts @@ -48,6 +48,19 @@ async function 
withTempWorkspacePng( } } +function registerImageToolEnvReset(priorFetch: typeof global.fetch, keys: string[]) { + beforeEach(() => { + for (const key of keys) { + vi.stubEnv(key, ""); + } + }); + + afterEach(() => { + vi.unstubAllEnvs(); + global.fetch = priorFetch; + }); +} + function stubMinimaxOkFetch() { const fetch = vi.fn().mockResolvedValue({ ok: true, @@ -229,24 +242,18 @@ function findSchemaUnionKeywords(schema: unknown, path = "root"): string[] { describe("image tool implicit imageModel config", () => { const priorFetch = global.fetch; - - beforeEach(() => { - vi.stubEnv("OPENAI_API_KEY", ""); - vi.stubEnv("ANTHROPIC_API_KEY", ""); - vi.stubEnv("ANTHROPIC_OAUTH_TOKEN", ""); - vi.stubEnv("MINIMAX_API_KEY", ""); - vi.stubEnv("ZAI_API_KEY", ""); - vi.stubEnv("Z_AI_API_KEY", ""); + registerImageToolEnvReset(priorFetch, [ + "OPENAI_API_KEY", + "ANTHROPIC_API_KEY", + "ANTHROPIC_OAUTH_TOKEN", + "MINIMAX_API_KEY", + "ZAI_API_KEY", + "Z_AI_API_KEY", // Avoid implicit Copilot provider discovery hitting the network in tests. 
- vi.stubEnv("COPILOT_GITHUB_TOKEN", ""); - vi.stubEnv("GH_TOKEN", ""); - vi.stubEnv("GITHUB_TOKEN", ""); - }); - - afterEach(() => { - vi.unstubAllEnvs(); - global.fetch = priorFetch; - }); + "COPILOT_GITHUB_TOKEN", + "GH_TOKEN", + "GITHUB_TOKEN", + ]); it("stays disabled without auth when no pairing is possible", async () => { await withTempAgentDir(async (agentDir) => { @@ -683,18 +690,12 @@ describe("image tool MiniMax VLM routing", () => { const pngB64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/woAAn8B9FD5fHAAAAAASUVORK5CYII="; const priorFetch = global.fetch; - - beforeEach(() => { - vi.stubEnv("MINIMAX_API_KEY", ""); - vi.stubEnv("COPILOT_GITHUB_TOKEN", ""); - vi.stubEnv("GH_TOKEN", ""); - vi.stubEnv("GITHUB_TOKEN", ""); - }); - - afterEach(() => { - vi.unstubAllEnvs(); - global.fetch = priorFetch; - }); + registerImageToolEnvReset(priorFetch, [ + "MINIMAX_API_KEY", + "COPILOT_GITHUB_TOKEN", + "GH_TOKEN", + "GITHUB_TOKEN", + ]); async function createMinimaxVlmFixture(baseResp: { status_code: number; status_msg: string }) { const fetch = stubMinimaxFetch(baseResp, baseResp.status_code === 0 ? 
"ok" : ""); diff --git a/src/agents/tools/memory-tool.citations.test.ts b/src/agents/tools/memory-tool.citations.test.ts index 0fe84c6f5fa..ea097658ecf 100644 --- a/src/agents/tools/memory-tool.citations.test.ts +++ b/src/agents/tools/memory-tool.citations.test.ts @@ -6,24 +6,14 @@ import { setMemorySearchImpl, type MemoryReadParams, } from "../../../test/helpers/memory-tool-manager-mock.js"; -import type { OpenClawConfig } from "../../config/config.js"; -import { createMemoryGetTool, createMemorySearchTool } from "./memory-tool.js"; - -function asOpenClawConfig(config: Partial): OpenClawConfig { - return config as OpenClawConfig; -} - -function createToolConfig() { - return asOpenClawConfig({ agents: { list: [{ id: "main", default: true }] } }); -} - -function createMemoryGetToolOrThrow(config: OpenClawConfig = createToolConfig()) { - const tool = createMemoryGetTool({ config }); - if (!tool) { - throw new Error("tool missing"); - } - return tool; -} +import { + asOpenClawConfig, + createAutoCitationsMemorySearchTool, + createDefaultMemoryToolConfig, + createMemoryGetToolOrThrow, + createMemorySearchToolOrThrow, + expectUnavailableMemorySearchDetails, +} from "./memory-tool.test-helpers.js"; beforeEach(() => { resetMemoryToolMockState({ @@ -49,10 +39,7 @@ describe("memory search citations", () => { memory: { citations: "on" }, agents: { list: [{ id: "main", default: true }] }, }); - const tool = createMemorySearchTool({ config: cfg }); - if (!tool) { - throw new Error("tool missing"); - } + const tool = createMemorySearchToolOrThrow({ config: cfg }); const result = await tool.execute("call_citations_on", { query: "notes" }); const details = result.details as { results: Array<{ snippet: string; citation?: string }> }; expect(details.results[0]?.snippet).toMatch(/Source: MEMORY.md#L5-L7/); @@ -65,10 +52,7 @@ describe("memory search citations", () => { memory: { citations: "off" }, agents: { list: [{ id: "main", default: true }] }, }); - const tool = 
createMemorySearchTool({ config: cfg }); - if (!tool) { - throw new Error("tool missing"); - } + const tool = createMemorySearchToolOrThrow({ config: cfg }); const result = await tool.execute("call_citations_off", { query: "notes" }); const details = result.details as { results: Array<{ snippet: string; citation?: string }> }; expect(details.results[0]?.snippet).not.toMatch(/Source:/); @@ -81,10 +65,7 @@ describe("memory search citations", () => { memory: { citations: "on", backend: "qmd", qmd: { limits: { maxInjectedChars: 20 } } }, agents: { list: [{ id: "main", default: true }] }, }); - const tool = createMemorySearchTool({ config: cfg }); - if (!tool) { - throw new Error("tool missing"); - } + const tool = createMemorySearchToolOrThrow({ config: cfg }); const result = await tool.execute("call_citations_qmd", { query: "notes" }); const details = result.details as { results: Array<{ snippet: string; citation?: string }> }; expect(details.results[0]?.snippet.length).toBeLessThanOrEqual(20); @@ -92,17 +73,7 @@ describe("memory search citations", () => { it("honors auto mode for direct chats", async () => { setMemoryBackend("builtin"); - const cfg = asOpenClawConfig({ - memory: { citations: "auto" }, - agents: { list: [{ id: "main", default: true }] }, - }); - const tool = createMemorySearchTool({ - config: cfg, - agentSessionKey: "agent:main:discord:dm:u123", - }); - if (!tool) { - throw new Error("tool missing"); - } + const tool = createAutoCitationsMemorySearchTool("agent:main:discord:dm:u123"); const result = await tool.execute("auto_mode_direct", { query: "notes" }); const details = result.details as { results: Array<{ snippet: string }> }; expect(details.results[0]?.snippet).toMatch(/Source:/); @@ -110,17 +81,7 @@ describe("memory search citations", () => { it("suppresses citations for auto mode in group chats", async () => { setMemoryBackend("builtin"); - const cfg = asOpenClawConfig({ - memory: { citations: "auto" }, - agents: { list: [{ id: "main", 
default: true }] }, - }); - const tool = createMemorySearchTool({ - config: cfg, - agentSessionKey: "agent:main:discord:group:c123", - }); - if (!tool) { - throw new Error("tool missing"); - } + const tool = createAutoCitationsMemorySearchTool("agent:main:discord:group:c123"); const result = await tool.execute("auto_mode_group", { query: "notes" }); const details = result.details as { results: Array<{ snippet: string }> }; expect(details.results[0]?.snippet).not.toMatch(/Source:/); @@ -133,18 +94,11 @@ describe("memory tools", () => { throw new Error("openai embeddings failed: 429 insufficient_quota"); }); - const cfg = { agents: { list: [{ id: "main", default: true }] } }; - const tool = createMemorySearchTool({ config: cfg }); - expect(tool).not.toBeNull(); - if (!tool) { - throw new Error("tool missing"); - } + const cfg = createDefaultMemoryToolConfig(); + const tool = createMemorySearchToolOrThrow({ config: cfg }); const result = await tool.execute("call_1", { query: "hello" }); - expect(result.details).toEqual({ - results: [], - disabled: true, - unavailable: true, + expectUnavailableMemorySearchDetails(result.details, { error: "openai embeddings failed: 429 insufficient_quota", warning: "Memory search is unavailable because the embedding provider quota is exhausted.", action: "Top up or switch embedding provider, then retry memory_search.", diff --git a/src/agents/tools/memory-tool.test-helpers.ts b/src/agents/tools/memory-tool.test-helpers.ts new file mode 100644 index 00000000000..9a1d0e455f3 --- /dev/null +++ b/src/agents/tools/memory-tool.test-helpers.ts @@ -0,0 +1,63 @@ +import { expect } from "vitest"; +import type { OpenClawConfig } from "../../config/config.js"; +import { createMemoryGetTool, createMemorySearchTool } from "./memory-tool.js"; + +export function asOpenClawConfig(config: Partial): OpenClawConfig { + return config as OpenClawConfig; +} + +export function createDefaultMemoryToolConfig(): OpenClawConfig { + return asOpenClawConfig({ 
agents: { list: [{ id: "main", default: true }] } }); +} + +export function createMemorySearchToolOrThrow(params?: { + config?: OpenClawConfig; + agentSessionKey?: string; +}) { + const tool = createMemorySearchTool({ + config: params?.config ?? createDefaultMemoryToolConfig(), + ...(params?.agentSessionKey ? { agentSessionKey: params.agentSessionKey } : {}), + }); + if (!tool) { + throw new Error("tool missing"); + } + return tool; +} + +export function createMemoryGetToolOrThrow( + config: OpenClawConfig = createDefaultMemoryToolConfig(), +) { + const tool = createMemoryGetTool({ config }); + if (!tool) { + throw new Error("tool missing"); + } + return tool; +} + +export function createAutoCitationsMemorySearchTool(agentSessionKey: string) { + return createMemorySearchToolOrThrow({ + config: asOpenClawConfig({ + memory: { citations: "auto" }, + agents: { list: [{ id: "main", default: true }] }, + }), + agentSessionKey, + }); +} + +export function expectUnavailableMemorySearchDetails( + details: unknown, + params: { + error: string; + warning: string; + action: string; + }, +) { + expect(details).toEqual({ + results: [], + disabled: true, + unavailable: true, + error: params.error, + warning: params.warning, + action: params.action, + }); +} diff --git a/src/agents/tools/memory-tool.test.ts b/src/agents/tools/memory-tool.test.ts index de907c01632..e8764bd9f46 100644 --- a/src/agents/tools/memory-tool.test.ts +++ b/src/agents/tools/memory-tool.test.ts @@ -1,9 +1,12 @@ -import { beforeEach, describe, expect, it } from "vitest"; +import { beforeEach, describe, it } from "vitest"; import { resetMemoryToolMockState, setMemorySearchImpl, } from "../../../test/helpers/memory-tool-manager-mock.js"; -import { createMemorySearchTool } from "./memory-tool.js"; +import { + createMemorySearchToolOrThrow, + expectUnavailableMemorySearchDetails, +} from "./memory-tool.test-helpers.js"; describe("memory_search unavailable payloads", () => { beforeEach(() => { @@ -15,18 +18,9 @@ 
describe("memory_search unavailable payloads", () => { throw new Error("openai embeddings failed: 429 insufficient_quota"); }); - const tool = createMemorySearchTool({ - config: { agents: { list: [{ id: "main", default: true }] } }, - }); - if (!tool) { - throw new Error("tool missing"); - } - + const tool = createMemorySearchToolOrThrow(); const result = await tool.execute("quota", { query: "hello" }); - expect(result.details).toEqual({ - results: [], - disabled: true, - unavailable: true, + expectUnavailableMemorySearchDetails(result.details, { error: "openai embeddings failed: 429 insufficient_quota", warning: "Memory search is unavailable because the embedding provider quota is exhausted.", action: "Top up or switch embedding provider, then retry memory_search.", @@ -38,18 +32,9 @@ describe("memory_search unavailable payloads", () => { throw new Error("embedding provider timeout"); }); - const tool = createMemorySearchTool({ - config: { agents: { list: [{ id: "main", default: true }] } }, - }); - if (!tool) { - throw new Error("tool missing"); - } - + const tool = createMemorySearchToolOrThrow(); const result = await tool.execute("generic", { query: "hello" }); - expect(result.details).toEqual({ - results: [], - disabled: true, - unavailable: true, + expectUnavailableMemorySearchDetails(result.details, { error: "embedding provider timeout", warning: "Memory search is unavailable due to an embedding/provider error.", action: "Check embedding provider configuration and retry memory_search.", diff --git a/src/agents/tools/memory-tool.ts b/src/agents/tools/memory-tool.ts index c0d595b21a2..bb5086bdb15 100644 --- a/src/agents/tools/memory-tool.ts +++ b/src/agents/tools/memory-tool.ts @@ -37,106 +37,135 @@ function resolveMemoryToolContext(options: { config?: OpenClawConfig; agentSessi return { cfg, agentId }; } +async function getMemoryManagerContext(params: { cfg: OpenClawConfig; agentId: string }): Promise< + | { + manager: NonNullable>["manager"]>; + } + | { + 
error: string | undefined; + } +> { + const { manager, error } = await getMemorySearchManager({ + cfg: params.cfg, + agentId: params.agentId, + }); + return manager ? { manager } : { error }; +} + +function createMemoryTool(params: { + options: { + config?: OpenClawConfig; + agentSessionKey?: string; + }; + label: string; + name: string; + description: string; + parameters: typeof MemorySearchSchema | typeof MemoryGetSchema; + execute: (ctx: { cfg: OpenClawConfig; agentId: string }) => AnyAgentTool["execute"]; +}): AnyAgentTool | null { + const ctx = resolveMemoryToolContext(params.options); + if (!ctx) { + return null; + } + return { + label: params.label, + name: params.name, + description: params.description, + parameters: params.parameters, + execute: params.execute(ctx), + }; +} + export function createMemorySearchTool(options: { config?: OpenClawConfig; agentSessionKey?: string; }): AnyAgentTool | null { - const ctx = resolveMemoryToolContext(options); - if (!ctx) { - return null; - } - const { cfg, agentId } = ctx; - return { + return createMemoryTool({ + options, label: "Memory Search", name: "memory_search", description: "Mandatory recall step: semantically search MEMORY.md + memory/*.md (and optional session transcripts) before answering questions about prior work, decisions, dates, people, preferences, or todos; returns top snippets with path + lines. 
If response has disabled=true, memory retrieval is unavailable and should be surfaced to the user.", parameters: MemorySearchSchema, - execute: async (_toolCallId, params) => { - const query = readStringParam(params, "query", { required: true }); - const maxResults = readNumberParam(params, "maxResults"); - const minScore = readNumberParam(params, "minScore"); - const { manager, error } = await getMemorySearchManager({ - cfg, - agentId, - }); - if (!manager) { - return jsonResult(buildMemorySearchUnavailableResult(error)); - } - try { - const citationsMode = resolveMemoryCitationsMode(cfg); - const includeCitations = shouldIncludeCitations({ - mode: citationsMode, - sessionKey: options.agentSessionKey, - }); - const rawResults = await manager.search(query, { - maxResults, - minScore, - sessionKey: options.agentSessionKey, - }); - const status = manager.status(); - const decorated = decorateCitations(rawResults, includeCitations); - const resolved = resolveMemoryBackendConfig({ cfg, agentId }); - const results = - status.backend === "qmd" - ? clampResultsByInjectedChars(decorated, resolved.qmd?.limits.maxInjectedChars) - : decorated; - const searchMode = (status.custom as { searchMode?: string } | undefined)?.searchMode; - return jsonResult({ - results, - provider: status.provider, - model: status.model, - fallback: status.fallback, - citations: citationsMode, - mode: searchMode, - }); - } catch (err) { - const message = err instanceof Error ? 
err.message : String(err); - return jsonResult(buildMemorySearchUnavailableResult(message)); - } - }, - }; + execute: + ({ cfg, agentId }) => + async (_toolCallId, params) => { + const query = readStringParam(params, "query", { required: true }); + const maxResults = readNumberParam(params, "maxResults"); + const minScore = readNumberParam(params, "minScore"); + const memory = await getMemoryManagerContext({ cfg, agentId }); + if ("error" in memory) { + return jsonResult(buildMemorySearchUnavailableResult(memory.error)); + } + try { + const citationsMode = resolveMemoryCitationsMode(cfg); + const includeCitations = shouldIncludeCitations({ + mode: citationsMode, + sessionKey: options.agentSessionKey, + }); + const rawResults = await memory.manager.search(query, { + maxResults, + minScore, + sessionKey: options.agentSessionKey, + }); + const status = memory.manager.status(); + const decorated = decorateCitations(rawResults, includeCitations); + const resolved = resolveMemoryBackendConfig({ cfg, agentId }); + const results = + status.backend === "qmd" + ? clampResultsByInjectedChars(decorated, resolved.qmd?.limits.maxInjectedChars) + : decorated; + const searchMode = (status.custom as { searchMode?: string } | undefined)?.searchMode; + return jsonResult({ + results, + provider: status.provider, + model: status.model, + fallback: status.fallback, + citations: citationsMode, + mode: searchMode, + }); + } catch (err) { + const message = err instanceof Error ? 
err.message : String(err); + return jsonResult(buildMemorySearchUnavailableResult(message)); + } + }, + }); } export function createMemoryGetTool(options: { config?: OpenClawConfig; agentSessionKey?: string; }): AnyAgentTool | null { - const ctx = resolveMemoryToolContext(options); - if (!ctx) { - return null; - } - const { cfg, agentId } = ctx; - return { + return createMemoryTool({ + options, label: "Memory Get", name: "memory_get", description: "Safe snippet read from MEMORY.md or memory/*.md with optional from/lines; use after memory_search to pull only the needed lines and keep context small.", parameters: MemoryGetSchema, - execute: async (_toolCallId, params) => { - const relPath = readStringParam(params, "path", { required: true }); - const from = readNumberParam(params, "from", { integer: true }); - const lines = readNumberParam(params, "lines", { integer: true }); - const { manager, error } = await getMemorySearchManager({ - cfg, - agentId, - }); - if (!manager) { - return jsonResult({ path: relPath, text: "", disabled: true, error }); - } - try { - const result = await manager.readFile({ - relPath, - from: from ?? undefined, - lines: lines ?? undefined, - }); - return jsonResult(result); - } catch (err) { - const message = err instanceof Error ? err.message : String(err); - return jsonResult({ path: relPath, text: "", disabled: true, error: message }); - } - }, - }; + execute: + ({ cfg, agentId }) => + async (_toolCallId, params) => { + const relPath = readStringParam(params, "path", { required: true }); + const from = readNumberParam(params, "from", { integer: true }); + const lines = readNumberParam(params, "lines", { integer: true }); + const memory = await getMemoryManagerContext({ cfg, agentId }); + if ("error" in memory) { + return jsonResult({ path: relPath, text: "", disabled: true, error: memory.error }); + } + try { + const result = await memory.manager.readFile({ + relPath, + from: from ?? undefined, + lines: lines ?? 
undefined, + }); + return jsonResult(result); + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + return jsonResult({ path: relPath, text: "", disabled: true, error: message }); + } + }, + }); } function resolveMemoryCitationsMode(cfg: OpenClawConfig): MemoryCitationsMode { diff --git a/src/agents/tools/nodes-tool.test.ts b/src/agents/tools/nodes-tool.test.ts index 99780a16238..2a98973f693 100644 --- a/src/agents/tools/nodes-tool.test.ts +++ b/src/agents/tools/nodes-tool.test.ts @@ -53,6 +53,11 @@ describe("createNodesTool screen_record duration guardrails", () => { screenMocks.writeScreenRecordToFile.mockClear(); }); + it("marks nodes as owner-only", () => { + const tool = createNodesTool(); + expect(tool.ownerOnly).toBe(true); + }); + it("caps durationMs schema at 300000", () => { const tool = createNodesTool(); const schema = tool.parameters as { @@ -97,11 +102,11 @@ describe("createNodesTool screen_record duration guardrails", () => { if (payload?.command === "system.run.prepare") { return { payload: { - cmdText: "echo hi", plan: { argv: ["bash", "-lc", "echo hi"], cwd: null, - rawCommand: null, + commandText: 'bash -lc "echo hi"', + commandPreview: "echo hi", agentId: null, sessionKey: null, }, diff --git a/src/agents/tools/nodes-tool.ts b/src/agents/tools/nodes-tool.ts index 9c335c012b4..d6f4832d914 100644 --- a/src/agents/tools/nodes-tool.ts +++ b/src/agents/tools/nodes-tool.ts @@ -175,6 +175,7 @@ export function createNodesTool(options?: { return { label: "Nodes", name: "nodes", + ownerOnly: true, description: "Discover and control paired nodes (status/describe/pairing/notify/camera/photos/screen/location/notifications/run/invoke).", parameters: NodesToolSchema, @@ -664,7 +665,7 @@ export function createNodesTool(options?: { } const runParams = { command: prepared.plan.argv, - rawCommand: prepared.plan.rawCommand ?? prepared.cmdText, + rawCommand: prepared.plan.commandText, cwd: prepared.plan.cwd ?? 
cwd, env, timeoutMs: commandTimeoutMs, @@ -699,8 +700,6 @@ export function createNodesTool(options?: { { ...gatewayOpts, timeoutMs: APPROVAL_TIMEOUT_MS + 5_000 }, { id: approvalId, - command: prepared.cmdText, - commandArgv: prepared.plan.argv, systemRunPlan: prepared.plan, cwd: prepared.plan.cwd ?? cwd, nodeId, diff --git a/src/agents/tools/pdf-native-providers.ts b/src/agents/tools/pdf-native-providers.ts index 36d43ffb9f7..70a1e2e0e94 100644 --- a/src/agents/tools/pdf-native-providers.ts +++ b/src/agents/tools/pdf-native-providers.ts @@ -137,10 +137,9 @@ export async function geminiAnalyzePdf(params: { } parts.push({ text: params.prompt }); - const baseUrl = (params.baseUrl ?? "https://generativelanguage.googleapis.com").replace( - /\/+$/, - "", - ); + const baseUrl = (params.baseUrl ?? "https://generativelanguage.googleapis.com") + .replace(/\/+$/, "") + .replace(/\/v1beta$/, ""); const url = `${baseUrl}/v1beta/models/${encodeURIComponent(params.modelId)}:generateContent?key=${encodeURIComponent(apiKey)}`; const res = await fetch(url, { diff --git a/src/agents/tools/pdf-tool.test.ts b/src/agents/tools/pdf-tool.test.ts index 6cbc6ca54d1..381fc53c4b9 100644 --- a/src/agents/tools/pdf-tool.test.ts +++ b/src/agents/tools/pdf-tool.test.ts @@ -711,6 +711,26 @@ describe("native PDF provider API calls", () => { "apiKey required", ); }); + + it("geminiAnalyzePdf does not duplicate /v1beta when baseUrl already includes it", async () => { + const { geminiAnalyzePdf } = await import("./pdf-native-providers.js"); + const fetchMock = mockFetchResponse({ + ok: true, + json: async () => ({ + candidates: [{ content: { parts: [{ text: "ok" }] } }], + }), + }); + + await geminiAnalyzePdf( + makeGeminiAnalyzeParams({ + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + }), + ); + + const [url] = fetchMock.mock.calls[0]; + expect(url).toContain("/v1beta/models/"); + expect(url).not.toContain("/v1beta/v1beta"); + }); }); // 
--------------------------------------------------------------------------- diff --git a/src/agents/tools/session-status-tool.ts b/src/agents/tools/session-status-tool.ts index 2277b6e8ad2..132b470fd2f 100644 --- a/src/agents/tools/session-status-tool.ts +++ b/src/agents/tools/session-status-tool.ts @@ -19,9 +19,11 @@ import { import { buildAgentMainSessionKey, DEFAULT_AGENT_ID, + parseAgentSessionKey, resolveAgentIdFromSessionKey, } from "../../routing/session-key.js"; import { applyModelOverrideToSessionEntry } from "../../sessions/model-overrides.js"; +import { resolvePreferredSessionKeyForSessionIdMatches } from "../../sessions/session-id-resolution.js"; import { resolveAgentDir } from "../agent-scope.js"; import { formatUserTime, resolveUserTimeFormat, resolveUserTimezone } from "../date-time.js"; import { resolveModelAuthLabel } from "../model-auth-label.js"; @@ -36,10 +38,12 @@ import { import type { AnyAgentTool } from "./common.js"; import { readStringParam } from "./common.js"; import { + createSessionVisibilityGuard, shouldResolveSessionIdInput, - resolveInternalSessionKey, - resolveMainSessionAlias, createAgentToAgentPolicy, + resolveEffectiveSessionToolsVisibility, + resolveInternalSessionKey, + resolveSandboxedSessionToolContext, } from "./sessions-helpers.js"; const SessionStatusToolSchema = Type.Object({ @@ -97,16 +101,12 @@ function resolveSessionKeyFromSessionId(params: { return null; } const { store } = loadCombinedSessionStoreForGateway(params.cfg); - const match = Object.entries(store).find(([key, entry]) => { - if (entry?.sessionId !== trimmed) { - return false; - } - if (!params.agentId) { - return true; - } - return resolveAgentIdFromSessionKey(key) === params.agentId; - }); - return match?.[0] ?? 
null; + const matches = Object.entries(store).filter( + (entry): entry is [string, SessionEntry] => + entry[1]?.sessionId === trimmed && + (!params.agentId || resolveAgentIdFromSessionKey(entry[0]) === params.agentId), + ); + return resolvePreferredSessionKeyForSessionIdMatches(matches, trimmed) ?? null; } async function resolveModelOverride(params: { @@ -148,6 +148,7 @@ async function resolveModelOverride(params: { catalog, defaultProvider: currentProvider, defaultModel: currentModel, + agentId: params.agentId, }); const resolved = resolveModelRefFromString({ @@ -175,6 +176,7 @@ async function resolveModelOverride(params: { export function createSessionStatusTool(opts?: { agentSessionKey?: string; config?: OpenClawConfig; + sandboxed?: boolean; }): AnyAgentTool { return { label: "Session Status", @@ -185,18 +187,70 @@ export function createSessionStatusTool(opts?: { execute: async (_toolCallId, args) => { const params = args as Record; const cfg = opts?.config ?? loadConfig(); - const { mainKey, alias } = resolveMainSessionAlias(cfg); + const { mainKey, alias, effectiveRequesterKey } = resolveSandboxedSessionToolContext({ + cfg, + agentSessionKey: opts?.agentSessionKey, + sandboxed: opts?.sandboxed, + }); const a2aPolicy = createAgentToAgentPolicy(cfg); + const requesterAgentId = resolveAgentIdFromSessionKey( + opts?.agentSessionKey ?? 
effectiveRequesterKey, + ); + const visibilityRequesterKey = effectiveRequesterKey.trim(); + const usesLegacyMainAlias = alias === mainKey; + const isLegacyMainVisibilityKey = (sessionKey: string) => { + const trimmed = sessionKey.trim(); + return usesLegacyMainAlias && (trimmed === "main" || trimmed === mainKey); + }; + const resolveVisibilityMainSessionKey = (sessionAgentId: string) => { + const requesterParsed = parseAgentSessionKey(visibilityRequesterKey); + if ( + resolveAgentIdFromSessionKey(visibilityRequesterKey) === sessionAgentId && + (requesterParsed?.rest === mainKey || isLegacyMainVisibilityKey(visibilityRequesterKey)) + ) { + return visibilityRequesterKey; + } + return buildAgentMainSessionKey({ + agentId: sessionAgentId, + mainKey, + }); + }; + const normalizeVisibilityTargetSessionKey = (sessionKey: string, sessionAgentId: string) => { + const trimmed = sessionKey.trim(); + if (!trimmed) { + return trimmed; + } + if (trimmed.startsWith("agent:")) { + const parsed = parseAgentSessionKey(trimmed); + if (parsed?.rest === mainKey) { + return resolveVisibilityMainSessionKey(sessionAgentId); + } + return trimmed; + } + // Preserve legacy bare main keys for requester tree checks. + if (isLegacyMainVisibilityKey(trimmed)) { + return resolveVisibilityMainSessionKey(sessionAgentId); + } + return trimmed; + }; + const visibilityGuard = + opts?.sandboxed === true + ? await createSessionVisibilityGuard({ + action: "status", + requesterSessionKey: visibilityRequesterKey, + visibility: resolveEffectiveSessionToolsVisibility({ + cfg, + sandboxed: true, + }), + a2aPolicy, + }) + : null; const requestedKeyParam = readStringParam(params, "sessionKey"); let requestedKeyRaw = requestedKeyParam ?? opts?.agentSessionKey; if (!requestedKeyRaw?.trim()) { throw new Error("sessionKey required"); } - - const requesterAgentId = resolveAgentIdFromSessionKey( - opts?.agentSessionKey ?? 
requestedKeyRaw, - ); const ensureAgentAccess = (targetAgentId: string) => { if (targetAgentId === requesterAgentId) { return; @@ -213,7 +267,14 @@ export function createSessionStatusTool(opts?: { }; if (requestedKeyRaw.startsWith("agent:")) { - ensureAgentAccess(resolveAgentIdFromSessionKey(requestedKeyRaw)); + const requestedAgentId = resolveAgentIdFromSessionKey(requestedKeyRaw); + ensureAgentAccess(requestedAgentId); + const access = visibilityGuard?.check( + normalizeVisibilityTargetSessionKey(requestedKeyRaw, requestedAgentId), + ); + if (access && !access.allowed) { + throw new Error(access.error); + } } const isExplicitAgentKey = requestedKeyRaw.startsWith("agent:"); @@ -258,6 +319,15 @@ export function createSessionStatusTool(opts?: { throw new Error(`Unknown ${kind}: ${requestedKeyRaw}`); } + if (visibilityGuard && !requestedKeyRaw.startsWith("agent:")) { + const access = visibilityGuard.check( + normalizeVisibilityTargetSessionKey(resolved.key, agentId), + ); + if (!access.allowed) { + throw new Error(access.error); + } + } + const configured = resolveDefaultModelForAgent({ cfg, agentId }); const modelRaw = readStringParam(params, "model"); let changedModel = false; diff --git a/src/agents/tools/sessions-access.ts b/src/agents/tools/sessions-access.ts index 6574c2296cf..47bd0806f7b 100644 --- a/src/agents/tools/sessions-access.ts +++ b/src/agents/tools/sessions-access.ts @@ -14,7 +14,7 @@ export type AgentToAgentPolicy = { isAllowed: (requesterAgentId: string, targetAgentId: string) => boolean; }; -export type SessionAccessAction = "history" | "send" | "list"; +export type SessionAccessAction = "history" | "send" | "list" | "status"; export type SessionAccessResult = | { allowed: true } @@ -130,6 +130,9 @@ function actionPrefix(action: SessionAccessAction): string { if (action === "send") { return "Session send"; } + if (action === "status") { + return "Session status"; + } return "Session list"; } @@ -140,6 +143,9 @@ function a2aDisabledMessage(action: 
SessionAccessAction): string { if (action === "send") { return "Agent-to-agent messaging is disabled. Set tools.agentToAgent.enabled=true to allow cross-agent sends."; } + if (action === "status") { + return "Agent-to-agent status is disabled. Set tools.agentToAgent.enabled=true to allow cross-agent access."; + } return "Agent-to-agent listing is disabled. Set tools.agentToAgent.enabled=true to allow cross-agent visibility."; } @@ -150,6 +156,9 @@ function a2aDeniedMessage(action: SessionAccessAction): string { if (action === "send") { return "Agent-to-agent messaging denied by tools.agentToAgent.allow."; } + if (action === "status") { + return "Agent-to-agent status denied by tools.agentToAgent.allow."; + } return "Agent-to-agent listing denied by tools.agentToAgent.allow."; } @@ -160,6 +169,9 @@ function crossVisibilityMessage(action: SessionAccessAction): string { if (action === "send") { return "Session send visibility is restricted. Set tools.sessions.visibility=all to allow cross-agent access."; } + if (action === "status") { + return "Session status visibility is restricted. Set tools.sessions.visibility=all to allow cross-agent access."; + } return "Session list visibility is restricted. 
Set tools.sessions.visibility=all to allow cross-agent access."; } diff --git a/src/agents/tools/sessions-helpers.ts b/src/agents/tools/sessions-helpers.ts index 7a244e32de0..74393ef44ad 100644 --- a/src/agents/tools/sessions-helpers.ts +++ b/src/agents/tools/sessions-helpers.ts @@ -12,6 +12,7 @@ export { resolveSandboxedSessionToolContext, resolveSessionToolsVisibility, } from "./sessions-access.js"; +import { resolveSandboxedSessionToolContext } from "./sessions-access.js"; export type { SessionReferenceResolution } from "./sessions-resolution.js"; export { isRequesterSpawnedSessionVisible, @@ -27,11 +28,13 @@ export { shouldResolveSessionIdInput, shouldVerifyRequesterSpawnedSessionVisibility, } from "./sessions-resolution.js"; +import { type OpenClawConfig, loadConfig } from "../../config/config.js"; import { extractTextFromChatContent } from "../../shared/chat-content.js"; import { sanitizeUserFacingText } from "../pi-embedded-helpers.js"; import { stripDowngradedToolCallText, stripMinimaxToolCallXml, + stripModelSpecialTokens, stripThinkingTagsFromText, } from "../pi-embedded-utils.js"; @@ -72,6 +75,22 @@ function normalizeKey(value?: string) { return trimmed ? trimmed : undefined; } +export function resolveSessionToolContext(opts?: { + agentSessionKey?: string; + sandboxed?: boolean; + config?: OpenClawConfig; +}) { + const cfg = opts?.config ?? 
loadConfig(); + return { + cfg, + ...resolveSandboxedSessionToolContext({ + cfg, + agentSessionKey: opts?.agentSessionKey, + sandboxed: opts?.sandboxed, + }), + }; +} + export function classifySessionKind(params: { key: string; gatewayKind?: string | null; @@ -142,7 +161,9 @@ export function sanitizeTextContent(text: string): string { if (!text) { return text; } - return stripThinkingTagsFromText(stripDowngradedToolCallText(stripMinimaxToolCallXml(text))); + return stripThinkingTagsFromText( + stripDowngradedToolCallText(stripModelSpecialTokens(stripMinimaxToolCallXml(text))), + ); } export function extractAssistantText(message: unknown): string | undefined { @@ -163,9 +184,9 @@ export function extractAssistantText(message: unknown): string | undefined { normalizeText: (text) => text.trim(), }) ?? ""; const stopReason = (message as { stopReason?: unknown }).stopReason; - const errorMessage = (message as { errorMessage?: unknown }).errorMessage; - const errorContext = - stopReason === "error" || (typeof errorMessage === "string" && Boolean(errorMessage.trim())); + // Gate on stopReason only — a non-error response with a stale/background errorMessage + // should not have its content rewritten with error templates (#13935). + const errorContext = stopReason === "error"; return joined ? 
sanitizeUserFacingText(joined, { errorContext }) : undefined; } diff --git a/src/agents/tools/sessions-history-tool.ts b/src/agents/tools/sessions-history-tool.ts index 3d5deeadcdb..a3e8d4d9461 100644 --- a/src/agents/tools/sessions-history-tool.ts +++ b/src/agents/tools/sessions-history-tool.ts @@ -1,5 +1,5 @@ import { Type } from "@sinclair/typebox"; -import { loadConfig } from "../../config/config.js"; +import { type OpenClawConfig, loadConfig } from "../../config/config.js"; import { callGateway } from "../../gateway/call.js"; import { capArrayByJsonBytes } from "../../gateway/session-utils.fs.js"; import { jsonUtf8Bytes } from "../../infra/json-utf8-bytes.js"; @@ -169,6 +169,7 @@ function enforceSessionsHistoryHardCap(params: { export function createSessionsHistoryTool(opts?: { agentSessionKey?: string; sandboxed?: boolean; + config?: OpenClawConfig; }): AnyAgentTool { return { label: "Session History", @@ -180,7 +181,7 @@ export function createSessionsHistoryTool(opts?: { const sessionKeyParam = readStringParam(params, "sessionKey", { required: true, }); - const cfg = loadConfig(); + const cfg = opts?.config ?? 
loadConfig(); const { mainKey, alias, effectiveRequesterKey, restrictToSpawned } = resolveSandboxedSessionToolContext({ cfg, diff --git a/src/agents/tools/sessions-list-tool.ts b/src/agents/tools/sessions-list-tool.ts index 0cba87e5653..ff3f56212d2 100644 --- a/src/agents/tools/sessions-list-tool.ts +++ b/src/agents/tools/sessions-list-tool.ts @@ -1,6 +1,6 @@ import path from "node:path"; import { Type } from "@sinclair/typebox"; -import { loadConfig } from "../../config/config.js"; +import { type OpenClawConfig, loadConfig } from "../../config/config.js"; import { resolveSessionFilePath, resolveSessionFilePathOptions, @@ -33,6 +33,7 @@ const SessionsListToolSchema = Type.Object({ export function createSessionsListTool(opts?: { agentSessionKey?: string; sandboxed?: boolean; + config?: OpenClawConfig; }): AnyAgentTool { return { label: "Sessions", @@ -41,7 +42,7 @@ export function createSessionsListTool(opts?: { parameters: SessionsListToolSchema, execute: async (_toolCallId, args) => { const params = args as Record; - const cfg = loadConfig(); + const cfg = opts?.config ?? loadConfig(); const { mainKey, alias, requesterInternalKey, restrictToSpawned } = resolveSandboxedSessionToolContext({ cfg, diff --git a/src/agents/tools/sessions-send-helpers.ts b/src/agents/tools/sessions-send-helpers.ts index 94dc3fe0c6a..d987932bb60 100644 --- a/src/agents/tools/sessions-send-helpers.ts +++ b/src/agents/tools/sessions-send-helpers.ts @@ -70,13 +70,13 @@ export function resolveAnnounceTargetFromKey(sessionKey: string): AnnounceTarget }; } -export function buildAgentToAgentMessageContext(params: { +function buildAgentSessionLines(params: { requesterSessionKey?: string; requesterChannel?: string; targetSessionKey: string; -}) { - const lines = [ - "Agent-to-agent message context:", + targetChannel?: string; +}): string[] { + return [ params.requesterSessionKey ? 
`Agent 1 (requester) session: ${params.requesterSessionKey}.` : undefined, @@ -84,7 +84,18 @@ export function buildAgentToAgentMessageContext(params: { ? `Agent 1 (requester) channel: ${params.requesterChannel}.` : undefined, `Agent 2 (target) session: ${params.targetSessionKey}.`, - ].filter(Boolean); + params.targetChannel ? `Agent 2 (target) channel: ${params.targetChannel}.` : undefined, + ].filter((line): line is string => Boolean(line)); +} + +export function buildAgentToAgentMessageContext(params: { + requesterSessionKey?: string; + requesterChannel?: string; + targetSessionKey: string; +}) { + const lines = ["Agent-to-agent message context:", ...buildAgentSessionLines(params)].filter( + Boolean, + ); return lines.join("\n"); } @@ -103,14 +114,7 @@ export function buildAgentToAgentReplyContext(params: { "Agent-to-agent reply step:", `Current agent: ${currentLabel}.`, `Turn ${params.turn} of ${params.maxTurns}.`, - params.requesterSessionKey - ? `Agent 1 (requester) session: ${params.requesterSessionKey}.` - : undefined, - params.requesterChannel - ? `Agent 1 (requester) channel: ${params.requesterChannel}.` - : undefined, - `Agent 2 (target) session: ${params.targetSessionKey}.`, - params.targetChannel ? `Agent 2 (target) channel: ${params.targetChannel}.` : undefined, + ...buildAgentSessionLines(params), `If you want to stop the ping-pong, reply exactly "${REPLY_SKIP_TOKEN}".`, ].filter(Boolean); return lines.join("\n"); @@ -127,14 +131,7 @@ export function buildAgentToAgentAnnounceContext(params: { }) { const lines = [ "Agent-to-agent announce step:", - params.requesterSessionKey - ? `Agent 1 (requester) session: ${params.requesterSessionKey}.` - : undefined, - params.requesterChannel - ? `Agent 1 (requester) channel: ${params.requesterChannel}.` - : undefined, - `Agent 2 (target) session: ${params.targetSessionKey}.`, - params.targetChannel ? 
`Agent 2 (target) channel: ${params.targetChannel}.` : undefined, + ...buildAgentSessionLines(params), `Original request: ${params.originalMessage}`, params.roundOneReply ? `Round 1 reply: ${params.roundOneReply}` diff --git a/src/agents/tools/sessions-send-tool.ts b/src/agents/tools/sessions-send-tool.ts index 82eff0adf7a..b2873e5cd1f 100644 --- a/src/agents/tools/sessions-send-tool.ts +++ b/src/agents/tools/sessions-send-tool.ts @@ -1,6 +1,6 @@ import crypto from "node:crypto"; import { Type } from "@sinclair/typebox"; -import { loadConfig } from "../../config/config.js"; +import type { OpenClawConfig } from "../../config/config.js"; import { callGateway } from "../../gateway/call.js"; import { normalizeAgentId, resolveAgentIdFromSessionKey } from "../../routing/session-key.js"; import { SESSION_LABEL_MAX_LENGTH } from "../../sessions/session-label.js"; @@ -17,7 +17,7 @@ import { extractAssistantText, resolveEffectiveSessionToolsVisibility, resolveSessionReference, - resolveSandboxedSessionToolContext, + resolveSessionToolContext, resolveVisibleSessionReference, stripToolMessages, } from "./sessions-helpers.js"; @@ -32,10 +32,41 @@ const SessionsSendToolSchema = Type.Object({ timeoutSeconds: Type.Optional(Type.Number({ minimum: 0 })), }); +async function startAgentRun(params: { + runId: string; + sendParams: Record; + sessionKey: string; +}): Promise<{ ok: true; runId: string } | { ok: false; result: ReturnType }> { + try { + const response = await callGateway<{ runId: string }>({ + method: "agent", + params: params.sendParams, + timeoutMs: 10_000, + }); + return { + ok: true, + runId: typeof response?.runId === "string" && response.runId ? response.runId : params.runId, + }; + } catch (err) { + const messageText = + err instanceof Error ? err.message : typeof err === "string" ? 
err : "error"; + return { + ok: false, + result: jsonResult({ + runId: params.runId, + status: "error", + error: messageText, + sessionKey: params.sessionKey, + }), + }; + } +} + export function createSessionsSendTool(opts?: { agentSessionKey?: string; agentChannel?: GatewayMessageChannel; sandboxed?: boolean; + config?: OpenClawConfig; }): AnyAgentTool { return { label: "Session Send", @@ -46,13 +77,8 @@ export function createSessionsSendTool(opts?: { execute: async (_toolCallId, args) => { const params = args as Record; const message = readStringParam(params, "message", { required: true }); - const cfg = loadConfig(); - const { mainKey, alias, effectiveRequesterKey, restrictToSpawned } = - resolveSandboxedSessionToolContext({ - cfg, - agentSessionKey: opts?.agentSessionKey, - sandboxed: opts?.sandboxed, - }); + const { cfg, mainKey, alias, effectiveRequesterKey, restrictToSpawned } = + resolveSessionToolContext(opts); const a2aPolicy = createAgentToAgentPolicy(cfg); const sessionVisibility = resolveEffectiveSessionToolsVisibility({ @@ -251,54 +277,34 @@ export function createSessionsSendTool(opts?: { }; if (timeoutSeconds === 0) { - try { - const response = await callGateway<{ runId: string }>({ - method: "agent", - params: sendParams, - timeoutMs: 10_000, - }); - if (typeof response?.runId === "string" && response.runId) { - runId = response.runId; - } - startA2AFlow(undefined, runId); - return jsonResult({ - runId, - status: "accepted", - sessionKey: displayKey, - delivery, - }); - } catch (err) { - const messageText = - err instanceof Error ? err.message : typeof err === "string" ? 
err : "error"; - return jsonResult({ - runId, - status: "error", - error: messageText, - sessionKey: displayKey, - }); - } - } - - try { - const response = await callGateway<{ runId: string }>({ - method: "agent", - params: sendParams, - timeoutMs: 10_000, - }); - if (typeof response?.runId === "string" && response.runId) { - runId = response.runId; - } - } catch (err) { - const messageText = - err instanceof Error ? err.message : typeof err === "string" ? err : "error"; - return jsonResult({ + const start = await startAgentRun({ runId, - status: "error", - error: messageText, + sendParams, sessionKey: displayKey, }); + if (!start.ok) { + return start.result; + } + runId = start.runId; + startA2AFlow(undefined, runId); + return jsonResult({ + runId, + status: "accepted", + sessionKey: displayKey, + delivery, + }); } + const start = await startAgentRun({ + runId, + sendParams, + sessionKey: displayKey, + }); + if (!start.ok) { + return start.result; + } + runId = start.runId; + let waitStatus: string | undefined; let waitError: string | undefined; try { diff --git a/src/agents/tools/sessions-spawn-tool.test.ts b/src/agents/tools/sessions-spawn-tool.test.ts index 01568462912..4fe106a7ebd 100644 --- a/src/agents/tools/sessions-spawn-tool.test.ts +++ b/src/agents/tools/sessions-spawn-tool.test.ts @@ -163,6 +163,43 @@ describe("sessions_spawn tool", () => { ); }); + it("passes resumeSessionId through to ACP spawns", async () => { + const tool = createSessionsSpawnTool({ + agentSessionKey: "agent:main:main", + }); + + await tool.execute("call-2c", { + runtime: "acp", + task: "resume prior work", + agentId: "codex", + resumeSessionId: "7f4a78e0-f6be-43fe-855c-c1c4fd229bc4", + }); + + expect(hoisted.spawnAcpDirectMock).toHaveBeenCalledWith( + expect.objectContaining({ + task: "resume prior work", + agentId: "codex", + resumeSessionId: "7f4a78e0-f6be-43fe-855c-c1c4fd229bc4", + }), + expect.any(Object), + ); + }); + + it("rejects resumeSessionId without runtime=acp", async 
() => { + const tool = createSessionsSpawnTool({ + agentSessionKey: "agent:main:main", + }); + + const result = await tool.execute("call-guard", { + task: "resume prior work", + resumeSessionId: "7f4a78e0-f6be-43fe-855c-c1c4fd229bc4", + }); + + expect(JSON.stringify(result)).toContain("resumeSessionId is only supported for runtime=acp"); + expect(hoisted.spawnSubagentDirectMock).not.toHaveBeenCalled(); + expect(hoisted.spawnAcpDirectMock).not.toHaveBeenCalled(); + }); + it("rejects attachments for ACP runtime", async () => { const tool = createSessionsSpawnTool({ agentSessionKey: "agent:main:main", diff --git a/src/agents/tools/sessions-spawn-tool.ts b/src/agents/tools/sessions-spawn-tool.ts index b2214f6bc70..b735084d2b0 100644 --- a/src/agents/tools/sessions-spawn-tool.ts +++ b/src/agents/tools/sessions-spawn-tool.ts @@ -25,6 +25,12 @@ const SessionsSpawnToolSchema = Type.Object({ label: Type.Optional(Type.String()), runtime: optionalStringEnum(SESSIONS_SPAWN_RUNTIMES), agentId: Type.Optional(Type.String()), + resumeSessionId: Type.Optional( + Type.String({ + description: + 'Resume an existing agent session by its ID (e.g. a Codex session UUID from ~/.codex/sessions/). Requires runtime="acp". The agent replays conversation history via session/load instead of starting fresh.', + }), + ), model: Type.Optional(Type.String()), thinking: Type.Optional(Type.String()), cwd: Type.Optional(Type.String()), @@ -91,6 +97,7 @@ export function createSessionsSpawnTool( const label = typeof params.label === "string" ? params.label.trim() : ""; const runtime = params.runtime === "acp" ? 
"acp" : "subagent"; const requestedAgentId = readStringParam(params, "agentId"); + const resumeSessionId = readStringParam(params, "resumeSessionId"); const modelOverride = readStringParam(params, "model"); const thinkingOverrideRaw = readStringParam(params, "thinking"); const cwd = readStringParam(params, "cwd"); @@ -127,6 +134,13 @@ export function createSessionsSpawnTool( }); } + if (resumeSessionId && runtime !== "acp") { + return jsonResult({ + status: "error", + error: `resumeSessionId is only supported for runtime=acp; got runtime=${runtime}`, + }); + } + if (runtime === "acp") { if (Array.isArray(attachments) && attachments.length > 0) { return jsonResult({ @@ -140,6 +154,7 @@ export function createSessionsSpawnTool( task, label: label || undefined, agentId: requestedAgentId, + resumeSessionId, cwd, mode: mode && ACP_SPAWN_MODES.includes(mode) ? mode : undefined, thread, diff --git a/src/agents/tools/sessions-yield-tool.test.ts b/src/agents/tools/sessions-yield-tool.test.ts new file mode 100644 index 00000000000..f7def7cbb73 --- /dev/null +++ b/src/agents/tools/sessions-yield-tool.test.ts @@ -0,0 +1,45 @@ +import { describe, expect, it, vi } from "vitest"; +import { createSessionsYieldTool } from "./sessions-yield-tool.js"; + +describe("sessions_yield tool", () => { + it("returns error when no sessionId is provided", async () => { + const onYield = vi.fn(); + const tool = createSessionsYieldTool({ onYield }); + const result = await tool.execute("call-1", {}); + expect(result.details).toMatchObject({ + status: "error", + error: "No session context", + }); + expect(onYield).not.toHaveBeenCalled(); + }); + + it("invokes onYield callback with default message", async () => { + const onYield = vi.fn(); + const tool = createSessionsYieldTool({ sessionId: "test-session", onYield }); + const result = await tool.execute("call-1", {}); + expect(result.details).toMatchObject({ status: "yielded", message: "Turn yielded." 
}); + expect(onYield).toHaveBeenCalledOnce(); + expect(onYield).toHaveBeenCalledWith("Turn yielded."); + }); + + it("passes the custom message through the yield callback", async () => { + const onYield = vi.fn(); + const tool = createSessionsYieldTool({ sessionId: "test-session", onYield }); + const result = await tool.execute("call-1", { message: "Waiting for fact-checker" }); + expect(result.details).toMatchObject({ + status: "yielded", + message: "Waiting for fact-checker", + }); + expect(onYield).toHaveBeenCalledOnce(); + expect(onYield).toHaveBeenCalledWith("Waiting for fact-checker"); + }); + + it("returns error without onYield callback", async () => { + const tool = createSessionsYieldTool({ sessionId: "test-session" }); + const result = await tool.execute("call-1", {}); + expect(result.details).toMatchObject({ + status: "error", + error: "Yield not supported in this context", + }); + }); +}); diff --git a/src/agents/tools/sessions-yield-tool.ts b/src/agents/tools/sessions-yield-tool.ts new file mode 100644 index 00000000000..8b4c3e7ad90 --- /dev/null +++ b/src/agents/tools/sessions-yield-tool.ts @@ -0,0 +1,32 @@ +import { Type } from "@sinclair/typebox"; +import type { AnyAgentTool } from "./common.js"; +import { jsonResult, readStringParam } from "./common.js"; + +const SessionsYieldToolSchema = Type.Object({ + message: Type.Optional(Type.String()), +}); + +export function createSessionsYieldTool(opts?: { + sessionId?: string; + onYield?: (message: string) => Promise | void; +}): AnyAgentTool { + return { + label: "Yield", + name: "sessions_yield", + description: + "End your current turn. 
Use after spawning subagents to receive their results as the next message.", + parameters: SessionsYieldToolSchema, + execute: async (_toolCallId, args) => { + const params = args as Record; + const message = readStringParam(params, "message") || "Turn yielded."; + if (!opts?.sessionId) { + return jsonResult({ status: "error", error: "No session context" }); + } + if (!opts?.onYield) { + return jsonResult({ status: "error", error: "Yield not supported in this context" }); + } + await opts.onYield(message); + return jsonResult({ status: "yielded", message }); + }, + }; +} diff --git a/src/agents/tools/sessions.test.ts b/src/agents/tools/sessions.test.ts index aa831027f68..ce849e45d07 100644 --- a/src/agents/tools/sessions.test.ts +++ b/src/agents/tools/sessions.test.ts @@ -199,6 +199,16 @@ describe("extractAssistantText", () => { "Firebase downgraded us to the free Spark plan. Check whether billing should be re-enabled.", ); }); + + it("preserves successful turns with stale background errorMessage", () => { + const message = { + role: "assistant", + stopReason: "end_turn", + errorMessage: "insufficient credits for embedding model", + content: [{ type: "text", text: "Handle payment required errors in your API." 
}], + }; + expect(extractAssistantText(message)).toBe("Handle payment required errors in your API."); + }); }); describe("resolveAnnounceTarget", () => { diff --git a/src/agents/tools/subagents-tool.ts b/src/agents/tools/subagents-tool.ts index f2b073934ab..a7eb53c5d46 100644 --- a/src/agents/tools/subagents-tool.ts +++ b/src/agents/tools/subagents-tool.ts @@ -1,58 +1,26 @@ -import crypto from "node:crypto"; import { Type } from "@sinclair/typebox"; -import { clearSessionQueues } from "../../auto-reply/reply/queue.js"; -import { - resolveSubagentLabel, - resolveSubagentTargetFromRuns, - sortSubagentRuns, - type SubagentTargetResolution, -} from "../../auto-reply/reply/subagents-utils.js"; -import { DEFAULT_SUBAGENT_MAX_SPAWN_DEPTH } from "../../config/agent-limits.js"; import { loadConfig } from "../../config/config.js"; -import type { SessionEntry } from "../../config/sessions.js"; -import { loadSessionStore, resolveStorePath, updateSessionStore } from "../../config/sessions.js"; -import { callGateway } from "../../gateway/call.js"; -import { logVerbose } from "../../globals.js"; -import { - isSubagentSessionKey, - parseAgentSessionKey, - type ParsedAgentSessionKey, -} from "../../routing/session-key.js"; -import { - formatDurationCompact, - formatTokenUsageDisplay, - resolveTotalTokens, - truncateLine, -} from "../../shared/subagents-format.js"; -import { INTERNAL_MESSAGE_CHANNEL } from "../../utils/message-channel.js"; -import { AGENT_LANE_SUBAGENT } from "../lanes.js"; -import { abortEmbeddedPiRun } from "../pi-embedded.js"; import { optionalStringEnum } from "../schema/typebox.js"; -import { getSubagentDepthFromSessionStore } from "../subagent-depth.js"; import { - clearSubagentRunSteerRestart, - countPendingDescendantRuns, - listSubagentRunsForRequester, - markSubagentRunTerminated, - markSubagentRunForSteerRestart, - replaceSubagentRunAfterSteer, - type SubagentRunRecord, -} from "../subagent-registry.js"; + buildSubagentList, + DEFAULT_RECENT_MINUTES, + 
isActiveSubagentRun, + killAllControlledSubagentRuns, + killControlledSubagentRun, + listControlledSubagentRuns, + MAX_RECENT_MINUTES, + MAX_STEER_MESSAGE_CHARS, + resolveControlledSubagentTarget, + resolveSubagentController, + steerControlledSubagentRun, + createPendingDescendantCounter, +} from "../subagent-control.js"; import type { AnyAgentTool } from "./common.js"; import { jsonResult, readNumberParam, readStringParam } from "./common.js"; -import { resolveInternalSessionKey, resolveMainSessionAlias } from "./sessions-helpers.js"; const SUBAGENT_ACTIONS = ["list", "kill", "steer"] as const; type SubagentAction = (typeof SUBAGENT_ACTIONS)[number]; -const DEFAULT_RECENT_MINUTES = 30; -const MAX_RECENT_MINUTES = 24 * 60; -const MAX_STEER_MESSAGE_CHARS = 4_000; -const STEER_RATE_LIMIT_MS = 2_000; -const STEER_ABORT_SETTLE_TIMEOUT_MS = 5_000; - -const steerRateLimit = new Map(); - const SubagentsToolSchema = Type.Object({ action: optionalStringEnum(SUBAGENT_ACTIONS), target: Type.Optional(Type.String()), @@ -60,292 +28,6 @@ const SubagentsToolSchema = Type.Object({ recentMinutes: Type.Optional(Type.Number({ minimum: 1 })), }); -type SessionEntryResolution = { - storePath: string; - entry: SessionEntry | undefined; -}; - -type ResolvedRequesterKey = { - requesterSessionKey: string; - callerSessionKey: string; - callerIsSubagent: boolean; -}; - -function resolveRunStatus(entry: SubagentRunRecord, options?: { pendingDescendants?: number }) { - const pendingDescendants = Math.max(0, options?.pendingDescendants ?? 0); - if (pendingDescendants > 0) { - const childLabel = pendingDescendants === 1 ? "child" : "children"; - return `active (waiting on ${pendingDescendants} ${childLabel})`; - } - if (!entry.endedAt) { - return "running"; - } - const status = entry.outcome?.status ?? 
"done"; - if (status === "ok") { - return "done"; - } - if (status === "error") { - return "failed"; - } - return status; -} - -function resolveModelRef(entry?: SessionEntry) { - const model = typeof entry?.model === "string" ? entry.model.trim() : ""; - const provider = typeof entry?.modelProvider === "string" ? entry.modelProvider.trim() : ""; - if (model.includes("/")) { - return model; - } - if (model && provider) { - return `${provider}/${model}`; - } - if (model) { - return model; - } - if (provider) { - return provider; - } - // Fall back to override fields which are populated at spawn time, - // before the first run completes and writes model/modelProvider. - const overrideModel = typeof entry?.modelOverride === "string" ? entry.modelOverride.trim() : ""; - const overrideProvider = - typeof entry?.providerOverride === "string" ? entry.providerOverride.trim() : ""; - if (overrideModel.includes("/")) { - return overrideModel; - } - if (overrideModel && overrideProvider) { - return `${overrideProvider}/${overrideModel}`; - } - if (overrideModel) { - return overrideModel; - } - return overrideProvider || undefined; -} - -function resolveModelDisplay(entry?: SessionEntry, fallbackModel?: string) { - const modelRef = resolveModelRef(entry) || fallbackModel || undefined; - if (!modelRef) { - return "model n/a"; - } - const slash = modelRef.lastIndexOf("/"); - if (slash >= 0 && slash < modelRef.length - 1) { - return modelRef.slice(slash + 1); - } - return modelRef; -} - -function resolveSubagentTarget( - runs: SubagentRunRecord[], - token: string | undefined, - options?: { recentMinutes?: number; isActive?: (entry: SubagentRunRecord) => boolean }, -): SubagentTargetResolution { - return resolveSubagentTargetFromRuns({ - runs, - token, - recentWindowMinutes: options?.recentMinutes ?? 
DEFAULT_RECENT_MINUTES, - label: (entry) => resolveSubagentLabel(entry), - isActive: options?.isActive, - errors: { - missingTarget: "Missing subagent target.", - invalidIndex: (value) => `Invalid subagent index: ${value}`, - unknownSession: (value) => `Unknown subagent session: ${value}`, - ambiguousLabel: (value) => `Ambiguous subagent label: ${value}`, - ambiguousLabelPrefix: (value) => `Ambiguous subagent label prefix: ${value}`, - ambiguousRunIdPrefix: (value) => `Ambiguous subagent run id prefix: ${value}`, - unknownTarget: (value) => `Unknown subagent target: ${value}`, - }, - }); -} - -function resolveStorePathForKey( - cfg: ReturnType, - key: string, - parsed?: ParsedAgentSessionKey | null, -) { - return resolveStorePath(cfg.session?.store, { - agentId: parsed?.agentId, - }); -} - -function resolveSessionEntryForKey(params: { - cfg: ReturnType; - key: string; - cache: Map>; -}): SessionEntryResolution { - const parsed = parseAgentSessionKey(params.key); - const storePath = resolveStorePathForKey(params.cfg, params.key, parsed); - let store = params.cache.get(storePath); - if (!store) { - store = loadSessionStore(storePath); - params.cache.set(storePath, store); - } - return { - storePath, - entry: store[params.key], - }; -} - -function resolveRequesterKey(params: { - cfg: ReturnType; - agentSessionKey?: string; -}): ResolvedRequesterKey { - const { mainKey, alias } = resolveMainSessionAlias(params.cfg); - const callerRaw = params.agentSessionKey?.trim() || alias; - const callerSessionKey = resolveInternalSessionKey({ - key: callerRaw, - alias, - mainKey, - }); - if (!isSubagentSessionKey(callerSessionKey)) { - return { - requesterSessionKey: callerSessionKey, - callerSessionKey, - callerIsSubagent: false, - }; - } - - // Check if this sub-agent can spawn children (orchestrator). - // If so, it should see its own children, not its parent's children. 
- const callerDepth = getSubagentDepthFromSessionStore(callerSessionKey, { cfg: params.cfg }); - const maxSpawnDepth = - params.cfg.agents?.defaults?.subagents?.maxSpawnDepth ?? DEFAULT_SUBAGENT_MAX_SPAWN_DEPTH; - if (callerDepth < maxSpawnDepth) { - // Orchestrator sub-agent: use its own session key as requester - // so it sees children it spawned. - return { - requesterSessionKey: callerSessionKey, - callerSessionKey, - callerIsSubagent: true, - }; - } - - // Leaf sub-agent: walk up to its parent so it can see sibling runs. - const cache = new Map>(); - const callerEntry = resolveSessionEntryForKey({ - cfg: params.cfg, - key: callerSessionKey, - cache, - }).entry; - const spawnedBy = typeof callerEntry?.spawnedBy === "string" ? callerEntry.spawnedBy.trim() : ""; - return { - requesterSessionKey: spawnedBy || callerSessionKey, - callerSessionKey, - callerIsSubagent: true, - }; -} - -async function killSubagentRun(params: { - cfg: ReturnType; - entry: SubagentRunRecord; - cache: Map>; -}): Promise<{ killed: boolean; sessionId?: string }> { - if (params.entry.endedAt) { - return { killed: false }; - } - const childSessionKey = params.entry.childSessionKey; - const resolved = resolveSessionEntryForKey({ - cfg: params.cfg, - key: childSessionKey, - cache: params.cache, - }); - const sessionId = resolved.entry?.sessionId; - const aborted = sessionId ? 
abortEmbeddedPiRun(sessionId) : false; - const cleared = clearSessionQueues([childSessionKey, sessionId]); - if (cleared.followupCleared > 0 || cleared.laneCleared > 0) { - logVerbose( - `subagents tool kill: cleared followups=${cleared.followupCleared} lane=${cleared.laneCleared} keys=${cleared.keys.join(",")}`, - ); - } - if (resolved.entry) { - await updateSessionStore(resolved.storePath, (store) => { - const current = store[childSessionKey]; - if (!current) { - return; - } - current.abortedLastRun = true; - current.updatedAt = Date.now(); - store[childSessionKey] = current; - }); - } - const marked = markSubagentRunTerminated({ - runId: params.entry.runId, - childSessionKey, - reason: "killed", - }); - const killed = marked > 0 || aborted || cleared.followupCleared > 0 || cleared.laneCleared > 0; - return { killed, sessionId }; -} - -/** - * Recursively kill all descendant subagent runs spawned by a given parent session key. - * This ensures that when a subagent is killed, all of its children (and their children) are also killed. - */ -async function cascadeKillChildren(params: { - cfg: ReturnType; - parentChildSessionKey: string; - cache: Map>; - seenChildSessionKeys?: Set; -}): Promise<{ killed: number; labels: string[] }> { - const childRuns = listSubagentRunsForRequester(params.parentChildSessionKey); - const seenChildSessionKeys = params.seenChildSessionKeys ?? new Set(); - let killed = 0; - const labels: string[] = []; - - for (const run of childRuns) { - const childKey = run.childSessionKey?.trim(); - if (!childKey || seenChildSessionKeys.has(childKey)) { - continue; - } - seenChildSessionKeys.add(childKey); - - if (!run.endedAt) { - const stopResult = await killSubagentRun({ - cfg: params.cfg, - entry: run, - cache: params.cache, - }); - if (stopResult.killed) { - killed += 1; - labels.push(resolveSubagentLabel(run)); - } - } - - // Recurse for grandchildren even if this parent already ended. 
- const cascade = await cascadeKillChildren({ - cfg: params.cfg, - parentChildSessionKey: childKey, - cache: params.cache, - seenChildSessionKeys, - }); - killed += cascade.killed; - labels.push(...cascade.labels); - } - - return { killed, labels }; -} - -function buildListText(params: { - active: Array<{ line: string }>; - recent: Array<{ line: string }>; - recentMinutes: number; -}) { - const lines: string[] = []; - lines.push("active subagents:"); - if (params.active.length === 0) { - lines.push("(none)"); - } else { - lines.push(...params.active.map((entry) => entry.line)); - } - lines.push(""); - lines.push(`recent (last ${params.recentMinutes}m):`); - if (params.recent.length === 0) { - lines.push("(none)"); - } else { - lines.push(...params.recent.map((entry) => entry.line)); - } - return lines.join("\n"); -} - export function createSubagentsTool(opts?: { agentSessionKey?: string }): AnyAgentTool { return { label: "Subagents", @@ -357,139 +39,69 @@ export function createSubagentsTool(opts?: { agentSessionKey?: string }): AnyAge const params = args as Record; const action = (readStringParam(params, "action") ?? "list") as SubagentAction; const cfg = loadConfig(); - const requester = resolveRequesterKey({ + const controller = resolveSubagentController({ cfg, agentSessionKey: opts?.agentSessionKey, }); - const runs = sortSubagentRuns(listSubagentRunsForRequester(requester.requesterSessionKey)); + const runs = listControlledSubagentRuns(controller.controllerSessionKey); const recentMinutesRaw = readNumberParam(params, "recentMinutes"); const recentMinutes = recentMinutesRaw ? Math.max(1, Math.min(MAX_RECENT_MINUTES, Math.floor(recentMinutesRaw))) : DEFAULT_RECENT_MINUTES; - const pendingDescendantCache = new Map(); - const pendingDescendantCount = (sessionKey: string) => { - if (pendingDescendantCache.has(sessionKey)) { - return pendingDescendantCache.get(sessionKey) ?? 
0; - } - const pending = Math.max(0, countPendingDescendantRuns(sessionKey)); - pendingDescendantCache.set(sessionKey, pending); - return pending; - }; - const isActiveRun = (entry: SubagentRunRecord) => - !entry.endedAt || pendingDescendantCount(entry.childSessionKey) > 0; + const pendingDescendantCount = createPendingDescendantCounter(); + const isActive = (entry: (typeof runs)[number]) => + isActiveSubagentRun(entry, pendingDescendantCount); if (action === "list") { - const now = Date.now(); - const recentCutoff = now - recentMinutes * 60_000; - const cache = new Map>(); - - let index = 1; - const buildListEntry = (entry: SubagentRunRecord, runtimeMs: number) => { - const sessionEntry = resolveSessionEntryForKey({ - cfg, - key: entry.childSessionKey, - cache, - }).entry; - const totalTokens = resolveTotalTokens(sessionEntry); - const usageText = formatTokenUsageDisplay(sessionEntry); - const pendingDescendants = pendingDescendantCount(entry.childSessionKey); - const status = resolveRunStatus(entry, { - pendingDescendants, - }); - const runtime = formatDurationCompact(runtimeMs); - const label = truncateLine(resolveSubagentLabel(entry), 48); - const task = truncateLine(entry.task.trim(), 72); - const line = `${index}. ${label} (${resolveModelDisplay(sessionEntry, entry.model)}, ${runtime}${usageText ? `, ${usageText}` : ""}) ${status}${task.toLowerCase() !== label.toLowerCase() ? ` - ${task}` : ""}`; - const baseView = { - index, - runId: entry.runId, - sessionKey: entry.childSessionKey, - label, - task, - status, - pendingDescendants, - runtime, - runtimeMs, - model: resolveModelRef(sessionEntry) || entry.model, - totalTokens, - startedAt: entry.startedAt, - }; - index += 1; - return { line, view: entry.endedAt ? { ...baseView, endedAt: entry.endedAt } : baseView }; - }; - const active = runs - .filter((entry) => isActiveRun(entry)) - .map((entry) => buildListEntry(entry, now - (entry.startedAt ?? 
entry.createdAt))); - const recent = runs - .filter( - (entry) => - !isActiveRun(entry) && !!entry.endedAt && (entry.endedAt ?? 0) >= recentCutoff, - ) - .map((entry) => - buildListEntry(entry, (entry.endedAt ?? now) - (entry.startedAt ?? entry.createdAt)), - ); - - const text = buildListText({ active, recent, recentMinutes }); + const list = buildSubagentList({ + cfg, + runs, + recentMinutes, + }); return jsonResult({ status: "ok", action: "list", - requesterSessionKey: requester.requesterSessionKey, - callerSessionKey: requester.callerSessionKey, - callerIsSubagent: requester.callerIsSubagent, - total: runs.length, - active: active.map((entry) => entry.view), - recent: recent.map((entry) => entry.view), - text, + requesterSessionKey: controller.controllerSessionKey, + callerSessionKey: controller.callerSessionKey, + callerIsSubagent: controller.callerIsSubagent, + total: list.total, + active: list.active.map(({ line: _line, ...view }) => view), + recent: list.recent.map(({ line: _line, ...view }) => view), + text: list.text, }); } if (action === "kill") { const target = readStringParam(params, "target", { required: true }); if (target === "all" || target === "*") { - const cache = new Map>(); - const seenChildSessionKeys = new Set(); - const killedLabels: string[] = []; - let killed = 0; - for (const entry of runs) { - const childKey = entry.childSessionKey?.trim(); - if (!childKey || seenChildSessionKeys.has(childKey)) { - continue; - } - seenChildSessionKeys.add(childKey); - - if (!entry.endedAt) { - const stopResult = await killSubagentRun({ cfg, entry, cache }); - if (stopResult.killed) { - killed += 1; - killedLabels.push(resolveSubagentLabel(entry)); - } - } - - // Traverse descendants even when the direct run is already finished. 
- const cascade = await cascadeKillChildren({ - cfg, - parentChildSessionKey: childKey, - cache, - seenChildSessionKeys, + const result = await killAllControlledSubagentRuns({ + cfg, + controller, + runs, + }); + if (result.status === "forbidden") { + return jsonResult({ + status: "forbidden", + action: "kill", + target: "all", + error: result.error, }); - killed += cascade.killed; - killedLabels.push(...cascade.labels); } return jsonResult({ status: "ok", action: "kill", target: "all", - killed, - labels: killedLabels, + killed: result.killed, + labels: result.labels, text: - killed > 0 - ? `killed ${killed} subagent${killed === 1 ? "" : "s"}.` + result.killed > 0 + ? `killed ${result.killed} subagent${result.killed === 1 ? "" : "s"}.` : "no running subagents to kill.", }); } - const resolved = resolveSubagentTarget(runs, target, { + const resolved = resolveControlledSubagentTarget(runs, target, { recentMinutes, - isActive: isActiveRun, + isActive, }); if (!resolved.entry) { return jsonResult({ @@ -499,52 +111,25 @@ export function createSubagentsTool(opts?: { agentSessionKey?: string }): AnyAge error: resolved.error ?? "Unknown subagent target.", }); } - const killCache = new Map>(); - const stopResult = await killSubagentRun({ + const result = await killControlledSubagentRun({ cfg, + controller, entry: resolved.entry, - cache: killCache, }); - const seenChildSessionKeys = new Set(); - const targetChildKey = resolved.entry.childSessionKey?.trim(); - if (targetChildKey) { - seenChildSessionKeys.add(targetChildKey); - } - // Traverse descendants even when the selected run is already finished. 
- const cascade = await cascadeKillChildren({ - cfg, - parentChildSessionKey: resolved.entry.childSessionKey, - cache: killCache, - seenChildSessionKeys, - }); - if (!stopResult.killed && cascade.killed === 0) { - return jsonResult({ - status: "done", - action: "kill", - target, - runId: resolved.entry.runId, - sessionKey: resolved.entry.childSessionKey, - text: `${resolveSubagentLabel(resolved.entry)} is already finished.`, - }); - } - const cascadeText = - cascade.killed > 0 - ? ` (+ ${cascade.killed} descendant${cascade.killed === 1 ? "" : "s"})` - : ""; return jsonResult({ - status: "ok", + status: result.status, action: "kill", target, - runId: resolved.entry.runId, - sessionKey: resolved.entry.childSessionKey, - label: resolveSubagentLabel(resolved.entry), - cascadeKilled: cascade.killed, - cascadeLabels: cascade.killed > 0 ? cascade.labels : undefined, - text: stopResult.killed - ? `killed ${resolveSubagentLabel(resolved.entry)}${cascadeText}.` - : `killed ${cascade.killed} descendant${cascade.killed === 1 ? "" : "s"} of ${resolveSubagentLabel(resolved.entry)}.`, + runId: result.runId, + sessionKey: result.sessionKey, + label: result.label, + cascadeKilled: "cascadeKilled" in result ? result.cascadeKilled : undefined, + cascadeLabels: "cascadeLabels" in result ? result.cascadeLabels : undefined, + error: "error" in result ? 
result.error : undefined, + text: result.text, }); } + if (action === "steer") { const target = readStringParam(params, "target", { required: true }); const message = readStringParam(params, "message", { required: true }); @@ -556,9 +141,9 @@ export function createSubagentsTool(opts?: { agentSessionKey?: string }): AnyAge error: `Message too long (${message.length} chars, max ${MAX_STEER_MESSAGE_CHARS}).`, }); } - const resolved = resolveSubagentTarget(runs, target, { + const resolved = resolveControlledSubagentTarget(runs, target, { recentMinutes, - isActive: isActiveRun, + isActive, }); if (!resolved.entry) { return jsonResult({ @@ -568,140 +153,26 @@ export function createSubagentsTool(opts?: { agentSessionKey?: string }): AnyAge error: resolved.error ?? "Unknown subagent target.", }); } - if (resolved.entry.endedAt) { - return jsonResult({ - status: "done", - action: "steer", - target, - runId: resolved.entry.runId, - sessionKey: resolved.entry.childSessionKey, - text: `${resolveSubagentLabel(resolved.entry)} is already finished.`, - }); - } - if ( - requester.callerIsSubagent && - requester.callerSessionKey === resolved.entry.childSessionKey - ) { - return jsonResult({ - status: "forbidden", - action: "steer", - target, - runId: resolved.entry.runId, - sessionKey: resolved.entry.childSessionKey, - error: "Subagents cannot steer themselves.", - }); - } - - const rateKey = `${requester.callerSessionKey}:${resolved.entry.childSessionKey}`; - const now = Date.now(); - const lastSentAt = steerRateLimit.get(rateKey) ?? 0; - if (now - lastSentAt < STEER_RATE_LIMIT_MS) { - return jsonResult({ - status: "rate_limited", - action: "steer", - target, - runId: resolved.entry.runId, - sessionKey: resolved.entry.childSessionKey, - error: "Steer rate limit exceeded. 
Wait a moment before sending another steer.", - }); - } - steerRateLimit.set(rateKey, now); - - // Suppress announce for the interrupted run before aborting so we don't - // emit stale pre-steer findings if the run exits immediately. - markSubagentRunForSteerRestart(resolved.entry.runId); - - const targetSession = resolveSessionEntryForKey({ + const result = await steerControlledSubagentRun({ cfg, - key: resolved.entry.childSessionKey, - cache: new Map>(), + controller, + entry: resolved.entry, + message, }); - const sessionId = - typeof targetSession.entry?.sessionId === "string" && targetSession.entry.sessionId.trim() - ? targetSession.entry.sessionId.trim() - : undefined; - - // Interrupt current work first so steer takes precedence immediately. - if (sessionId) { - abortEmbeddedPiRun(sessionId); - } - const cleared = clearSessionQueues([resolved.entry.childSessionKey, sessionId]); - if (cleared.followupCleared > 0 || cleared.laneCleared > 0) { - logVerbose( - `subagents tool steer: cleared followups=${cleared.followupCleared} lane=${cleared.laneCleared} keys=${cleared.keys.join(",")}`, - ); - } - - // Best effort: wait for the interrupted run to settle so the steer - // message appends onto the existing conversation context. - try { - await callGateway({ - method: "agent.wait", - params: { - runId: resolved.entry.runId, - timeoutMs: STEER_ABORT_SETTLE_TIMEOUT_MS, - }, - timeoutMs: STEER_ABORT_SETTLE_TIMEOUT_MS + 2_000, - }); - } catch { - // Continue even if wait fails; steer should still be attempted. 
- } - - const idempotencyKey = crypto.randomUUID(); - let runId: string = idempotencyKey; - try { - const response = await callGateway<{ runId: string }>({ - method: "agent", - params: { - message, - sessionKey: resolved.entry.childSessionKey, - sessionId, - idempotencyKey, - deliver: false, - channel: INTERNAL_MESSAGE_CHANNEL, - lane: AGENT_LANE_SUBAGENT, - timeout: 0, - }, - timeoutMs: 10_000, - }); - if (typeof response?.runId === "string" && response.runId) { - runId = response.runId; - } - } catch (err) { - // Replacement launch failed; restore normal announce behavior for the - // original run so completion is not silently suppressed. - clearSubagentRunSteerRestart(resolved.entry.runId); - const error = err instanceof Error ? err.message : String(err); - return jsonResult({ - status: "error", - action: "steer", - target, - runId, - sessionKey: resolved.entry.childSessionKey, - sessionId, - error, - }); - } - - replaceSubagentRunAfterSteer({ - previousRunId: resolved.entry.runId, - nextRunId: runId, - fallback: resolved.entry, - runTimeoutSeconds: resolved.entry.runTimeoutSeconds ?? 0, - }); - return jsonResult({ - status: "accepted", + status: result.status, action: "steer", target, - runId, - sessionKey: resolved.entry.childSessionKey, - sessionId, - mode: "restart", - label: resolveSubagentLabel(resolved.entry), - text: `steered ${resolveSubagentLabel(resolved.entry)}.`, + runId: result.runId, + sessionKey: result.sessionKey, + sessionId: result.sessionId, + mode: "mode" in result ? result.mode : undefined, + label: "label" in result ? result.label : undefined, + error: "error" in result ? 
result.error : undefined, + text: result.text, }); } + return jsonResult({ status: "error", error: "Unsupported action.", diff --git a/src/agents/tools/telegram-actions.test.ts b/src/agents/tools/telegram-actions.test.ts index eeeb7bbf35b..e15b4bd2e17 100644 --- a/src/agents/tools/telegram-actions.test.ts +++ b/src/agents/tools/telegram-actions.test.ts @@ -18,6 +18,16 @@ const sendStickerTelegram = vi.fn(async () => ({ chatId: "123", })); const deleteMessageTelegram = vi.fn(async () => ({ ok: true })); +const editMessageTelegram = vi.fn(async () => ({ + ok: true, + messageId: "456", + chatId: "123", +})); +const createForumTopicTelegram = vi.fn(async () => ({ + topicId: 99, + name: "Topic", + chatId: "123", +})); let envSnapshot: ReturnType; vi.mock("../../telegram/send.js", () => ({ @@ -30,6 +40,10 @@ vi.mock("../../telegram/send.js", () => ({ sendStickerTelegram(...args), deleteMessageTelegram: (...args: Parameters) => deleteMessageTelegram(...args), + editMessageTelegram: (...args: Parameters) => + editMessageTelegram(...args), + createForumTopicTelegram: (...args: Parameters) => + createForumTopicTelegram(...args), })); describe("handleTelegramAction", () => { @@ -90,6 +104,8 @@ describe("handleTelegramAction", () => { sendPollTelegram.mockClear(); sendStickerTelegram.mockClear(); deleteMessageTelegram.mockClear(); + editMessageTelegram.mockClear(); + createForumTopicTelegram.mockClear(); process.env.TELEGRAM_BOT_TOKEN = "tok"; }); @@ -379,6 +395,85 @@ describe("handleTelegramAction", () => { ); }); + it.each([ + { + name: "react", + params: { action: "react", chatId: "123", messageId: 456, emoji: "✅" }, + cfg: reactionConfig("minimal"), + assertCall: ( + readCallOpts: (calls: unknown[][], argIndex: number) => Record, + ) => readCallOpts(reactMessageTelegram.mock.calls as unknown[][], 3), + }, + { + name: "sendMessage", + params: { action: "sendMessage", to: "123", content: "hello" }, + cfg: telegramConfig(), + assertCall: ( + readCallOpts: (calls: unknown[][], 
argIndex: number) => Record, + ) => readCallOpts(sendMessageTelegram.mock.calls as unknown[][], 2), + }, + { + name: "poll", + params: { + action: "poll", + to: "123", + question: "Q?", + answers: ["A", "B"], + }, + cfg: telegramConfig(), + assertCall: ( + readCallOpts: (calls: unknown[][], argIndex: number) => Record, + ) => readCallOpts(sendPollTelegram.mock.calls as unknown[][], 2), + }, + { + name: "deleteMessage", + params: { action: "deleteMessage", chatId: "123", messageId: 1 }, + cfg: telegramConfig(), + assertCall: ( + readCallOpts: (calls: unknown[][], argIndex: number) => Record, + ) => readCallOpts(deleteMessageTelegram.mock.calls as unknown[][], 2), + }, + { + name: "editMessage", + params: { action: "editMessage", chatId: "123", messageId: 1, content: "updated" }, + cfg: telegramConfig(), + assertCall: ( + readCallOpts: (calls: unknown[][], argIndex: number) => Record, + ) => readCallOpts(editMessageTelegram.mock.calls as unknown[][], 3), + }, + { + name: "sendSticker", + params: { action: "sendSticker", to: "123", fileId: "sticker-1" }, + cfg: telegramConfig({ actions: { sticker: true } }), + assertCall: ( + readCallOpts: (calls: unknown[][], argIndex: number) => Record, + ) => readCallOpts(sendStickerTelegram.mock.calls as unknown[][], 2), + }, + { + name: "createForumTopic", + params: { action: "createForumTopic", chatId: "123", name: "Topic" }, + cfg: telegramConfig({ actions: { createForumTopic: true } }), + assertCall: ( + readCallOpts: (calls: unknown[][], argIndex: number) => Record, + ) => readCallOpts(createForumTopicTelegram.mock.calls as unknown[][], 2), + }, + ])("forwards resolved cfg for $name action", async ({ params, cfg, assertCall }) => { + const readCallOpts = (calls: unknown[][], argIndex: number): Record => { + const args = calls[0]; + if (!Array.isArray(args)) { + throw new Error("Expected Telegram action call args"); + } + const opts = args[argIndex]; + if (!opts || typeof opts !== "object") { + throw new Error("Expected 
Telegram action options object"); + } + return opts as Record; + }; + await handleTelegramAction(params as Record, cfg); + const opts = assertCall(readCallOpts); + expect(opts.cfg).toBe(cfg); + }); + it.each([ { name: "media", diff --git a/src/agents/tools/telegram-actions.ts b/src/agents/tools/telegram-actions.ts index 30c07530159..143d154e633 100644 --- a/src/agents/tools/telegram-actions.ts +++ b/src/agents/tools/telegram-actions.ts @@ -154,6 +154,7 @@ export async function handleTelegramAction( let reactionResult: Awaited>; try { reactionResult = await reactMessageTelegram(chatId ?? "", messageId ?? 0, emoji ?? "", { + cfg, token, remove, accountId: accountId ?? undefined, @@ -237,6 +238,7 @@ export async function handleTelegramAction( ); } const result = await sendMessageTelegram(to, content, { + cfg, token, accountId: accountId ?? undefined, mediaUrl: mediaUrl || undefined, @@ -293,6 +295,7 @@ export async function handleTelegramAction( durationHours: durationHours ?? undefined, }, { + cfg, token, accountId: accountId ?? undefined, replyToMessageId: replyToMessageId ?? undefined, @@ -327,6 +330,7 @@ export async function handleTelegramAction( ); } await deleteMessageTelegram(chatId ?? "", messageId ?? 0, { + cfg, token, accountId: accountId ?? undefined, }); @@ -367,6 +371,7 @@ export async function handleTelegramAction( ); } const result = await editMessageTelegram(chatId ?? "", messageId ?? 0, content, { + cfg, token, accountId: accountId ?? undefined, buttons, @@ -399,6 +404,7 @@ export async function handleTelegramAction( ); } const result = await sendStickerTelegram(to, fileId, { + cfg, token, accountId: accountId ?? undefined, replyToMessageId: replyToMessageId ?? undefined, @@ -454,6 +460,7 @@ export async function handleTelegramAction( ); } const result = await createForumTopicTelegram(chatId ?? "", name, { + cfg, token, accountId: accountId ?? undefined, iconColor: iconColor ?? 
undefined, diff --git a/src/agents/tools/web-fetch.cf-markdown.test.ts b/src/agents/tools/web-fetch.cf-markdown.test.ts index 6e7768fc43a..4dd22714574 100644 --- a/src/agents/tools/web-fetch.cf-markdown.test.ts +++ b/src/agents/tools/web-fetch.cf-markdown.test.ts @@ -4,6 +4,7 @@ import { withFetchPreconnect } from "../../test-utils/fetch-mock.js"; import { createBaseWebFetchToolConfig, installWebFetchSsrfHarness, + makeFetchHeaders, } from "./web-fetch.test-harness.js"; import "./web-fetch.test-mocks.js"; import { createWebFetchTool } from "./web-tools.js"; @@ -11,17 +12,14 @@ import { createWebFetchTool } from "./web-tools.js"; const baseToolConfig = createBaseWebFetchToolConfig(); installWebFetchSsrfHarness(); -function makeHeaders(map: Record): { get: (key: string) => string | null } { - return { - get: (key) => map[key.toLowerCase()] ?? null, - }; -} - function markdownResponse(body: string, extraHeaders: Record = {}): Response { return { ok: true, status: 200, - headers: makeHeaders({ "content-type": "text/markdown; charset=utf-8", ...extraHeaders }), + headers: makeFetchHeaders({ + "content-type": "text/markdown; charset=utf-8", + ...extraHeaders, + }), text: async () => body, } as Response; } @@ -30,7 +28,7 @@ function htmlResponse(body: string): Response { return { ok: true, status: 200, - headers: makeHeaders({ "content-type": "text/html; charset=utf-8" }), + headers: makeFetchHeaders({ "content-type": "text/html; charset=utf-8" }), text: async () => body, } as Response; } @@ -84,6 +82,47 @@ describe("web_fetch Cloudflare Markdown for Agents", () => { expect(details?.contentType).toBe("text/html"); }); + it("bypasses Firecrawl when runtime metadata marks Firecrawl inactive", async () => { + const fetchSpy = vi + .fn() + .mockResolvedValue( + htmlResponse( + "

Runtime Off

Use direct fetch.

", + ), + ); + global.fetch = withFetchPreconnect(fetchSpy); + + const tool = createWebFetchTool({ + config: { + tools: { + web: { + fetch: { + firecrawl: { + enabled: true, + apiKey: { + source: "env", + provider: "default", + id: "MISSING_FIRECRAWL_KEY_REF", + }, + }, + }, + }, + }, + }, + sandboxed: false, + runtimeFirecrawl: { + active: false, + apiKeySource: "secretRef", // pragma: allowlist secret + diagnostics: [], + }, + }); + + await tool?.execute?.("call", { url: "https://example.com/runtime-firecrawl-off" }); + + expect(fetchSpy).toHaveBeenCalled(); + expect(fetchSpy.mock.calls[0]?.[0]).toBe("https://example.com/runtime-firecrawl-off"); + }); + it("logs x-markdown-tokens when header is present", async () => { const logSpy = vi.spyOn(logger, "logDebug").mockImplementation(() => {}); const fetchSpy = vi diff --git a/src/agents/tools/web-fetch.ssrf.test.ts b/src/agents/tools/web-fetch.ssrf.test.ts index eb868068ece..c0489c9b5ba 100644 --- a/src/agents/tools/web-fetch.ssrf.test.ts +++ b/src/agents/tools/web-fetch.ssrf.test.ts @@ -1,21 +1,16 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import * as ssrf from "../../infra/net/ssrf.js"; import { type FetchMock, withFetchPreconnect } from "../../test-utils/fetch-mock.js"; +import { makeFetchHeaders } from "./web-fetch.test-harness.js"; const lookupMock = vi.fn(); const resolvePinnedHostname = ssrf.resolvePinnedHostname; -function makeHeaders(map: Record): { get: (key: string) => string | null } { - return { - get: (key) => map[key.toLowerCase()] ?? 
null, - }; -} - function redirectResponse(location: string): Response { return { ok: false, status: 302, - headers: makeHeaders({ location }), + headers: makeFetchHeaders({ location }), body: { cancel: vi.fn() }, } as unknown as Response; } @@ -24,7 +19,7 @@ function textResponse(body: string): Response { return { ok: true, status: 200, - headers: makeHeaders({ "content-type": "text/plain" }), + headers: makeFetchHeaders({ "content-type": "text/plain" }), text: async () => body, } as unknown as Response; } diff --git a/src/agents/tools/web-fetch.test-harness.ts b/src/agents/tools/web-fetch.test-harness.ts index c86a028e155..1bd8e33e89b 100644 --- a/src/agents/tools/web-fetch.test-harness.ts +++ b/src/agents/tools/web-fetch.test-harness.ts @@ -1,6 +1,14 @@ import { afterEach, beforeEach, vi } from "vitest"; import * as ssrf from "../../infra/net/ssrf.js"; +export function makeFetchHeaders(map: Record): { + get: (key: string) => string | null; +} { + return { + get: (key) => map[key.toLowerCase()] ?? 
null, + }; +} + export function installWebFetchSsrfHarness() { const lookupMock = vi.fn(); const resolvePinnedHostname = ssrf.resolvePinnedHostname; diff --git a/src/agents/tools/web-fetch.ts b/src/agents/tools/web-fetch.ts index 4ac7a1d7bfd..f4cc88e2d83 100644 --- a/src/agents/tools/web-fetch.ts +++ b/src/agents/tools/web-fetch.ts @@ -1,7 +1,9 @@ import { Type } from "@sinclair/typebox"; import type { OpenClawConfig } from "../../config/config.js"; +import { normalizeResolvedSecretInputString } from "../../config/types.secrets.js"; import { SsrFBlockedError } from "../../infra/net/ssrf.js"; import { logDebug } from "../../logger.js"; +import type { RuntimeWebFetchFirecrawlMetadata } from "../../secrets/runtime-web-tools.js"; import { wrapExternalContent, wrapWebContent } from "../../security/external-content.js"; import { normalizeSecretInput } from "../../utils/normalize-secret-input.js"; import { stringEnum } from "../schema/typebox.js"; @@ -71,7 +73,7 @@ type WebFetchConfig = NonNullable["web"] extends infer type FirecrawlFetchConfig = | { enabled?: boolean; - apiKey?: string; + apiKey?: unknown; baseUrl?: string; onlyMainContent?: boolean; maxAgeMs?: number; @@ -136,10 +138,14 @@ function resolveFirecrawlConfig(fetch?: WebFetchConfig): FirecrawlFetchConfig { } function resolveFirecrawlApiKey(firecrawl?: FirecrawlFetchConfig): string | undefined { - const fromConfig = - firecrawl && "apiKey" in firecrawl && typeof firecrawl.apiKey === "string" - ? normalizeSecretInput(firecrawl.apiKey) - : ""; + const fromConfigRaw = + firecrawl && "apiKey" in firecrawl + ? 
normalizeResolvedSecretInputString({ + value: firecrawl.apiKey, + path: "tools.web.fetch.firecrawl.apiKey", + }) + : undefined; + const fromConfig = normalizeSecretInput(fromConfigRaw); const fromEnv = normalizeSecretInput(process.env.FIRECRAWL_API_KEY); return fromConfig || fromEnv || undefined; } @@ -712,6 +718,7 @@ function resolveFirecrawlEndpoint(baseUrl: string): string { export function createWebFetchTool(options?: { config?: OpenClawConfig; sandboxed?: boolean; + runtimeFirecrawl?: RuntimeWebFetchFirecrawlMetadata; }): AnyAgentTool | null { const fetch = resolveFetchConfig(options?.config); if (!resolveFetchEnabled({ fetch, sandboxed: options?.sandboxed })) { @@ -719,8 +726,14 @@ export function createWebFetchTool(options?: { } const readabilityEnabled = resolveFetchReadabilityEnabled(fetch); const firecrawl = resolveFirecrawlConfig(fetch); - const firecrawlApiKey = resolveFirecrawlApiKey(firecrawl); - const firecrawlEnabled = resolveFirecrawlEnabled({ firecrawl, apiKey: firecrawlApiKey }); + const runtimeFirecrawlActive = options?.runtimeFirecrawl?.active; + const shouldResolveFirecrawlApiKey = + runtimeFirecrawlActive === undefined ? firecrawl?.enabled !== false : runtimeFirecrawlActive; + const firecrawlApiKey = shouldResolveFirecrawlApiKey + ? resolveFirecrawlApiKey(firecrawl) + : undefined; + const firecrawlEnabled = + runtimeFirecrawlActive ?? 
resolveFirecrawlEnabled({ firecrawl, apiKey: firecrawlApiKey }); const firecrawlBaseUrl = resolveFirecrawlBaseUrl(firecrawl); const firecrawlOnlyMainContent = resolveFirecrawlOnlyMainContent(firecrawl); const firecrawlMaxAgeMs = resolveFirecrawlMaxAgeMsOrDefault(firecrawl); diff --git a/src/agents/tools/web-search.test.ts b/src/agents/tools/web-search.test.ts index 4a7b002d784..b8bccd7dfd3 100644 --- a/src/agents/tools/web-search.test.ts +++ b/src/agents/tools/web-search.test.ts @@ -23,6 +23,7 @@ const { resolveKimiBaseUrl, extractKimiCitations, resolveBraveMode, + mapBraveLlmContextResults, } = __testing; const kimiApiKeyEnv = ["KIMI_API", "KEY"].join("_"); @@ -393,3 +394,77 @@ describe("resolveBraveMode", () => { expect(resolveBraveMode({ mode: "invalid" })).toBe("web"); }); }); + +describe("mapBraveLlmContextResults", () => { + it("maps plain string snippets correctly", () => { + const results = mapBraveLlmContextResults({ + grounding: { + generic: [ + { + url: "https://example.com/page", + title: "Example Page", + snippets: ["first snippet", "second snippet"], + }, + ], + }, + }); + expect(results).toEqual([ + { + url: "https://example.com/page", + title: "Example Page", + snippets: ["first snippet", "second snippet"], + siteName: "example.com", + }, + ]); + }); + + it("filters out non-string and empty snippets", () => { + const results = mapBraveLlmContextResults({ + grounding: { + generic: [ + { + url: "https://example.com", + title: "Test", + snippets: ["valid", "", null, undefined, 42, { text: "object" }] as string[], + }, + ], + }, + }); + expect(results[0].snippets).toEqual(["valid"]); + }); + + it("handles missing snippets array", () => { + const results = mapBraveLlmContextResults({ + grounding: { + generic: [{ url: "https://example.com", title: "No Snippets" } as never], + }, + }); + expect(results[0].snippets).toEqual([]); + }); + + it("handles empty grounding.generic", () => { + expect(mapBraveLlmContextResults({ grounding: { generic: [] } 
})).toEqual([]); + }); + + it("handles missing grounding.generic", () => { + expect(mapBraveLlmContextResults({ grounding: {} } as never)).toEqual([]); + }); + + it("resolves siteName from URL hostname", () => { + const results = mapBraveLlmContextResults({ + grounding: { + generic: [{ url: "https://docs.example.org/path", title: "Docs", snippets: ["text"] }], + }, + }); + expect(results[0].siteName).toBe("docs.example.org"); + }); + + it("sets siteName to undefined for invalid URLs", () => { + const results = mapBraveLlmContextResults({ + grounding: { + generic: [{ url: "not-a-url", title: "Bad URL", snippets: ["text"] }], + }, + }); + expect(results[0].siteName).toBeUndefined(); + }); +}); diff --git a/src/agents/tools/web-search.ts b/src/agents/tools/web-search.ts index 47c5a5abc94..6e9518f1ede 100644 --- a/src/agents/tools/web-search.ts +++ b/src/agents/tools/web-search.ts @@ -3,6 +3,7 @@ import { formatCliCommand } from "../../cli/command-format.js"; import type { OpenClawConfig } from "../../config/config.js"; import { normalizeResolvedSecretInputString } from "../../config/types.secrets.js"; import { logVerbose } from "../../globals.js"; +import type { RuntimeWebSearchMetadata } from "../../secrets/runtime-web-tools.js"; import { wrapWebContent } from "../../security/external-content.js"; import { normalizeSecretInput } from "../../utils/normalize-secret-input.js"; import type { AnyAgentTool } from "./common.js"; @@ -193,6 +194,33 @@ function createWebSearchSchema(params: { ), } as const; + const perplexityStructuredFilterSchema = { + country: Type.Optional( + Type.String({ + description: + "Native Perplexity Search API only. 2-letter country code for region-specific results (e.g., 'DE', 'US', 'ALL'). Default: 'US'.", + }), + ), + language: Type.Optional( + Type.String({ + description: + "Native Perplexity Search API only. 
ISO 639-1 language code for results (e.g., 'en', 'de', 'fr').", + }), + ), + date_after: Type.Optional( + Type.String({ + description: + "Native Perplexity Search API only. Only results published after this date (YYYY-MM-DD).", + }), + ), + date_before: Type.Optional( + Type.String({ + description: + "Native Perplexity Search API only. Only results published before this date (YYYY-MM-DD).", + }), + ), + } as const; + if (params.provider === "brave") { return Type.Object({ ...querySchema, @@ -221,7 +249,8 @@ function createWebSearchSchema(params: { } return Type.Object({ ...querySchema, - ...filterSchema, + freshness: filterSchema.freshness, + ...perplexityStructuredFilterSchema, domain_filter: Type.Optional( Type.Array(Type.String(), { description: @@ -272,8 +301,7 @@ type BraveSearchResponse = { }; }; -type BraveLlmContextSnippet = { text: string }; -type BraveLlmContextResult = { url: string; title: string; snippets: BraveLlmContextSnippet[] }; +type BraveLlmContextResult = { url: string; title: string; snippets: string[] }; type BraveLlmContextResponse = { grounding: { generic?: BraveLlmContextResult[] }; sources?: { url?: string; hostname?: string; date?: string }[]; @@ -368,6 +396,16 @@ type PerplexitySearchResponse = { choices?: Array<{ message?: { content?: string; + annotations?: Array<{ + type?: string; + url?: string; + url_citation?: { + url?: string; + title?: string; + start_index?: number; + end_index?: number; + }; + }>; }; }>; citations?: string[]; @@ -386,6 +424,38 @@ type PerplexitySearchApiResponse = { id?: string; }; +function extractPerplexityCitations(data: PerplexitySearchResponse): string[] { + const normalizeUrl = (value: unknown): string | undefined => { + if (typeof value !== "string") { + return undefined; + } + const trimmed = value.trim(); + return trimmed ? trimmed : undefined; + }; + + const topLevel = (data.citations ?? 
[]) + .map(normalizeUrl) + .filter((url): url is string => Boolean(url)); + if (topLevel.length > 0) { + return [...new Set(topLevel)]; + } + + const citations: string[] = []; + for (const choice of data.choices ?? []) { + for (const annotation of choice.message?.annotations ?? []) { + if (annotation.type !== "url_citation") { + continue; + } + const url = normalizeUrl(annotation.url_citation?.url ?? annotation.url); + if (url) { + citations.push(url); + } + } + } + + return [...new Set(citations)]; +} + function extractGrokContent(data: GrokSearchResponse): { text: string | undefined; annotationCitations: string[]; @@ -743,6 +813,16 @@ function resolvePerplexityTransport(perplexity?: PerplexityConfig): { }; } +function resolvePerplexitySchemaTransportHint( + perplexity?: PerplexityConfig, +): PerplexityTransport | undefined { + const hasLegacyOverride = Boolean( + (perplexity?.baseUrl && perplexity.baseUrl.trim()) || + (perplexity?.model && perplexity.model.trim()), + ); + return hasLegacyOverride ? "chat_completions" : undefined; +} + function resolveGrokConfig(search?: WebSearchConfig): GrokConfig { if (!search || typeof search !== "object") { return {}; @@ -1214,7 +1294,8 @@ async function runPerplexitySearch(params: { const data = (await res.json()) as PerplexitySearchResponse; const content = data.choices?.[0]?.message?.content ?? "No response"; - const citations = data.citations ?? []; + // Prefer top-level citations; fall back to OpenRouter-style message annotations. + const citations = extractPerplexityCitations(data); return { content, citations }; }, @@ -1429,6 +1510,18 @@ async function runKimiSearch(params: { }; } +function mapBraveLlmContextResults( + data: BraveLlmContextResponse, +): { url: string; title: string; snippets: string[]; siteName?: string }[] { + const genericResults = Array.isArray(data.grounding?.generic) ? data.grounding.generic : []; + return genericResults.map((entry) => ({ + url: entry.url ?? "", + title: entry.title ?? 
"", + snippets: (entry.snippets ?? []).filter((s) => typeof s === "string" && s.length > 0), + siteName: resolveSiteName(entry.url) || undefined, + })); +} + async function runBraveLlmContextSearch(params: { query: string; apiKey: string; @@ -1477,13 +1570,7 @@ async function runBraveLlmContextSearch(params: { } const data = (await res.json()) as BraveLlmContextResponse; - const genericResults = Array.isArray(data.grounding?.generic) ? data.grounding.generic : []; - const mapped = genericResults.map((entry) => ({ - url: entry.url ?? "", - title: entry.title ?? "", - snippets: (entry.snippets ?? []).map((s) => s.text ?? "").filter(Boolean), - siteName: resolveSiteName(entry.url) || undefined, - })); + const mapped = mapBraveLlmContextResults(data); return { results: mapped, sources: data.sources }; }, @@ -1804,15 +1891,21 @@ async function runWebSearch(params: { export function createWebSearchTool(options?: { config?: OpenClawConfig; sandboxed?: boolean; + runtimeWebSearch?: RuntimeWebSearchMetadata; }): AnyAgentTool | null { const search = resolveSearchConfig(options?.config); if (!resolveSearchEnabled({ search, sandboxed: options?.sandboxed })) { return null; } - const provider = resolveSearchProvider(search); + const provider = + options?.runtimeWebSearch?.selectedProvider ?? + options?.runtimeWebSearch?.providerConfigured ?? + resolveSearchProvider(search); const perplexityConfig = resolvePerplexityConfig(search); - const perplexityTransport = resolvePerplexityTransport(perplexityConfig); + const perplexitySchemaTransportHint = + options?.runtimeWebSearch?.perplexityTransport ?? + resolvePerplexitySchemaTransportHint(perplexityConfig); const grokConfig = resolveGrokConfig(search); const geminiConfig = resolveGeminiConfig(search); const kimiConfig = resolveKimiConfig(search); @@ -1821,9 +1914,9 @@ export function createWebSearchTool(options?: { const description = provider === "perplexity" - ? perplexityTransport.transport === "chat_completions" + ? 
perplexitySchemaTransportHint === "chat_completions" ? "Search the web using Perplexity Sonar via Perplexity/OpenRouter chat completions. Returns AI-synthesized answers with citations from web-grounded search." - : "Search the web using the Perplexity Search API. Returns structured results (title, URL, snippet) for fast research. Supports domain, region, language, and freshness filtering." + : "Search the web using Perplexity. Runtime routing decides between native Search API and Sonar chat-completions compatibility. Structured filters are available on the native Search API path." : provider === "grok" ? "Search the web using xAI Grok. Returns AI-synthesized answers with citations from real-time web search." : provider === "kimi" @@ -1840,10 +1933,13 @@ export function createWebSearchTool(options?: { description, parameters: createWebSearchSchema({ provider, - perplexityTransport: provider === "perplexity" ? perplexityTransport.transport : undefined, + perplexityTransport: provider === "perplexity" ? perplexitySchemaTransportHint : undefined, }), execute: async (_toolCallId, args) => { - const perplexityRuntime = provider === "perplexity" ? perplexityTransport : undefined; + // Resolve Perplexity auth/transport lazily at execution time so unrelated providers + // do not touch Perplexity-only credential surfaces during tool construction. + const perplexityRuntime = + provider === "perplexity" ? resolvePerplexityTransport(perplexityConfig) : undefined; const apiKey = provider === "perplexity" ? 
perplexityRuntime?.apiKey @@ -2122,4 +2218,5 @@ export const __testing = { extractKimiCitations, resolveRedirectUrl: resolveCitationRedirectUrl, resolveBraveMode, + mapBraveLlmContextResults, } as const; diff --git a/src/agents/tools/web-tools.enabled-defaults.test.ts b/src/agents/tools/web-tools.enabled-defaults.test.ts index 54485908b8b..c416804fa11 100644 --- a/src/agents/tools/web-tools.enabled-defaults.test.ts +++ b/src/agents/tools/web-tools.enabled-defaults.test.ts @@ -113,11 +113,13 @@ function installPerplexitySearchApiFetch(results?: Array }); } -function installPerplexityChatFetch() { - return installMockFetch({ - choices: [{ message: { content: "ok" } }], - citations: ["https://example.com"], - }); +function installPerplexityChatFetch(payload?: Record) { + return installMockFetch( + payload ?? { + choices: [{ message: { content: "ok" } }], + citations: ["https://example.com"], + }, + ); } function createProviderSuccessPayload( @@ -166,6 +168,39 @@ describe("web tools defaults", () => { const tool = createWebSearchTool({ config: {}, sandboxed: false }); expect(tool?.name).toBe("web_search"); }); + + it("prefers runtime-selected web_search provider over local provider config", async () => { + const mockFetch = installMockFetch(createProviderSuccessPayload("gemini")); + const tool = createWebSearchTool({ + config: { + tools: { + web: { + search: { + provider: "brave", + apiKey: "brave-config-test", // pragma: allowlist secret + gemini: { + apiKey: "gemini-config-test", // pragma: allowlist secret + }, + }, + }, + }, + }, + sandboxed: true, + runtimeWebSearch: { + providerConfigured: "brave", + providerSource: "auto-detect", + selectedProvider: "gemini", + selectedProviderKeySource: "secretRef", + diagnostics: [], + }, + }); + + const result = await tool?.execute?.("call-runtime-provider", { query: "runtime override" }); + + expect(mockFetch).toHaveBeenCalled(); + expect(String(mockFetch.mock.calls[0]?.[0])).toContain("generativelanguage.googleapis.com"); + 
expect((result?.details as { provider?: string } | undefined)?.provider).toBe("gemini"); + }); }); describe("web_search country and language parameters", () => { @@ -476,6 +511,42 @@ describe("web_search perplexity OpenRouter compatibility", () => { expect(body.search_recency_filter).toBe("week"); }); + it("falls back to message annotations when top-level citations are missing", async () => { + vi.stubEnv("OPENROUTER_API_KEY", "sk-or-v1-test"); // pragma: allowlist secret + const mockFetch = installPerplexityChatFetch({ + choices: [ + { + message: { + content: "ok", + annotations: [ + { + type: "url_citation", + url_citation: { url: "https://example.com/a" }, + }, + { + type: "url_citation", + url_citation: { url: "https://example.com/b" }, + }, + { + type: "url_citation", + url_citation: { url: "https://example.com/a" }, + }, + ], + }, + }, + ], + }); + const tool = createPerplexitySearchTool(); + const result = await tool?.execute?.("call-1", { query: "test" }); + + expect(mockFetch).toHaveBeenCalled(); + expect(result?.details).toMatchObject({ + provider: "perplexity", + citations: ["https://example.com/a", "https://example.com/b"], + content: expect.stringContaining("ok"), + }); + }); + it("fails loud for Search API-only filters on the compatibility path", async () => { vi.stubEnv("OPENROUTER_API_KEY", "sk-or-v1-test"); // pragma: allowlist secret const mockFetch = installPerplexityChatFetch(); @@ -489,20 +560,56 @@ describe("web_search perplexity OpenRouter compatibility", () => { expect(result?.details).toMatchObject({ error: "unsupported_domain_filter" }); }); - it("hides Search API-only schema params on the compatibility path", () => { + it("keeps Search API schema params visible before runtime auth routing", () => { vi.stubEnv("OPENROUTER_API_KEY", "sk-or-v1-test"); // pragma: allowlist secret const tool = createPerplexitySearchTool(); const properties = (tool?.parameters as { properties?: Record } | undefined) ?.properties; 
expect(properties?.freshness).toBeDefined(); - expect(properties?.country).toBeUndefined(); - expect(properties?.language).toBeUndefined(); - expect(properties?.date_after).toBeUndefined(); - expect(properties?.date_before).toBeUndefined(); - expect(properties?.domain_filter).toBeUndefined(); - expect(properties?.max_tokens).toBeUndefined(); - expect(properties?.max_tokens_per_page).toBeUndefined(); + expect(properties?.country).toBeDefined(); + expect(properties?.language).toBeDefined(); + expect(properties?.date_after).toBeDefined(); + expect(properties?.date_before).toBeDefined(); + expect(properties?.domain_filter).toBeDefined(); + expect(properties?.max_tokens).toBeDefined(); + expect(properties?.max_tokens_per_page).toBeDefined(); + expect( + ( + properties?.country as + | { + description?: string; + } + | undefined + )?.description, + ).toContain("Native Perplexity Search API only."); + expect( + ( + properties?.language as + | { + description?: string; + } + | undefined + )?.description, + ).toContain("Native Perplexity Search API only."); + expect( + ( + properties?.date_after as + | { + description?: string; + } + | undefined + )?.description, + ).toContain("Native Perplexity Search API only."); + expect( + ( + properties?.date_before as + | { + description?: string; + } + | undefined + )?.description, + ).toContain("Native Perplexity Search API only."); }); it("keeps structured schema params on the native Search API path", () => { @@ -522,6 +629,61 @@ describe("web_search perplexity OpenRouter compatibility", () => { }); }); +describe("web_search Perplexity lazy resolution", () => { + const priorFetch = global.fetch; + + afterEach(() => { + vi.unstubAllEnvs(); + global.fetch = priorFetch; + }); + + it("does not read Perplexity credentials while creating non-Perplexity tools", () => { + const perplexityConfig: Record = {}; + Object.defineProperty(perplexityConfig, "apiKey", { + enumerable: true, + get() { + throw new 
Error("perplexity-apiKey-getter-called"); + }, + }); + + const tool = createWebSearchTool({ + config: { + tools: { + web: { + search: { + provider: "gemini", + gemini: { apiKey: "gemini-config-test" }, // pragma: allowlist secret + perplexity: perplexityConfig as { apiKey?: string; baseUrl?: string; model?: string }, + }, + }, + }, + }, + sandboxed: true, + }); + + expect(tool?.name).toBe("web_search"); + }); + + it("defers Perplexity credential reads until execute", async () => { + const perplexityConfig: Record = {}; + Object.defineProperty(perplexityConfig, "apiKey", { + enumerable: true, + get() { + throw new Error("perplexity-apiKey-getter-called"); + }, + }); + + const tool = createPerplexitySearchTool( + perplexityConfig as { apiKey?: string; baseUrl?: string; model?: string }, + ); + + expect(tool?.name).toBe("web_search"); + await expect(tool?.execute?.("call-1", { query: "test" })).rejects.toThrow( + /perplexity-apiKey-getter-called/, + ); + }); +}); + describe("web_search kimi provider", () => { const priorFetch = global.fetch; @@ -694,7 +856,7 @@ describe("web_search external content wrapping", () => { const mockFetch = installBraveLlmContextFetch({ title: "Context title", url: "https://example.com/ctx", - snippets: [{ text: "Context chunk one" }, { text: "Context chunk two" }], + snippets: ["Context chunk one", "Context chunk two"], }); const tool = createWebSearchTool({ diff --git a/src/agents/tools/web-tools.fetch.test.ts b/src/agents/tools/web-tools.fetch.test.ts index 9da57a35b45..e9bfabbee7a 100644 --- a/src/agents/tools/web-tools.fetch.test.ts +++ b/src/agents/tools/web-tools.fetch.test.ts @@ -1,7 +1,9 @@ import { EnvHttpProxyAgent } from "undici"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import * as ssrf from "../../infra/net/ssrf.js"; +import { resolveRequestUrl } from "../../plugin-sdk/request-url.js"; import { withFetchPreconnect } from "../../test-utils/fetch-mock.js"; +import { makeFetchHeaders } from 
"./web-fetch.test-harness.js"; import { createWebFetchTool } from "./web-tools.js"; type MockResponse = { @@ -13,18 +15,12 @@ type MockResponse = { json?: () => Promise; }; -function makeHeaders(map: Record): { get: (key: string) => string | null } { - return { - get: (key) => map[key.toLowerCase()] ?? null, - }; -} - function htmlResponse(html: string, url = "https://example.com/"): MockResponse { return { ok: true, status: 200, url, - headers: makeHeaders({ "content-type": "text/html; charset=utf-8" }), + headers: makeFetchHeaders({ "content-type": "text/html; charset=utf-8" }), text: async () => html, }; } @@ -62,7 +58,7 @@ function textResponse( ok: true, status: 200, url, - headers: makeHeaders({ "content-type": contentType }), + headers: makeFetchHeaders({ "content-type": contentType }), text: async () => text, }; } @@ -77,23 +73,10 @@ function errorHtmlResponse( ok: false, status, url, - headers: contentType ? makeHeaders({ "content-type": contentType }) : makeHeaders({}), + headers: contentType ? 
makeFetchHeaders({ "content-type": contentType }) : makeFetchHeaders({}), text: async () => html, }; } -function requestUrl(input: RequestInfo | URL): string { - if (typeof input === "string") { - return input; - } - if (input instanceof URL) { - return input.toString(); - } - if ("url" in input && typeof input.url === "string") { - return input.url; - } - return ""; -} - function installMockFetch( impl: (input: RequestInfo | URL, init?: RequestInit) => Promise, ) { @@ -125,9 +108,9 @@ function installPlainTextFetch(text: string) { Promise.resolve({ ok: true, status: 200, - headers: makeHeaders({ "content-type": "text/plain" }), + headers: makeFetchHeaders({ "content-type": "text/plain" }), text: async () => text, - url: requestUrl(input), + url: resolveRequestUrl(input), } as Response), ); } @@ -215,9 +198,9 @@ describe("web_fetch extraction fallbacks", () => { Promise.resolve({ ok: true, status: 200, - headers: makeHeaders({ "content-type": "text/plain" }), + headers: makeFetchHeaders({ "content-type": "text/plain" }), text: async () => longText, - url: requestUrl(input), + url: resolveRequestUrl(input), } as Response), ); @@ -277,9 +260,9 @@ describe("web_fetch extraction fallbacks", () => { Promise.resolve({ ok: true, status: 200, - headers: makeHeaders({ "content-type": "text/plain" }), + headers: makeFetchHeaders({ "content-type": "text/plain" }), text: async () => "proxy body", - url: requestUrl(input), + url: resolveRequestUrl(input), } as Response), ); const tool = createFetchTool({ firecrawl: { enabled: false } }); @@ -298,7 +281,7 @@ describe("web_fetch extraction fallbacks", () => { it("falls back to firecrawl when readability returns no content", async () => { installMockFetch((input: RequestInfo | URL) => { - const url = requestUrl(input); + const url = resolveRequestUrl(input); if (url.includes("api.firecrawl.dev")) { return Promise.resolve(firecrawlResponse("firecrawl content")) as Promise; } @@ -316,7 +299,7 @@ describe("web_fetch extraction 
fallbacks", () => { it("normalizes firecrawl Authorization header values", async () => { const fetchSpy = installMockFetch((input: RequestInfo | URL) => { - const url = requestUrl(input); + const url = resolveRequestUrl(input); if (url.includes("api.firecrawl.dev/v2/scrape")) { return Promise.resolve(firecrawlResponse("firecrawl normalized")) as Promise; } @@ -333,7 +316,7 @@ describe("web_fetch extraction fallbacks", () => { expect(result?.details).toMatchObject({ extractor: "firecrawl" }); const firecrawlCall = fetchSpy.mock.calls.find((call) => - requestUrl(call[0]).includes("/v2/scrape"), + resolveRequestUrl(call[0]).includes("/v2/scrape"), ); expect(firecrawlCall).toBeTruthy(); const init = firecrawlCall?.[1]; @@ -345,7 +328,7 @@ describe("web_fetch extraction fallbacks", () => { installMockFetch( (input: RequestInfo | URL) => Promise.resolve( - htmlResponse("hi", requestUrl(input)), + htmlResponse("hi", resolveRequestUrl(input)), ) as Promise, ); @@ -361,7 +344,7 @@ describe("web_fetch extraction fallbacks", () => { it("throws when readability is empty and firecrawl fails", async () => { installMockFetch((input: RequestInfo | URL) => { - const url = requestUrl(input); + const url = resolveRequestUrl(input); if (url.includes("api.firecrawl.dev")) { return Promise.resolve(firecrawlError()) as Promise; } @@ -378,14 +361,14 @@ describe("web_fetch extraction fallbacks", () => { it("uses firecrawl when direct fetch fails", async () => { installMockFetch((input: RequestInfo | URL) => { - const url = requestUrl(input); + const url = resolveRequestUrl(input); if (url.includes("api.firecrawl.dev")) { return Promise.resolve(firecrawlResponse("firecrawl fallback", url)) as Promise; } return Promise.resolve({ ok: false, status: 403, - headers: makeHeaders({ "content-type": "text/html" }), + headers: makeFetchHeaders({ "content-type": "text/html" }), text: async () => "blocked", } as Response); }); @@ -404,7 +387,7 @@ describe("web_fetch extraction fallbacks", () => { 
const large = "a".repeat(80_000); installMockFetch( (input: RequestInfo | URL) => - Promise.resolve(textResponse(large, requestUrl(input))) as Promise, + Promise.resolve(textResponse(large, resolveRequestUrl(input))) as Promise, ); const tool = createFetchTool({ @@ -432,7 +415,7 @@ describe("web_fetch extraction fallbacks", () => { installMockFetch( (input: RequestInfo | URL) => Promise.resolve( - errorHtmlResponse(html, 404, requestUrl(input), "Text/HTML; charset=utf-8"), + errorHtmlResponse(html, 404, resolveRequestUrl(input), "Text/HTML; charset=utf-8"), ) as Promise, ); @@ -455,7 +438,9 @@ describe("web_fetch extraction fallbacks", () => { "Oops

Oops

"; installMockFetch( (input: RequestInfo | URL) => - Promise.resolve(errorHtmlResponse(html, 500, requestUrl(input), null)) as Promise, + Promise.resolve( + errorHtmlResponse(html, 500, resolveRequestUrl(input), null), + ) as Promise, ); const tool = createFetchTool({ firecrawl: { enabled: false } }); @@ -471,7 +456,7 @@ describe("web_fetch extraction fallbacks", () => { it("wraps firecrawl error details", async () => { installMockFetch((input: RequestInfo | URL) => { - const url = requestUrl(input); + const url = resolveRequestUrl(input); if (url.includes("api.firecrawl.dev")) { return Promise.resolve({ ok: false, diff --git a/src/agents/transcript-policy.ts b/src/agents/transcript-policy.ts index d6d9ec5916a..46795bad1bc 100644 --- a/src/agents/transcript-policy.ts +++ b/src/agents/transcript-policy.ts @@ -80,9 +80,9 @@ export function resolveTranscriptPolicy(params: { }); const requiresOpenAiCompatibleToolIdSanitization = params.modelApi === "openai-completions"; - // GitHub Copilot's Claude endpoints can reject persisted `thinking` blocks with - // non-binary/non-base64 signatures (e.g. thinkingSignature: "reasoning_text"). - // Drop these blocks at send-time to keep sessions usable. + // Anthropic Claude endpoints can reject replayed `thinking` blocks unless the + // original signatures are preserved byte-for-byte. Drop them at send-time to + // keep persisted sessions usable across follow-up turns. 
const dropThinkingBlocks = shouldDropThinkingBlocksForModel({ provider, modelId }); const needsNonImageSanitize = diff --git a/src/agents/venice-models.test.ts b/src/agents/venice-models.test.ts index 5a93568f9b7..ed1769cf044 100644 --- a/src/agents/venice-models.test.ts +++ b/src/agents/venice-models.test.ts @@ -59,6 +59,55 @@ function makeModelsResponse(id: string): Response { ); } +type ModelSpecOverride = { + id: string; + availableContextTokens?: number; + maxCompletionTokens?: number; + capabilities?: { + supportsReasoning?: boolean; + supportsVision?: boolean; + supportsFunctionCalling?: boolean; + }; + includeModelSpec?: boolean; +}; + +function makeModelRow(params: ModelSpecOverride) { + if (params.includeModelSpec === false) { + return { id: params.id }; + } + return { + id: params.id, + model_spec: { + name: params.id, + privacy: "private", + ...(params.availableContextTokens === undefined + ? {} + : { availableContextTokens: params.availableContextTokens }), + ...(params.maxCompletionTokens === undefined + ? {} + : { maxCompletionTokens: params.maxCompletionTokens }), + ...(params.capabilities === undefined ? 
{} : { capabilities: params.capabilities }), + }, + }; +} + +function stubVeniceModelsFetch(rows: ModelSpecOverride[]) { + const fetchMock = vi.fn( + async () => + new Response( + JSON.stringify({ + data: rows.map((row) => makeModelRow(row)), + }), + { + status: 200, + headers: { "Content-Type": "application/json" }, + }, + ), + ); + vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch); + return fetchMock; +} + describe("venice-models", () => { afterEach(() => { vi.unstubAllGlobals(); @@ -96,34 +145,18 @@ describe("venice-models", () => { }); it("uses API maxCompletionTokens for catalog models when present", async () => { - const fetchMock = vi.fn( - async () => - new Response( - JSON.stringify({ - data: [ - { - id: "llama-3.3-70b", - model_spec: { - name: "llama-3.3-70b", - privacy: "private", - availableContextTokens: 131072, - maxCompletionTokens: 2048, - capabilities: { - supportsReasoning: false, - supportsVision: false, - supportsFunctionCalling: true, - }, - }, - }, - ], - }), - { - status: 200, - headers: { "Content-Type": "application/json" }, - }, - ), - ); - vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch); + stubVeniceModelsFetch([ + { + id: "llama-3.3-70b", + availableContextTokens: 131072, + maxCompletionTokens: 2048, + capabilities: { + supportsReasoning: false, + supportsVision: false, + supportsFunctionCalling: true, + }, + }, + ]); const models = await runWithDiscoveryEnabled(() => discoverVeniceModels()); const llama = models.find((m) => m.id === "llama-3.3-70b"); @@ -131,33 +164,17 @@ describe("venice-models", () => { }); it("retains catalog maxTokens when the API omits maxCompletionTokens", async () => { - const fetchMock = vi.fn( - async () => - new Response( - JSON.stringify({ - data: [ - { - id: "qwen3-235b-a22b-instruct-2507", - model_spec: { - name: "qwen3-235b-a22b-instruct-2507", - privacy: "private", - availableContextTokens: 131072, - capabilities: { - supportsReasoning: false, - supportsVision: false, - 
supportsFunctionCalling: true, - }, - }, - }, - ], - }), - { - status: 200, - headers: { "Content-Type": "application/json" }, - }, - ), - ); - vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch); + stubVeniceModelsFetch([ + { + id: "qwen3-235b-a22b-instruct-2507", + availableContextTokens: 131072, + capabilities: { + supportsReasoning: false, + supportsVision: false, + supportsFunctionCalling: true, + }, + }, + ]); const models = await runWithDiscoveryEnabled(() => discoverVeniceModels()); const qwen = models.find((m) => m.id === "qwen3-235b-a22b-instruct-2507"); @@ -172,34 +189,18 @@ describe("venice-models", () => { }); it("uses a conservative bounded maxTokens value for new models", async () => { - const fetchMock = vi.fn( - async () => - new Response( - JSON.stringify({ - data: [ - { - id: "new-model-2026", - model_spec: { - name: "new-model-2026", - privacy: "private", - availableContextTokens: 50_000, - maxCompletionTokens: 200_000, - capabilities: { - supportsReasoning: false, - supportsVision: false, - supportsFunctionCalling: false, - }, - }, - }, - ], - }), - { - status: 200, - headers: { "Content-Type": "application/json" }, - }, - ), - ); - vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch); + stubVeniceModelsFetch([ + { + id: "new-model-2026", + availableContextTokens: 50_000, + maxCompletionTokens: 200_000, + capabilities: { + supportsReasoning: false, + supportsVision: false, + supportsFunctionCalling: false, + }, + }, + ]); const models = await runWithDiscoveryEnabled(() => discoverVeniceModels()); const newModel = models.find((m) => m.id === "new-model-2026"); @@ -209,33 +210,17 @@ describe("venice-models", () => { }); it("caps new-model maxTokens to the fallback context window when API context is missing", async () => { - const fetchMock = vi.fn( - async () => - new Response( - JSON.stringify({ - data: [ - { - id: "new-model-without-context", - model_spec: { - name: "new-model-without-context", - privacy: "private", - 
maxCompletionTokens: 200_000, - capabilities: { - supportsReasoning: false, - supportsVision: false, - supportsFunctionCalling: true, - }, - }, - }, - ], - }), - { - status: 200, - headers: { "Content-Type": "application/json" }, - }, - ), - ); - vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch); + stubVeniceModelsFetch([ + { + id: "new-model-without-context", + maxCompletionTokens: 200_000, + capabilities: { + supportsReasoning: false, + supportsVision: false, + supportsFunctionCalling: true, + }, + }, + ]); const models = await runWithDiscoveryEnabled(() => discoverVeniceModels()); const newModel = models.find((m) => m.id === "new-model-without-context"); @@ -244,37 +229,17 @@ describe("venice-models", () => { }); it("ignores missing capabilities on partial metadata instead of aborting discovery", async () => { - const fetchMock = vi.fn( - async () => - new Response( - JSON.stringify({ - data: [ - { - id: "llama-3.3-70b", - model_spec: { - name: "llama-3.3-70b", - privacy: "private", - availableContextTokens: 131072, - maxCompletionTokens: 2048, - }, - }, - { - id: "new-model-partial", - model_spec: { - name: "new-model-partial", - privacy: "private", - maxCompletionTokens: 2048, - }, - }, - ], - }), - { - status: 200, - headers: { "Content-Type": "application/json" }, - }, - ), - ); - vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch); + stubVeniceModelsFetch([ + { + id: "llama-3.3-70b", + availableContextTokens: 131072, + maxCompletionTokens: 2048, + }, + { + id: "new-model-partial", + maxCompletionTokens: 2048, + }, + ]); const models = await runWithDiscoveryEnabled(() => discoverVeniceModels()); const knownModel = models.find((m) => m.id === "llama-3.3-70b"); @@ -287,37 +252,19 @@ describe("venice-models", () => { }); it("keeps known models discoverable when a row omits model_spec", async () => { - const fetchMock = vi.fn( - async () => - new Response( - JSON.stringify({ - data: [ - { - id: "llama-3.3-70b", - }, - { - id: 
"new-model-valid", - model_spec: { - name: "new-model-valid", - privacy: "private", - availableContextTokens: 32_000, - maxCompletionTokens: 2_048, - capabilities: { - supportsReasoning: false, - supportsVision: false, - supportsFunctionCalling: true, - }, - }, - }, - ], - }), - { - status: 200, - headers: { "Content-Type": "application/json" }, - }, - ), - ); - vi.stubGlobal("fetch", fetchMock as unknown as typeof fetch); + stubVeniceModelsFetch([ + { id: "llama-3.3-70b", includeModelSpec: false }, + { + id: "new-model-valid", + availableContextTokens: 32_000, + maxCompletionTokens: 2_048, + capabilities: { + supportsReasoning: false, + supportsVision: false, + supportsFunctionCalling: true, + }, + }, + ]); const models = await runWithDiscoveryEnabled(() => discoverVeniceModels()); const knownModel = models.find((m) => m.id === "llama-3.3-70b"); diff --git a/src/agents/workspace.ts b/src/agents/workspace.ts index 830b44504ad..c4f1044a8d9 100644 --- a/src/agents/workspace.ts +++ b/src/agents/workspace.ts @@ -458,41 +458,24 @@ export async function ensureAgentWorkspace(params?: { }; } -async function resolveMemoryBootstrapEntries( +async function resolveMemoryBootstrapEntry( resolvedDir: string, -): Promise> { - const candidates: WorkspaceBootstrapFileName[] = [ - DEFAULT_MEMORY_FILENAME, - DEFAULT_MEMORY_ALT_FILENAME, - ]; - const entries: Array<{ name: WorkspaceBootstrapFileName; filePath: string }> = []; - for (const name of candidates) { +): Promise<{ name: WorkspaceBootstrapFileName; filePath: string } | null> { + // Prefer MEMORY.md; fall back to memory.md only when absent. + // Checking both and deduplicating via realpath is unreliable on case-insensitive + // file systems mounted in Docker (e.g. macOS volumes), where both names pass + // fs.access() but realpath does not normalise case through the mount layer, + // causing the same content to be injected twice and wasting tokens. 
+ for (const name of [DEFAULT_MEMORY_FILENAME, DEFAULT_MEMORY_ALT_FILENAME] as const) { const filePath = path.join(resolvedDir, name); try { await fs.access(filePath); - entries.push({ name, filePath }); + return { name, filePath }; } catch { - // optional + // try next candidate } } - if (entries.length <= 1) { - return entries; - } - - const seen = new Set(); - const deduped: Array<{ name: WorkspaceBootstrapFileName; filePath: string }> = []; - for (const entry of entries) { - let key = entry.filePath; - try { - key = await fs.realpath(entry.filePath); - } catch {} - if (seen.has(key)) { - continue; - } - seen.add(key); - deduped.push(entry); - } - return deduped; + return null; } export async function loadWorkspaceBootstrapFiles(dir: string): Promise { @@ -532,7 +515,10 @@ export async function loadWorkspaceBootstrapFiles(dir: string): Promise { ]); }); + it("registers fast mode as a first-class options command", () => { + const fast = listChatCommands().find((command) => command.key === "fast"); + expect(fast).toMatchObject({ + nativeName: "fast", + textAliases: ["/fast"], + category: "options", + }); + const modeArg = fast?.args?.find((arg) => arg.name === "mode"); + expect(modeArg?.choices).toEqual(["status", "on", "off"]); + }); + it("detects known text commands", () => { const detection = getCommandDetection(); expect(detection.exact.has("/commands")).toBe(true); diff --git a/src/auto-reply/reply.directive.directive-behavior.e2e-harness.ts b/src/auto-reply/reply.directive.directive-behavior.e2e-harness.ts index 9908bad1653..0d7c2f9c936 100644 --- a/src/auto-reply/reply.directive.directive-behavior.e2e-harness.ts +++ b/src/auto-reply/reply.directive.directive-behavior.e2e-harness.ts @@ -4,6 +4,7 @@ import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.j import { loadModelCatalog } from "../agents/model-catalog.js"; import { runEmbeddedPiAgent } from "../agents/pi-embedded.js"; import { loadSessionStore } from "../config/sessions.js"; 
+import { runEmbeddedPiAgentMock } from "./reply.directive.directive-behavior.e2e-mocks.js"; export { loadModelCatalog } from "../agents/model-catalog.js"; export { runEmbeddedPiAgent } from "../agents/pi-embedded.js"; @@ -134,7 +135,7 @@ export function assertElevatedOffStatusReply(text: string | undefined) { export function installDirectiveBehaviorE2EHooks() { beforeEach(() => { - vi.mocked(runEmbeddedPiAgent).mockReset(); + runEmbeddedPiAgentMock.mockReset(); vi.mocked(loadModelCatalog).mockResolvedValue(DEFAULT_TEST_MODEL_CATALOG); }); diff --git a/src/auto-reply/reply.directive.directive-behavior.shows-current-verbose-level-verbose-has-no.test.ts b/src/auto-reply/reply.directive.directive-behavior.shows-current-verbose-level-verbose-has-no.test.ts index 2e6f63df210..a35f9b1bd1f 100644 --- a/src/auto-reply/reply.directive.directive-behavior.shows-current-verbose-level-verbose-has-no.test.ts +++ b/src/auto-reply/reply.directive.directive-behavior.shows-current-verbose-level-verbose-has-no.test.ts @@ -1,5 +1,5 @@ import "./reply.directive.directive-behavior.e2e-mocks.js"; -import { describe, expect, it, vi } from "vitest"; +import { describe, expect, it } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; import { loadSessionStore } from "../config/sessions.js"; import { @@ -10,10 +10,10 @@ import { makeRestrictedElevatedDisabledConfig, makeWhatsAppDirectiveConfig, replyText, - runEmbeddedPiAgent, sessionStorePath, withTempHome, } from "./reply.directive.directive-behavior.e2e-harness.js"; +import { runEmbeddedPiAgentMock } from "./reply.directive.directive-behavior.e2e-mocks.js"; import { getReplyFromConfig } from "./reply.js"; const COMMAND_MESSAGE_BASE = { @@ -126,6 +126,18 @@ describe("directive behavior", () => { it("reports current directive defaults when no arguments are provided", async () => { await withTempHome(async (home) => { + const fastText = await runCommand(home, "/fast", { + defaults: { + models: { + 
"anthropic/claude-opus-4-5": { + params: { fastMode: true }, + }, + }, + }, + }); + expect(fastText).toContain("Current fast mode: on (config)"); + expect(fastText).toContain("Options: on, off."); + const verboseText = await runCommand(home, "/verbose", { defaults: { verboseDefault: "on" }, }); @@ -158,7 +170,28 @@ describe("directive behavior", () => { expect(execText).toContain( "Options: host=sandbox|gateway|node, security=deny|allowlist|full, ask=off|on-miss|always, node=.", ); - expect(runEmbeddedPiAgent).not.toHaveBeenCalled(); + expect(runEmbeddedPiAgentMock).not.toHaveBeenCalled(); + }); + }); + it("persists fast toggles across /status and /fast", async () => { + await withTempHome(async (home) => { + const storePath = sessionStorePath(home); + + const onText = await runCommand(home, "/fast on"); + expect(onText).toContain("Fast mode enabled"); + expect(loadSessionStore(storePath)["agent:main:main"]?.fastMode).toBe(true); + + const statusText = await runCommand(home, "/status"); + const optionsLine = statusText?.split("\n").find((line) => line.trim().startsWith("⚙️")); + expect(optionsLine).toContain("Fast: on"); + + const offText = await runCommand(home, "/fast off"); + expect(offText).toContain("Fast mode disabled"); + expect(loadSessionStore(storePath)["agent:main:main"]?.fastMode).toBe(false); + + const fastText = await runCommand(home, "/fast"); + expect(fastText).toContain("Current fast mode: off"); + expect(runEmbeddedPiAgentMock).not.toHaveBeenCalled(); }); }); it("persists elevated toggles across /status and /elevated", async () => { @@ -181,7 +214,7 @@ describe("directive behavior", () => { const store = loadSessionStore(storePath); expect(store["agent:main:main"]?.elevatedLevel).toBe("on"); - expect(runEmbeddedPiAgent).not.toHaveBeenCalled(); + expect(runEmbeddedPiAgentMock).not.toHaveBeenCalled(); }); }); it("enforces per-agent elevated restrictions and status visibility", async () => { @@ -217,7 +250,7 @@ describe("directive behavior", () => { 
); const statusText = replyText(statusRes); expect(statusText).not.toContain("elevated"); - expect(runEmbeddedPiAgent).not.toHaveBeenCalled(); + expect(runEmbeddedPiAgentMock).not.toHaveBeenCalled(); }); }); it("applies per-agent allowlist requirements before allowing elevated", async () => { @@ -245,7 +278,7 @@ describe("directive behavior", () => { const allowedText = replyText(allowedRes); expect(allowedText).toContain("Elevated mode set to ask"); - expect(runEmbeddedPiAgent).not.toHaveBeenCalled(); + expect(runEmbeddedPiAgentMock).not.toHaveBeenCalled(); }); }); it("handles runtime warning, invalid level, and multi-directive elevated inputs", async () => { @@ -280,7 +313,7 @@ describe("directive behavior", () => { expect(text).toContain(snippet); } } - expect(runEmbeddedPiAgent).not.toHaveBeenCalled(); + expect(runEmbeddedPiAgentMock).not.toHaveBeenCalled(); }); }); it("persists queue overrides and reset behavior", async () => { @@ -317,12 +350,12 @@ describe("directive behavior", () => { expect(entry?.queueDebounceMs).toBeUndefined(); expect(entry?.queueCap).toBeUndefined(); expect(entry?.queueDrop).toBeUndefined(); - expect(runEmbeddedPiAgent).not.toHaveBeenCalled(); + expect(runEmbeddedPiAgentMock).not.toHaveBeenCalled(); }); }); it("strips inline elevated directives from the user text (does not persist session override)", async () => { await withTempHome(async (home) => { - vi.mocked(runEmbeddedPiAgent).mockResolvedValue({ + runEmbeddedPiAgentMock.mockResolvedValue({ payloads: [{ text: "ok" }], meta: { durationMs: 1, @@ -346,7 +379,7 @@ describe("directive behavior", () => { const store = loadSessionStore(storePath); expect(store["agent:main:main"]?.elevatedLevel).toBeUndefined(); - const calls = vi.mocked(runEmbeddedPiAgent).mock.calls; + const calls = runEmbeddedPiAgentMock.mock.calls; expect(calls.length).toBeGreaterThan(0); const call = calls[0]?.[0]; expect(call?.prompt).toContain("hello there"); diff --git 
a/src/auto-reply/reply.directive.parse.test.ts b/src/auto-reply/reply.directive.parse.test.ts index bbaa3f0d0fc..6d0b484511c 100644 --- a/src/auto-reply/reply.directive.parse.test.ts +++ b/src/auto-reply/reply.directive.parse.test.ts @@ -8,7 +8,7 @@ import { extractThinkDirective, extractVerboseDirective, } from "./reply.js"; -import { extractStatusDirective } from "./reply/directives.js"; +import { extractFastDirective, extractStatusDirective } from "./reply/directives.js"; describe("directive parsing", () => { it("ignores verbose directive inside URL", () => { @@ -49,6 +49,12 @@ describe("directive parsing", () => { expect(res.reasoningLevel).toBe("stream"); }); + it("matches fast directive", () => { + const res = extractFastDirective("/fast on please"); + expect(res.hasDirective).toBe(true); + expect(res.fastMode).toBe(true); + }); + it("matches elevated with leading space", () => { const res = extractElevatedDirective(" please /elevated on now"); expect(res.hasDirective).toBe(true); @@ -106,6 +112,14 @@ describe("directive parsing", () => { expect(res.cleaned).toBe(""); }); + it("matches fast with no argument", () => { + const res = extractFastDirective("/fast:"); + expect(res.hasDirective).toBe(true); + expect(res.fastMode).toBeUndefined(); + expect(res.rawLevel).toBeUndefined(); + expect(res.cleaned).toBe(""); + }); + it("matches reasoning with no argument", () => { const res = extractReasoningDirective("/reasoning:"); expect(res.hasDirective).toBe(true); diff --git a/src/auto-reply/reply.stage-sandbox-media.scp-remote-path.test.ts b/src/auto-reply/reply.stage-sandbox-media.scp-remote-path.test.ts new file mode 100644 index 00000000000..d5d628421d9 --- /dev/null +++ b/src/auto-reply/reply.stage-sandbox-media.scp-remote-path.test.ts @@ -0,0 +1,75 @@ +import fs from "node:fs/promises"; +import { basename, join } from "node:path"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import { + createSandboxMediaContexts, + 
createSandboxMediaStageConfig, + withSandboxMediaTempHome, +} from "./stage-sandbox-media.test-harness.js"; + +const sandboxMocks = vi.hoisted(() => ({ + ensureSandboxWorkspaceForSession: vi.fn(), +})); +const childProcessMocks = vi.hoisted(() => ({ + spawn: vi.fn(), +})); + +vi.mock("../agents/sandbox.js", () => sandboxMocks); +vi.mock("node:child_process", () => childProcessMocks); + +import { stageSandboxMedia } from "./reply/stage-sandbox-media.js"; + +afterEach(() => { + vi.restoreAllMocks(); + childProcessMocks.spawn.mockClear(); +}); + +function createRemoteStageParams(home: string): { + cfg: ReturnType; + workspaceDir: string; + sessionKey: string; + remoteCacheDir: string; +} { + const sessionKey = "agent:main:main"; + vi.mocked(sandboxMocks.ensureSandboxWorkspaceForSession).mockResolvedValue(null); + return { + cfg: createSandboxMediaStageConfig(home), + workspaceDir: join(home, "openclaw"), + sessionKey, + remoteCacheDir: join(home, ".openclaw", "media", "remote-cache", sessionKey), + }; +} + +function createRemoteContexts(remotePath: string) { + const { ctx, sessionCtx } = createSandboxMediaContexts(remotePath); + ctx.Provider = "imessage"; + ctx.MediaRemoteHost = "user@gateway-host"; + sessionCtx.Provider = "imessage"; + sessionCtx.MediaRemoteHost = "user@gateway-host"; + return { ctx, sessionCtx }; +} + +describe("stageSandboxMedia scp remote paths", () => { + it("rejects remote attachment filenames with shell metacharacters before spawning scp", async () => { + await withSandboxMediaTempHome("openclaw-triggers-", async (home) => { + const { cfg, workspaceDir, sessionKey, remoteCacheDir } = createRemoteStageParams(home); + const remotePath = "/Users/demo/Library/Messages/Attachments/ab/cd/evil$(touch pwned).jpg"; + const { ctx, sessionCtx } = createRemoteContexts(remotePath); + + await stageSandboxMedia({ + ctx, + sessionCtx, + cfg, + sessionKey, + workspaceDir, + }); + + expect(childProcessMocks.spawn).not.toHaveBeenCalled(); + await 
expect(fs.stat(join(remoteCacheDir, basename(remotePath)))).rejects.toThrow(); + expect(ctx.MediaPath).toBe(remotePath); + expect(sessionCtx.MediaPath).toBe(remotePath); + expect(ctx.MediaUrl).toBe(remotePath); + expect(sessionCtx.MediaUrl).toBe(remotePath); + }); + }); +}); diff --git a/src/auto-reply/reply.triggers.trigger-handling.test-harness.ts b/src/auto-reply/reply.triggers.trigger-handling.test-harness.ts index 69db49e97ee..db8dd5b1fae 100644 --- a/src/auto-reply/reply.triggers.trigger-handling.test-harness.ts +++ b/src/auto-reply/reply.triggers.trigger-handling.test-harness.ts @@ -363,7 +363,7 @@ export async function runGreetingPromptForBareNewOrReset(params: { expect(runEmbeddedPiAgentMock).toHaveBeenCalledOnce(); const prompt = runEmbeddedPiAgentMock.mock.calls.at(-1)?.[0]?.prompt ?? ""; expect(prompt).toContain("A new session was started via /new or /reset"); - expect(prompt).toContain("Execute your Session Startup sequence now"); + expect(prompt).toContain("Run your Session Startup sequence"); } export function installTriggerHandlingE2eTestHooks() { diff --git a/src/auto-reply/reply/abort.ts b/src/auto-reply/reply/abort.ts index d0f97f04fa8..58ea5e59fa6 100644 --- a/src/auto-reply/reply/abort.ts +++ b/src/auto-reply/reply/abort.ts @@ -2,7 +2,7 @@ import { getAcpSessionManager } from "../../acp/control-plane/manager.js"; import { resolveSessionAgentId } from "../../agents/agent-scope.js"; import { abortEmbeddedPiRun } from "../../agents/pi-embedded.js"; import { - listSubagentRunsForRequester, + listSubagentRunsForController, markSubagentRunTerminated, } from "../../agents/subagent-registry.js"; import { @@ -222,7 +222,7 @@ export function stopSubagentsForRequester(params: { if (!requesterKey) { return { stopped: 0 }; } - const runs = listSubagentRunsForRequester(requesterKey); + const runs = listSubagentRunsForController(requesterKey); if (runs.length === 0) { return { stopped: 0 }; } diff --git a/src/auto-reply/reply/agent-runner-execution.ts 
b/src/auto-reply/reply/agent-runner-execution.ts index 6748e3cbe68..27a31c2387a 100644 --- a/src/auto-reply/reply/agent-runner-execution.ts +++ b/src/auto-reply/reply/agent-runner-execution.ts @@ -6,8 +6,10 @@ import { getCliSessionId } from "../../agents/cli-session.js"; import { runWithModelFallback } from "../../agents/model-fallback.js"; import { isCliProvider } from "../../agents/model-selection.js"; import { + BILLING_ERROR_USER_MESSAGE, isCompactionFailureError, isContextOverflowError, + isBillingErrorMessage, isLikelyContextOverflowError, isTransientHttpError, sanitizeUserFacingText, @@ -38,8 +40,7 @@ import { } from "../tokens.js"; import type { GetReplyOptions, ReplyPayload } from "../types.js"; import { - buildEmbeddedRunBaseParams, - buildEmbeddedRunContexts, + buildEmbeddedRunExecutionParams, resolveModelFallbackOptions, } from "./agent-runner-utils.js"; import { type BlockReplyPipeline } from "./block-reply-pipeline.js"; @@ -199,6 +200,7 @@ export async function runAgentTurnWithFallback(params: { const onToolResult = params.opts?.onToolResult; const fallbackResult = await runWithModelFallback({ ...resolveModelFallbackOptions(params.followupRun.run), + runId, run: (provider, model, runOptions) => { // Notify that model selection is complete (including after fallback). // This allows responsePrefix template interpolation with the actual model. 
@@ -305,20 +307,17 @@ export async function runAgentTurnWithFallback(params: { } })(); } - const { authProfile, embeddedContext, senderContext } = buildEmbeddedRunContexts({ - run: params.followupRun.run, - sessionCtx: params.sessionCtx, - hasRepliedRef: params.opts?.hasRepliedRef, - provider, - }); - const runBaseParams = buildEmbeddedRunBaseParams({ - run: params.followupRun.run, - provider, - model, - runId, - authProfile, - allowTransientCooldownProbe: runOptions?.allowTransientCooldownProbe, - }); + const { embeddedContext, senderContext, runBaseParams } = buildEmbeddedRunExecutionParams( + { + run: params.followupRun.run, + sessionCtx: params.sessionCtx, + hasRepliedRef: params.opts?.hasRepliedRef, + provider, + runId, + allowTransientCooldownProbe: runOptions?.allowTransientCooldownProbe, + model, + }, + ); return (async () => { const result = await runEmbeddedPiAgent({ ...embeddedContext, @@ -390,11 +389,15 @@ export async function runAgentTurnWithFallback(params: { await params.opts?.onToolStart?.({ name, phase }); } } - // Track auto-compaction completion + // Track auto-compaction completion and notify UI layer if (evt.stream === "compaction") { const phase = typeof evt.data.phase === "string" ? evt.data.phase : ""; + if (phase === "start") { + await params.opts?.onCompactionStart?.(); + } if (phase === "end") { autoCompactionCompleted = true; + await params.opts?.onCompactionEnd?.(); } } }, @@ -444,8 +447,8 @@ export async function runAgentTurnWithFallback(params: { } await params.typingSignals.signalTextDelta(text); await onToolResult({ + ...payload, text, - mediaUrls: payload.mediaUrls, }); }) .catch((err) => { @@ -513,8 +516,9 @@ export async function runAgentTurnWithFallback(params: { break; } catch (err) { const message = err instanceof Error ? 
err.message : String(err); - const isContextOverflow = isLikelyContextOverflowError(message); - const isCompactionFailure = isCompactionFailureError(message); + const isBilling = isBillingErrorMessage(message); + const isContextOverflow = !isBilling && isLikelyContextOverflowError(message); + const isCompactionFailure = !isBilling && isCompactionFailureError(message); const isSessionCorruption = /function call turn comes immediately after/i.test(message); const isRoleOrderingError = /incorrect role information|roles must alternate/i.test(message); const isTransientHttp = isTransientHttpError(message); @@ -609,11 +613,13 @@ export async function runAgentTurnWithFallback(params: { ? sanitizeUserFacingText(message, { errorContext: true }) : message; const trimmedMessage = safeMessage.replace(/\.\s*$/, ""); - const fallbackText = isContextOverflow - ? "⚠️ Context overflow — prompt too large for this model. Try a shorter message or a larger-context model." - : isRoleOrderingError - ? "⚠️ Message ordering conflict - please try again. If this persists, use /new to start a fresh session." - : `⚠️ Agent failed before reply: ${trimmedMessage}.\nLogs: openclaw logs --follow`; + const fallbackText = isBilling + ? BILLING_ERROR_USER_MESSAGE + : isContextOverflow + ? "⚠️ Context overflow — prompt too large for this model. Try a shorter message or a larger-context model." + : isRoleOrderingError + ? "⚠️ Message ordering conflict - please try again. If this persists, use /new to start a fresh session." 
+ : `⚠️ Agent failed before reply: ${trimmedMessage}.\nLogs: openclaw logs --follow`; return { kind: "final", diff --git a/src/auto-reply/reply/agent-runner-memory.ts b/src/auto-reply/reply/agent-runner-memory.ts index 374d37d52f7..d52c6d05761 100644 --- a/src/auto-reply/reply/agent-runner-memory.ts +++ b/src/auto-reply/reply/agent-runner-memory.ts @@ -27,13 +27,13 @@ import type { TemplateContext } from "../templating.js"; import type { VerboseLevel } from "../thinking.js"; import type { GetReplyOptions } from "../types.js"; import { - buildEmbeddedRunBaseParams, - buildEmbeddedRunContexts, + buildEmbeddedRunExecutionParams, resolveModelFallbackOptions, } from "./agent-runner-utils.js"; import { hasAlreadyFlushedForCurrentCompaction, resolveMemoryFlushContextWindowTokens, + resolveMemoryFlushRelativePathForRun, resolveMemoryFlushPromptForRun, resolveMemoryFlushSettings, shouldRunMemoryFlush, @@ -465,6 +465,11 @@ export async function runMemoryFlushIfNeeded(params: { }); } let memoryCompactionCompleted = false; + const memoryFlushNowMs = Date.now(); + const memoryFlushWritePath = resolveMemoryFlushRelativePathForRun({ + cfg: params.cfg, + nowMs: memoryFlushNowMs, + }); const flushSystemPrompt = [ params.followupRun.run.extraSystemPrompt, memoryFlushSettings.systemPrompt, @@ -474,19 +479,15 @@ export async function runMemoryFlushIfNeeded(params: { try { await runWithModelFallback({ ...resolveModelFallbackOptions(params.followupRun.run), + runId: flushRunId, run: async (provider, model, runOptions) => { - const { authProfile, embeddedContext, senderContext } = buildEmbeddedRunContexts({ + const { embeddedContext, senderContext, runBaseParams } = buildEmbeddedRunExecutionParams({ run: params.followupRun.run, sessionCtx: params.sessionCtx, hasRepliedRef: params.opts?.hasRepliedRef, provider, - }); - const runBaseParams = buildEmbeddedRunBaseParams({ - run: params.followupRun.run, - provider, model, runId: flushRunId, - authProfile, allowTransientCooldownProbe: 
runOptions?.allowTransientCooldownProbe, }); const result = await runEmbeddedPiAgent({ @@ -494,9 +495,11 @@ export async function runMemoryFlushIfNeeded(params: { ...senderContext, ...runBaseParams, trigger: "memory", + memoryFlushWritePath, prompt: resolveMemoryFlushPromptForRun({ prompt: memoryFlushSettings.prompt, cfg: params.cfg, + nowMs: memoryFlushNowMs, }), extraSystemPrompt: flushSystemPrompt, bootstrapPromptWarningSignaturesSeen, diff --git a/src/auto-reply/reply/agent-runner-payloads.test.ts b/src/auto-reply/reply/agent-runner-payloads.test.ts index 94088b2b5b8..db237848e3c 100644 --- a/src/auto-reply/reply/agent-runner-payloads.test.ts +++ b/src/auto-reply/reply/agent-runner-payloads.test.ts @@ -9,6 +9,20 @@ const baseParams = { replyToMode: "off" as const, }; +async function expectSameTargetRepliesSuppressed(params: { provider: string; to: string }) { + const { replyPayloads } = await buildReplyPayloads({ + ...baseParams, + payloads: [{ text: "hello world!" }], + messageProvider: "heartbeat", + originatingChannel: "feishu", + originatingTo: "ou_abc123", + messagingToolSentTexts: ["different message"], + messagingToolSentTargets: [{ tool: "message", provider: params.provider, to: params.to }], + }); + + expect(replyPayloads).toHaveLength(0); +} + describe("buildReplyPayloads media filter integration", () => { it("strips media URL from payload when in messagingToolSentMediaUrls", async () => { const { replyPayloads } = await buildReplyPayloads({ @@ -142,28 +156,52 @@ describe("buildReplyPayloads media filter integration", () => { }); it("suppresses same-target replies when message tool target provider is generic", async () => { + await expectSameTargetRepliesSuppressed({ provider: "message", to: "ou_abc123" }); + }); + + it("suppresses same-target replies when target provider is channel alias", async () => { + await expectSameTargetRepliesSuppressed({ provider: "lark", to: "ou_abc123" }); + }); + + it("drops all final payloads when block pipeline streamed 
successfully", async () => { + const pipeline: Parameters[0]["blockReplyPipeline"] = { + didStream: () => true, + isAborted: () => false, + hasSentPayload: () => false, + enqueue: () => {}, + flush: async () => {}, + stop: () => {}, + hasBuffered: () => false, + }; + // shouldDropFinalPayloads short-circuits to [] when the pipeline streamed + // without aborting, so hasSentPayload is never reached. const { replyPayloads } = await buildReplyPayloads({ ...baseParams, - payloads: [{ text: "hello world!" }], - messageProvider: "heartbeat", - originatingChannel: "feishu", - originatingTo: "ou_abc123", - messagingToolSentTexts: ["different message"], - messagingToolSentTargets: [{ tool: "message", provider: "message", to: "ou_abc123" }], + blockStreamingEnabled: true, + blockReplyPipeline: pipeline, + replyToMode: "all", + payloads: [{ text: "response", replyToId: "post-123" }], }); expect(replyPayloads).toHaveLength(0); }); - it("suppresses same-target replies when target provider is channel alias", async () => { + it("deduplicates final payloads against directly sent block keys regardless of replyToId", async () => { + // When block streaming is not active but directlySentBlockKeys has entries + // (e.g. from pre-tool flush), the key should match even if replyToId differs. + const { createBlockReplyContentKey } = await import("./block-reply-pipeline.js"); + const directlySentBlockKeys = new Set(); + directlySentBlockKeys.add( + createBlockReplyContentKey({ text: "response", replyToId: "post-1" }), + ); + const { replyPayloads } = await buildReplyPayloads({ ...baseParams, - payloads: [{ text: "hello world!" 
}], - messageProvider: "heartbeat", - originatingChannel: "feishu", - originatingTo: "ou_abc123", - messagingToolSentTexts: ["different message"], - messagingToolSentTargets: [{ tool: "message", provider: "lark", to: "ou_abc123" }], + blockStreamingEnabled: false, + blockReplyPipeline: null, + directlySentBlockKeys, + replyToMode: "off", + payloads: [{ text: "response" }], }); expect(replyPayloads).toHaveLength(0); diff --git a/src/auto-reply/reply/agent-runner-payloads.ts b/src/auto-reply/reply/agent-runner-payloads.ts index 263dea9fd54..9e89c921407 100644 --- a/src/auto-reply/reply/agent-runner-payloads.ts +++ b/src/auto-reply/reply/agent-runner-payloads.ts @@ -5,7 +5,7 @@ import type { OriginatingChannelType } from "../templating.js"; import { SILENT_REPLY_TOKEN } from "../tokens.js"; import type { ReplyPayload } from "../types.js"; import { formatBunFetchSocketError, isBunFetchSocketError } from "./agent-runner-utils.js"; -import { createBlockReplyPayloadKey, type BlockReplyPipeline } from "./block-reply-pipeline.js"; +import { createBlockReplyContentKey, type BlockReplyPipeline } from "./block-reply-pipeline.js"; import { resolveOriginAccountId, resolveOriginMessageProvider, @@ -213,7 +213,7 @@ export async function buildReplyPayloads(params: { ) : params.directlySentBlockKeys?.size ? mediaFilteredPayloads.filter( - (payload) => !params.directlySentBlockKeys!.has(createBlockReplyPayloadKey(payload)), + (payload) => !params.directlySentBlockKeys!.has(createBlockReplyContentKey(payload)), ) : mediaFilteredPayloads; const replyPayloads = suppressMessagingToolReplies ? 
[] : filteredPayloads; diff --git a/src/auto-reply/reply/agent-runner-utils.test.ts b/src/auto-reply/reply/agent-runner-utils.test.ts index 350c6b63e47..5bf77cd9f70 100644 --- a/src/auto-reply/reply/agent-runner-utils.test.ts +++ b/src/auto-reply/reply/agent-runner-utils.test.ts @@ -12,6 +12,7 @@ vi.mock("../../agents/agent-scope.js", () => ({ })); const { + buildThreadingToolContext, buildEmbeddedRunBaseParams, buildEmbeddedRunContexts, resolveModelFallbackOptions, @@ -173,4 +174,44 @@ describe("agent-runner-utils", () => { expect(resolved.embeddedContext.messageProvider).toBe("telegram"); expect(resolved.embeddedContext.messageTo).toBe("268300329"); }); + + it("uses OriginatingTo for threading tool context on telegram native commands", () => { + const context = buildThreadingToolContext({ + sessionCtx: { + Provider: "telegram", + To: "slash:8460800771", + OriginatingChannel: "telegram", + OriginatingTo: "telegram:-1003841603622", + MessageThreadId: 928, + MessageSid: "2284", + }, + config: { channels: { telegram: { allowFrom: ["*"] } } }, + hasRepliedRef: undefined, + }); + + expect(context).toMatchObject({ + currentChannelId: "telegram:-1003841603622", + currentThreadTs: "928", + currentMessageId: "2284", + }); + }); + + it("uses OriginatingTo for threading tool context on discord native commands", () => { + const context = buildThreadingToolContext({ + sessionCtx: { + Provider: "discord", + To: "slash:1177378744822943744", + OriginatingChannel: "discord", + OriginatingTo: "channel:123456789012345678", + MessageSid: "msg-9", + }, + config: {}, + hasRepliedRef: undefined, + }); + + expect(context).toMatchObject({ + currentChannelId: "channel:123456789012345678", + currentMessageId: "msg-9", + }); + }); }); diff --git a/src/auto-reply/reply/agent-runner-utils.ts b/src/auto-reply/reply/agent-runner-utils.ts index 36e45bd9bf1..c6e71a9bab0 100644 --- a/src/auto-reply/reply/agent-runner-utils.ts +++ b/src/auto-reply/reply/agent-runner-utils.ts @@ -23,12 +23,20 @@ 
export function buildThreadingToolContext(params: { }): ChannelThreadingToolContext { const { sessionCtx, config, hasRepliedRef } = params; const currentMessageId = sessionCtx.MessageSidFull ?? sessionCtx.MessageSid; + const originProvider = resolveOriginMessageProvider({ + originatingChannel: sessionCtx.OriginatingChannel, + provider: sessionCtx.Provider, + }); + const originTo = resolveOriginMessageTo({ + originatingTo: sessionCtx.OriginatingTo, + to: sessionCtx.To, + }); if (!config) { return { currentMessageId, }; } - const rawProvider = sessionCtx.Provider?.trim().toLowerCase(); + const rawProvider = originProvider?.trim().toLowerCase(); if (!rawProvider) { return { currentMessageId, @@ -39,7 +47,7 @@ export function buildThreadingToolContext(params: { const dock = provider ? getChannelDock(provider) : undefined; if (!dock?.threading?.buildToolContext) { return { - currentChannelId: sessionCtx.To?.trim() || undefined, + currentChannelId: originTo?.trim() || undefined, currentChannelProvider: provider ?? 
(rawProvider as ChannelId), currentMessageId, hasRepliedRef, @@ -50,9 +58,9 @@ export function buildThreadingToolContext(params: { cfg: config, accountId: sessionCtx.AccountId, context: { - Channel: sessionCtx.Provider, + Channel: originProvider, From: sessionCtx.From, - To: sessionCtx.To, + To: originTo, ChatType: sessionCtx.ChatType, CurrentMessageId: currentMessageId, ReplyToId: sessionCtx.ReplyToId, @@ -255,6 +263,31 @@ export function buildEmbeddedRunContexts(params: { }; } +export function buildEmbeddedRunExecutionParams(params: { + run: FollowupRun["run"]; + sessionCtx: TemplateContext; + hasRepliedRef: { value: boolean } | undefined; + provider: string; + model: string; + runId: string; + allowTransientCooldownProbe?: boolean; +}) { + const { authProfile, embeddedContext, senderContext } = buildEmbeddedRunContexts(params); + const runBaseParams = buildEmbeddedRunBaseParams({ + run: params.run, + provider: params.provider, + model: params.model, + runId: params.runId, + authProfile, + allowTransientCooldownProbe: params.allowTransientCooldownProbe, + }); + return { + embeddedContext, + senderContext, + runBaseParams, + }; +} + export function resolveProviderScopedAuthProfile(params: { provider: string; primaryProvider: string; diff --git a/src/auto-reply/reply/agent-runner.media-paths.test.ts b/src/auto-reply/reply/agent-runner.media-paths.test.ts index f5658287aff..a759c539bdc 100644 --- a/src/auto-reply/reply/agent-runner.media-paths.test.ts +++ b/src/auto-reply/reply/agent-runner.media-paths.test.ts @@ -2,7 +2,7 @@ import path from "node:path"; import { beforeEach, describe, expect, it, vi } from "vitest"; import type { TemplateContext } from "../templating.js"; import type { FollowupRun, QueueSettings } from "./queue.js"; -import { createMockTypingController } from "./test-helpers.js"; +import { createMockFollowupRun, createMockTypingController } from "./test-helpers.js"; const runEmbeddedPiAgentMock = vi.fn(); const runWithModelFallbackMock = vi.fn(); 
@@ -72,32 +72,15 @@ describe("runReplyAgent media path normalization", () => { const result = await runReplyAgent({ commandBody: "generate", - followupRun: { + followupRun: createMockFollowupRun({ prompt: "generate", - enqueuedAt: Date.now(), run: { agentId: "main", agentDir: "/tmp/agent", - sessionId: "session", - sessionKey: "main", messageProvider: "telegram", - sessionFile: "/tmp/session.jsonl", workspaceDir: "/tmp/workspace", - config: {}, - provider: "anthropic", - model: "claude", - thinkLevel: "low", - verboseLevel: "off", - elevatedLevel: "off", - bashElevated: { - enabled: false, - allowed: false, - defaultLevel: "off", - }, - timeoutMs: 1_000, - blockReplyBreak: "message_end", }, - } as unknown as FollowupRun, + }) as unknown as FollowupRun, queueKey: "main", resolvedQueue: { mode: "interrupt" } as QueueSettings, shouldSteer: false, diff --git a/src/auto-reply/reply/agent-runner.misc.runreplyagent.test.ts b/src/auto-reply/reply/agent-runner.misc.runreplyagent.test.ts index 659ccfe7951..14731dbb0ff 100644 --- a/src/auto-reply/reply/agent-runner.misc.runreplyagent.test.ts +++ b/src/auto-reply/reply/agent-runner.misc.runreplyagent.test.ts @@ -1628,3 +1628,72 @@ describe("runReplyAgent transient HTTP retry", () => { expect(payload?.text).toContain("Recovered response"); }); }); + +describe("runReplyAgent billing error classification", () => { + // Regression guard for the runner-level catch block in runAgentTurnWithFallback. + // Billing errors from providers like OpenRouter can contain token/size wording that + // matches context overflow heuristics. This test verifies the final user-visible + // message is the billing-specific one, not the "Context overflow" fallback. 
+ it("returns billing message for mixed-signal error (billing text + overflow patterns)", async () => { + runEmbeddedPiAgentMock.mockRejectedValueOnce( + new Error("402 Payment Required: request token limit exceeded for this billing plan"), + ); + + const typing = createMockTypingController(); + const sessionCtx = { + Provider: "telegram", + MessageSid: "msg", + } as unknown as TemplateContext; + const resolvedQueue = { mode: "interrupt" } as unknown as QueueSettings; + const followupRun = { + prompt: "hello", + summaryLine: "hello", + enqueuedAt: Date.now(), + run: { + sessionId: "session", + sessionKey: "main", + messageProvider: "telegram", + sessionFile: "/tmp/session.jsonl", + workspaceDir: "/tmp", + config: {}, + skillsSnapshot: {}, + provider: "anthropic", + model: "claude", + thinkLevel: "low", + verboseLevel: "off", + elevatedLevel: "off", + bashElevated: { + enabled: false, + allowed: false, + defaultLevel: "off", + }, + timeoutMs: 1_000, + blockReplyBreak: "message_end", + }, + } as unknown as FollowupRun; + + const result = await runReplyAgent({ + commandBody: "hello", + followupRun, + queueKey: "main", + resolvedQueue, + shouldSteer: false, + shouldFollowup: false, + isActive: false, + isStreaming: false, + typing, + sessionCtx, + defaultModel: "anthropic/claude", + resolvedVerboseLevel: "off", + isNewSession: false, + blockStreamingEnabled: false, + resolvedBlockStreamingBreak: "message_end", + shouldInjectGroupIntro: false, + typingMode: "instant", + }); + + const payload = Array.isArray(result) ? 
result[0] : result; + expect(payload?.text).toContain("billing error"); + expect(payload?.text).not.toContain("Context overflow"); + }); +}); diff --git a/src/auto-reply/reply/agent-runner.runreplyagent.e2e.test.ts b/src/auto-reply/reply/agent-runner.runreplyagent.e2e.test.ts index 83c1796515c..6bebdc6a390 100644 --- a/src/auto-reply/reply/agent-runner.runreplyagent.e2e.test.ts +++ b/src/auto-reply/reply/agent-runner.runreplyagent.e2e.test.ts @@ -21,13 +21,14 @@ type AgentRunParams = { onAssistantMessageStart?: () => Promise | void; onReasoningStream?: (payload: { text?: string }) => Promise | void; onBlockReply?: (payload: { text?: string; mediaUrls?: string[] }) => Promise | void; - onToolResult?: (payload: { text?: string; mediaUrls?: string[] }) => Promise | void; + onToolResult?: (payload: ReplyPayload) => Promise | void; onAgentEvent?: (evt: { stream: string; data: Record }) => void; }; type EmbeddedRunParams = { prompt?: string; extraSystemPrompt?: string; + memoryFlushWritePath?: string; bootstrapPromptWarningSignaturesSeen?: string[]; bootstrapPromptWarningSignature?: string; onAgentEvent?: (evt: { stream?: string; data?: { phase?: string; willRetry?: boolean } }) => void; @@ -594,6 +595,40 @@ describe("runReplyAgent typing (heartbeat)", () => { } }); + it("preserves channelData on forwarded tool results", async () => { + const onToolResult = vi.fn(); + state.runEmbeddedPiAgentMock.mockImplementationOnce(async (params: AgentRunParams) => { + await params.onToolResult?.({ + text: "Approval required.\n\n```txt\n/approve 117ba06d allow-once\n```", + channelData: { + execApproval: { + approvalId: "117ba06d-1111-2222-3333-444444444444", + approvalSlug: "117ba06d", + allowedDecisions: ["allow-once", "allow-always", "deny"], + }, + }, + }); + return { payloads: [{ text: "final" }], meta: {} }; + }); + + const { run } = createMinimalRun({ + typingMode: "message", + opts: { onToolResult }, + }); + await run(); + + expect(onToolResult).toHaveBeenCalledWith({ + text: 
"Approval required.\n\n```txt\n/approve 117ba06d allow-once\n```", + channelData: { + execApproval: { + approvalId: "117ba06d-1111-2222-3333-444444444444", + approvalSlug: "117ba06d", + allowedDecisions: ["allow-once", "allow-always", "deny"], + }, + }, + }); + }); + it("retries transient HTTP failures once with timer-driven backoff", async () => { vi.useFakeTimers(); let calls = 0; @@ -1220,6 +1255,79 @@ describe("runReplyAgent typing (heartbeat)", () => { }); }); + it("clears stale runtime model fields when resetSession retries after compaction failure", async () => { + await withTempStateDir(async (stateDir) => { + const sessionId = "session-stale-model"; + const storePath = path.join(stateDir, "sessions", "sessions.json"); + const transcriptPath = sessions.resolveSessionTranscriptPath(sessionId); + const sessionEntry: SessionEntry = { + sessionId, + updatedAt: Date.now(), + sessionFile: transcriptPath, + modelProvider: "qwencode", + model: "qwen3.5-plus-2026-02-15", + contextTokens: 123456, + systemPromptReport: { + source: "run", + generatedAt: Date.now(), + sessionId, + sessionKey: "main", + provider: "qwencode", + model: "qwen3.5-plus-2026-02-15", + workspaceDir: stateDir, + bootstrapMaxChars: 1000, + bootstrapTotalMaxChars: 2000, + systemPrompt: { + chars: 10, + projectContextChars: 5, + nonProjectContextChars: 5, + }, + injectedWorkspaceFiles: [], + skills: { + promptChars: 0, + entries: [], + }, + tools: { + listChars: 0, + schemaChars: 0, + entries: [], + }, + }, + }; + const sessionStore = { main: sessionEntry }; + + await fs.mkdir(path.dirname(storePath), { recursive: true }); + await fs.writeFile(storePath, JSON.stringify(sessionStore), "utf-8"); + await fs.mkdir(path.dirname(transcriptPath), { recursive: true }); + await fs.writeFile(transcriptPath, "ok", "utf-8"); + + state.runEmbeddedPiAgentMock.mockImplementationOnce(async () => { + throw new Error( + 'Context overflow: Summarization failed: 400 {"message":"prompt is too long"}', + ); + }); + + 
const { run } = createMinimalRun({ + sessionEntry, + sessionStore, + sessionKey: "main", + storePath, + }); + await run(); + + expect(sessionStore.main.modelProvider).toBeUndefined(); + expect(sessionStore.main.model).toBeUndefined(); + expect(sessionStore.main.contextTokens).toBeUndefined(); + expect(sessionStore.main.systemPromptReport).toBeUndefined(); + + const persisted = JSON.parse(await fs.readFile(storePath, "utf-8")); + expect(persisted.main.modelProvider).toBeUndefined(); + expect(persisted.main.model).toBeUndefined(); + expect(persisted.main.contextTokens).toBeUndefined(); + expect(persisted.main.systemPromptReport).toBeUndefined(); + }); + }); + it("surfaces overflow fallback when embedded run returns empty payloads", async () => { state.runEmbeddedPiAgentMock.mockImplementationOnce(async () => ({ payloads: [], @@ -1577,9 +1685,14 @@ describe("runReplyAgent memory flush", () => { const flushCall = calls[0]; expect(flushCall?.prompt).toContain("Write notes."); expect(flushCall?.prompt).toContain("NO_REPLY"); + expect(flushCall?.prompt).toMatch(/memory\/\d{4}-\d{2}-\d{2}\.md/); + expect(flushCall?.prompt).toContain("MEMORY.md"); + expect(flushCall?.memoryFlushWritePath).toMatch(/^memory\/\d{4}-\d{2}-\d{2}\.md$/); expect(flushCall?.extraSystemPrompt).toContain("extra system"); expect(flushCall?.extraSystemPrompt).toContain("Flush memory now."); expect(flushCall?.extraSystemPrompt).toContain("NO_REPLY"); + expect(flushCall?.extraSystemPrompt).toContain("memory/YYYY-MM-DD.md"); + expect(flushCall?.extraSystemPrompt).toContain("MEMORY.md"); expect(calls[1]?.prompt).toBe("hello"); }); }); @@ -1667,9 +1780,17 @@ describe("runReplyAgent memory flush", () => { await seedSessionStore({ storePath, sessionKey, entry: sessionEntry }); - const calls: Array<{ prompt?: string }> = []; + const calls: Array<{ + prompt?: string; + extraSystemPrompt?: string; + memoryFlushWritePath?: string; + }> = []; state.runEmbeddedPiAgentMock.mockImplementation(async (params: 
EmbeddedRunParams) => { - calls.push({ prompt: params.prompt }); + calls.push({ + prompt: params.prompt, + extraSystemPrompt: params.extraSystemPrompt, + memoryFlushWritePath: params.memoryFlushWritePath, + }); if (params.prompt?.includes("Pre-compaction memory flush.")) { return { payloads: [], meta: {} }; } @@ -1696,6 +1817,10 @@ describe("runReplyAgent memory flush", () => { expect(calls[0]?.prompt).toContain("Pre-compaction memory flush."); expect(calls[0]?.prompt).toContain("Current time:"); expect(calls[0]?.prompt).toMatch(/memory\/\d{4}-\d{2}-\d{2}\.md/); + expect(calls[0]?.prompt).toContain("MEMORY.md"); + expect(calls[0]?.memoryFlushWritePath).toMatch(/^memory\/\d{4}-\d{2}-\d{2}\.md$/); + expect(calls[0]?.extraSystemPrompt).toContain("memory/YYYY-MM-DD.md"); + expect(calls[0]?.extraSystemPrompt).toContain("MEMORY.md"); expect(calls[1]?.prompt).toBe("hello"); const stored = JSON.parse(await fs.readFile(storePath, "utf-8")); @@ -1952,3 +2077,4 @@ describe("runReplyAgent memory flush", () => { }); }); }); +import type { ReplyPayload } from "../types.js"; diff --git a/src/auto-reply/reply/agent-runner.ts b/src/auto-reply/reply/agent-runner.ts index b6dcd7dcd91..edc441a2552 100644 --- a/src/auto-reply/reply/agent-runner.ts +++ b/src/auto-reply/reply/agent-runner.ts @@ -278,6 +278,10 @@ export async function runReplyAgent(params: { updatedAt: Date.now(), systemSent: false, abortedLastRun: false, + modelProvider: undefined, + model: undefined, + contextTokens: undefined, + systemPromptReport: undefined, fallbackNoticeSelectedModel: undefined, fallbackNoticeActiveModel: undefined, fallbackNoticeReason: undefined, diff --git a/src/auto-reply/reply/block-reply-pipeline.test.ts b/src/auto-reply/reply/block-reply-pipeline.test.ts new file mode 100644 index 00000000000..92564033df5 --- /dev/null +++ b/src/auto-reply/reply/block-reply-pipeline.test.ts @@ -0,0 +1,79 @@ +import { describe, expect, it } from "vitest"; +import { + createBlockReplyContentKey, + 
createBlockReplyPayloadKey, + createBlockReplyPipeline, +} from "./block-reply-pipeline.js"; + +describe("createBlockReplyPayloadKey", () => { + it("produces different keys for payloads differing only by replyToId", () => { + const a = createBlockReplyPayloadKey({ text: "hello world", replyToId: "post-1" }); + const b = createBlockReplyPayloadKey({ text: "hello world", replyToId: "post-2" }); + const c = createBlockReplyPayloadKey({ text: "hello world" }); + expect(a).not.toBe(b); + expect(a).not.toBe(c); + }); + + it("produces different keys for payloads with different text", () => { + const a = createBlockReplyPayloadKey({ text: "hello" }); + const b = createBlockReplyPayloadKey({ text: "world" }); + expect(a).not.toBe(b); + }); + + it("produces different keys for payloads with different media", () => { + const a = createBlockReplyPayloadKey({ text: "hello", mediaUrl: "file:///a.png" }); + const b = createBlockReplyPayloadKey({ text: "hello", mediaUrl: "file:///b.png" }); + expect(a).not.toBe(b); + }); + + it("trims whitespace from text for key comparison", () => { + const a = createBlockReplyPayloadKey({ text: " hello " }); + const b = createBlockReplyPayloadKey({ text: "hello" }); + expect(a).toBe(b); + }); +}); + +describe("createBlockReplyContentKey", () => { + it("produces the same key for payloads differing only by replyToId", () => { + const a = createBlockReplyContentKey({ text: "hello world", replyToId: "post-1" }); + const b = createBlockReplyContentKey({ text: "hello world", replyToId: "post-2" }); + const c = createBlockReplyContentKey({ text: "hello world" }); + expect(a).toBe(b); + expect(a).toBe(c); + }); +}); + +describe("createBlockReplyPipeline dedup with threading", () => { + it("keeps separate deliveries for same text with different replyToId", async () => { + const sent: Array<{ text?: string; replyToId?: string }> = []; + const pipeline = createBlockReplyPipeline({ + onBlockReply: async (payload) => { + sent.push({ text: payload.text, 
replyToId: payload.replyToId }); + }, + timeoutMs: 5000, + }); + + pipeline.enqueue({ text: "response text", replyToId: "thread-root-1" }); + pipeline.enqueue({ text: "response text", replyToId: undefined }); + await pipeline.flush(); + + expect(sent).toEqual([ + { text: "response text", replyToId: "thread-root-1" }, + { text: "response text", replyToId: undefined }, + ]); + }); + + it("hasSentPayload matches regardless of replyToId", async () => { + const pipeline = createBlockReplyPipeline({ + onBlockReply: async () => {}, + timeoutMs: 5000, + }); + + pipeline.enqueue({ text: "response text", replyToId: "thread-root-1" }); + await pipeline.flush(); + + // Final payload with no replyToId should be recognized as already sent + expect(pipeline.hasSentPayload({ text: "response text" })).toBe(true); + expect(pipeline.hasSentPayload({ text: "response text", replyToId: "other-id" })).toBe(true); + }); +}); diff --git a/src/auto-reply/reply/block-reply-pipeline.ts b/src/auto-reply/reply/block-reply-pipeline.ts index 752c70a1da2..9ce85334238 100644 --- a/src/auto-reply/reply/block-reply-pipeline.ts +++ b/src/auto-reply/reply/block-reply-pipeline.ts @@ -48,6 +48,19 @@ export function createBlockReplyPayloadKey(payload: ReplyPayload): string { }); } +export function createBlockReplyContentKey(payload: ReplyPayload): string { + const text = payload.text?.trim() ?? ""; + const mediaList = payload.mediaUrls?.length + ? payload.mediaUrls + : payload.mediaUrl + ? [payload.mediaUrl] + : []; + // Content-only key used for final-payload suppression after block streaming. + // This intentionally ignores replyToId so a streamed threaded payload and the + // later final payload still collapse when they carry the same content. 
+ return JSON.stringify({ text, mediaList }); +} + const withTimeout = async ( promise: Promise, timeoutMs: number, @@ -80,6 +93,7 @@ export function createBlockReplyPipeline(params: { }): BlockReplyPipeline { const { onBlockReply, timeoutMs, coalescing, buffer } = params; const sentKeys = new Set(); + const sentContentKeys = new Set(); const pendingKeys = new Set(); const seenKeys = new Set(); const bufferedKeys = new Set(); @@ -95,6 +109,7 @@ export function createBlockReplyPipeline(params: { return; } const payloadKey = createBlockReplyPayloadKey(payload); + const contentKey = createBlockReplyContentKey(payload); if (!bypassSeenCheck) { if (seenKeys.has(payloadKey)) { return; @@ -130,6 +145,7 @@ export function createBlockReplyPipeline(params: { return; } sentKeys.add(payloadKey); + sentContentKeys.add(contentKey); didStream = true; }) .catch((err) => { @@ -238,8 +254,8 @@ export function createBlockReplyPipeline(params: { didStream: () => didStream, isAborted: () => aborted, hasSentPayload: (payload) => { - const payloadKey = createBlockReplyPayloadKey(payload); - return sentKeys.has(payloadKey); + const payloadKey = createBlockReplyContentKey(payload); + return sentContentKeys.has(payloadKey); }, }; } diff --git a/src/auto-reply/reply/block-streaming.ts b/src/auto-reply/reply/block-streaming.ts index 6d306b166c1..b24ee8cac1a 100644 --- a/src/auto-reply/reply/block-streaming.ts +++ b/src/auto-reply/reply/block-streaming.ts @@ -26,6 +26,22 @@ function normalizeChunkProvider(provider?: string): TextChunkProvider | undefine : undefined; } +function resolveProviderChunkContext( + cfg: OpenClawConfig | undefined, + provider?: string, + accountId?: string | null, +) { + const providerKey = normalizeChunkProvider(provider); + const providerId = providerKey ? normalizeChannelId(providerKey) : null; + const providerChunkLimit = providerId + ? 
getChannelDock(providerId)?.outbound?.textChunkLimit + : undefined; + const textLimit = resolveTextChunkLimit(cfg, providerKey, accountId, { + fallbackLimit: providerChunkLimit, + }); + return { providerKey, providerId, textLimit }; +} + type ProviderBlockStreamingConfig = { blockStreamingCoalesce?: BlockStreamingCoalesceConfig; accounts?: Record; @@ -97,14 +113,7 @@ export function resolveEffectiveBlockStreamingConfig(params: { chunking: BlockStreamingChunking; coalescing: BlockStreamingCoalescing; } { - const providerKey = normalizeChunkProvider(params.provider); - const providerId = providerKey ? normalizeChannelId(providerKey) : null; - const providerChunkLimit = providerId - ? getChannelDock(providerId)?.outbound?.textChunkLimit - : undefined; - const textLimit = resolveTextChunkLimit(params.cfg, providerKey, params.accountId, { - fallbackLimit: providerChunkLimit, - }); + const { textLimit } = resolveProviderChunkContext(params.cfg, params.provider, params.accountId); const chunkingDefaults = params.chunking ?? resolveBlockStreamingChunking(params.cfg, params.provider, params.accountId); const chunkingMax = clampPositiveInteger(params.maxChunkChars, chunkingDefaults.maxChars, { @@ -154,21 +163,13 @@ export function resolveBlockStreamingChunking( provider?: string, accountId?: string | null, ): BlockStreamingChunking { - const providerKey = normalizeChunkProvider(provider); - const providerConfigKey = providerKey; - const providerId = providerKey ? normalizeChannelId(providerKey) : null; - const providerChunkLimit = providerId - ? getChannelDock(providerId)?.outbound?.textChunkLimit - : undefined; - const textLimit = resolveTextChunkLimit(cfg, providerConfigKey, accountId, { - fallbackLimit: providerChunkLimit, - }); + const { providerKey, textLimit } = resolveProviderChunkContext(cfg, provider, accountId); const chunkCfg = cfg?.agents?.defaults?.blockStreamingChunk; // When chunkMode="newline", the outbound delivery splits on paragraph boundaries. 
// The block chunker should flush eagerly on \n\n boundaries during streaming, // regardless of minChars, so each paragraph is sent as its own message. - const chunkMode = resolveChunkMode(cfg, providerConfigKey, accountId); + const chunkMode = resolveChunkMode(cfg, providerKey, accountId); const maxRequested = Math.max(1, Math.floor(chunkCfg?.maxChars ?? DEFAULT_BLOCK_STREAM_MAX)); const maxChars = Math.max(1, Math.min(maxRequested, textLimit)); @@ -198,20 +199,15 @@ export function resolveBlockStreamingCoalescing( }, opts?: { chunkMode?: "length" | "newline" }, ): BlockStreamingCoalescing | undefined { - const providerKey = normalizeChunkProvider(provider); - const providerConfigKey = providerKey; + const { providerKey, providerId, textLimit } = resolveProviderChunkContext( + cfg, + provider, + accountId, + ); // Resolve the outbound chunkMode so the coalescer can flush on paragraph boundaries // when chunkMode="newline", matching the delivery-time splitting behavior. - const chunkMode = opts?.chunkMode ?? resolveChunkMode(cfg, providerConfigKey, accountId); - - const providerId = providerKey ? normalizeChannelId(providerKey) : null; - const providerChunkLimit = providerId - ? getChannelDock(providerId)?.outbound?.textChunkLimit - : undefined; - const textLimit = resolveTextChunkLimit(cfg, providerConfigKey, accountId, { - fallbackLimit: providerChunkLimit, - }); + const chunkMode = opts?.chunkMode ?? resolveChunkMode(cfg, providerKey, accountId); const providerDefaults = providerId ? 
getChannelDock(providerId)?.streaming?.blockStreamingCoalesceDefaults : undefined; diff --git a/src/auto-reply/reply/command-gates.ts b/src/auto-reply/reply/command-gates.ts index 49cf21c6861..1f0b441f51a 100644 --- a/src/auto-reply/reply/command-gates.ts +++ b/src/auto-reply/reply/command-gates.ts @@ -1,6 +1,7 @@ import type { CommandFlagKey } from "../../config/commands.js"; import { isCommandFlagEnabled } from "../../config/commands.js"; import { logVerbose } from "../../globals.js"; +import { redactIdentifier } from "../../logging/redact-identifier.js"; import { isInternalMessageChannel } from "../../utils/message-channel.js"; import type { ReplyPayload } from "../types.js"; import type { CommandHandlerResult, HandleCommandsParams } from "./commands-types.js"; @@ -13,7 +14,20 @@ export function rejectUnauthorizedCommand( return null; } logVerbose( - `Ignoring ${commandLabel} from unauthorized sender: ${params.command.senderId || ""}`, + `Ignoring ${commandLabel} from unauthorized sender: ${redactIdentifier(params.command.senderId)}`, + ); + return { shouldContinue: false }; +} + +export function rejectNonOwnerCommand( + params: HandleCommandsParams, + commandLabel: string, +): CommandHandlerResult | null { + if (params.command.senderIsOwner) { + return null; + } + logVerbose( + `Ignoring ${commandLabel} from non-owner sender: ${redactIdentifier(params.command.senderId)}`, ); return { shouldContinue: false }; } diff --git a/src/auto-reply/reply/commands-acp/context.ts b/src/auto-reply/reply/commands-acp/context.ts index 16291713fda..84acb828015 100644 --- a/src/auto-reply/reply/commands-acp/context.ts +++ b/src/auto-reply/reply/commands-acp/context.ts @@ -1,40 +1,33 @@ import { buildTelegramTopicConversationId, + normalizeConversationText, parseTelegramChatIdFromTarget, } from "../../../acp/conversation-id.js"; import { DISCORD_THREAD_BINDING_CHANNEL } from "../../../channels/thread-bindings-policy.js"; import { resolveConversationIdFromTargets } from 
"../../../infra/outbound/conversation-id.js"; -import { parseAgentSessionKey } from "../../../routing/session-key.js"; import type { HandleCommandsParams } from "../commands-types.js"; +import { parseDiscordParentChannelFromSessionKey } from "../discord-parent-channel.js"; import { resolveTelegramConversationId } from "../telegram-context.js"; -function normalizeString(value: unknown): string { - if (typeof value === "string") { - return value.trim(); - } - if (typeof value === "number" || typeof value === "bigint" || typeof value === "boolean") { - return `${value}`.trim(); - } - return ""; -} - export function resolveAcpCommandChannel(params: HandleCommandsParams): string { const raw = params.ctx.OriginatingChannel ?? params.command.channel ?? params.ctx.Surface ?? params.ctx.Provider; - return normalizeString(raw).toLowerCase(); + return normalizeConversationText(raw).toLowerCase(); } export function resolveAcpCommandAccountId(params: HandleCommandsParams): string { - const accountId = normalizeString(params.ctx.AccountId); + const accountId = normalizeConversationText(params.ctx.AccountId); return accountId || "default"; } export function resolveAcpCommandThreadId(params: HandleCommandsParams): string | undefined { const threadId = - params.ctx.MessageThreadId != null ? normalizeString(String(params.ctx.MessageThreadId)) : ""; + params.ctx.MessageThreadId != null + ? normalizeConversationText(String(params.ctx.MessageThreadId)) + : ""; return threadId || undefined; } @@ -71,21 +64,8 @@ export function resolveAcpCommandConversationId(params: HandleCommandsParams): s }); } -function parseDiscordParentChannelFromSessionKey(raw: unknown): string | undefined { - const sessionKey = normalizeString(raw); - if (!sessionKey) { - return undefined; - } - const scoped = parseAgentSessionKey(sessionKey)?.rest ?? 
sessionKey.toLowerCase(); - const match = scoped.match(/(?:^|:)channel:([^:]+)$/); - if (!match?.[1]) { - return undefined; - } - return match[1]; -} - function parseDiscordParentChannelFromContext(raw: unknown): string | undefined { - const parentId = normalizeString(raw); + const parentId = normalizeConversationText(raw); if (!parentId) { return undefined; } diff --git a/src/auto-reply/reply/commands-allowlist.ts b/src/auto-reply/reply/commands-allowlist.ts index 766bb5f41b3..fcecb0b31f3 100644 --- a/src/auto-reply/reply/commands-allowlist.ts +++ b/src/auto-reply/reply/commands-allowlist.ts @@ -1,5 +1,5 @@ import { getChannelDock } from "../../channels/dock.js"; -import { resolveChannelConfigWrites } from "../../channels/plugins/config-writes.js"; +import { resolveExplicitConfigWriteTarget } from "../../channels/plugins/config-writes.js"; import { listPairingChannels } from "../../channels/plugins/pairing.js"; import type { ChannelId } from "../../channels/plugins/types.js"; import { normalizeChannelId } from "../../channels/registry.js"; @@ -31,6 +31,7 @@ import { resolveTelegramAccount } from "../../telegram/accounts.js"; import { resolveWhatsAppAccount } from "../../web/accounts.js"; import { rejectUnauthorizedCommand, requireCommandFlagEnabled } from "./command-gates.js"; import type { CommandHandler } from "./commands-types.js"; +import { resolveConfigWriteDeniedText } from "./config-write-authorization.js"; type AllowlistScope = "dm" | "group" | "all"; type AllowlistAction = "list" | "add" | "remove"; @@ -231,12 +232,22 @@ function resolveAccountTarget( const channel = (channels[channelId] ??= {}) as Record; const normalizedAccountId = normalizeAccountId(accountId); if (isBlockedObjectKey(normalizedAccountId)) { - return { target: channel, pathPrefix: `channels.${channelId}`, accountId: DEFAULT_ACCOUNT_ID }; + return { + target: channel, + pathPrefix: `channels.${channelId}`, + accountId: DEFAULT_ACCOUNT_ID, + writeTarget: resolveExplicitConfigWriteTarget({ 
channelId }), + }; } const hasAccounts = Boolean(channel.accounts && typeof channel.accounts === "object"); const useAccount = normalizedAccountId !== DEFAULT_ACCOUNT_ID || hasAccounts; if (!useAccount) { - return { target: channel, pathPrefix: `channels.${channelId}`, accountId: normalizedAccountId }; + return { + target: channel, + pathPrefix: `channels.${channelId}`, + accountId: normalizedAccountId, + writeTarget: resolveExplicitConfigWriteTarget({ channelId }), + }; } const accounts = (channel.accounts ??= {}) as Record; const existingAccount = Object.hasOwn(accounts, normalizedAccountId) @@ -250,6 +261,10 @@ function resolveAccountTarget( target: account, pathPrefix: `channels.${channelId}.accounts.${normalizedAccountId}`, accountId: normalizedAccountId, + writeTarget: resolveExplicitConfigWriteTarget({ + channelId, + accountId: normalizedAccountId, + }), }; } @@ -585,19 +600,6 @@ export const handleAllowlistCommand: CommandHandler = async (params, allowTextCo const shouldTouchStore = parsed.target !== "config" && listPairingChannels().includes(channelId); if (shouldUpdateConfig) { - const allowWrites = resolveChannelConfigWrites({ - cfg: params.cfg, - channelId, - accountId: params.ctx.AccountId, - }); - if (!allowWrites) { - const hint = `channels.${channelId}.configWrites=true`; - return { - shouldContinue: false, - reply: { text: `⚠️ Config writes are disabled for ${channelId}. 
Set ${hint} to enable.` }, - }; - } - const allowlistPath = resolveChannelAllowFromPaths(channelId, scope); if (!allowlistPath) { return { @@ -620,7 +622,25 @@ export const handleAllowlistCommand: CommandHandler = async (params, allowTextCo target, pathPrefix, accountId: normalizedAccountId, + writeTarget, } = resolveAccountTarget(parsedConfig, channelId, accountId); + const deniedText = resolveConfigWriteDeniedText({ + cfg: params.cfg, + channel: params.command.channel, + channelId, + accountId: params.ctx.AccountId, + gatewayClientScopes: params.ctx.GatewayClientScopes, + target: writeTarget, + }); + if (deniedText) { + return { + shouldContinue: false, + reply: { + text: deniedText, + }, + }; + } + const existing: string[] = []; const existingPaths = scope === "dm" && (channelId === "slack" || channelId === "discord") diff --git a/src/auto-reply/reply/commands-approve.ts b/src/auto-reply/reply/commands-approve.ts index 9773ba03ad5..5b0caec9c8f 100644 --- a/src/auto-reply/reply/commands-approve.ts +++ b/src/auto-reply/reply/commands-approve.ts @@ -1,10 +1,15 @@ import { callGateway } from "../../gateway/call.js"; import { logVerbose } from "../../globals.js"; +import { + isTelegramExecApprovalApprover, + isTelegramExecApprovalClientEnabled, +} from "../../telegram/exec-approvals.js"; import { GATEWAY_CLIENT_MODES, GATEWAY_CLIENT_NAMES } from "../../utils/message-channel.js"; import { requireGatewayClientScopeForInternalChannel } from "./command-gates.js"; import type { CommandHandler } from "./commands-types.js"; -const COMMAND = "/approve"; +const COMMAND_REGEX = /^\/approve(?:\s|$)/i; +const FOREIGN_COMMAND_MENTION_REGEX = /^\/approve@([^\s]+)(?:\s|$)/i; const DECISION_ALIASES: Record = { allow: "allow-once", @@ -25,10 +30,14 @@ type ParsedApproveCommand = function parseApproveCommand(raw: string): ParsedApproveCommand | null { const trimmed = raw.trim(); - if (!trimmed.toLowerCase().startsWith(COMMAND)) { + if (FOREIGN_COMMAND_MENTION_REGEX.test(trimmed)) { + 
return { ok: false, error: "❌ This /approve command targets a different Telegram bot." }; + } + const commandMatch = trimmed.match(COMMAND_REGEX); + if (!commandMatch) { return null; } - const rest = trimmed.slice(COMMAND.length).trim(); + const rest = trimmed.slice(commandMatch[0].length).trim(); if (!rest) { return { ok: false, error: "Usage: /approve allow-once|allow-always|deny" }; } @@ -83,6 +92,29 @@ export const handleApproveCommand: CommandHandler = async (params, allowTextComm return { shouldContinue: false, reply: { text: parsed.error } }; } + if (params.command.channel === "telegram") { + if ( + !isTelegramExecApprovalClientEnabled({ cfg: params.cfg, accountId: params.ctx.AccountId }) + ) { + return { + shouldContinue: false, + reply: { text: "❌ Telegram exec approvals are not enabled for this bot account." }, + }; + } + if ( + !isTelegramExecApprovalApprover({ + cfg: params.cfg, + accountId: params.ctx.AccountId, + senderId: params.command.senderId, + }) + ) { + return { + shouldContinue: false, + reply: { text: "❌ You are not authorized to approve exec requests on Telegram." 
}, + }; + } + } + const missingScope = requireGatewayClientScopeForInternalChannel(params, { label: "/approve", allowedScopes: ["operator.approvals", "operator.admin"], diff --git a/src/auto-reply/reply/commands-config.ts b/src/auto-reply/reply/commands-config.ts index 00ef8048efe..b40032758d3 100644 --- a/src/auto-reply/reply/commands-config.ts +++ b/src/auto-reply/reply/commands-config.ts @@ -1,4 +1,4 @@ -import { resolveChannelConfigWrites } from "../../channels/plugins/config-writes.js"; +import { resolveConfigWriteTargetFromPath } from "../../channels/plugins/config-writes.js"; import { normalizeChannelId } from "../../channels/registry.js"; import { getConfigValueAtPath, @@ -17,13 +17,16 @@ import { setConfigOverride, unsetConfigOverride, } from "../../config/runtime-overrides.js"; +import { isInternalMessageChannel } from "../../utils/message-channel.js"; import { + rejectNonOwnerCommand, rejectUnauthorizedCommand, requireCommandFlagEnabled, requireGatewayClientScopeForInternalChannel, } from "./command-gates.js"; import type { CommandHandler } from "./commands-types.js"; import { parseConfigCommand } from "./config-commands.js"; +import { resolveConfigWriteDeniedText } from "./config-write-authorization.js"; import { parseDebugCommand } from "./debug-commands.js"; export const handleConfigCommand: CommandHandler = async (params, allowTextCommands) => { @@ -38,6 +41,12 @@ export const handleConfigCommand: CommandHandler = async (params, allowTextComma if (unauthorized) { return unauthorized; } + const allowInternalReadOnlyShow = + configCommand.action === "show" && isInternalMessageChannel(params.command.channel); + const nonOwner = allowInternalReadOnlyShow ? 
null : rejectNonOwnerCommand(params, "/config"); + if (nonOwner) { + return nonOwner; + } const disabled = requireCommandFlagEnabled(params.cfg, { label: "/config", configKey: "config", @@ -52,6 +61,7 @@ export const handleConfigCommand: CommandHandler = async (params, allowTextComma }; } + let parsedWritePath: string[] | undefined; if (configCommand.action === "set" || configCommand.action === "unset") { const missingAdminScope = requireGatewayClientScopeForInternalChannel(params, { label: "/config write", @@ -61,21 +71,28 @@ export const handleConfigCommand: CommandHandler = async (params, allowTextComma if (missingAdminScope) { return missingAdminScope; } + const parsedPath = parseConfigPath(configCommand.path); + if (!parsedPath.ok || !parsedPath.path) { + return { + shouldContinue: false, + reply: { text: `⚠️ ${parsedPath.error ?? "Invalid path."}` }, + }; + } + parsedWritePath = parsedPath.path; const channelId = params.command.channelId ?? normalizeChannelId(params.command.channel); - const allowWrites = resolveChannelConfigWrites({ + const deniedText = resolveConfigWriteDeniedText({ cfg: params.cfg, + channel: params.command.channel, channelId, accountId: params.ctx.AccountId, + gatewayClientScopes: params.ctx.GatewayClientScopes, + target: resolveConfigWriteTargetFromPath(parsedWritePath), }); - if (!allowWrites) { - const channelLabel = channelId ?? "this channel"; - const hint = channelId - ? `channels.${channelId}.configWrites=true` - : "channels..configWrites=true"; + if (deniedText) { return { shouldContinue: false, reply: { - text: `⚠️ Config writes are disabled for ${channelLabel}. 
Set ${hint} to enable.`, + text: deniedText, }, }; } @@ -119,14 +136,7 @@ export const handleConfigCommand: CommandHandler = async (params, allowTextComma } if (configCommand.action === "unset") { - const parsedPath = parseConfigPath(configCommand.path); - if (!parsedPath.ok || !parsedPath.path) { - return { - shouldContinue: false, - reply: { text: `⚠️ ${parsedPath.error ?? "Invalid path."}` }, - }; - } - const removed = unsetConfigValueAtPath(parsedBase, parsedPath.path); + const removed = unsetConfigValueAtPath(parsedBase, parsedWritePath ?? []); if (!removed) { return { shouldContinue: false, @@ -151,14 +161,7 @@ export const handleConfigCommand: CommandHandler = async (params, allowTextComma } if (configCommand.action === "set") { - const parsedPath = parseConfigPath(configCommand.path); - if (!parsedPath.ok || !parsedPath.path) { - return { - shouldContinue: false, - reply: { text: `⚠️ ${parsedPath.error ?? "Invalid path."}` }, - }; - } - setConfigValueAtPath(parsedBase, parsedPath.path, configCommand.value); + setConfigValueAtPath(parsedBase, parsedWritePath ?? [], configCommand.value); const validated = validateConfigObjectWithPlugins(parsedBase); if (!validated.ok) { const issue = validated.issues[0]; @@ -197,6 +200,10 @@ export const handleDebugCommand: CommandHandler = async (params, allowTextComman if (unauthorized) { return unauthorized; } + const nonOwner = rejectNonOwnerCommand(params, "/debug"); + if (nonOwner) { + return nonOwner; + } const disabled = requireCommandFlagEnabled(params.cfg, { label: "/debug", configKey: "debug", diff --git a/src/auto-reply/reply/commands-context.ts b/src/auto-reply/reply/commands-context.ts index 3d177c2b5f9..1c5056b4b46 100644 --- a/src/auto-reply/reply/commands-context.ts +++ b/src/auto-reply/reply/commands-context.ts @@ -26,6 +26,7 @@ export function buildCommandContext(params: { const rawBodyNormalized = triggerBodyNormalized; const commandBodyNormalized = normalizeCommandBody( isGroup ? 
stripMentions(rawBodyNormalized, ctx, cfg, agentId) : rawBodyNormalized, + { botUsername: ctx.BotUsername }, ); return { diff --git a/src/auto-reply/reply/commands-core.ts b/src/auto-reply/reply/commands-core.ts index 894724bcfb0..ca67bbc3549 100644 --- a/src/auto-reply/reply/commands-core.ts +++ b/src/auto-reply/reply/commands-core.ts @@ -26,6 +26,7 @@ import { handlePluginCommand } from "./commands-plugin.js"; import { handleAbortTrigger, handleActivationCommand, + handleFastCommand, handleRestartCommand, handleSessionCommand, handleSendPolicyCommand, @@ -176,6 +177,7 @@ export async function handleCommands(params: HandleCommandsParams): Promise[0], + abortTarget: AbortTarget, +) { + return { + abortTarget, + sessionStore: params.sessionStore, + storePath: params.storePath, + abortKey: params.command.abortKey, + abortCutoff: resolveAbortCutoffForTarget({ + ctx: params.ctx, + commandSessionKey: params.sessionKey, + targetSessionKey: abortTarget.key, + }), + }; +} + export const handleStopCommand: CommandHandler = async (params, allowTextCommands) => { if (!allowTextCommands) { return null; @@ -109,17 +126,7 @@ export const handleStopCommand: CommandHandler = async (params, allowTextCommand `stop: cleared followups=${cleared.followupCleared} lane=${cleared.laneCleared} keys=${cleared.keys.join(",")}`, ); } - await applyAbortTarget({ - abortTarget, - sessionStore: params.sessionStore, - storePath: params.storePath, - abortKey: params.command.abortKey, - abortCutoff: resolveAbortCutoffForTarget({ - ctx: params.ctx, - commandSessionKey: params.sessionKey, - targetSessionKey: abortTarget.key, - }), - }); + await applyAbortTarget(buildAbortTargetApplyParams(params, abortTarget)); // Trigger internal hook for stop command const hookEvent = createInternalHookEvent( @@ -160,16 +167,6 @@ export const handleAbortTrigger: CommandHandler = async (params, allowTextComman sessionEntry: params.sessionEntry, sessionStore: params.sessionStore, }); - await applyAbortTarget({ - 
abortTarget, - sessionStore: params.sessionStore, - storePath: params.storePath, - abortKey: params.command.abortKey, - abortCutoff: resolveAbortCutoffForTarget({ - ctx: params.ctx, - commandSessionKey: params.sessionKey, - targetSessionKey: abortTarget.key, - }), - }); + await applyAbortTarget(buildAbortTargetApplyParams(params, abortTarget)); return { shouldContinue: false, reply: { text: "⚙️ Agent was aborted." } }; }; diff --git a/src/auto-reply/reply/commands-session-lifecycle.test.ts b/src/auto-reply/reply/commands-session-lifecycle.test.ts index 79882f13921..baf5addc60e 100644 --- a/src/auto-reply/reply/commands-session-lifecycle.test.ts +++ b/src/auto-reply/reply/commands-session-lifecycle.test.ts @@ -139,6 +139,21 @@ function createTelegramBinding(overrides?: Partial): Sessi }; } +function expectIdleTimeoutSetReply( + mock: ReturnType, + text: string, + idleTimeoutMs: number, + idleTimeoutLabel: string, +) { + expect(mock).toHaveBeenCalledWith({ + targetSessionKey: "agent:main:subagent:child", + accountId: "default", + idleTimeoutMs, + }); + expect(text).toContain(`Idle timeout set to ${idleTimeoutLabel}`); + expect(text).toContain("2026-02-20T02:00:00.000Z"); +} + function createFakeThreadBindingManager(binding: FakeBinding | null) { return { getByThreadId: vi.fn((_threadId: string) => binding), @@ -175,13 +190,12 @@ describe("/session idle and /session max-age", () => { const result = await handleSessionCommand(createDiscordCommandParams("/session idle 2h"), true); const text = result?.reply?.text ?? 
""; - expect(hoisted.setThreadBindingIdleTimeoutBySessionKeyMock).toHaveBeenCalledWith({ - targetSessionKey: "agent:main:subagent:child", - accountId: "default", - idleTimeoutMs: 2 * 60 * 60 * 1000, - }); - expect(text).toContain("Idle timeout set to 2h"); - expect(text).toContain("2026-02-20T02:00:00.000Z"); + expectIdleTimeoutSetReply( + hoisted.setThreadBindingIdleTimeoutBySessionKeyMock, + text, + 2 * 60 * 60 * 1000, + "2h", + ); }); it("shows active idle timeout when no value is provided", async () => { @@ -248,13 +262,12 @@ describe("/session idle and /session max-age", () => { ); const text = result?.reply?.text ?? ""; - expect(hoisted.setTelegramThreadBindingIdleTimeoutBySessionKeyMock).toHaveBeenCalledWith({ - targetSessionKey: "agent:main:subagent:child", - accountId: "default", - idleTimeoutMs: 2 * 60 * 60 * 1000, - }); - expect(text).toContain("Idle timeout set to 2h"); - expect(text).toContain("2026-02-20T02:00:00.000Z"); + expectIdleTimeoutSetReply( + hoisted.setTelegramThreadBindingIdleTimeoutBySessionKeyMock, + text, + 2 * 60 * 60 * 1000, + "2h", + ); }); it("reports Telegram max-age expiry from the original bind time", async () => { diff --git a/src/auto-reply/reply/commands-session.ts b/src/auto-reply/reply/commands-session.ts index 106525492f7..c4d0c88e432 100644 --- a/src/auto-reply/reply/commands-session.ts +++ b/src/auto-reply/reply/commands-session.ts @@ -1,3 +1,4 @@ +import { resolveFastModeState } from "../../agents/fast-mode.js"; import { parseDurationMs } from "../../cli/parse-duration.js"; import { isRestartEnabled } from "../../config/commands.js"; import { @@ -22,7 +23,7 @@ import { import { formatTokenCount, formatUsd } from "../../utils/usage-format.js"; import { parseActivationCommand } from "../group-activation.js"; import { parseSendPolicyCommand } from "../send-policy.js"; -import { normalizeUsageDisplay, resolveResponseUsageMode } from "../thinking.js"; +import { normalizeFastMode, normalizeUsageDisplay, resolveResponseUsageMode 
} from "../thinking.js"; import { isDiscordSurface, isTelegramSurface, resolveChannelAccountId } from "./channel-context.js"; import { handleAbortTrigger, handleStopCommand } from "./commands-session-abort.js"; import { persistSessionEntry } from "./commands-session-store.js"; @@ -291,6 +292,57 @@ export const handleUsageCommand: CommandHandler = async (params, allowTextComman }; }; +export const handleFastCommand: CommandHandler = async (params, allowTextCommands) => { + if (!allowTextCommands) { + return null; + } + const normalized = params.command.commandBodyNormalized; + if (normalized !== "/fast" && !normalized.startsWith("/fast ")) { + return null; + } + if (!params.command.isAuthorizedSender) { + logVerbose( + `Ignoring /fast from unauthorized sender: ${params.command.senderId || ""}`, + ); + return { shouldContinue: false }; + } + + const rawArgs = normalized === "/fast" ? "" : normalized.slice("/fast".length).trim(); + const rawMode = rawArgs.toLowerCase(); + if (!rawMode || rawMode === "status") { + const state = resolveFastModeState({ + cfg: params.cfg, + provider: params.provider, + model: params.model, + sessionEntry: params.sessionEntry, + }); + const suffix = + state.source === "config" ? " (config)" : state.source === "default" ? " (default)" : ""; + return { + shouldContinue: false, + reply: { text: `⚙️ Current fast mode: ${state.enabled ? "on" : "off"}${suffix}.` }, + }; + } + + const nextMode = normalizeFastMode(rawMode); + if (nextMode === undefined) { + return { + shouldContinue: false, + reply: { text: "⚙️ Usage: /fast status|on|off" }, + }; + } + + if (params.sessionEntry && params.sessionStore && params.sessionKey) { + params.sessionEntry.fastMode = nextMode; + await persistSessionEntry(params); + } + + return { + shouldContinue: false, + reply: { text: `⚙️ Fast mode ${nextMode ? 
"enabled" : "disabled"}.` }, + }; +}; + export const handleSessionCommand: CommandHandler = async (params, allowTextCommands) => { if (!allowTextCommands) { return null; diff --git a/src/auto-reply/reply/commands-status.ts b/src/auto-reply/reply/commands-status.ts index 50d007321c4..f802a7c6050 100644 --- a/src/auto-reply/reply/commands-status.ts +++ b/src/auto-reply/reply/commands-status.ts @@ -3,6 +3,7 @@ import { resolveDefaultAgentId, resolveSessionAgentId, } from "../../agents/agent-scope.js"; +import { resolveFastModeState } from "../../agents/fast-mode.js"; import { resolveModelAuthLabel } from "../../agents/model-auth-label.js"; import { listSubagentRunsForRequester } from "../../agents/subagent-registry.js"; import { @@ -40,6 +41,7 @@ export async function buildStatusReply(params: { model: string; contextTokens: number; resolvedThinkLevel?: ThinkLevel; + resolvedFastMode?: boolean; resolvedVerboseLevel: VerboseLevel; resolvedReasoningLevel: ReasoningLevel; resolvedElevatedLevel?: ElevatedLevel; @@ -60,6 +62,7 @@ export async function buildStatusReply(params: { model, contextTokens, resolvedThinkLevel, + resolvedFastMode, resolvedVerboseLevel, resolvedReasoningLevel, resolvedElevatedLevel, @@ -160,6 +163,14 @@ export async function buildStatusReply(params: { }) : selectedModelAuth; const agentDefaults = cfg.agents?.defaults ?? {}; + const effectiveFastMode = + resolvedFastMode ?? + resolveFastModeState({ + cfg, + provider, + model, + sessionEntry, + }).enabled; const statusText = buildStatusMessage({ config: cfg, agent: { @@ -181,6 +192,7 @@ export async function buildStatusReply(params: { sessionStorePath: storePath, groupActivation, resolvedThink: resolvedThinkLevel ?? 
(await resolveDefaultThinkingLevel()), + resolvedFast: effectiveFastMode, resolvedVerbose: resolvedVerboseLevel, resolvedReasoning: resolvedReasoningLevel, resolvedElevated: resolvedElevatedLevel, diff --git a/src/auto-reply/reply/commands-subagents.ts b/src/auto-reply/reply/commands-subagents.ts index 906ad93eb48..cffc6e003a8 100644 --- a/src/auto-reply/reply/commands-subagents.ts +++ b/src/auto-reply/reply/commands-subagents.ts @@ -1,4 +1,4 @@ -import { listSubagentRunsForRequester } from "../../agents/subagent-registry.js"; +import { listSubagentRunsForController } from "../../agents/subagent-registry.js"; import { logVerbose } from "../../globals.js"; import { handleSubagentsAgentsAction } from "./commands-subagents/action-agents.js"; import { handleSubagentsFocusAction } from "./commands-subagents/action-focus.js"; @@ -61,7 +61,7 @@ export const handleSubagentsCommand: CommandHandler = async (params, allowTextCo params, handledPrefix, requesterKey, - runs: listSubagentRunsForRequester(requesterKey), + runs: listSubagentRunsForController(requesterKey), restTokens, }; diff --git a/src/auto-reply/reply/commands-subagents/action-kill.ts b/src/auto-reply/reply/commands-subagents/action-kill.ts index cb91b4432f7..597e3b4c9c4 100644 --- a/src/auto-reply/reply/commands-subagents/action-kill.ts +++ b/src/auto-reply/reply/commands-subagents/action-kill.ts @@ -1,19 +1,13 @@ -import { abortEmbeddedPiRun } from "../../../agents/pi-embedded.js"; -import { markSubagentRunTerminated } from "../../../agents/subagent-registry.js"; import { - loadSessionStore, - resolveStorePath, - updateSessionStore, -} from "../../../config/sessions.js"; -import { logVerbose } from "../../../globals.js"; -import { stopSubagentsForRequester } from "../abort.js"; + killAllControlledSubagentRuns, + killControlledSubagentRun, +} from "../../../agents/subagent-control.js"; import type { CommandHandlerResult } from "../commands-types.js"; -import { clearSessionQueues } from "../queue.js"; import { 
formatRunLabel } from "../subagents-utils.js"; import { type SubagentsCommandContext, COMMAND, - loadSubagentSessionEntry, + resolveCommandSubagentController, resolveSubagentEntryForToken, stopWithText, } from "./shared.js"; @@ -30,10 +24,18 @@ export async function handleSubagentsKillAction( } if (target === "all" || target === "*") { - stopSubagentsForRequester({ + const controller = resolveCommandSubagentController(params, requesterKey); + const result = await killAllControlledSubagentRuns({ cfg: params.cfg, - requesterSessionKey: requesterKey, + controller, + runs, }); + if (result.status === "forbidden") { + return stopWithText(`⚠️ ${result.error}`); + } + if (result.killed > 0) { + return { shouldContinue: false }; + } return { shouldContinue: false }; } @@ -45,42 +47,17 @@ export async function handleSubagentsKillAction( return stopWithText(`${formatRunLabel(targetResolution.entry)} is already finished.`); } - const childKey = targetResolution.entry.childSessionKey; - const { storePath, store, entry } = loadSubagentSessionEntry(params, childKey, { - loadSessionStore, - resolveStorePath, - }); - const sessionId = entry?.sessionId; - if (sessionId) { - abortEmbeddedPiRun(sessionId); - } - - const cleared = clearSessionQueues([childKey, sessionId]); - if (cleared.followupCleared > 0 || cleared.laneCleared > 0) { - logVerbose( - `subagents kill: cleared followups=${cleared.followupCleared} lane=${cleared.laneCleared} keys=${cleared.keys.join(",")}`, - ); - } - - if (entry) { - entry.abortedLastRun = true; - entry.updatedAt = Date.now(); - store[childKey] = entry; - await updateSessionStore(storePath, (nextStore) => { - nextStore[childKey] = entry; - }); - } - - markSubagentRunTerminated({ - runId: targetResolution.entry.runId, - childSessionKey: childKey, - reason: "killed", - }); - - stopSubagentsForRequester({ + const controller = resolveCommandSubagentController(params, requesterKey); + const result = await killControlledSubagentRun({ cfg: params.cfg, - 
requesterSessionKey: childKey, + controller, + entry: targetResolution.entry, }); - + if (result.status === "forbidden") { + return stopWithText(`⚠️ ${result.error}`); + } + if (result.status === "done") { + return stopWithText(result.text); + } return { shouldContinue: false }; } diff --git a/src/auto-reply/reply/commands-subagents/action-list.ts b/src/auto-reply/reply/commands-subagents/action-list.ts index 026874e22aa..e777c498d5f 100644 --- a/src/auto-reply/reply/commands-subagents/action-list.ts +++ b/src/auto-reply/reply/commands-subagents/action-list.ts @@ -1,79 +1,26 @@ -import { countPendingDescendantRuns } from "../../../agents/subagent-registry.js"; -import { loadSessionStore, resolveStorePath } from "../../../config/sessions.js"; +import { buildSubagentList } from "../../../agents/subagent-control.js"; import type { CommandHandlerResult } from "../commands-types.js"; -import { sortSubagentRuns } from "../subagents-utils.js"; -import { - type SessionStoreCache, - type SubagentsCommandContext, - RECENT_WINDOW_MINUTES, - formatSubagentListLine, - loadSubagentSessionEntry, - stopWithText, -} from "./shared.js"; +import { type SubagentsCommandContext, RECENT_WINDOW_MINUTES, stopWithText } from "./shared.js"; export function handleSubagentsListAction(ctx: SubagentsCommandContext): CommandHandlerResult { const { params, runs } = ctx; - const sorted = sortSubagentRuns(runs); - const now = Date.now(); - const recentCutoff = now - RECENT_WINDOW_MINUTES * 60_000; - const storeCache: SessionStoreCache = new Map(); - const pendingDescendantCache = new Map(); - const pendingDescendantCount = (sessionKey: string) => { - if (pendingDescendantCache.has(sessionKey)) { - return pendingDescendantCache.get(sessionKey) ?? 
0; - } - const pending = Math.max(0, countPendingDescendantRuns(sessionKey)); - pendingDescendantCache.set(sessionKey, pending); - return pending; - }; - const isActiveRun = (entry: (typeof runs)[number]) => - !entry.endedAt || pendingDescendantCount(entry.childSessionKey) > 0; - - let index = 1; - - const mapRuns = (entries: typeof runs, runtimeMs: (entry: (typeof runs)[number]) => number) => - entries.map((entry) => { - const { entry: sessionEntry } = loadSubagentSessionEntry( - params, - entry.childSessionKey, - { - loadSessionStore, - resolveStorePath, - }, - storeCache, - ); - const line = formatSubagentListLine({ - entry, - index, - runtimeMs: runtimeMs(entry), - sessionEntry, - pendingDescendants: pendingDescendantCount(entry.childSessionKey), - }); - index += 1; - return line; - }); - - const activeEntries = sorted.filter((entry) => isActiveRun(entry)); - const activeLines = mapRuns(activeEntries, (entry) => now - (entry.startedAt ?? entry.createdAt)); - const recentEntries = sorted.filter( - (entry) => !isActiveRun(entry) && !!entry.endedAt && (entry.endedAt ?? 0) >= recentCutoff, - ); - const recentLines = mapRuns( - recentEntries, - (entry) => (entry.endedAt ?? now) - (entry.startedAt ?? 
entry.createdAt), - ); - + const list = buildSubagentList({ + cfg: params.cfg, + runs, + recentMinutes: RECENT_WINDOW_MINUTES, + taskMaxChars: 110, + }); const lines = ["active subagents:", "-----"]; - if (activeLines.length === 0) { + if (list.active.length === 0) { lines.push("(none)"); } else { - lines.push(activeLines.join("\n")); + lines.push(list.active.map((entry) => entry.line).join("\n")); } lines.push("", `recent subagents (last ${RECENT_WINDOW_MINUTES}m):`, "-----"); - if (recentLines.length === 0) { + if (list.recent.length === 0) { lines.push("(none)"); } else { - lines.push(recentLines.join("\n")); + lines.push(list.recent.map((entry) => entry.line).join("\n")); } return stopWithText(lines.join("\n")); diff --git a/src/auto-reply/reply/commands-subagents/action-send.ts b/src/auto-reply/reply/commands-subagents/action-send.ts index d8b752571c0..3e764e2a6bb 100644 --- a/src/auto-reply/reply/commands-subagents/action-send.ts +++ b/src/auto-reply/reply/commands-subagents/action-send.ts @@ -1,27 +1,15 @@ -import crypto from "node:crypto"; -import { AGENT_LANE_SUBAGENT } from "../../../agents/lanes.js"; -import { abortEmbeddedPiRun } from "../../../agents/pi-embedded.js"; import { - clearSubagentRunSteerRestart, - replaceSubagentRunAfterSteer, - markSubagentRunForSteerRestart, -} from "../../../agents/subagent-registry.js"; -import { loadSessionStore, resolveStorePath } from "../../../config/sessions.js"; -import { callGateway } from "../../../gateway/call.js"; -import { logVerbose } from "../../../globals.js"; -import { INTERNAL_MESSAGE_CHANNEL } from "../../../utils/message-channel.js"; + sendControlledSubagentMessage, + steerControlledSubagentRun, +} from "../../../agents/subagent-control.js"; import type { CommandHandlerResult } from "../commands-types.js"; -import { clearSessionQueues } from "../queue.js"; import { formatRunLabel } from "../subagents-utils.js"; import { type SubagentsCommandContext, COMMAND, - STEER_ABORT_SETTLE_TIMEOUT_MS, - 
extractAssistantText, - loadSubagentSessionEntry, + resolveCommandSubagentController, resolveSubagentEntryForToken, stopWithText, - stripToolMessages, } from "./shared.js"; export async function handleSubagentsSendAction( @@ -49,111 +37,41 @@ export async function handleSubagentsSendAction( return stopWithText(`${formatRunLabel(targetResolution.entry)} is already finished.`); } - const { entry: targetSessionEntry } = loadSubagentSessionEntry( - params, - targetResolution.entry.childSessionKey, - { - loadSessionStore, - resolveStorePath, - }, - ); - const targetSessionId = - typeof targetSessionEntry?.sessionId === "string" && targetSessionEntry.sessionId.trim() - ? targetSessionEntry.sessionId.trim() - : undefined; - if (steerRequested) { - markSubagentRunForSteerRestart(targetResolution.entry.runId); - - if (targetSessionId) { - abortEmbeddedPiRun(targetSessionId); - } - - const cleared = clearSessionQueues([targetResolution.entry.childSessionKey, targetSessionId]); - if (cleared.followupCleared > 0 || cleared.laneCleared > 0) { - logVerbose( - `subagents steer: cleared followups=${cleared.followupCleared} lane=${cleared.laneCleared} keys=${cleared.keys.join(",")}`, + const controller = resolveCommandSubagentController(params, ctx.requesterKey); + const result = await steerControlledSubagentRun({ + cfg: params.cfg, + controller, + entry: targetResolution.entry, + message, + }); + if (result.status === "accepted") { + return stopWithText( + `steered ${formatRunLabel(targetResolution.entry)} (run ${result.runId.slice(0, 8)}).`, ); } - - try { - await callGateway({ - method: "agent.wait", - params: { - runId: targetResolution.entry.runId, - timeoutMs: STEER_ABORT_SETTLE_TIMEOUT_MS, - }, - timeoutMs: STEER_ABORT_SETTLE_TIMEOUT_MS + 2_000, - }); - } catch { - // Continue even if wait fails; steer should still be attempted. 
+ if (result.status === "done" && result.text) { + return stopWithText(result.text); } + if (result.status === "error") { + return stopWithText(`send failed: ${result.error ?? "error"}`); + } + return stopWithText(`⚠️ ${result.error ?? "send failed"}`); } - const idempotencyKey = crypto.randomUUID(); - let runId: string = idempotencyKey; - try { - const response = await callGateway<{ runId: string }>({ - method: "agent", - params: { - message, - sessionKey: targetResolution.entry.childSessionKey, - sessionId: targetSessionId, - idempotencyKey, - deliver: false, - channel: INTERNAL_MESSAGE_CHANNEL, - lane: AGENT_LANE_SUBAGENT, - timeout: 0, - }, - timeoutMs: 10_000, - }); - const responseRunId = typeof response?.runId === "string" ? response.runId : undefined; - if (responseRunId) { - runId = responseRunId; - } - } catch (err) { - if (steerRequested) { - clearSubagentRunSteerRestart(targetResolution.entry.runId); - } - const messageText = - err instanceof Error ? err.message : typeof err === "string" ? err : "error"; - return stopWithText(`send failed: ${messageText}`); - } - - if (steerRequested) { - replaceSubagentRunAfterSteer({ - previousRunId: targetResolution.entry.runId, - nextRunId: runId, - fallback: targetResolution.entry, - runTimeoutSeconds: targetResolution.entry.runTimeoutSeconds ?? 
0, - }); - return stopWithText( - `steered ${formatRunLabel(targetResolution.entry)} (run ${runId.slice(0, 8)}).`, - ); - } - - const waitMs = 30_000; - const wait = await callGateway<{ status?: string; error?: string }>({ - method: "agent.wait", - params: { runId, timeoutMs: waitMs }, - timeoutMs: waitMs + 2000, + const result = await sendControlledSubagentMessage({ + cfg: params.cfg, + entry: targetResolution.entry, + message, }); - if (wait?.status === "timeout") { - return stopWithText(`⏳ Subagent still running (run ${runId.slice(0, 8)}).`); + if (result.status === "timeout") { + return stopWithText(`⏳ Subagent still running (run ${result.runId.slice(0, 8)}).`); } - if (wait?.status === "error") { - const waitError = typeof wait.error === "string" ? wait.error : "unknown error"; - return stopWithText(`⚠️ Subagent error: ${waitError} (run ${runId.slice(0, 8)}).`); + if (result.status === "error") { + return stopWithText(`⚠️ Subagent error: ${result.error} (run ${result.runId.slice(0, 8)}).`); } - - const history = await callGateway<{ messages: Array }>({ - method: "chat.history", - params: { sessionKey: targetResolution.entry.childSessionKey, limit: 50 }, - }); - const filtered = stripToolMessages(Array.isArray(history?.messages) ? history.messages : []); - const last = filtered.length > 0 ? filtered[filtered.length - 1] : undefined; - const replyText = last ? extractAssistantText(last) : undefined; return stopWithText( - replyText ?? `✅ Sent to ${formatRunLabel(targetResolution.entry)} (run ${runId.slice(0, 8)}).`, + result.replyText ?? 
+ `✅ Sent to ${formatRunLabel(targetResolution.entry)} (run ${result.runId.slice(0, 8)}).`, ); } diff --git a/src/auto-reply/reply/commands-subagents/shared.ts b/src/auto-reply/reply/commands-subagents/shared.ts index ec96437e645..bb923b52e46 100644 --- a/src/auto-reply/reply/commands-subagents/shared.ts +++ b/src/auto-reply/reply/commands-subagents/shared.ts @@ -1,3 +1,5 @@ +import { resolveStoredSubagentCapabilities } from "../../../agents/subagent-capabilities.js"; +import type { ResolvedSubagentController } from "../../../agents/subagent-control.js"; import { countPendingDescendantRuns, type SubagentRunRecord, @@ -18,6 +20,7 @@ import { parseDiscordTarget } from "../../../discord/targets.js"; import { callGateway } from "../../../gateway/call.js"; import { formatTimeAgo } from "../../../infra/format-time/format-relative.ts"; import { parseAgentSessionKey } from "../../../routing/session-key.js"; +import { isSubagentSessionKey } from "../../../routing/session-key.js"; import { looksLikeSessionId } from "../../../sessions/session-id.js"; import { extractTextFromChatContent } from "../../../shared/chat-content.js"; import { @@ -247,6 +250,29 @@ export function resolveRequesterSessionKey( return resolveInternalSessionKey({ key: raw, alias, mainKey }); } +export function resolveCommandSubagentController( + params: SubagentsCommandParams, + requesterKey: string, +): ResolvedSubagentController { + if (!isSubagentSessionKey(requesterKey)) { + return { + controllerSessionKey: requesterKey, + callerSessionKey: requesterKey, + callerIsSubagent: false, + controlScope: "children", + }; + } + const capabilities = resolveStoredSubagentCapabilities(requesterKey, { + cfg: params.cfg, + }); + return { + controllerSessionKey: requesterKey, + callerSessionKey: requesterKey, + callerIsSubagent: true, + controlScope: capabilities.controlScope, + }; +} + export function resolveHandledPrefix(normalized: string): string | null { return normalized.startsWith(COMMAND) ? 
COMMAND diff --git a/src/auto-reply/reply/commands.test-harness.ts b/src/auto-reply/reply/commands.test-harness.ts index 84ef0c0f84d..806e36895c8 100644 --- a/src/auto-reply/reply/commands.test-harness.ts +++ b/src/auto-reply/reply/commands.test-harness.ts @@ -26,7 +26,7 @@ export function buildCommandTestParams( ctx, cfg, isGroup: false, - triggerBodyNormalized: commandBody.trim().toLowerCase(), + triggerBodyNormalized: commandBody.trim(), commandAuthorized: true, }); diff --git a/src/auto-reply/reply/commands.test.ts b/src/auto-reply/reply/commands.test.ts index 38be7c43531..f6d2d88f5ba 100644 --- a/src/auto-reply/reply/commands.test.ts +++ b/src/auto-reply/reply/commands.test.ts @@ -105,27 +105,6 @@ vi.mock("../../gateway/call.js", () => ({ callGateway: (opts: unknown) => callGatewayMock(opts), })); -type ResetAcpSessionInPlaceResult = { ok: true } | { ok: false; skipped?: boolean; error?: string }; - -const resetAcpSessionInPlaceMock = vi.hoisted(() => - vi.fn( - async (_params: unknown): Promise => ({ - ok: false, - skipped: true, - }), - ), -); -vi.mock("../../acp/persistent-bindings.js", async () => { - const actual = await vi.importActual( - "../../acp/persistent-bindings.js", - ); - return { - ...actual, - resetAcpSessionInPlace: (params: unknown) => resetAcpSessionInPlaceMock(params), - }; -}); - -import { buildConfiguredAcpSessionKey } from "../../acp/persistent-bindings.js"; import type { HandleCommandsParams } from "./commands-types.js"; import { buildCommandContext, handleCommands } from "./commands.js"; @@ -154,15 +133,35 @@ afterAll(async () => { await fs.rm(testWorkspaceDir, { recursive: true, force: true }); }); +async function withTempConfigPath( + initialConfig: Record, + run: (configPath: string) => Promise, +): Promise { + const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-commands-config-")); + const configPath = path.join(dir, "openclaw.json"); + const previous = process.env.OPENCLAW_CONFIG_PATH; + process.env.OPENCLAW_CONFIG_PATH 
= configPath; + await fs.writeFile(configPath, JSON.stringify(initialConfig, null, 2), "utf-8"); + try { + return await run(configPath); + } finally { + if (previous === undefined) { + delete process.env.OPENCLAW_CONFIG_PATH; + } else { + process.env.OPENCLAW_CONFIG_PATH = previous; + } + await fs.rm(dir, { recursive: true, force: true }); + } +} + +async function readJsonFile(filePath: string): Promise { + return JSON.parse(await fs.readFile(filePath, "utf-8")) as T; +} + function buildParams(commandBody: string, cfg: OpenClawConfig, ctxOverrides?: Partial) { return buildCommandTestParams(commandBody, cfg, ctxOverrides, { workspaceDir: testWorkspaceDir }); } -beforeEach(() => { - resetAcpSessionInPlaceMock.mockReset(); - resetAcpSessionInPlaceMock.mockResolvedValue({ ok: false, skipped: true } as const); -}); - describe("handleCommands gating", () => { it("blocks gated commands when disabled or not elevated-allowlisted", async () => { const cases = typedCases<{ @@ -207,6 +206,9 @@ describe("handleCommands gating", () => { commands: { config: false, debug: false, text: true }, channels: { whatsapp: { allowFrom: ["*"] } }, }) as OpenClawConfig, + applyParams: (params: ReturnType) => { + params.command.senderIsOwner = true; + }, expectedText: "/config is disabled", }, { @@ -217,6 +219,9 @@ describe("handleCommands gating", () => { commands: { config: false, debug: false, text: true }, channels: { whatsapp: { allowFrom: ["*"] } }, }) as OpenClawConfig, + applyParams: (params: ReturnType) => { + params.command.senderIsOwner = true; + }, expectedText: "/debug is disabled", }, { @@ -249,6 +254,9 @@ describe("handleCommands gating", () => { channels: { whatsapp: { allowFrom: ["*"] } }, } as OpenClawConfig; }, + applyParams: (params: ReturnType) => { + params.command.senderIsOwner = true; + }, expectedText: "/config is disabled", }, { @@ -265,6 +273,9 @@ describe("handleCommands gating", () => { channels: { whatsapp: { allowFrom: ["*"] } }, } as OpenClawConfig; }, + 
applyParams: (params: ReturnType) => { + params.command.senderIsOwner = true; + }, expectedText: "/debug is disabled", }, ]); @@ -316,6 +327,122 @@ describe("/approve command", () => { ); }); + it("accepts Telegram command mentions for /approve", async () => { + const cfg = { + commands: { text: true }, + channels: { + telegram: { + allowFrom: ["*"], + execApprovals: { enabled: true, approvers: ["123"], target: "dm" }, + }, + }, + } as OpenClawConfig; + const params = buildParams("/approve@bot abc12345 allow-once", cfg, { + BotUsername: "bot", + Provider: "telegram", + Surface: "telegram", + SenderId: "123", + }); + + callGatewayMock.mockResolvedValue({ ok: true }); + + const result = await handleCommands(params); + expect(result.shouldContinue).toBe(false); + expect(result.reply?.text).toContain("Exec approval allow-once submitted"); + expect(callGatewayMock).toHaveBeenCalledWith( + expect.objectContaining({ + method: "exec.approval.resolve", + params: { id: "abc12345", decision: "allow-once" }, + }), + ); + }); + + it("rejects Telegram /approve mentions targeting a different bot", async () => { + const cfg = { + commands: { text: true }, + channels: { + telegram: { + allowFrom: ["*"], + execApprovals: { enabled: true, approvers: ["123"], target: "dm" }, + }, + }, + } as OpenClawConfig; + const params = buildParams("/approve@otherbot abc12345 allow-once", cfg, { + BotUsername: "bot", + Provider: "telegram", + Surface: "telegram", + SenderId: "123", + }); + + const result = await handleCommands(params); + + expect(result.shouldContinue).toBe(false); + expect(result.reply?.text).toContain("targets a different Telegram bot"); + expect(callGatewayMock).not.toHaveBeenCalled(); + }); + + it("surfaces unknown or expired approval id errors", async () => { + const cfg = { + commands: { text: true }, + channels: { + telegram: { + allowFrom: ["*"], + execApprovals: { enabled: true, approvers: ["123"], target: "dm" }, + }, + }, + } as OpenClawConfig; + const params = 
buildParams("/approve abc12345 allow-once", cfg, { + Provider: "telegram", + Surface: "telegram", + SenderId: "123", + }); + + callGatewayMock.mockRejectedValue(new Error("unknown or expired approval id")); + + const result = await handleCommands(params); + expect(result.shouldContinue).toBe(false); + expect(result.reply?.text).toContain("unknown or expired approval id"); + }); + + it("rejects Telegram /approve when telegram exec approvals are disabled", async () => { + const cfg = { + commands: { text: true }, + channels: { telegram: { allowFrom: ["*"] } }, + } as OpenClawConfig; + const params = buildParams("/approve abc12345 allow-once", cfg, { + Provider: "telegram", + Surface: "telegram", + SenderId: "123", + }); + + const result = await handleCommands(params); + expect(result.shouldContinue).toBe(false); + expect(result.reply?.text).toContain("Telegram exec approvals are not enabled"); + expect(callGatewayMock).not.toHaveBeenCalled(); + }); + + it("rejects Telegram /approve from non-approvers", async () => { + const cfg = { + commands: { text: true }, + channels: { + telegram: { + allowFrom: ["*"], + execApprovals: { enabled: true, approvers: ["999"], target: "dm" }, + }, + }, + } as OpenClawConfig; + const params = buildParams("/approve abc12345 allow-once", cfg, { + Provider: "telegram", + Surface: "telegram", + SenderId: "123", + }); + + const result = await handleCommands(params); + expect(result.shouldContinue).toBe(false); + expect(result.reply?.text).toContain("not authorized to approve"); + expect(callGatewayMock).not.toHaveBeenCalled(); + }); + it("rejects gateway clients without approvals scope", async () => { const cfg = { commands: { text: true }, @@ -580,6 +707,36 @@ describe("extractMessageText", () => { }); }); +describe("handleCommands /config owner gating", () => { + it("blocks /config show from authorized non-owner senders", async () => { + const cfg = { + commands: { config: true, text: true }, + channels: { whatsapp: { allowFrom: ["*"] } 
}, + } as OpenClawConfig; + const params = buildParams("/config show", cfg); + params.command.senderIsOwner = false; + const result = await handleCommands(params); + expect(result.shouldContinue).toBe(false); + expect(result.reply).toBeUndefined(); + }); + + it("keeps /config show working for owners", async () => { + const cfg = { + commands: { config: true, text: true }, + channels: { whatsapp: { allowFrom: ["*"] } }, + } as OpenClawConfig; + readConfigFileSnapshotMock.mockResolvedValueOnce({ + valid: true, + parsed: { messages: { ackReaction: ":)" } }, + }); + const params = buildParams("/config show messages.ackReaction", cfg); + params.command.senderIsOwner = true; + const result = await handleCommands(params); + expect(result.shouldContinue).toBe(false); + expect(result.reply?.text).toContain("Config messages.ackReaction"); + }); +}); + describe("handleCommands /config configWrites gating", () => { it("blocks /config set when channel config writes are disabled", async () => { const cfg = { @@ -587,11 +744,60 @@ describe("handleCommands /config configWrites gating", () => { channels: { whatsapp: { allowFrom: ["*"], configWrites: false } }, } as OpenClawConfig; const params = buildParams('/config set messages.ackReaction=":)"', cfg); + params.command.senderIsOwner = true; const result = await handleCommands(params); expect(result.shouldContinue).toBe(false); expect(result.reply?.text).toContain("Config writes are disabled"); }); + it("blocks /config set when the target account disables writes", async () => { + const previousWriteCount = writeConfigFileMock.mock.calls.length; + const cfg = { + commands: { config: true, text: true }, + channels: { + telegram: { + configWrites: true, + accounts: { + work: { configWrites: false, enabled: true }, + }, + }, + }, + } as OpenClawConfig; + const params = buildPolicyParams( + "/config set channels.telegram.accounts.work.enabled=false", + cfg, + { + AccountId: "default", + Provider: "telegram", + Surface: "telegram", + }, 
+ ); + params.command.senderIsOwner = true; + const result = await handleCommands(params); + expect(result.shouldContinue).toBe(false); + expect(result.reply?.text).toContain("channels.telegram.accounts.work.configWrites=true"); + expect(writeConfigFileMock.mock.calls.length).toBe(previousWriteCount); + }); + + it("blocks ambiguous channel-root /config writes from channel commands", async () => { + const previousWriteCount = writeConfigFileMock.mock.calls.length; + const cfg = { + commands: { config: true, text: true }, + channels: { telegram: { configWrites: true } }, + } as OpenClawConfig; + const params = buildPolicyParams('/config set channels.telegram={"enabled":false}', cfg, { + Provider: "telegram", + Surface: "telegram", + }); + params.command.senderIsOwner = true; + const result = await handleCommands(params); + expect(result.shouldContinue).toBe(false); + expect(result.reply?.text).toContain( + "cannot replace channels, channel roots, or accounts collections", + ); + expect(writeConfigFileMock.mock.calls.length).toBe(previousWriteCount); + }); + it("blocks /config set from gateway clients without operator.admin", async () => { const cfg = { commands: { config: true, text: true }, @@ -602,6 +808,7 @@ describe("handleCommands /config configWrites gating", () => { GatewayClientScopes: ["operator.write"], }); params.command.channel = INTERNAL_MESSAGE_CHANNEL; + params.command.senderIsOwner = true; const result = await handleCommands(params); expect(result.shouldContinue).toBe(false); expect(result.reply?.text).toContain("requires operator.admin"); @@ -613,7 +820,7 @@ describe("handleCommands /config configWrites gating", () => { } as OpenClawConfig; readConfigFileSnapshotMock.mockResolvedValueOnce({ valid: true, - parsed: { messages: { ackreaction: ":)" } }, + parsed: { messages: { ackReaction: ":)" } }, }); const params = buildParams("/config show messages.ackReaction", cfg, { Provider: INTERNAL_MESSAGE_CHANNEL, @@ -621,33 +828,111 @@ 
describe("handleCommands /config configWrites gating", () => { GatewayClientScopes: ["operator.write"], }); params.command.channel = INTERNAL_MESSAGE_CHANNEL; + params.command.senderIsOwner = false; const result = await handleCommands(params); expect(result.shouldContinue).toBe(false); - expect(result.reply?.text).toContain("Config messages.ackreaction"); + expect(result.reply?.text).toContain("Config messages.ackReaction"); }); it("keeps /config set working for gateway operator.admin clients", async () => { + await withTempConfigPath({ messages: { ackReaction: ":)" } }, async (configPath) => { + const cfg = { + commands: { config: true, text: true }, + } as OpenClawConfig; + readConfigFileSnapshotMock.mockResolvedValueOnce({ + valid: true, + parsed: { messages: { ackReaction: ":)" } }, + }); + validateConfigObjectWithPluginsMock.mockImplementation((config: unknown) => ({ + ok: true, + config, + })); + const params = buildParams('/config set messages.ackReaction=":D"', cfg, { + Provider: INTERNAL_MESSAGE_CHANNEL, + Surface: INTERNAL_MESSAGE_CHANNEL, + GatewayClientScopes: ["operator.write", "operator.admin"], + }); + params.command.channel = INTERNAL_MESSAGE_CHANNEL; + params.command.senderIsOwner = true; + const result = await handleCommands(params); + expect(result.shouldContinue).toBe(false); + expect(result.reply?.text).toContain("Config updated"); + const written = await readJsonFile(configPath); + expect(written.messages?.ackReaction).toBe(":D"); + }); + }); + + it("keeps /config set working for gateway operator.admin on protected account paths", async () => { + const initialConfig = { + channels: { + telegram: { + accounts: { + work: { enabled: true, configWrites: false }, + }, + }, + }, + }; + await withTempConfigPath(initialConfig, async (configPath) => { + readConfigFileSnapshotMock.mockResolvedValueOnce({ + valid: true, + parsed: structuredClone(initialConfig), + }); + validateConfigObjectWithPluginsMock.mockImplementation((config: unknown) => ({ + ok: 
true, + config, + })); + const params = buildParams( + "/config set channels.telegram.accounts.work.enabled=false", + { + commands: { config: true, text: true }, + channels: { + telegram: { + accounts: { + work: { enabled: true, configWrites: false }, + }, + }, + }, + } as OpenClawConfig, + { + Provider: INTERNAL_MESSAGE_CHANNEL, + Surface: INTERNAL_MESSAGE_CHANNEL, + GatewayClientScopes: ["operator.write", "operator.admin"], + }, + ); + params.command.channel = INTERNAL_MESSAGE_CHANNEL; + params.command.senderIsOwner = true; + const result = await handleCommands(params); + expect(result.shouldContinue).toBe(false); + expect(result.reply?.text).toContain("Config updated"); + const written = await readJsonFile(configPath); + expect(written.channels?.telegram?.accounts?.work?.enabled).toBe(false); + }); + }); +}); + +describe("handleCommands /debug owner gating", () => { + it("blocks /debug show from authorized non-owner senders", async () => { const cfg = { - commands: { config: true, text: true }, + commands: { debug: true, text: true }, + channels: { whatsapp: { allowFrom: ["*"] } }, } as OpenClawConfig; - readConfigFileSnapshotMock.mockResolvedValueOnce({ - valid: true, - parsed: { messages: { ackReaction: ":)" } }, - }); - validateConfigObjectWithPluginsMock.mockImplementation((config: unknown) => ({ - ok: true, - config, - })); - const params = buildParams('/config set messages.ackReaction=":D"', cfg, { - Provider: INTERNAL_MESSAGE_CHANNEL, - Surface: INTERNAL_MESSAGE_CHANNEL, - GatewayClientScopes: ["operator.write", "operator.admin"], - }); - params.command.channel = INTERNAL_MESSAGE_CHANNEL; + const params = buildParams("/debug show", cfg); + params.command.senderIsOwner = false; const result = await handleCommands(params); expect(result.shouldContinue).toBe(false); - expect(writeConfigFileMock).toHaveBeenCalledOnce(); - expect(result.reply?.text).toContain("Config updated"); + expect(result.reply).toBeUndefined(); + }); + + it("keeps /debug show working for 
owners", async () => { + const cfg = { + commands: { debug: true, text: true }, + channels: { whatsapp: { allowFrom: ["*"] } }, + } as OpenClawConfig; + const params = buildParams("/debug show", cfg); + params.command.senderIsOwner = true; + const result = await handleCommands(params); + expect(result.shouldContinue).toBe(false); + expect(result.reply?.text).toContain("Debug overrides"); }); }); @@ -686,7 +971,7 @@ function buildPolicyParams( ctx, cfg, isGroup: false, - triggerBodyNormalized: commandBody.trim().toLowerCase(), + triggerBodyNormalized: commandBody.trim(), commandAuthorized: true, }); @@ -732,40 +1017,44 @@ describe("handleCommands /allowlist", () => { }); it("adds entries to config and pairing store", async () => { - readConfigFileSnapshotMock.mockResolvedValueOnce({ - valid: true, - parsed: { + await withTempConfigPath( + { channels: { telegram: { allowFrom: ["123"] } }, }, - }); - validateConfigObjectWithPluginsMock.mockImplementation((config: unknown) => ({ - ok: true, - config, - })); - addChannelAllowFromStoreEntryMock.mockResolvedValueOnce({ - changed: true, - allowFrom: ["123", "789"], - }); + async (configPath) => { + readConfigFileSnapshotMock.mockResolvedValueOnce({ + valid: true, + parsed: { + channels: { telegram: { allowFrom: ["123"] } }, + }, + }); + validateConfigObjectWithPluginsMock.mockImplementation((config: unknown) => ({ + ok: true, + config, + })); + addChannelAllowFromStoreEntryMock.mockResolvedValueOnce({ + changed: true, + allowFrom: ["123", "789"], + }); - const cfg = { - commands: { text: true, config: true }, - channels: { telegram: { allowFrom: ["123"] } }, - } as OpenClawConfig; - const params = buildPolicyParams("/allowlist add dm 789", cfg); - const result = await handleCommands(params); + const cfg = { + commands: { text: true, config: true }, + channels: { telegram: { allowFrom: ["123"] } }, + } as OpenClawConfig; + const params = buildPolicyParams("/allowlist add dm 789", cfg); + const result = await 
handleCommands(params); - expect(result.shouldContinue).toBe(false); - expect(writeConfigFileMock).toHaveBeenCalledWith( - expect.objectContaining({ - channels: { telegram: { allowFrom: ["123", "789"] } }, - }), + expect(result.shouldContinue).toBe(false); + const written = await readJsonFile(configPath); + expect(written.channels?.telegram?.allowFrom).toEqual(["123", "789"]); + expect(addChannelAllowFromStoreEntryMock).toHaveBeenCalledWith({ + channel: "telegram", + entry: "789", + accountId: "default", + }); + expect(result.reply?.text).toContain("DM allowlist added"); + }, ); - expect(addChannelAllowFromStoreEntryMock).toHaveBeenCalledWith({ - channel: "telegram", - entry: "789", - accountId: "default", - }); - expect(result.reply?.text).toContain("DM allowlist added"); }); it("writes store entries to the selected account scope", async () => { @@ -801,6 +1090,35 @@ describe("handleCommands /allowlist", () => { }); }); + it("blocks config-targeted /allowlist edits when the target account disables writes", async () => { + const previousWriteCount = writeConfigFileMock.mock.calls.length; + const cfg = { + commands: { text: true, config: true }, + channels: { + telegram: { + configWrites: true, + accounts: { + work: { configWrites: false, allowFrom: ["123"] }, + }, + }, + }, + } as OpenClawConfig; + readConfigFileSnapshotMock.mockResolvedValueOnce({ + valid: true, + parsed: structuredClone(cfg), + }); + const params = buildPolicyParams("/allowlist add dm --account work --config 789", cfg, { + AccountId: "default", + Provider: "telegram", + Surface: "telegram", + }); + const result = await handleCommands(params); + + expect(result.shouldContinue).toBe(false); + expect(result.reply?.text).toContain("channels.telegram.accounts.work.configWrites=true"); + expect(writeConfigFileMock.mock.calls.length).toBe(previousWriteCount); + }); + it("removes default-account entries from scoped and legacy pairing stores", async () => { removeChannelAllowFromStoreEntryMock 
.mockResolvedValueOnce({ @@ -868,22 +1186,7 @@ describe("handleCommands /allowlist", () => { })); for (const testCase of cases) { - const previousWriteCount = writeConfigFileMock.mock.calls.length; - readConfigFileSnapshotMock.mockResolvedValueOnce({ - valid: true, - parsed: { - channels: { - [testCase.provider]: { - allowFrom: testCase.initialAllowFrom, - dm: { allowFrom: testCase.initialAllowFrom }, - configWrites: true, - }, - }, - }, - }); - - const cfg = { - commands: { text: true, config: true }, + const initialConfig = { channels: { [testCase.provider]: { allowFrom: testCase.initialAllowFrom, @@ -891,21 +1194,37 @@ describe("handleCommands /allowlist", () => { configWrites: true, }, }, - } as OpenClawConfig; + }; + await withTempConfigPath(initialConfig, async (configPath) => { + readConfigFileSnapshotMock.mockResolvedValueOnce({ + valid: true, + parsed: structuredClone(initialConfig), + }); - const params = buildPolicyParams(`/allowlist remove dm ${testCase.removeId}`, cfg, { - Provider: testCase.provider, - Surface: testCase.provider, + const cfg = { + commands: { text: true, config: true }, + channels: { + [testCase.provider]: { + allowFrom: testCase.initialAllowFrom, + dm: { allowFrom: testCase.initialAllowFrom }, + configWrites: true, + }, + }, + } as OpenClawConfig; + + const params = buildPolicyParams(`/allowlist remove dm ${testCase.removeId}`, cfg, { + Provider: testCase.provider, + Surface: testCase.provider, + }); + const result = await handleCommands(params); + + expect(result.shouldContinue).toBe(false); + const written = await readJsonFile(configPath); + const channelConfig = written.channels?.[testCase.provider]; + expect(channelConfig?.allowFrom).toEqual(testCase.expectedAllowFrom); + expect(channelConfig?.dm?.allowFrom).toBeUndefined(); + expect(result.reply?.text).toContain(`channels.${testCase.provider}.allowFrom`); }); - const result = await handleCommands(params); - - expect(result.shouldContinue).toBe(false); - 
expect(writeConfigFileMock.mock.calls.length).toBe(previousWriteCount + 1); - const written = writeConfigFileMock.mock.calls.at(-1)?.[0] as OpenClawConfig; - const channelConfig = written.channels?.[testCase.provider]; - expect(channelConfig?.allowFrom).toEqual(testCase.expectedAllowFrom); - expect(channelConfig?.dm?.allowFrom).toBeUndefined(); - expect(result.reply?.text).toContain(`channels.${testCase.provider}.allowFrom`); } }); }); @@ -1147,226 +1466,6 @@ describe("handleCommands hooks", () => { }); }); -describe("handleCommands ACP-bound /new and /reset", () => { - const discordChannelId = "1478836151241412759"; - const buildDiscordBoundConfig = (): OpenClawConfig => - ({ - commands: { text: true }, - bindings: [ - { - type: "acp", - agentId: "codex", - match: { - channel: "discord", - accountId: "default", - peer: { - kind: "channel", - id: discordChannelId, - }, - }, - acp: { - mode: "persistent", - }, - }, - ], - channels: { - discord: { - allowFrom: ["*"], - guilds: { "1459246755253325866": { channels: { [discordChannelId]: {} } } }, - }, - }, - }) as OpenClawConfig; - - const buildDiscordBoundParams = (body: string) => { - const params = buildParams(body, buildDiscordBoundConfig(), { - Provider: "discord", - Surface: "discord", - OriginatingChannel: "discord", - AccountId: "default", - SenderId: "12345", - From: "discord:12345", - To: discordChannelId, - OriginatingTo: discordChannelId, - SessionKey: "agent:main:acp:binding:discord:default:feedface", - }); - params.sessionKey = "agent:main:acp:binding:discord:default:feedface"; - return params; - }; - - it("handles /new as ACP in-place reset for bound conversations", async () => { - resetAcpSessionInPlaceMock.mockResolvedValue({ ok: true } as const); - const result = await handleCommands(buildDiscordBoundParams("/new")); - - expect(result.shouldContinue).toBe(false); - expect(result.reply?.text).toContain("ACP session reset in place"); - expect(resetAcpSessionInPlaceMock).toHaveBeenCalledTimes(1); - 
expect(resetAcpSessionInPlaceMock.mock.calls[0]?.[0]).toMatchObject({ - reason: "new", - }); - }); - - it("continues with trailing prompt text after successful ACP-bound /new", async () => { - resetAcpSessionInPlaceMock.mockResolvedValue({ ok: true } as const); - const params = buildDiscordBoundParams("/new continue with deployment"); - const result = await handleCommands(params); - - expect(result.shouldContinue).toBe(false); - expect(result.reply).toBeUndefined(); - const mutableCtx = params.ctx as Record; - expect(mutableCtx.BodyStripped).toBe("continue with deployment"); - expect(mutableCtx.CommandBody).toBe("continue with deployment"); - expect(mutableCtx.AcpDispatchTailAfterReset).toBe(true); - expect(resetAcpSessionInPlaceMock).toHaveBeenCalledTimes(1); - }); - - it("handles /reset failures without falling back to normal session reset flow", async () => { - resetAcpSessionInPlaceMock.mockResolvedValue({ ok: false, error: "backend unavailable" }); - const result = await handleCommands(buildDiscordBoundParams("/reset")); - - expect(result.shouldContinue).toBe(false); - expect(result.reply?.text).toContain("ACP session reset failed"); - expect(resetAcpSessionInPlaceMock).toHaveBeenCalledTimes(1); - expect(resetAcpSessionInPlaceMock.mock.calls[0]?.[0]).toMatchObject({ - reason: "reset", - }); - }); - - it("does not emit reset hooks when ACP reset fails", async () => { - resetAcpSessionInPlaceMock.mockResolvedValue({ ok: false, error: "backend unavailable" }); - const spy = vi.spyOn(internalHooks, "triggerInternalHook").mockResolvedValue(); - - const result = await handleCommands(buildDiscordBoundParams("/reset")); - - expect(result.shouldContinue).toBe(false); - expect(spy).not.toHaveBeenCalled(); - spy.mockRestore(); - }); - - it("keeps existing /new behavior for non-ACP sessions", async () => { - const cfg = { - commands: { text: true }, - channels: { whatsapp: { allowFrom: ["*"] } }, - } as OpenClawConfig; - const result = await 
handleCommands(buildParams("/new", cfg)); - - expect(result.shouldContinue).toBe(true); - expect(resetAcpSessionInPlaceMock).not.toHaveBeenCalled(); - }); - - it("still targets configured ACP binding when runtime routing falls back to a non-ACP session", async () => { - const fallbackSessionKey = `agent:main:discord:channel:${discordChannelId}`; - const configuredAcpSessionKey = buildConfiguredAcpSessionKey({ - channel: "discord", - accountId: "default", - conversationId: discordChannelId, - agentId: "codex", - mode: "persistent", - }); - const params = buildDiscordBoundParams("/new"); - params.sessionKey = fallbackSessionKey; - params.ctx.SessionKey = fallbackSessionKey; - params.ctx.CommandTargetSessionKey = fallbackSessionKey; - - const result = await handleCommands(params); - - expect(result.shouldContinue).toBe(false); - expect(result.reply?.text).toContain("ACP session reset unavailable"); - expect(resetAcpSessionInPlaceMock).toHaveBeenCalledTimes(1); - expect(resetAcpSessionInPlaceMock.mock.calls[0]?.[0]).toMatchObject({ - sessionKey: configuredAcpSessionKey, - reason: "new", - }); - }); - - it("emits reset hooks for the ACP session key when routing falls back to non-ACP session", async () => { - resetAcpSessionInPlaceMock.mockResolvedValue({ ok: true } as const); - const hookSpy = vi.spyOn(internalHooks, "triggerInternalHook").mockResolvedValue(); - const fallbackSessionKey = `agent:main:discord:channel:${discordChannelId}`; - const configuredAcpSessionKey = buildConfiguredAcpSessionKey({ - channel: "discord", - accountId: "default", - conversationId: discordChannelId, - agentId: "codex", - mode: "persistent", - }); - const fallbackEntry = { - sessionId: "fallback-session-id", - sessionFile: "/tmp/fallback-session.jsonl", - } as SessionEntry; - const configuredEntry = { - sessionId: "configured-acp-session-id", - sessionFile: "/tmp/configured-acp-session.jsonl", - } as SessionEntry; - const params = buildDiscordBoundParams("/new"); - params.sessionKey = 
fallbackSessionKey; - params.ctx.SessionKey = fallbackSessionKey; - params.ctx.CommandTargetSessionKey = fallbackSessionKey; - params.sessionEntry = fallbackEntry; - params.previousSessionEntry = fallbackEntry; - params.sessionStore = { - [fallbackSessionKey]: fallbackEntry, - [configuredAcpSessionKey]: configuredEntry, - }; - - const result = await handleCommands(params); - - expect(result.shouldContinue).toBe(false); - expect(result.reply?.text).toContain("ACP session reset in place"); - expect(hookSpy).toHaveBeenCalledWith( - expect.objectContaining({ - type: "command", - action: "new", - sessionKey: configuredAcpSessionKey, - context: expect.objectContaining({ - sessionEntry: configuredEntry, - previousSessionEntry: configuredEntry, - }), - }), - ); - hookSpy.mockRestore(); - }); - - it("uses active ACP command target when conversation binding context is missing", async () => { - resetAcpSessionInPlaceMock.mockResolvedValue({ ok: true } as const); - const activeAcpTarget = "agent:codex:acp:binding:discord:default:feedface"; - const params = buildParams( - "/new", - { - commands: { text: true }, - channels: { - discord: { - allowFrom: ["*"], - }, - }, - } as OpenClawConfig, - { - Provider: "discord", - Surface: "discord", - OriginatingChannel: "discord", - AccountId: "default", - SenderId: "12345", - From: "discord:12345", - }, - ); - params.sessionKey = "discord:slash:12345"; - params.ctx.SessionKey = "discord:slash:12345"; - params.ctx.CommandSource = "native"; - params.ctx.CommandTargetSessionKey = activeAcpTarget; - params.ctx.To = "user:12345"; - params.ctx.OriginatingTo = "user:12345"; - - const result = await handleCommands(params); - - expect(result.shouldContinue).toBe(false); - expect(result.reply?.text).toContain("ACP session reset in place"); - expect(resetAcpSessionInPlaceMock).toHaveBeenCalledTimes(1); - expect(resetAcpSessionInPlaceMock.mock.calls[0]?.[0]).toMatchObject({ - sessionKey: activeAcpTarget, - reason: "new", - }); - }); -}); - 
describe("handleCommands context", () => { it("returns expected details for /context commands", async () => { const cfg = { diff --git a/src/auto-reply/reply/config-write-authorization.ts b/src/auto-reply/reply/config-write-authorization.ts new file mode 100644 index 00000000000..a2c2142709f --- /dev/null +++ b/src/auto-reply/reply/config-write-authorization.ts @@ -0,0 +1,33 @@ +import { + authorizeConfigWrite, + canBypassConfigWritePolicy, + formatConfigWriteDeniedMessage, +} from "../../channels/plugins/config-writes.js"; +import type { ChannelId } from "../../channels/plugins/types.js"; +import type { OpenClawConfig } from "../../config/config.js"; + +export function resolveConfigWriteDeniedText(params: { + cfg: OpenClawConfig; + channel?: string | null; + channelId: ChannelId | null; + accountId?: string; + gatewayClientScopes?: string[]; + target: Parameters[0]["target"]; +}): string | null { + const writeAuth = authorizeConfigWrite({ + cfg: params.cfg, + origin: { channelId: params.channelId, accountId: params.accountId }, + target: params.target, + allowBypass: canBypassConfigWritePolicy({ + channel: params.channel ?? 
"", + gatewayClientScopes: params.gatewayClientScopes, + }), + }); + if (writeAuth.allowed) { + return null; + } + return formatConfigWriteDeniedMessage({ + result: writeAuth, + fallbackChannelId: params.channelId, + }); +} diff --git a/src/auto-reply/reply/directive-handling.auth.test.ts b/src/auto-reply/reply/directive-handling.auth.test.ts index 04249b88795..5e1248c8a61 100644 --- a/src/auto-reply/reply/directive-handling.auth.test.ts +++ b/src/auto-reply/reply/directive-handling.auth.test.ts @@ -4,6 +4,11 @@ import type { OpenClawConfig } from "../../config/config.js"; let mockStore: AuthProfileStore; let mockOrder: string[]; +const githubCopilotTokenRefProfile: AuthProfileStore["profiles"][string] = { + type: "token", + provider: "github-copilot", + tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, +}; vi.mock("../../agents/auth-health.js", () => ({ formatRemainingShort: () => "1h", @@ -32,13 +37,35 @@ vi.mock("../../agents/model-selection.js", () => ({ vi.mock("../../agents/model-auth.js", () => ({ ensureAuthProfileStore: () => mockStore, - getCustomProviderApiKey: () => undefined, + resolveUsableCustomProviderApiKey: () => null, resolveAuthProfileOrder: () => mockOrder, resolveEnvApiKey: () => null, })); const { resolveAuthLabel } = await import("./directive-handling.auth.js"); +async function resolveRefOnlyAuthLabel(params: { + provider: string; + profileId: string; + profile: + | (AuthProfileStore["profiles"][string] & { type: "api_key" }) + | (AuthProfileStore["profiles"][string] & { type: "token" }); + mode: "compact" | "verbose"; +}) { + mockStore.profiles = { + [params.profileId]: params.profile, + }; + mockOrder = [params.profileId]; + + return resolveAuthLabel( + params.provider, + {} as OpenClawConfig, + "/tmp/models.json", + undefined, + params.mode, + ); +} + describe("resolveAuthLabel ref-aware labels", () => { beforeEach(() => { mockStore = { @@ -49,64 +76,38 @@ describe("resolveAuthLabel ref-aware labels", () => { }); 
it("shows api-key (ref) for keyRef-only profiles in compact mode", async () => { - mockStore.profiles = { - "openai:default": { + const result = await resolveRefOnlyAuthLabel({ + provider: "openai", + profileId: "openai:default", + profile: { type: "api_key", provider: "openai", keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, }, - }; - mockOrder = ["openai:default"]; - - const result = await resolveAuthLabel( - "openai", - {} as OpenClawConfig, - "/tmp/models.json", - undefined, - "compact", - ); + mode: "compact", + }); expect(result.label).toBe("openai:default api-key (ref)"); }); it("shows token (ref) for tokenRef-only profiles in compact mode", async () => { - mockStore.profiles = { - "github-copilot:default": { - type: "token", - provider: "github-copilot", - tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, - }, - }; - mockOrder = ["github-copilot:default"]; - - const result = await resolveAuthLabel( - "github-copilot", - {} as OpenClawConfig, - "/tmp/models.json", - undefined, - "compact", - ); + const result = await resolveRefOnlyAuthLabel({ + provider: "github-copilot", + profileId: "github-copilot:default", + profile: githubCopilotTokenRefProfile, + mode: "compact", + }); expect(result.label).toBe("github-copilot:default token (ref)"); }); it("uses token:ref instead of token:missing in verbose mode", async () => { - mockStore.profiles = { - "github-copilot:default": { - type: "token", - provider: "github-copilot", - tokenRef: { source: "env", provider: "default", id: "GITHUB_TOKEN" }, - }, - }; - mockOrder = ["github-copilot:default"]; - - const result = await resolveAuthLabel( - "github-copilot", - {} as OpenClawConfig, - "/tmp/models.json", - undefined, - "verbose", - ); + const result = await resolveRefOnlyAuthLabel({ + provider: "github-copilot", + profileId: "github-copilot:default", + profile: githubCopilotTokenRefProfile, + mode: "verbose", + }); 
expect(result.label).toContain("github-copilot:default=token:ref"); expect(result.label).not.toContain("token:missing"); diff --git a/src/auto-reply/reply/directive-handling.auth.ts b/src/auto-reply/reply/directive-handling.auth.ts index dd33ed6ae73..604e7473ae8 100644 --- a/src/auto-reply/reply/directive-handling.auth.ts +++ b/src/auto-reply/reply/directive-handling.auth.ts @@ -6,9 +6,9 @@ import { } from "../../agents/auth-profiles.js"; import { ensureAuthProfileStore, - getCustomProviderApiKey, resolveAuthProfileOrder, resolveEnvApiKey, + resolveUsableCustomProviderApiKey, } from "../../agents/model-auth.js"; import { findNormalizedProviderValue, normalizeProviderId } from "../../agents/model-selection.js"; import type { OpenClawConfig } from "../../config/config.js"; @@ -33,6 +33,22 @@ function resolveStoredCredentialLabel(params: { return "missing"; } +function formatExpirationLabel( + expires: unknown, + now: number, + formatUntil: (timestampMs: number) => string, + compactExpiredPrefix = " expired", +) { + if (typeof expires !== "number" || !Number.isFinite(expires) || expires <= 0) { + return ""; + } + return expires <= now ? compactExpiredPrefix : ` exp ${formatUntil(expires)}`; +} + +function formatFlagsSuffix(flags: string[]) { + return flags.length > 0 ? ` (${flags.join(", ")})` : ""; +} + export const resolveAuthLabel = async ( provider: string, cfg: OpenClawConfig, @@ -89,14 +105,7 @@ export const resolveAuthLabel = async ( refValue: profile.tokenRef, mode, }); - const exp = - typeof profile.expires === "number" && - Number.isFinite(profile.expires) && - profile.expires > 0 - ? profile.expires <= now - ? 
" expired" - : ` exp ${formatUntil(profile.expires)}` - : ""; + const exp = formatExpirationLabel(profile.expires, now, formatUntil); return { label: `${profileId} token ${tokenLabel}${exp}${more}`, source: "", @@ -104,14 +113,7 @@ export const resolveAuthLabel = async ( } const display = resolveAuthProfileDisplayLabel({ cfg, store, profileId }); const label = display === profileId ? profileId : display; - const exp = - typeof profile.expires === "number" && - Number.isFinite(profile.expires) && - profile.expires > 0 - ? profile.expires <= now - ? " expired" - : ` exp ${formatUntil(profile.expires)}` - : ""; + const exp = formatExpirationLabel(profile.expires, now, formatUntil); return { label: `${label} oauth${exp}${more}`, source: "" }; } @@ -140,7 +142,7 @@ export const resolveAuthLabel = async ( configProfile.mode !== profile.type && !(configProfile.mode === "oauth" && profile.type === "token")) ) { - const suffix = flags.length > 0 ? ` (${flags.join(", ")})` : ""; + const suffix = formatFlagsSuffix(flags); return `${profileId}=missing${suffix}`; } if (profile.type === "api_key") { @@ -149,7 +151,7 @@ export const resolveAuthLabel = async ( refValue: profile.keyRef, mode, }); - const suffix = flags.length > 0 ? ` (${flags.join(", ")})` : ""; + const suffix = formatFlagsSuffix(flags); return `${profileId}=${keyLabel}${suffix}`; } if (profile.type === "token") { @@ -158,14 +160,11 @@ export const resolveAuthLabel = async ( refValue: profile.tokenRef, mode, }); - if ( - typeof profile.expires === "number" && - Number.isFinite(profile.expires) && - profile.expires > 0 - ) { - flags.push(profile.expires <= now ? "expired" : `exp ${formatUntil(profile.expires)}`); + const expirationFlag = formatExpirationLabel(profile.expires, now, formatUntil, "expired"); + if (expirationFlag) { + flags.push(expirationFlag); } - const suffix = flags.length > 0 ? 
` (${flags.join(", ")})` : ""; + const suffix = formatFlagsSuffix(flags); return `${profileId}=token:${tokenLabel}${suffix}`; } const display = resolveAuthProfileDisplayLabel({ @@ -179,15 +178,12 @@ export const resolveAuthLabel = async ( : display.startsWith(profileId) ? display.slice(profileId.length).trim() : `(${display})`; - if ( - typeof profile.expires === "number" && - Number.isFinite(profile.expires) && - profile.expires > 0 - ) { - flags.push(profile.expires <= now ? "expired" : `exp ${formatUntil(profile.expires)}`); + const expirationFlag = formatExpirationLabel(profile.expires, now, formatUntil, "expired"); + if (expirationFlag) { + flags.push(expirationFlag); } const suffixLabel = suffix ? ` ${suffix}` : ""; - const suffixFlags = flags.length > 0 ? ` (${flags.join(", ")})` : ""; + const suffixFlags = formatFlagsSuffix(flags); return `${profileId}=OAuth${suffixLabel}${suffixFlags}`; }); return { @@ -204,7 +200,7 @@ export const resolveAuthLabel = async ( const label = isOAuthEnv ? "OAuth (env)" : maskApiKey(envKey.apiKey); return { label, source: mode === "verbose" ? 
envKey.source : "" }; } - const customKey = getCustomProviderApiKey(cfg, provider); + const customKey = resolveUsableCustomProviderApiKey({ cfg, provider })?.apiKey; if (customKey) { return { label: maskApiKey(customKey), diff --git a/src/auto-reply/reply/directive-handling.fast-lane.ts b/src/auto-reply/reply/directive-handling.fast-lane.ts index 43f58adcca3..4635c4073f8 100644 --- a/src/auto-reply/reply/directive-handling.fast-lane.ts +++ b/src/auto-reply/reply/directive-handling.fast-lane.ts @@ -48,12 +48,17 @@ export async function applyInlineDirectivesFastLane( } const agentCfg = params.agentCfg; - const { currentThinkLevel, currentVerboseLevel, currentReasoningLevel, currentElevatedLevel } = - await resolveCurrentDirectiveLevels({ - sessionEntry, - agentCfg, - resolveDefaultThinkingLevel: () => modelState.resolveDefaultThinkingLevel(), - }); + const { + currentThinkLevel, + currentFastMode, + currentVerboseLevel, + currentReasoningLevel, + currentElevatedLevel, + } = await resolveCurrentDirectiveLevels({ + sessionEntry, + agentCfg, + resolveDefaultThinkingLevel: () => modelState.resolveDefaultThinkingLevel(), + }); const directiveAck = await handleDirectiveOnly({ cfg, @@ -77,6 +82,7 @@ export async function applyInlineDirectivesFastLane( initialModelLabel: params.initialModelLabel, formatModelSwitchEvent, currentThinkLevel, + currentFastMode, currentVerboseLevel, currentReasoningLevel, currentElevatedLevel, diff --git a/src/auto-reply/reply/directive-handling.impl.ts b/src/auto-reply/reply/directive-handling.impl.ts index 979304dfb1b..a994a3ccea6 100644 --- a/src/auto-reply/reply/directive-handling.impl.ts +++ b/src/auto-reply/reply/directive-handling.impl.ts @@ -3,6 +3,7 @@ import { resolveAgentDir, resolveSessionAgentId, } from "../../agents/agent-scope.js"; +import { resolveFastModeState } from "../../agents/fast-mode.js"; import { resolveSandboxRuntimeStatus } from "../../agents/sandbox.js"; import type { OpenClawConfig } from "../../config/config.js"; 
import { type SessionEntry, updateSessionStore } from "../../config/sessions.js"; @@ -78,6 +79,7 @@ export async function handleDirectiveOnly( initialModelLabel, formatModelSwitchEvent, currentThinkLevel, + currentFastMode, currentVerboseLevel, currentReasoningLevel, currentElevatedLevel, @@ -131,6 +133,15 @@ export async function handleDirectiveOnly( const resolvedProvider = modelSelection?.provider ?? provider; const resolvedModel = modelSelection?.model ?? model; + const fastModeState = resolveFastModeState({ + cfg: params.cfg, + provider: resolvedProvider, + model: resolvedModel, + sessionEntry, + }); + const effectiveFastMode = directives.fastMode ?? currentFastMode ?? fastModeState.enabled; + const effectiveFastModeSource = + directives.fastMode !== undefined ? "session" : fastModeState.source; if (directives.hasThinkDirective && !directives.thinkLevel) { // If no argument was provided, show the current level @@ -158,6 +169,25 @@ export async function handleDirectiveOnly( text: `Unrecognized verbose level "${directives.rawVerboseLevel}". Valid levels: off, on, full.`, }; } + if (directives.hasFastDirective && directives.fastMode === undefined) { + if (!directives.rawFastMode) { + const sourceSuffix = + effectiveFastModeSource === "config" + ? " (config)" + : effectiveFastModeSource === "default" + ? " (default)" + : ""; + return { + text: withOptions( + `Current fast mode: ${effectiveFastMode ? "on" : "off"}${sourceSuffix}.`, + "on, off", + ), + }; + } + return { + text: `Unrecognized fast mode "${directives.rawFastMode}". Valid levels: on, off.`, + }; + } if (directives.hasReasoningDirective && !directives.reasoningLevel) { if (!directives.rawReasoningLevel) { const level = currentReasoningLevel ?? 
"off"; @@ -279,11 +309,18 @@ export async function handleDirectiveOnly( directives.elevatedLevel !== undefined && elevatedEnabled && elevatedAllowed; + const fastModeChanged = + directives.hasFastDirective && + directives.fastMode !== undefined && + directives.fastMode !== currentFastMode; let reasoningChanged = directives.hasReasoningDirective && directives.reasoningLevel !== undefined; if (directives.hasThinkDirective && directives.thinkLevel) { sessionEntry.thinkingLevel = directives.thinkLevel; } + if (directives.hasFastDirective && directives.fastMode !== undefined) { + sessionEntry.fastMode = directives.fastMode; + } if (shouldDowngradeXHigh) { sessionEntry.thinkingLevel = "high"; } @@ -380,6 +417,13 @@ export async function handleDirectiveOnly( : `Thinking level set to ${directives.thinkLevel}.`, ); } + if (directives.hasFastDirective && directives.fastMode !== undefined) { + parts.push( + directives.fastMode + ? formatDirectiveAck("Fast mode enabled.") + : formatDirectiveAck("Fast mode disabled."), + ); + } if (directives.hasVerboseDirective && directives.verboseLevel) { parts.push( directives.verboseLevel === "off" @@ -459,6 +503,12 @@ export async function handleDirectiveOnly( if (directives.hasQueueDirective && directives.dropPolicy) { parts.push(formatDirectiveAck(`Queue drop set to ${directives.dropPolicy}.`)); } + if (fastModeChanged) { + enqueueSystemEvent(`Fast mode ${sessionEntry.fastMode ? "enabled" : "disabled"}.`, { + sessionKey, + contextKey: `fast:${sessionEntry.fastMode ? 
"on" : "off"}`, + }); + } const ack = parts.join(" ").trim(); if (!ack && directives.hasStatusDirective) { return undefined; diff --git a/src/auto-reply/reply/directive-handling.levels.ts b/src/auto-reply/reply/directive-handling.levels.ts index ee7b1108e83..b62e77c3501 100644 --- a/src/auto-reply/reply/directive-handling.levels.ts +++ b/src/auto-reply/reply/directive-handling.levels.ts @@ -3,6 +3,7 @@ import type { ElevatedLevel, ReasoningLevel, ThinkLevel, VerboseLevel } from ".. export async function resolveCurrentDirectiveLevels(params: { sessionEntry?: { thinkingLevel?: unknown; + fastMode?: unknown; verboseLevel?: unknown; reasoningLevel?: unknown; elevatedLevel?: unknown; @@ -15,6 +16,7 @@ export async function resolveCurrentDirectiveLevels(params: { resolveDefaultThinkingLevel: () => Promise; }): Promise<{ currentThinkLevel: ThinkLevel | undefined; + currentFastMode: boolean | undefined; currentVerboseLevel: VerboseLevel | undefined; currentReasoningLevel: ReasoningLevel; currentElevatedLevel: ElevatedLevel | undefined; @@ -24,6 +26,8 @@ export async function resolveCurrentDirectiveLevels(params: { (await params.resolveDefaultThinkingLevel()) ?? (params.agentCfg?.thinkingDefault as ThinkLevel | undefined); const currentThinkLevel = resolvedDefaultThinkLevel; + const currentFastMode = + typeof params.sessionEntry?.fastMode === "boolean" ? params.sessionEntry.fastMode : undefined; const currentVerboseLevel = (params.sessionEntry?.verboseLevel as VerboseLevel | undefined) ?? 
(params.agentCfg?.verboseDefault as VerboseLevel | undefined); @@ -34,6 +38,7 @@ export async function resolveCurrentDirectiveLevels(params: { (params.agentCfg?.elevatedDefault as ElevatedLevel | undefined); return { currentThinkLevel, + currentFastMode, currentVerboseLevel, currentReasoningLevel, currentElevatedLevel, diff --git a/src/auto-reply/reply/directive-handling.model-picker.ts b/src/auto-reply/reply/directive-handling.model-picker.ts index 0c2bcaf61e6..46c892dab0f 100644 --- a/src/auto-reply/reply/directive-handling.model-picker.ts +++ b/src/auto-reply/reply/directive-handling.model-picker.ts @@ -19,6 +19,7 @@ const MODEL_PICK_PROVIDER_PREFERENCE = [ "zai", "openrouter", "opencode", + "opencode-go", "github-copilot", "groq", "cerebras", diff --git a/src/auto-reply/reply/directive-handling.model.test.ts b/src/auto-reply/reply/directive-handling.model.test.ts index 5d4a23f3efb..b815ecfc9b9 100644 --- a/src/auto-reply/reply/directive-handling.model.test.ts +++ b/src/auto-reply/reply/directive-handling.model.test.ts @@ -57,24 +57,28 @@ function resolveModelSelectionForCommand(params: { }); } +async function resolveModelInfoReply( + overrides: Partial[0]> = {}, +) { + return maybeHandleModelDirectiveInfo({ + directives: parseInlineDirectives("/model"), + cfg: baseConfig(), + agentDir: "/tmp/agent", + activeAgentId: "main", + provider: "anthropic", + model: "claude-opus-4-5", + defaultProvider: "anthropic", + defaultModel: "claude-opus-4-5", + aliasIndex: baseAliasIndex(), + allowedModelCatalog: [], + resetModelOverride: false, + ...overrides, + }); +} + describe("/model chat UX", () => { it("shows summary for /model with no args", async () => { - const directives = parseInlineDirectives("/model"); - const cfg = { commands: { text: true } } as unknown as OpenClawConfig; - - const reply = await maybeHandleModelDirectiveInfo({ - directives, - cfg, - agentDir: "/tmp/agent", - activeAgentId: "main", - provider: "anthropic", - model: "claude-opus-4-5", - 
defaultProvider: "anthropic", - defaultModel: "claude-opus-4-5", - aliasIndex: baseAliasIndex(), - allowedModelCatalog: [], - resetModelOverride: false, - }); + const reply = await resolveModelInfoReply(); expect(reply?.text).toContain("Current:"); expect(reply?.text).toContain("Browse: /models"); @@ -82,21 +86,11 @@ describe("/model chat UX", () => { }); it("shows active runtime model when different from selected model", async () => { - const directives = parseInlineDirectives("/model"); - const cfg = { commands: { text: true } } as unknown as OpenClawConfig; - - const reply = await maybeHandleModelDirectiveInfo({ - directives, - cfg, - agentDir: "/tmp/agent", - activeAgentId: "main", + const reply = await resolveModelInfoReply({ provider: "fireworks", model: "fireworks/minimax-m2p5", defaultProvider: "fireworks", defaultModel: "fireworks/minimax-m2p5", - aliasIndex: baseAliasIndex(), - allowedModelCatalog: [], - resetModelOverride: false, sessionEntry: { modelProvider: "deepinfra", model: "moonshotai/Kimi-K2.5", diff --git a/src/auto-reply/reply/directive-handling.params.ts b/src/auto-reply/reply/directive-handling.params.ts index af6f0ff0d6d..fd64e379d0c 100644 --- a/src/auto-reply/reply/directive-handling.params.ts +++ b/src/auto-reply/reply/directive-handling.params.ts @@ -32,6 +32,7 @@ export type HandleDirectiveOnlyCoreParams = { export type HandleDirectiveOnlyParams = HandleDirectiveOnlyCoreParams & { currentThinkLevel?: ThinkLevel; + currentFastMode?: boolean; currentVerboseLevel?: VerboseLevel; currentReasoningLevel?: ReasoningLevel; currentElevatedLevel?: ElevatedLevel; diff --git a/src/auto-reply/reply/directive-handling.parse.ts b/src/auto-reply/reply/directive-handling.parse.ts index b09d5c553bc..81265b52809 100644 --- a/src/auto-reply/reply/directive-handling.parse.ts +++ b/src/auto-reply/reply/directive-handling.parse.ts @@ -6,6 +6,7 @@ import type { ElevatedLevel, ReasoningLevel, ThinkLevel, VerboseLevel } from "./ import { 
extractElevatedDirective, extractExecDirective, + extractFastDirective, extractReasoningDirective, extractStatusDirective, extractThinkDirective, @@ -23,6 +24,9 @@ export type InlineDirectives = { hasVerboseDirective: boolean; verboseLevel?: VerboseLevel; rawVerboseLevel?: string; + hasFastDirective: boolean; + fastMode?: boolean; + rawFastMode?: string; hasReasoningDirective: boolean; reasoningLevel?: ReasoningLevel; rawReasoningLevel?: string; @@ -80,12 +84,18 @@ export function parseInlineDirectives( rawLevel: rawVerboseLevel, hasDirective: hasVerboseDirective, } = extractVerboseDirective(thinkCleaned); + const { + cleaned: fastCleaned, + fastMode, + rawLevel: rawFastMode, + hasDirective: hasFastDirective, + } = extractFastDirective(verboseCleaned); const { cleaned: reasoningCleaned, reasoningLevel, rawLevel: rawReasoningLevel, hasDirective: hasReasoningDirective, - } = extractReasoningDirective(verboseCleaned); + } = extractReasoningDirective(fastCleaned); const { cleaned: elevatedCleaned, elevatedLevel, @@ -151,6 +161,9 @@ export function parseInlineDirectives( hasVerboseDirective, verboseLevel, rawVerboseLevel, + hasFastDirective, + fastMode, + rawFastMode, hasReasoningDirective, reasoningLevel, rawReasoningLevel, @@ -201,6 +214,7 @@ export function isDirectiveOnly(params: { if ( !directives.hasThinkDirective && !directives.hasVerboseDirective && + !directives.hasFastDirective && !directives.hasReasoningDirective && !directives.hasElevatedDirective && !directives.hasExecDirective && diff --git a/src/auto-reply/reply/directives.ts b/src/auto-reply/reply/directives.ts index e0bda738b6d..96a4dbecb2e 100644 --- a/src/auto-reply/reply/directives.ts +++ b/src/auto-reply/reply/directives.ts @@ -2,6 +2,7 @@ import { escapeRegExp } from "../../utils.js"; import type { NoticeLevel, ReasoningLevel } from "../thinking.js"; import { type ElevatedLevel, + normalizeFastMode, normalizeElevatedLevel, normalizeNoticeLevel, normalizeReasoningLevel, @@ -124,6 +125,24 @@ export 
function extractVerboseDirective(body?: string): { }; } +export function extractFastDirective(body?: string): { + cleaned: string; + fastMode?: boolean; + rawLevel?: string; + hasDirective: boolean; +} { + if (!body) { + return { cleaned: "", hasDirective: false }; + } + const extracted = extractLevelDirective(body, ["fast"], normalizeFastMode); + return { + cleaned: extracted.cleaned, + fastMode: extracted.level, + rawLevel: extracted.rawLevel, + hasDirective: extracted.hasDirective, + }; +} + export function extractNoticeDirective(body?: string): { cleaned: string; noticeLevel?: NoticeLevel; diff --git a/src/auto-reply/reply/discord-parent-channel.ts b/src/auto-reply/reply/discord-parent-channel.ts new file mode 100644 index 00000000000..877c4593ea7 --- /dev/null +++ b/src/auto-reply/reply/discord-parent-channel.ts @@ -0,0 +1,15 @@ +import { normalizeConversationText } from "../../acp/conversation-id.js"; +import { parseAgentSessionKey } from "../../routing/session-key.js"; + +export function parseDiscordParentChannelFromSessionKey(raw: unknown): string | undefined { + const sessionKey = normalizeConversationText(raw); + if (!sessionKey) { + return undefined; + } + const scoped = parseAgentSessionKey(sessionKey)?.rest ?? 
sessionKey.toLowerCase(); + const match = scoped.match(/(?:^|:)channel:([^:]+)$/); + if (!match?.[1]) { + return undefined; + } + return match[1]; +} diff --git a/src/auto-reply/reply/dispatch-acp.test.ts b/src/auto-reply/reply/dispatch-acp.test.ts index 286b73a7ceb..b19f2edde09 100644 --- a/src/auto-reply/reply/dispatch-acp.test.ts +++ b/src/auto-reply/reply/dispatch-acp.test.ts @@ -1,3 +1,6 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; import { beforeEach, describe, expect, it, vi } from "vitest"; import { AcpRuntimeError } from "../../acp/runtime/errors.js"; import type { AcpSessionStoreEntry } from "../../acp/runtime/session-meta.js"; @@ -131,6 +134,7 @@ async function runDispatch(params: { dispatcher?: ReplyDispatcher; shouldRouteToOriginating?: boolean; onReplyStart?: () => void; + ctxOverrides?: Record; }) { return tryDispatchAcpReply({ ctx: buildTestCtx({ @@ -138,6 +142,7 @@ async function runDispatch(params: { Surface: "discord", SessionKey: sessionKey, BodyForAgent: params.bodyForAgent, + ...params.ctxOverrides, }), cfg: params.cfg ?? createAcpTestConfig(), dispatcher: params.dispatcher ?? 
createDispatcher().dispatcher, @@ -353,6 +358,64 @@ describe("tryDispatchAcpReply", () => { expect(onReplyStart).not.toHaveBeenCalled(); }); + it("forwards normalized image attachments into ACP turns", async () => { + setReadyAcpResolution(); + const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "dispatch-acp-")); + const imagePath = path.join(tempDir, "inbound.png"); + try { + await fs.writeFile(imagePath, "image-bytes"); + managerMocks.runTurn.mockResolvedValue(undefined); + + await runDispatch({ + bodyForAgent: " ", + ctxOverrides: { + MediaPath: imagePath, + MediaType: "image/png", + }, + }); + + expect(managerMocks.runTurn).toHaveBeenCalledWith( + expect.objectContaining({ + text: "", + attachments: [ + { + mediaType: "image/png", + data: Buffer.from("image-bytes").toString("base64"), + }, + ], + }), + ); + } finally { + await fs.rm(tempDir, { recursive: true, force: true }); + } + }); + + it("skips ACP turns for non-image attachments when there is no text prompt", async () => { + setReadyAcpResolution(); + const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "dispatch-acp-")); + const docPath = path.join(tempDir, "inbound.pdf"); + const { dispatcher } = createDispatcher(); + const onReplyStart = vi.fn(); + try { + await fs.writeFile(docPath, "pdf-bytes"); + + await runDispatch({ + bodyForAgent: " ", + dispatcher, + onReplyStart, + ctxOverrides: { + MediaPath: docPath, + MediaType: "application/pdf", + }, + }); + + expect(managerMocks.runTurn).not.toHaveBeenCalled(); + expect(onReplyStart).not.toHaveBeenCalled(); + } finally { + await fs.rm(tempDir, { recursive: true, force: true }); + } + }); + it("surfaces ACP policy errors as final error replies", async () => { setReadyAcpResolution(); policyMocks.resolveAcpDispatchPolicyError.mockReturnValue( diff --git a/src/auto-reply/reply/dispatch-acp.ts b/src/auto-reply/reply/dispatch-acp.ts index 33990cb20d6..8fc7110fc4c 100644 --- a/src/auto-reply/reply/dispatch-acp.ts +++ 
b/src/auto-reply/reply/dispatch-acp.ts @@ -1,4 +1,6 @@ +import fs from "node:fs/promises"; import { getAcpSessionManager } from "../../acp/control-plane/manager.js"; +import type { AcpTurnAttachment } from "../../acp/control-plane/manager.types.js"; import { resolveAcpAgentPolicyError, resolveAcpDispatchPolicyError } from "../../acp/policy.js"; import { formatAcpRuntimeErrorText } from "../../acp/runtime/error-text.js"; import { toAcpRuntimeError } from "../../acp/runtime/errors.js"; @@ -14,6 +16,11 @@ import { logVerbose } from "../../globals.js"; import { getSessionBindingService } from "../../infra/outbound/session-binding-service.js"; import { generateSecureUuid } from "../../infra/secure-random.js"; import { prefixSystemMessage } from "../../infra/system-message.js"; +import { applyMediaUnderstanding } from "../../media-understanding/apply.js"; +import { + normalizeAttachmentPath, + normalizeAttachments, +} from "../../media-understanding/attachments.normalize.js"; import { resolveAgentIdFromSessionKey } from "../../routing/session-key.js"; import { maybeApplyTtsToPayload, resolveTtsConfig } from "../../tts/tts.js"; import { @@ -57,6 +64,40 @@ function resolveAcpPromptText(ctx: FinalizedMsgContext): string { ]).trim(); } +const ACP_ATTACHMENT_MAX_BYTES = 10 * 1024 * 1024; + +async function resolveAcpAttachments(ctx: FinalizedMsgContext): Promise { + const mediaAttachments = normalizeAttachments(ctx); + const results: AcpTurnAttachment[] = []; + for (const attachment of mediaAttachments) { + const mediaType = attachment.mime ?? 
"application/octet-stream"; + if (!mediaType.startsWith("image/")) { + continue; + } + const filePath = normalizeAttachmentPath(attachment.path); + if (!filePath) { + continue; + } + try { + const stat = await fs.stat(filePath); + if (stat.size > ACP_ATTACHMENT_MAX_BYTES) { + logVerbose( + `dispatch-acp: skipping attachment ${filePath} (${stat.size} bytes exceeds ${ACP_ATTACHMENT_MAX_BYTES} byte limit)`, + ); + continue; + } + const buf = await fs.readFile(filePath); + results.push({ + mediaType, + data: buf.toString("base64"), + }); + } catch { + // Skip unreadable files. Text content should still be delivered. + } + } + return results; +} + function resolveCommandCandidateText(ctx: FinalizedMsgContext): string { return resolveFirstContextText(ctx, ["CommandBody", "BodyForCommands", "RawBody", "Body"]).trim(); } @@ -188,15 +229,6 @@ export async function tryDispatchAcpReply(params: { onReplyStart: params.onReplyStart, }); - const promptText = resolveAcpPromptText(params.ctx); - if (!promptText) { - const counts = params.dispatcher.getQueuedCounts(); - delivery.applyRoutedCounts(counts); - params.recordProcessed("completed", { reason: "acp_empty_prompt" }); - params.markIdle("message_completed"); - return { queuedFinal: false, counts }; - } - const identityPendingBeforeTurn = isSessionIdentityPending( resolveSessionIdentityFromMeta(acpResolution.kind === "ready" ? acpResolution.meta : undefined), ); @@ -238,6 +270,28 @@ export async function tryDispatchAcpReply(params: { if (agentPolicyError) { throw agentPolicyError; } + if (!params.ctx.MediaUnderstanding?.length) { + try { + await applyMediaUnderstanding({ + ctx: params.ctx, + cfg: params.cfg, + }); + } catch (err) { + logVerbose( + `dispatch-acp: media understanding failed, proceeding with raw content: ${err instanceof Error ? 
err.message : String(err)}`, + ); + } + } + + const promptText = resolveAcpPromptText(params.ctx); + const attachments = await resolveAcpAttachments(params.ctx); + if (!promptText && attachments.length === 0) { + const counts = params.dispatcher.getQueuedCounts(); + delivery.applyRoutedCounts(counts); + params.recordProcessed("completed", { reason: "acp_empty_prompt" }); + params.markIdle("message_completed"); + return { queuedFinal: false, counts }; + } try { await delivery.startReplyLifecycle(); @@ -251,6 +305,7 @@ export async function tryDispatchAcpReply(params: { cfg: params.cfg, sessionKey, text: promptText, + attachments: attachments.length > 0 ? attachments : undefined, mode: "prompt", requestId: resolveAcpRequestId(params.ctx), onEvent: async (event) => await projector.onEvent(event), diff --git a/src/auto-reply/reply/dispatch-from-config.test.ts b/src/auto-reply/reply/dispatch-from-config.test.ts index 982557ecb68..666964eb865 100644 --- a/src/auto-reply/reply/dispatch-from-config.test.ts +++ b/src/auto-reply/reply/dispatch-from-config.test.ts @@ -41,6 +41,12 @@ const acpMocks = vi.hoisted(() => ({ const sessionBindingMocks = vi.hoisted(() => ({ listBySession: vi.fn<(targetSessionKey: string) => SessionBindingRecord[]>(() => []), })); +const sessionStoreMocks = vi.hoisted(() => ({ + currentEntry: undefined as Record | undefined, + loadSessionStore: vi.fn(() => ({})), + resolveStorePath: vi.fn(() => "/tmp/mock-sessions.json"), + resolveSessionStoreEntry: vi.fn(() => ({ existing: sessionStoreMocks.currentEntry })), +})); const ttsMocks = vi.hoisted(() => { const state = { synthesizeFinalAudio: false, @@ -77,9 +83,16 @@ vi.mock("./route-reply.js", () => ({ isRoutableChannel: (channel: string | undefined) => Boolean( channel && - ["telegram", "slack", "discord", "signal", "imessage", "whatsapp", "feishu"].includes( - channel, - ), + [ + "telegram", + "slack", + "discord", + "signal", + "imessage", + "whatsapp", + "feishu", + "mattermost", + 
].includes(channel), ), routeReply: mocks.routeReply, })); @@ -100,6 +113,15 @@ vi.mock("../../logging/diagnostic.js", () => ({ logMessageProcessed: diagnosticMocks.logMessageProcessed, logSessionStateChange: diagnosticMocks.logSessionStateChange, })); +vi.mock("../../config/sessions.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + loadSessionStore: sessionStoreMocks.loadSessionStore, + resolveStorePath: sessionStoreMocks.resolveStorePath, + resolveSessionStoreEntry: sessionStoreMocks.resolveSessionStoreEntry, + }; +}); vi.mock("../../plugins/hook-runner-global.js", () => ({ getGlobalHookRunner: () => hookMocks.runner, @@ -228,6 +250,10 @@ describe("dispatchReplyFromConfig", () => { acpMocks.requireAcpRuntimeBackend.mockReset(); sessionBindingMocks.listBySession.mockReset(); sessionBindingMocks.listBySession.mockReturnValue([]); + sessionStoreMocks.currentEntry = undefined; + sessionStoreMocks.loadSessionStore.mockClear(); + sessionStoreMocks.resolveStorePath.mockClear(); + sessionStoreMocks.resolveSessionStoreEntry.mockClear(); ttsMocks.state.synthesizeFinalAudio = false; ttsMocks.maybeApplyTtsToPayload.mockClear(); ttsMocks.normalizeTtsAutoMode.mockClear(); @@ -293,6 +319,88 @@ describe("dispatchReplyFromConfig", () => { ); }); + it("falls back to thread-scoped session key when current ctx has no MessageThreadId", async () => { + setNoAbort(); + mocks.routeReply.mockClear(); + sessionStoreMocks.currentEntry = { + deliveryContext: { + channel: "mattermost", + to: "channel:CHAN1", + accountId: "default", + }, + origin: { + threadId: "stale-origin-root", + }, + lastThreadId: "stale-origin-root", + }; + const cfg = emptyConfig; + const dispatcher = createDispatcher(); + const ctx = buildTestCtx({ + Provider: "webchat", + Surface: "webchat", + SessionKey: "agent:main:mattermost:channel:CHAN1:thread:post-root", + AccountId: "default", + MessageThreadId: undefined, + OriginatingChannel: "mattermost", + OriginatingTo: 
"channel:CHAN1", + ExplicitDeliverRoute: true, + }); + + const replyResolver = async () => ({ text: "hi" }) satisfies ReplyPayload; + await dispatchReplyFromConfig({ ctx, cfg, dispatcher, replyResolver }); + + expect(mocks.routeReply).toHaveBeenCalledWith( + expect.objectContaining({ + channel: "mattermost", + to: "channel:CHAN1", + threadId: "post-root", + }), + ); + }); + + it("does not resurrect a cleared route thread from origin metadata", async () => { + setNoAbort(); + mocks.routeReply.mockClear(); + // Simulate the real store: lastThreadId and deliveryContext.threadId may be normalised from + // origin.threadId on read, but a non-thread session key must still route to channel root. + sessionStoreMocks.currentEntry = { + deliveryContext: { + channel: "mattermost", + to: "channel:CHAN1", + accountId: "default", + threadId: "stale-root", + }, + lastThreadId: "stale-root", + origin: { + threadId: "stale-root", + }, + }; + const cfg = emptyConfig; + const dispatcher = createDispatcher(); + const ctx = buildTestCtx({ + Provider: "webchat", + Surface: "webchat", + SessionKey: "agent:main:mattermost:channel:CHAN1", + AccountId: "default", + MessageThreadId: undefined, + OriginatingChannel: "mattermost", + OriginatingTo: "channel:CHAN1", + ExplicitDeliverRoute: true, + }); + + const replyResolver = async () => ({ text: "hi" }) satisfies ReplyPayload; + await dispatchReplyFromConfig({ ctx, cfg, dispatcher, replyResolver }); + + const routeCall = mocks.routeReply.mock.calls[0]?.[0] as + | { channel?: string; to?: string; threadId?: string | number } + | undefined; + expect(routeCall).toMatchObject({ + channel: "mattermost", + to: "channel:CHAN1", + }); + expect(routeCall?.threadId).toBeUndefined(); + }); + it("forces suppressTyping when routing to a different originating channel", async () => { setNoAbort(); const cfg = emptyConfig; @@ -543,6 +651,51 @@ describe("dispatchReplyFromConfig", () => { expect(dispatcher.sendFinalReply).toHaveBeenCalledTimes(1); }); + 
it("delivers deterministic exec approval tool payloads in groups", async () => { + setNoAbort(); + const cfg = emptyConfig; + const dispatcher = createDispatcher(); + const ctx = buildTestCtx({ + Provider: "telegram", + ChatType: "group", + }); + + const replyResolver = async ( + _ctx: MsgContext, + opts?: GetReplyOptions, + _cfg?: OpenClawConfig, + ) => { + await opts?.onToolResult?.({ + text: "Approval required.\n\n```txt\n/approve 117ba06d allow-once\n```", + channelData: { + execApproval: { + approvalId: "117ba06d-1111-2222-3333-444444444444", + approvalSlug: "117ba06d", + allowedDecisions: ["allow-once", "allow-always", "deny"], + }, + }, + }); + return { text: "NO_REPLY" } satisfies ReplyPayload; + }; + + await dispatchReplyFromConfig({ ctx, cfg, dispatcher, replyResolver }); + + expect(dispatcher.sendToolResult).toHaveBeenCalledTimes(1); + expect(firstToolResultPayload(dispatcher)).toEqual( + expect.objectContaining({ + text: "Approval required.\n\n```txt\n/approve 117ba06d allow-once\n```", + channelData: { + execApproval: { + approvalId: "117ba06d-1111-2222-3333-444444444444", + approvalSlug: "117ba06d", + allowedDecisions: ["allow-once", "allow-always", "deny"], + }, + }, + }), + ); + expect(dispatcher.sendFinalReply).toHaveBeenCalledWith({ text: "NO_REPLY" }); + }); + it("sends tool results via dispatcher in DM sessions", async () => { setNoAbort(); const cfg = emptyConfig; @@ -601,6 +754,50 @@ describe("dispatchReplyFromConfig", () => { expect(dispatcher.sendFinalReply).toHaveBeenCalledTimes(1); }); + it("delivers deterministic exec approval tool payloads for native commands", async () => { + setNoAbort(); + const cfg = emptyConfig; + const dispatcher = createDispatcher(); + const ctx = buildTestCtx({ + Provider: "telegram", + CommandSource: "native", + }); + + const replyResolver = async ( + _ctx: MsgContext, + opts?: GetReplyOptions, + _cfg?: OpenClawConfig, + ) => { + await opts?.onToolResult?.({ + text: "Approval required.\n\n```txt\n/approve 
117ba06d allow-once\n```", + channelData: { + execApproval: { + approvalId: "117ba06d-1111-2222-3333-444444444444", + approvalSlug: "117ba06d", + allowedDecisions: ["allow-once", "allow-always", "deny"], + }, + }, + }); + return { text: "NO_REPLY" } satisfies ReplyPayload; + }; + + await dispatchReplyFromConfig({ ctx, cfg, dispatcher, replyResolver }); + + expect(dispatcher.sendToolResult).toHaveBeenCalledTimes(1); + expect(firstToolResultPayload(dispatcher)).toEqual( + expect.objectContaining({ + channelData: { + execApproval: { + approvalId: "117ba06d-1111-2222-3333-444444444444", + approvalSlug: "117ba06d", + allowedDecisions: ["allow-once", "allow-always", "deny"], + }, + }, + }), + ); + expect(dispatcher.sendFinalReply).toHaveBeenCalledWith({ text: "NO_REPLY" }); + }); + it("fast-aborts without calling the reply resolver", async () => { mocks.tryFastAbortFromMessage.mockResolvedValue({ handled: true, @@ -1539,6 +1736,47 @@ describe("dispatchReplyFromConfig", () => { expect(replyResolver).toHaveBeenCalledTimes(1); }); + it("suppresses local discord exec approval tool prompts when discord approvals are enabled", async () => { + setNoAbort(); + const cfg = { + channels: { + discord: { + enabled: true, + execApprovals: { + enabled: true, + approvers: ["123"], + }, + }, + }, + } as OpenClawConfig; + const dispatcher = createDispatcher(); + const ctx = buildTestCtx({ + Provider: "discord", + Surface: "discord", + AccountId: "default", + }); + const replyResolver = vi.fn(async (_ctx: MsgContext, options?: GetReplyOptions) => { + await options?.onToolResult?.({ + text: "Approval required.", + channelData: { + execApproval: { + approvalId: "12345678-1234-1234-1234-123456789012", + approvalSlug: "12345678", + allowedDecisions: ["allow-once", "allow-always", "deny"], + }, + }, + }); + return { text: "done" } as ReplyPayload; + }); + + await dispatchReplyFromConfig({ ctx, cfg, dispatcher, replyResolver }); + + expect(dispatcher.sendToolResult).not.toHaveBeenCalled(); + 
expect(dispatcher.sendFinalReply).toHaveBeenCalledWith( + expect.objectContaining({ text: "done" }), + ); + }); + it("deduplicates same-agent inbound replies across main and direct session keys", async () => { setNoAbort(); const cfg = emptyConfig; diff --git a/src/auto-reply/reply/dispatch-from-config.ts b/src/auto-reply/reply/dispatch-from-config.ts index 786b1a7c16b..b21fcabe80b 100644 --- a/src/auto-reply/reply/dispatch-from-config.ts +++ b/src/auto-reply/reply/dispatch-from-config.ts @@ -2,10 +2,12 @@ import { resolveSessionAgentId } from "../../agents/agent-scope.js"; import type { OpenClawConfig } from "../../config/config.js"; import { loadSessionStore, + parseSessionThreadInfo, resolveSessionStoreEntry, resolveStorePath, type SessionEntry, } from "../../config/sessions.js"; +import { shouldSuppressLocalDiscordExecApprovalPrompt } from "../../discord/exec-approvals.js"; import { logVerbose } from "../../globals.js"; import { fireAndForgetHook } from "../../hooks/fire-and-forget.js"; import { createInternalHookEvent, triggerInternalHook } from "../../hooks/internal-hooks.js"; @@ -171,6 +173,12 @@ export async function dispatchReplyFromConfig(params: { const sessionStoreEntry = resolveSessionStoreLookup(ctx, cfg); const acpDispatchSessionKey = sessionStoreEntry.sessionKey ?? sessionKey; + // Restore route thread context only from the active turn or the thread-scoped session key. + // Do not read thread ids from the normalised session store here: `origin.threadId` can be + // folded back into lastThreadId/deliveryContext during store normalisation and resurrect a + // stale route after thread delivery was intentionally cleared. + const routeThreadId = + ctx.MessageThreadId ?? 
parseSessionThreadInfo(acpDispatchSessionKey).threadId; const inboundAudio = isInboundAudioContext(ctx); const sessionTtsAuto = normalizeTtsAutoMode(sessionStoreEntry.entry?.ttsAuto); const hookRunner = getGlobalHookRunner(); @@ -259,7 +267,7 @@ export async function dispatchReplyFromConfig(params: { to: originatingTo, sessionKey: ctx.SessionKey, accountId: ctx.AccountId, - threadId: ctx.MessageThreadId, + threadId: routeThreadId, cfg, abortSignal, mirror, @@ -288,7 +296,7 @@ export async function dispatchReplyFromConfig(params: { to: originatingTo, sessionKey: ctx.SessionKey, accountId: ctx.AccountId, - threadId: ctx.MessageThreadId, + threadId: routeThreadId, cfg, isGroup, groupId, @@ -365,9 +373,28 @@ export async function dispatchReplyFromConfig(params: { let blockCount = 0; const resolveToolDeliveryPayload = (payload: ReplyPayload): ReplyPayload | null => { + if ( + normalizeMessageChannel(ctx.Surface ?? ctx.Provider) === "discord" && + shouldSuppressLocalDiscordExecApprovalPrompt({ + cfg, + accountId: ctx.AccountId, + payload, + }) + ) { + return null; + } if (shouldSendToolSummaries) { return payload; } + const execApproval = + payload.channelData && + typeof payload.channelData === "object" && + !Array.isArray(payload.channelData) + ? payload.channelData.execApproval + : undefined; + if (execApproval && typeof execApproval === "object" && !Array.isArray(execApproval)) { + return payload; + } // Group/native flows intentionally suppress tool summary text, but media-only // tool results (for example TTS audio) must still be delivered. const hasMedia = Boolean(payload.mediaUrl) || (payload.mediaUrls?.length ?? 
0) > 0; @@ -499,7 +526,7 @@ export async function dispatchReplyFromConfig(params: { to: originatingTo, sessionKey: ctx.SessionKey, accountId: ctx.AccountId, - threadId: ctx.MessageThreadId, + threadId: routeThreadId, cfg, isGroup, groupId, @@ -551,7 +578,7 @@ export async function dispatchReplyFromConfig(params: { to: originatingTo, sessionKey: ctx.SessionKey, accountId: ctx.AccountId, - threadId: ctx.MessageThreadId, + threadId: routeThreadId, cfg, isGroup, groupId, diff --git a/src/auto-reply/reply/followup-runner.test.ts b/src/auto-reply/reply/followup-runner.test.ts index a02ce0b2038..8d12e815685 100644 --- a/src/auto-reply/reply/followup-runner.test.ts +++ b/src/auto-reply/reply/followup-runner.test.ts @@ -4,7 +4,7 @@ import path from "node:path"; import { beforeEach, describe, expect, it, vi } from "vitest"; import { loadSessionStore, saveSessionStore, type SessionEntry } from "../../config/sessions.js"; import type { FollowupRun } from "./queue.js"; -import { createMockTypingController } from "./test-helpers.js"; +import { createMockFollowupRun, createMockTypingController } from "./test-helpers.js"; const runEmbeddedPiAgentMock = vi.fn(); const routeReplyMock = vi.fn(); @@ -50,47 +50,12 @@ beforeEach(() => { }); const baseQueuedRun = (messageProvider = "whatsapp"): FollowupRun => - ({ - prompt: "hello", - summaryLine: "hello", - enqueuedAt: Date.now(), - originatingTo: "channel:C1", - run: { - sessionId: "session", - sessionKey: "main", - messageProvider, - agentAccountId: "primary", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - config: {}, - skillsSnapshot: {}, - provider: "anthropic", - model: "claude", - thinkLevel: "low", - verboseLevel: "off", - elevatedLevel: "off", - bashElevated: { - enabled: false, - allowed: false, - defaultLevel: "off", - }, - timeoutMs: 1_000, - blockReplyBreak: "message_end", - }, - }) as FollowupRun; + createMockFollowupRun({ run: { messageProvider } }); function createQueuedRun( overrides: Partial> & { run?: 
Partial } = {}, ): FollowupRun { - const base = baseQueuedRun(); - return { - ...base, - ...overrides, - run: { - ...base.run, - ...overrides.run, - }, - }; + return createMockFollowupRun(overrides); } function mockCompactionRun(params: { diff --git a/src/auto-reply/reply/followup-runner.ts b/src/auto-reply/reply/followup-runner.ts index 91e78138102..8c7eccb5f02 100644 --- a/src/auto-reply/reply/followup-runner.ts +++ b/src/auto-reply/reply/followup-runner.ts @@ -159,6 +159,7 @@ export function createFollowupRunner(params: { cfg: queued.run.config, provider: queued.run.provider, model: queued.run.model, + runId, agentDir: queued.run.agentDir, fallbacksOverride: resolveRunModelFallbacksOverride({ cfg: queued.run.config, diff --git a/src/auto-reply/reply/get-reply-directives-apply.ts b/src/auto-reply/reply/get-reply-directives-apply.ts index 4232171a82b..fa02e00f6b4 100644 --- a/src/auto-reply/reply/get-reply-directives-apply.ts +++ b/src/auto-reply/reply/get-reply-directives-apply.ts @@ -150,6 +150,7 @@ export async function applyInlineDirectiveOverrides(params: { } const { currentThinkLevel: resolvedDefaultThinkLevel, + currentFastMode, currentVerboseLevel, currentReasoningLevel, currentElevatedLevel, @@ -162,6 +163,7 @@ export async function applyInlineDirectiveOverrides(params: { const directiveReply = await handleDirectiveOnly({ ...createDirectiveHandlingBase(), currentThinkLevel, + currentFastMode, currentVerboseLevel, currentReasoningLevel, currentElevatedLevel, @@ -201,6 +203,7 @@ export async function applyInlineDirectiveOverrides(params: { const hasAnyDirective = directives.hasThinkDirective || + directives.hasFastDirective || directives.hasVerboseDirective || directives.hasReasoningDirective || directives.hasElevatedDirective || diff --git a/src/auto-reply/reply/get-reply-directives-utils.ts b/src/auto-reply/reply/get-reply-directives-utils.ts index 02c60a31fac..d507d71d86b 100644 --- a/src/auto-reply/reply/get-reply-directives-utils.ts +++ 
b/src/auto-reply/reply/get-reply-directives-utils.ts @@ -26,6 +26,9 @@ export function clearInlineDirectives(cleaned: string): InlineDirectives { hasVerboseDirective: false, verboseLevel: undefined, rawVerboseLevel: undefined, + hasFastDirective: false, + fastMode: undefined, + rawFastMode: undefined, hasReasoningDirective: false, reasoningLevel: undefined, rawReasoningLevel: undefined, diff --git a/src/auto-reply/reply/get-reply-directives.ts b/src/auto-reply/reply/get-reply-directives.ts index 4c9da28deae..37eef3fb9b8 100644 --- a/src/auto-reply/reply/get-reply-directives.ts +++ b/src/auto-reply/reply/get-reply-directives.ts @@ -1,4 +1,5 @@ import type { ExecToolDefaults } from "../../agents/bash-tools.js"; +import { resolveFastModeState } from "../../agents/fast-mode.js"; import type { ModelAliasIndex } from "../../agents/model-selection.js"; import { resolveSandboxRuntimeStatus } from "../../agents/sandbox.js"; import type { SkillCommandSpec } from "../../agents/skills.js"; @@ -37,6 +38,7 @@ export type ReplyDirectiveContinuation = { elevatedFailures: Array<{ gate: string; key: string }>; defaultActivation: ReturnType; resolvedThinkLevel: ThinkLevel | undefined; + resolvedFastMode: boolean; resolvedVerboseLevel: VerboseLevel | undefined; resolvedReasoningLevel: ReasoningLevel; resolvedElevatedLevel: ElevatedLevel; @@ -228,6 +230,7 @@ export async function resolveReplyDirectives(params: { const hasInlineDirective = parsedDirectives.hasThinkDirective || parsedDirectives.hasVerboseDirective || + parsedDirectives.hasFastDirective || parsedDirectives.hasReasoningDirective || parsedDirectives.hasElevatedDirective || parsedDirectives.hasExecDirective || @@ -260,6 +263,7 @@ export async function resolveReplyDirectives(params: { ...parsedDirectives, hasThinkDirective: false, hasVerboseDirective: false, + hasFastDirective: false, hasReasoningDirective: false, hasStatusDirective: false, hasModelDirective: false, @@ -340,6 +344,14 @@ export async function 
resolveReplyDirectives(params: { const defaultActivation = defaultGroupActivation(requireMention); const resolvedThinkLevel = directives.thinkLevel ?? (sessionEntry?.thinkingLevel as ThinkLevel | undefined); + const resolvedFastMode = + directives.fastMode ?? + resolveFastModeState({ + cfg, + provider, + model, + sessionEntry, + }).enabled; const resolvedVerboseLevel = directives.verboseLevel ?? @@ -373,6 +385,7 @@ export async function resolveReplyDirectives(params: { const modelState = await createModelSelectionState({ cfg, + agentId, agentCfg, sessionEntry, sessionStore, @@ -478,6 +491,7 @@ export async function resolveReplyDirectives(params: { elevatedFailures, defaultActivation, resolvedThinkLevel: resolvedThinkLevelWithDefault, + resolvedFastMode, resolvedVerboseLevel, resolvedReasoningLevel, resolvedElevatedLevel, diff --git a/src/auto-reply/reply/get-reply-inline-actions.skip-when-config-empty.test.ts b/src/auto-reply/reply/get-reply-inline-actions.skip-when-config-empty.test.ts index 51351f05de8..36b5910ecae 100644 --- a/src/auto-reply/reply/get-reply-inline-actions.skip-when-config-empty.test.ts +++ b/src/auto-reply/reply/get-reply-inline-actions.skip-when-config-empty.test.ts @@ -84,6 +84,19 @@ const createHandleInlineActionsInput = (params: { }; }; +async function expectInlineActionSkipped(params: { + ctx: ReturnType; + typing: TypingController; + cleanedBody: string; + command?: Partial; + overrides?: Partial>; +}) { + const result = await handleInlineActions(createHandleInlineActionsInput(params)); + expect(result).toEqual({ kind: "reply", reply: undefined }); + expect(params.typing.cleanup).toHaveBeenCalled(); + expect(handleCommandsMock).not.toHaveBeenCalled(); +} + describe("handleInlineActions", () => { beforeEach(() => { handleCommandsMock.mockReset(); @@ -97,18 +110,12 @@ describe("handleInlineActions", () => { To: "whatsapp:+123", Body: "hi", }); - const result = await handleInlineActions( - createHandleInlineActionsInput({ - ctx, - typing, - 
cleanedBody: "hi", - command: { to: "whatsapp:+123" }, - }), - ); - - expect(result).toEqual({ kind: "reply", reply: undefined }); - expect(typing.cleanup).toHaveBeenCalled(); - expect(handleCommandsMock).not.toHaveBeenCalled(); + await expectInlineActionSkipped({ + ctx, + typing, + cleanedBody: "hi", + command: { to: "whatsapp:+123" }, + }); }); it("forwards agentDir into handleCommands", async () => { @@ -163,25 +170,19 @@ describe("handleInlineActions", () => { MessageSid: "41", }); - const result = await handleInlineActions( - createHandleInlineActionsInput({ - ctx, - typing, - cleanedBody: "old queued message", - command: { - rawBodyNormalized: "old queued message", - commandBodyNormalized: "old queued message", - }, - overrides: { - sessionEntry, - sessionStore, - }, - }), - ); - - expect(result).toEqual({ kind: "reply", reply: undefined }); - expect(typing.cleanup).toHaveBeenCalled(); - expect(handleCommandsMock).not.toHaveBeenCalled(); + await expectInlineActionSkipped({ + ctx, + typing, + cleanedBody: "old queued message", + command: { + rawBodyNormalized: "old queued message", + commandBodyNormalized: "old queued message", + }, + overrides: { + sessionEntry, + sessionStore, + }, + }); }); it("clears /stop cutoff when a newer message arrives", async () => { diff --git a/src/auto-reply/reply/get-reply-inline-actions.ts b/src/auto-reply/reply/get-reply-inline-actions.ts index e133585411a..c312e1144e4 100644 --- a/src/auto-reply/reply/get-reply-inline-actions.ts +++ b/src/auto-reply/reply/get-reply-inline-actions.ts @@ -30,8 +30,13 @@ import type { createModelSelectionState } from "./model-selection.js"; import { extractInlineSimpleCommand } from "./reply-inline.js"; import type { TypingController } from "./typing.js"; -const builtinSlashCommands = (() => { - return listReservedChatSlashCommandNames([ +let builtinSlashCommands: Set | null = null; + +function getBuiltinSlashCommands(): Set { + if (builtinSlashCommands) { + return builtinSlashCommands; + } + 
builtinSlashCommands = listReservedChatSlashCommandNames([ "think", "verbose", "reasoning", @@ -41,7 +46,8 @@ const builtinSlashCommands = (() => { "status", "queue", ]); -})(); + return builtinSlashCommands; +} function resolveSlashCommandName(commandBodyNormalized: string): string | null { const trimmed = commandBodyNormalized.trim(); @@ -163,7 +169,7 @@ export async function handleInlineActions(params: { allowTextCommands && slashCommandName !== null && // `/skill …` needs the full skill command list. - (slashCommandName === "skill" || !builtinSlashCommands.has(slashCommandName)); + (slashCommandName === "skill" || !getBuiltinSlashCommands().has(slashCommandName)); const skillCommands = shouldLoadSkillCommands && params.skillCommands ? params.skillCommands diff --git a/src/auto-reply/reply/get-reply-run.ts b/src/auto-reply/reply/get-reply-run.ts index dceac522eca..760c42aed1a 100644 --- a/src/auto-reply/reply/get-reply-run.ts +++ b/src/auto-reply/reply/get-reply-run.ts @@ -1,6 +1,7 @@ import crypto from "node:crypto"; import { resolveSessionAuthProfileOverride } from "../../agents/auth-profiles/session-override.js"; import type { ExecToolDefaults } from "../../agents/bash-tools.js"; +import { resolveFastModeState } from "../../agents/fast-mode.js"; import { abortEmbeddedPiRun, isEmbeddedPiRunActive, @@ -509,6 +510,12 @@ export async function runPreparedReply( authProfileId, authProfileIdSource, thinkLevel: resolvedThinkLevel, + fastMode: resolveFastModeState({ + cfg, + provider, + model, + sessionEntry, + }).enabled, verboseLevel: resolvedVerboseLevel, reasoningLevel: resolvedReasoningLevel, elevatedLevel: resolvedElevatedLevel, diff --git a/src/auto-reply/reply/get-reply.ts b/src/auto-reply/reply/get-reply.ts index be4c8d362f8..81dd478a84a 100644 --- a/src/auto-reply/reply/get-reply.ts +++ b/src/auto-reply/reply/get-reply.ts @@ -175,6 +175,7 @@ export async function getReplyFromConfig( await applyResetModelOverride({ cfg, + agentId, resetTriggered, 
bodyStripped, sessionCtx, diff --git a/src/auto-reply/reply/inbound-dedupe.test.ts b/src/auto-reply/reply/inbound-dedupe.test.ts new file mode 100644 index 00000000000..c71aeb598dd --- /dev/null +++ b/src/auto-reply/reply/inbound-dedupe.test.ts @@ -0,0 +1,43 @@ +import { afterEach, describe, expect, it } from "vitest"; +import { importFreshModule } from "../../../test/helpers/import-fresh.js"; +import type { MsgContext } from "../templating.js"; +import { resetInboundDedupe } from "./inbound-dedupe.js"; + +const sharedInboundContext: MsgContext = { + Provider: "discord", + Surface: "discord", + From: "discord:user-1", + To: "channel:c1", + OriginatingChannel: "discord", + OriginatingTo: "channel:c1", + SessionKey: "agent:main:discord:channel:c1", + MessageSid: "msg-1", +}; + +describe("inbound dedupe", () => { + afterEach(() => { + resetInboundDedupe(); + }); + + it("shares dedupe state across distinct module instances", async () => { + const inboundA = await importFreshModule( + import.meta.url, + "./inbound-dedupe.js?scope=shared-a", + ); + const inboundB = await importFreshModule( + import.meta.url, + "./inbound-dedupe.js?scope=shared-b", + ); + + inboundA.resetInboundDedupe(); + inboundB.resetInboundDedupe(); + + try { + expect(inboundA.shouldSkipDuplicateInbound(sharedInboundContext)).toBe(false); + expect(inboundB.shouldSkipDuplicateInbound(sharedInboundContext)).toBe(true); + } finally { + inboundA.resetInboundDedupe(); + inboundB.resetInboundDedupe(); + } + }); +}); diff --git a/src/auto-reply/reply/inbound-dedupe.ts b/src/auto-reply/reply/inbound-dedupe.ts index 0e4740261b9..04744217c7e 100644 --- a/src/auto-reply/reply/inbound-dedupe.ts +++ b/src/auto-reply/reply/inbound-dedupe.ts @@ -1,15 +1,24 @@ import { logVerbose, shouldLogVerbose } from "../../globals.js"; import { createDedupeCache, type DedupeCache } from "../../infra/dedupe.js"; import { parseAgentSessionKey } from "../../sessions/session-key-utils.js"; +import { resolveGlobalSingleton } from 
"../../shared/global-singleton.js"; import type { MsgContext } from "../templating.js"; const DEFAULT_INBOUND_DEDUPE_TTL_MS = 20 * 60_000; const DEFAULT_INBOUND_DEDUPE_MAX = 5000; -const inboundDedupeCache = createDedupeCache({ - ttlMs: DEFAULT_INBOUND_DEDUPE_TTL_MS, - maxSize: DEFAULT_INBOUND_DEDUPE_MAX, -}); +/** + * Keep inbound dedupe shared across bundled chunks so the same provider + * message cannot bypass dedupe by entering through a different chunk copy. + */ +const INBOUND_DEDUPE_CACHE_KEY = Symbol.for("openclaw.inboundDedupeCache"); + +const inboundDedupeCache = resolveGlobalSingleton(INBOUND_DEDUPE_CACHE_KEY, () => + createDedupeCache({ + ttlMs: DEFAULT_INBOUND_DEDUPE_TTL_MS, + maxSize: DEFAULT_INBOUND_DEDUPE_MAX, + }), +); const normalizeProvider = (value?: string | null) => value?.trim().toLowerCase() || ""; diff --git a/src/auto-reply/reply/memory-flush.test.ts b/src/auto-reply/reply/memory-flush.test.ts index 0e04e7e0ea3..079c5578676 100644 --- a/src/auto-reply/reply/memory-flush.test.ts +++ b/src/auto-reply/reply/memory-flush.test.ts @@ -1,6 +1,10 @@ import { describe, expect, it } from "vitest"; import type { OpenClawConfig } from "../../config/config.js"; -import { DEFAULT_MEMORY_FLUSH_PROMPT, resolveMemoryFlushPromptForRun } from "./memory-flush.js"; +import { + DEFAULT_MEMORY_FLUSH_PROMPT, + resolveMemoryFlushPromptForRun, + resolveMemoryFlushRelativePathForRun, +} from "./memory-flush.js"; describe("resolveMemoryFlushPromptForRun", () => { const cfg = { @@ -35,6 +39,15 @@ describe("resolveMemoryFlushPromptForRun", () => { expect(prompt).toContain("Current time: already present"); expect((prompt.match(/Current time:/g) ?? 
[]).length).toBe(1); }); + + it("resolves the canonical relative memory path using user timezone", () => { + const relativePath = resolveMemoryFlushRelativePathForRun({ + cfg, + nowMs: Date.UTC(2026, 1, 16, 15, 0, 0), + }); + + expect(relativePath).toBe("memory/2026-02-16.md"); + }); }); describe("DEFAULT_MEMORY_FLUSH_PROMPT", () => { diff --git a/src/auto-reply/reply/memory-flush.ts b/src/auto-reply/reply/memory-flush.ts index c02fad5eca0..95f6dbaa053 100644 --- a/src/auto-reply/reply/memory-flush.ts +++ b/src/auto-reply/reply/memory-flush.ts @@ -10,10 +10,23 @@ import { SILENT_REPLY_TOKEN } from "../tokens.js"; export const DEFAULT_MEMORY_FLUSH_SOFT_TOKENS = 4000; export const DEFAULT_MEMORY_FLUSH_FORCE_TRANSCRIPT_BYTES = 2 * 1024 * 1024; +const MEMORY_FLUSH_TARGET_HINT = + "Store durable memories only in memory/YYYY-MM-DD.md (create memory/ if needed)."; +const MEMORY_FLUSH_APPEND_ONLY_HINT = + "If memory/YYYY-MM-DD.md already exists, APPEND new content only and do not overwrite existing entries."; +const MEMORY_FLUSH_READ_ONLY_HINT = + "Treat workspace bootstrap/reference files such as MEMORY.md, SOUL.md, TOOLS.md, and AGENTS.md as read-only during this flush; never overwrite, replace, or edit them."; +const MEMORY_FLUSH_REQUIRED_HINTS = [ + MEMORY_FLUSH_TARGET_HINT, + MEMORY_FLUSH_APPEND_ONLY_HINT, + MEMORY_FLUSH_READ_ONLY_HINT, +]; + export const DEFAULT_MEMORY_FLUSH_PROMPT = [ "Pre-compaction memory flush.", - "Store durable memories now (use memory/YYYY-MM-DD.md; create memory/ if needed).", - "IMPORTANT: If the file already exists, APPEND new content only — do not overwrite existing entries.", + MEMORY_FLUSH_TARGET_HINT, + MEMORY_FLUSH_READ_ONLY_HINT, + MEMORY_FLUSH_APPEND_ONLY_HINT, "Do NOT create timestamped variant files (e.g., YYYY-MM-DD-HHMM.md); always use the canonical YYYY-MM-DD.md filename.", `If nothing to store, reply with ${SILENT_REPLY_TOKEN}.`, ].join(" "); @@ -21,6 +34,9 @@ export const DEFAULT_MEMORY_FLUSH_PROMPT = [ export const 
DEFAULT_MEMORY_FLUSH_SYSTEM_PROMPT = [ "Pre-compaction memory flush turn.", "The session is near auto-compaction; capture durable memories to disk.", + MEMORY_FLUSH_TARGET_HINT, + MEMORY_FLUSH_READ_ONLY_HINT, + MEMORY_FLUSH_APPEND_ONLY_HINT, `You may reply, but usually ${SILENT_REPLY_TOKEN} is correct.`, ].join(" "); @@ -40,14 +56,29 @@ function formatDateStampInTimezone(nowMs: number, timezone: string): string { return new Date(nowMs).toISOString().slice(0, 10); } +export function resolveMemoryFlushRelativePathForRun(params: { + cfg?: OpenClawConfig; + nowMs?: number; +}): string { + const nowMs = Number.isFinite(params.nowMs) ? (params.nowMs as number) : Date.now(); + const { userTimezone } = resolveCronStyleNow(params.cfg ?? {}, nowMs); + const dateStamp = formatDateStampInTimezone(nowMs, userTimezone); + return `memory/${dateStamp}.md`; +} + export function resolveMemoryFlushPromptForRun(params: { prompt: string; cfg?: OpenClawConfig; nowMs?: number; }): string { const nowMs = Number.isFinite(params.nowMs) ? (params.nowMs as number) : Date.now(); - const { userTimezone, timeLine } = resolveCronStyleNow(params.cfg ?? {}, nowMs); - const dateStamp = formatDateStampInTimezone(nowMs, userTimezone); + const { timeLine } = resolveCronStyleNow(params.cfg ?? {}, nowMs); + const dateStamp = resolveMemoryFlushRelativePathForRun({ + cfg: params.cfg, + nowMs, + }) + .replace(/^memory\//, "") + .replace(/\.md$/, ""); const withDate = params.prompt.replaceAll("YYYY-MM-DD", dateStamp).trimEnd(); if (!withDate) { return timeLine; @@ -90,8 +121,12 @@ export function resolveMemoryFlushSettings(cfg?: OpenClawConfig): MemoryFlushSet const forceFlushTranscriptBytes = parseNonNegativeByteSize(defaults?.forceFlushTranscriptBytes) ?? 
DEFAULT_MEMORY_FLUSH_FORCE_TRANSCRIPT_BYTES; - const prompt = defaults?.prompt?.trim() || DEFAULT_MEMORY_FLUSH_PROMPT; - const systemPrompt = defaults?.systemPrompt?.trim() || DEFAULT_MEMORY_FLUSH_SYSTEM_PROMPT; + const prompt = ensureMemoryFlushSafetyHints( + defaults?.prompt?.trim() || DEFAULT_MEMORY_FLUSH_PROMPT, + ); + const systemPrompt = ensureMemoryFlushSafetyHints( + defaults?.systemPrompt?.trim() || DEFAULT_MEMORY_FLUSH_SYSTEM_PROMPT, + ); const reserveTokensFloor = normalizeNonNegativeInt(cfg?.agents?.defaults?.compaction?.reserveTokensFloor) ?? DEFAULT_PI_COMPACTION_RESERVE_TOKENS_FLOOR; @@ -113,6 +148,16 @@ function ensureNoReplyHint(text: string): string { return `${text}\n\nIf no user-visible reply is needed, start with ${SILENT_REPLY_TOKEN}.`; } +function ensureMemoryFlushSafetyHints(text: string): string { + let next = text.trim(); + for (const hint of MEMORY_FLUSH_REQUIRED_HINTS) { + if (!next.includes(hint)) { + next = next ? `${next}\n\n${hint}` : hint; + } + } + return next; +} + export function resolveMemoryFlushContextWindowTokens(params: { modelId?: string; agentCfgContextTokens?: number; diff --git a/src/auto-reply/reply/model-selection.ts b/src/auto-reply/reply/model-selection.ts index 1b666b6ded5..33132e1f477 100644 --- a/src/auto-reply/reply/model-selection.ts +++ b/src/auto-reply/reply/model-selection.ts @@ -263,6 +263,7 @@ function scoreFuzzyMatch(params: { export async function createModelSelectionState(params: { cfg: OpenClawConfig; + agentId?: string; agentCfg: NonNullable["defaults"]> | undefined; sessionEntry?: SessionEntry; sessionStore?: Record; @@ -315,6 +316,7 @@ export async function createModelSelectionState(params: { catalog: modelCatalog, defaultProvider, defaultModel, + agentId: params.agentId, }); allowedModelCatalog = allowed.allowedCatalog; allowedModelKeys = allowed.allowedKeys; @@ -363,7 +365,7 @@ export async function createModelSelectionState(params: { } if (sessionEntry && sessionStore && sessionKey && 
sessionEntry.authProfileOverride) { - const { ensureAuthProfileStore } = await import("../../agents/auth-profiles.js"); + const { ensureAuthProfileStore } = await import("../../agents/auth-profiles.runtime.js"); const store = ensureAuthProfileStore(undefined, { allowKeychainPrompt: false, }); diff --git a/src/auto-reply/reply/normalize-reply.ts b/src/auto-reply/reply/normalize-reply.ts index 9aafb66bd34..793cbcc326f 100644 --- a/src/auto-reply/reply/normalize-reply.ts +++ b/src/auto-reply/reply/normalize-reply.ts @@ -12,11 +12,13 @@ import { resolveResponsePrefixTemplate, type ResponsePrefixContext, } from "./response-prefix-template.js"; +import { hasSlackDirectives, parseSlackDirectives } from "./slack-directives.js"; export type NormalizeReplySkipReason = "empty" | "silent" | "heartbeat"; export type NormalizeReplyOptions = { responsePrefix?: string; + enableSlackInteractiveReplies?: boolean; /** Context for template variable interpolation in responsePrefix */ responsePrefixContext?: ResponsePrefixContext; onHeartbeatStrip?: () => void; @@ -105,5 +107,10 @@ export function normalizeReplyPayload( text = `${effectivePrefix} ${text}`; } - return { ...enrichedPayload, text }; + enrichedPayload = { ...enrichedPayload, text }; + if (opts.enableSlackInteractiveReplies && text && hasSlackDirectives(text)) { + enrichedPayload = parseSlackDirectives(enrichedPayload); + } + + return enrichedPayload; } diff --git a/src/auto-reply/reply/post-compaction-context.test.ts b/src/auto-reply/reply/post-compaction-context.test.ts index 0c97df4d50b..3af8bceab00 100644 --- a/src/auto-reply/reply/post-compaction-context.test.ts +++ b/src/auto-reply/reply/post-compaction-context.test.ts @@ -15,6 +15,28 @@ describe("readPostCompactionContext", () => { fs.rmSync(tmpDir, { recursive: true, force: true }); }); + async function expectLegacySectionFallback( + postCompactionSections: string[], + expectDefaultProse = false, + ) { + const content = `## Every Session\n\nDo startup things.\n\n## 
Safety\n\nBe safe.\n`; + fs.writeFileSync(path.join(tmpDir, "AGENTS.md"), content); + const cfg = { + agents: { + defaults: { + compaction: { postCompactionSections }, + }, + }, + } as OpenClawConfig; + const result = await readPostCompactionContext(tmpDir, cfg); + expect(result).not.toBeNull(); + expect(result).toContain("Do startup things"); + expect(result).toContain("Be safe"); + if (expectDefaultProse) { + expect(result).toContain("Run your Session Startup sequence"); + } + } + it("returns null when no AGENTS.md exists", async () => { const result = await readPostCompactionContext(tmpDir); expect(result).toBeNull(); @@ -332,43 +354,18 @@ Read WORKFLOW.md on startup. fs.writeFileSync(path.join(tmpDir, "AGENTS.md"), content); const result = await readPostCompactionContext(tmpDir); expect(result).not.toBeNull(); - expect(result).toContain("Execute your Session Startup sequence now"); + expect(result).toContain("Run your Session Startup sequence"); }); it("falls back to legacy sections when defaults are explicitly configured", async () => { // Older AGENTS.md templates use "Every Session" / "Safety" instead of // "Session Startup" / "Red Lines". Explicitly setting the defaults should // still trigger the legacy fallback — same behavior as leaving the field unset. 
- const content = `## Every Session\n\nDo startup things.\n\n## Safety\n\nBe safe.\n`; - fs.writeFileSync(path.join(tmpDir, "AGENTS.md"), content); - const cfg = { - agents: { - defaults: { - compaction: { postCompactionSections: ["Session Startup", "Red Lines"] }, - }, - }, - } as OpenClawConfig; - const result = await readPostCompactionContext(tmpDir, cfg); - expect(result).not.toBeNull(); - expect(result).toContain("Do startup things"); - expect(result).toContain("Be safe"); + await expectLegacySectionFallback(["Session Startup", "Red Lines"]); }); it("falls back to legacy sections when default sections are configured in a different order", async () => { - const content = `## Every Session\n\nDo startup things.\n\n## Safety\n\nBe safe.\n`; - fs.writeFileSync(path.join(tmpDir, "AGENTS.md"), content); - const cfg = { - agents: { - defaults: { - compaction: { postCompactionSections: ["Red Lines", "Session Startup"] }, - }, - }, - } as OpenClawConfig; - const result = await readPostCompactionContext(tmpDir, cfg); - expect(result).not.toBeNull(); - expect(result).toContain("Do startup things"); - expect(result).toContain("Be safe"); - expect(result).toContain("Execute your Session Startup sequence now"); + await expectLegacySectionFallback(["Red Lines", "Session Startup"], true); }); it("custom section names are matched case-insensitively", async () => { diff --git a/src/auto-reply/reply/post-compaction-context.ts b/src/auto-reply/reply/post-compaction-context.ts index 316ac3c29b1..791c1a91a19 100644 --- a/src/auto-reply/reply/post-compaction-context.ts +++ b/src/auto-reply/reply/post-compaction-context.ts @@ -136,7 +136,7 @@ export async function readPostCompactionContext( // would be misleading for deployments that use different section names. const prose = isDefaultSections ? "Session was just compacted. The conversation summary above is a hint, NOT a substitute for your startup sequence. 
" + - "Execute your Session Startup sequence now — read the required files before responding to the user." + "Run your Session Startup sequence — read the required files before responding to the user." : `Session was just compacted. The conversation summary above is a hint, NOT a substitute for your full startup sequence. ` + `Re-read the sections injected below (${displayNames.join(", ")}) and follow your configured startup procedure before responding to the user.`; diff --git a/src/auto-reply/reply/queue/drain.ts b/src/auto-reply/reply/queue/drain.ts index e8e93b3dd6d..1e2fb33e4e0 100644 --- a/src/auto-reply/reply/queue/drain.ts +++ b/src/auto-reply/reply/queue/drain.ts @@ -1,4 +1,5 @@ import { defaultRuntime } from "../../../runtime.js"; +import { resolveGlobalMap } from "../../../shared/global-singleton.js"; import { buildCollectPrompt, beginQueueDrain, @@ -15,7 +16,11 @@ import type { FollowupRun } from "./types.js"; // Persists the most recent runFollowup callback per queue key so that // enqueueFollowupRun can restart a drain that finished and deleted the queue. 
-const FOLLOWUP_RUN_CALLBACKS = new Map Promise>(); +const FOLLOWUP_DRAIN_CALLBACKS_KEY = Symbol.for("openclaw.followupDrainCallbacks"); + +const FOLLOWUP_RUN_CALLBACKS = resolveGlobalMap Promise>( + FOLLOWUP_DRAIN_CALLBACKS_KEY, +); export function clearFollowupDrainCallback(key: string): void { FOLLOWUP_RUN_CALLBACKS.delete(key); diff --git a/src/auto-reply/reply/queue/enqueue.ts b/src/auto-reply/reply/queue/enqueue.ts index 7743048a77b..11da0db98fc 100644 --- a/src/auto-reply/reply/queue/enqueue.ts +++ b/src/auto-reply/reply/queue/enqueue.ts @@ -1,13 +1,22 @@ import { createDedupeCache } from "../../../infra/dedupe.js"; +import { resolveGlobalSingleton } from "../../../shared/global-singleton.js"; import { applyQueueDropPolicy, shouldSkipQueueItem } from "../../../utils/queue-helpers.js"; import { kickFollowupDrainIfIdle } from "./drain.js"; import { getExistingFollowupQueue, getFollowupQueue } from "./state.js"; import type { FollowupRun, QueueDedupeMode, QueueSettings } from "./types.js"; -const RECENT_QUEUE_MESSAGE_IDS = createDedupeCache({ - ttlMs: 5 * 60 * 1000, - maxSize: 10_000, -}); +/** + * Keep queued message-id dedupe shared across bundled chunks so redeliveries + * are rejected no matter which chunk receives the enqueue call. 
+ */ +const RECENT_QUEUE_MESSAGE_IDS_KEY = Symbol.for("openclaw.recentQueueMessageIds"); + +const RECENT_QUEUE_MESSAGE_IDS = resolveGlobalSingleton(RECENT_QUEUE_MESSAGE_IDS_KEY, () => + createDedupeCache({ + ttlMs: 5 * 60 * 1000, + maxSize: 10_000, + }), +); function buildRecentMessageIdKey(run: FollowupRun, queueKey: string): string | undefined { const messageId = run.messageId?.trim(); diff --git a/src/auto-reply/reply/queue/state.ts b/src/auto-reply/reply/queue/state.ts index 73f7ed946bc..44208e727dd 100644 --- a/src/auto-reply/reply/queue/state.ts +++ b/src/auto-reply/reply/queue/state.ts @@ -1,3 +1,4 @@ +import { resolveGlobalMap } from "../../../shared/global-singleton.js"; import { applyQueueRuntimeSettings } from "../../../utils/queue-helpers.js"; import type { FollowupRun, QueueDropPolicy, QueueMode, QueueSettings } from "./types.js"; @@ -18,7 +19,13 @@ export const DEFAULT_QUEUE_DEBOUNCE_MS = 1000; export const DEFAULT_QUEUE_CAP = 20; export const DEFAULT_QUEUE_DROP: QueueDropPolicy = "summarize"; -export const FOLLOWUP_QUEUES = new Map(); +/** + * Share followup queues across bundled chunks so busy-session enqueue/drain + * logic observes one queue registry per process. 
+ */ +const FOLLOWUP_QUEUES_KEY = Symbol.for("openclaw.followupQueues"); + +export const FOLLOWUP_QUEUES = resolveGlobalMap(FOLLOWUP_QUEUES_KEY); export function getExistingFollowupQueue(key: string): FollowupQueueState | undefined { const cleaned = key.trim(); diff --git a/src/auto-reply/reply/reply-delivery.ts b/src/auto-reply/reply/reply-delivery.ts index acf04e73a3e..cacd6b083cb 100644 --- a/src/auto-reply/reply/reply-delivery.ts +++ b/src/auto-reply/reply/reply-delivery.ts @@ -2,7 +2,7 @@ import { logVerbose } from "../../globals.js"; import { SILENT_REPLY_TOKEN } from "../tokens.js"; import type { BlockReplyContext, ReplyPayload } from "../types.js"; import type { BlockReplyPipeline } from "./block-reply-pipeline.js"; -import { createBlockReplyPayloadKey } from "./block-reply-pipeline.js"; +import { createBlockReplyContentKey } from "./block-reply-pipeline.js"; import { parseReplyDirectives } from "./reply-directives.js"; import { applyReplyTagsToPayload, isRenderablePayload } from "./reply-payloads.js"; import type { TypingSignaler } from "./typing-mode.js"; @@ -128,7 +128,7 @@ export function createBlockReplyDeliveryHandler(params: { } else if (params.blockStreamingEnabled) { // Send directly when flushing before tool execution (no pipeline but streaming enabled). // Track sent key to avoid duplicate in final payloads. - params.directlySentBlockKeys.add(createBlockReplyPayloadKey(blockPayload)); + params.directlySentBlockKeys.add(createBlockReplyContentKey(blockPayload)); await params.onBlockReply(blockPayload); } // When streaming is disabled entirely, blocks are accumulated in final text instead. 
diff --git a/src/auto-reply/reply/reply-dispatcher.ts b/src/auto-reply/reply/reply-dispatcher.ts index 7272a3081a2..d212245ef59 100644 --- a/src/auto-reply/reply/reply-dispatcher.ts +++ b/src/auto-reply/reply/reply-dispatcher.ts @@ -43,6 +43,7 @@ function getHumanDelay(config: HumanDelayConfig | undefined): number { export type ReplyDispatcherOptions = { deliver: ReplyDispatchDeliverer; responsePrefix?: string; + enableSlackInteractiveReplies?: boolean; /** Static context for response prefix template interpolation. */ responsePrefixContext?: ResponsePrefixContext; /** Dynamic context provider for response prefix template interpolation. @@ -84,7 +85,11 @@ export type ReplyDispatcher = { type NormalizeReplyPayloadInternalOptions = Pick< ReplyDispatcherOptions, - "responsePrefix" | "responsePrefixContext" | "responsePrefixContextProvider" | "onHeartbeatStrip" + | "responsePrefix" + | "enableSlackInteractiveReplies" + | "responsePrefixContext" + | "responsePrefixContextProvider" + | "onHeartbeatStrip" > & { onSkip?: (reason: NormalizeReplySkipReason) => void; }; @@ -98,6 +103,7 @@ function normalizeReplyPayloadInternal( return normalizeReplyPayload(payload, { responsePrefix: opts.responsePrefix, + enableSlackInteractiveReplies: opts.enableSlackInteractiveReplies, responsePrefixContext: prefixContext, onHeartbeatStrip: opts.onHeartbeatStrip, onSkip: opts.onSkip, @@ -129,6 +135,7 @@ export function createReplyDispatcher(options: ReplyDispatcherOptions): ReplyDis const enqueue = (kind: ReplyDispatchKind, payload: ReplyPayload) => { const normalized = normalizeReplyPayloadInternal(payload, { responsePrefix: options.responsePrefix, + enableSlackInteractiveReplies: options.enableSlackInteractiveReplies, responsePrefixContext: options.responsePrefixContext, responsePrefixContextProvider: options.responsePrefixContextProvider, onHeartbeatStrip: options.onHeartbeatStrip, diff --git a/src/auto-reply/reply/reply-elevated.test.ts b/src/auto-reply/reply/reply-elevated.test.ts index 
74fba60acf7..28259c34638 100644 --- a/src/auto-reply/reply/reply-elevated.test.ts +++ b/src/auto-reply/reply/reply-elevated.test.ts @@ -27,68 +27,65 @@ function buildContext(overrides?: Partial): MsgContext { } as MsgContext; } +function expectAllowFromDecision(params: { + allowFrom: string[]; + ctx?: Partial; + allowed: boolean; +}) { + const result = resolveElevatedPermissions({ + cfg: buildConfig(params.allowFrom), + agentId: "main", + provider: "whatsapp", + ctx: buildContext(params.ctx), + }); + + expect(result.enabled).toBe(true); + expect(result.allowed).toBe(params.allowed); + if (params.allowed) { + expect(result.failures).toHaveLength(0); + return; + } + + expect(result.failures).toContainEqual({ + gate: "allowFrom", + key: "tools.elevated.allowFrom.whatsapp", + }); +} + describe("resolveElevatedPermissions", () => { it("authorizes when sender matches allowFrom", () => { - const result = resolveElevatedPermissions({ - cfg: buildConfig(["+15550001111"]), - agentId: "main", - provider: "whatsapp", - ctx: buildContext(), + expectAllowFromDecision({ + allowFrom: ["+15550001111"], + allowed: true, }); - - expect(result.enabled).toBe(true); - expect(result.allowed).toBe(true); - expect(result.failures).toHaveLength(0); }); it("does not authorize when only recipient matches allowFrom", () => { - const result = resolveElevatedPermissions({ - cfg: buildConfig(["+15559990000"]), - agentId: "main", - provider: "whatsapp", - ctx: buildContext(), - }); - - expect(result.enabled).toBe(true); - expect(result.allowed).toBe(false); - expect(result.failures).toContainEqual({ - gate: "allowFrom", - key: "tools.elevated.allowFrom.whatsapp", + expectAllowFromDecision({ + allowFrom: ["+15559990000"], + allowed: false, }); }); it("does not authorize untyped mutable sender fields", () => { - const result = resolveElevatedPermissions({ - cfg: buildConfig(["owner-display-name"]), - agentId: "main", - provider: "whatsapp", - ctx: buildContext({ + expectAllowFromDecision({ + 
allowFrom: ["owner-display-name"], + allowed: false, + ctx: { SenderName: "owner-display-name", SenderUsername: "owner-display-name", SenderTag: "owner-display-name", - }), - }); - - expect(result.enabled).toBe(true); - expect(result.allowed).toBe(false); - expect(result.failures).toContainEqual({ - gate: "allowFrom", - key: "tools.elevated.allowFrom.whatsapp", + }, }); }); it("authorizes mutable sender fields only with explicit prefix", () => { - const result = resolveElevatedPermissions({ - cfg: buildConfig(["username:owner_username"]), - agentId: "main", - provider: "whatsapp", - ctx: buildContext({ + expectAllowFromDecision({ + allowFrom: ["username:owner_username"], + allowed: true, + ctx: { SenderUsername: "owner_username", - }), + }, }); - - expect(result.enabled).toBe(true); - expect(result.allowed).toBe(true); - expect(result.failures).toHaveLength(0); }); }); diff --git a/src/auto-reply/reply/reply-flow.test.ts b/src/auto-reply/reply/reply-flow.test.ts index 575ac7f1780..d7efa640b1c 100644 --- a/src/auto-reply/reply/reply-flow.test.ts +++ b/src/auto-reply/reply/reply-flow.test.ts @@ -1,4 +1,5 @@ import { afterAll, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import { importFreshModule } from "../../../test/helpers/import-fresh.js"; import { expectInboundContextContract } from "../../../test/helpers/inbound-contract.js"; import type { OpenClawConfig } from "../../config/config.js"; import { defaultRuntime } from "../../runtime.js"; @@ -15,6 +16,7 @@ import { } from "./queue.js"; import { createReplyDispatcher } from "./reply-dispatcher.js"; import { createReplyToModeFilter, resolveReplyToMode } from "./reply-threading.js"; +import { parseSlackDirectives, hasSlackDirectives } from "./slack-directives.js"; describe("normalizeInboundTextNewlines", () => { it("normalizes real newlines and preserves literal backslash-n sequences", () => { @@ -195,6 +197,8 @@ describe("inbound context contract (providers + extensions)", () => { const 
getLineData = (result: ReturnType) => (result.channelData?.line as Record | undefined) ?? {}; +const getSlackData = (result: ReturnType) => + (result.channelData?.slack as Record | undefined) ?? {}; describe("hasLineDirectives", () => { it("matches expected detection across directive patterns", () => { @@ -218,6 +222,24 @@ describe("hasLineDirectives", () => { }); }); +describe("hasSlackDirectives", () => { + it("matches expected detection across Slack directive patterns", () => { + const cases: Array<{ text: string; expected: boolean }> = [ + { text: "Pick one [[slack_buttons: Approve:approve, Reject:reject]]", expected: true }, + { + text: "[[slack_select: Choose a project | Alpha:alpha, Beta:beta]]", + expected: true, + }, + { text: "Just regular text", expected: false }, + { text: "[[buttons: Menu | Choose | A:a]]", expected: false }, + ]; + + for (const testCase of cases) { + expect(hasSlackDirectives(testCase.text)).toBe(testCase.expected); + } + }); +}); + describe("parseLineDirectives", () => { describe("quick_replies", () => { it("parses quick replies variants", () => { @@ -578,6 +600,279 @@ describe("parseLineDirectives", () => { }); }); +describe("parseSlackDirectives", () => { + it("builds section and button blocks from slack_buttons directives", () => { + const result = parseSlackDirectives({ + text: "Choose an action [[slack_buttons: Approve:approve, Reject:reject]]", + }); + + expect(result.text).toBe("Choose an action"); + expect(getSlackData(result).blocks).toEqual([ + { + type: "section", + text: { + type: "mrkdwn", + text: "Choose an action", + }, + }, + { + type: "actions", + block_id: "openclaw_reply_buttons_1", + elements: [ + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Approve", + emoji: true, + }, + value: "reply_1_approve", + }, + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Reject", + emoji: true, + }, + value: "reply_2_reject", + 
}, + ], + }, + ]); + }); + + it("builds static select blocks from slack_select directives", () => { + const result = parseSlackDirectives({ + text: "[[slack_select: Choose a project | Alpha:alpha, Beta:beta]]", + }); + + expect(result.text).toBeUndefined(); + expect(getSlackData(result).blocks).toEqual([ + { + type: "actions", + block_id: "openclaw_reply_select_1", + elements: [ + { + type: "static_select", + action_id: "openclaw:reply_select", + placeholder: { + type: "plain_text", + text: "Choose a project", + emoji: true, + }, + options: [ + { + text: { + type: "plain_text", + text: "Alpha", + emoji: true, + }, + value: "reply_1_alpha", + }, + { + text: { + type: "plain_text", + text: "Beta", + emoji: true, + }, + value: "reply_2_beta", + }, + ], + }, + ], + }, + ]); + }); + + it("appends Slack interactive blocks to existing slack blocks", () => { + const result = parseSlackDirectives({ + text: "Act now [[slack_buttons: Retry:retry]]", + channelData: { + slack: { + blocks: [{ type: "divider" }], + }, + }, + }); + + expect(result.text).toBe("Act now"); + expect(getSlackData(result).blocks).toEqual([ + { type: "divider" }, + { + type: "section", + text: { + type: "mrkdwn", + text: "Act now", + }, + }, + { + type: "actions", + block_id: "openclaw_reply_buttons_1", + elements: [ + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Retry", + emoji: true, + }, + value: "reply_1_retry", + }, + ], + }, + ]); + }); + + it("preserves authored order for mixed Slack directives", () => { + const result = parseSlackDirectives({ + text: "[[slack_select: Pick one | Alpha:alpha]] then [[slack_buttons: Retry:retry]]", + }); + + expect(getSlackData(result).blocks).toEqual([ + { + type: "actions", + block_id: "openclaw_reply_select_1", + elements: [ + { + type: "static_select", + action_id: "openclaw:reply_select", + placeholder: { + type: "plain_text", + text: "Pick one", + emoji: true, + }, + options: [ + { + text: { + type: 
"plain_text", + text: "Alpha", + emoji: true, + }, + value: "reply_1_alpha", + }, + ], + }, + ], + }, + { + type: "section", + text: { + type: "mrkdwn", + text: "then", + }, + }, + { + type: "actions", + block_id: "openclaw_reply_buttons_1", + elements: [ + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Retry", + emoji: true, + }, + value: "reply_1_retry", + }, + ], + }, + ]); + }); + + it("truncates Slack interactive reply strings to safe Block Kit limits", () => { + const long = "x".repeat(120); + const result = parseSlackDirectives({ + text: `${"y".repeat(3100)} [[slack_select: ${long} | ${long}:${long}]] [[slack_buttons: ${long}:${long}]]`, + }); + + const blocks = getSlackData(result).blocks as Array>; + expect(blocks).toHaveLength(3); + expect(((blocks[0]?.text as { text?: string })?.text ?? "").length).toBeLessThanOrEqual(3000); + expect( + ( + ( + (blocks[1]?.elements as Array>)?.[0]?.placeholder as { + text?: string; + } + )?.text ?? "" + ).length, + ).toBeLessThanOrEqual(75); + expect( + ( + ( + ( + (blocks[1]?.elements as Array>)?.[0]?.options as Array< + Record + > + )?.[0]?.text as { text?: string } + )?.text ?? "" + ).length, + ).toBeLessThanOrEqual(75); + expect( + ( + (( + (blocks[1]?.elements as Array>)?.[0]?.options as Array< + Record + > + )?.[0]?.value as string | undefined) ?? "" + ).length, + ).toBeLessThanOrEqual(75); + expect( + ( + ( + (blocks[2]?.elements as Array>)?.[0]?.text as { + text?: string; + } + )?.text ?? "" + ).length, + ).toBeLessThanOrEqual(75); + expect( + ( + ((blocks[2]?.elements as Array>)?.[0]?.value as + | string + | undefined) ?? 
"" + ).length, + ).toBeLessThanOrEqual(75); + }); + + it("falls back to the original payload when generated blocks would exceed Slack limits", () => { + const result = parseSlackDirectives({ + text: "Choose [[slack_buttons: Retry:retry]]", + channelData: { + slack: { + blocks: Array.from({ length: 49 }, () => ({ type: "divider" })), + }, + }, + }); + + expect(result).toEqual({ + text: "Choose [[slack_buttons: Retry:retry]]", + channelData: { + slack: { + blocks: Array.from({ length: 49 }, () => ({ type: "divider" })), + }, + }, + }); + }); + + it("ignores malformed existing Slack blocks during directive compilation", () => { + expect(() => + parseSlackDirectives({ + text: "Choose [[slack_buttons: Retry:retry]]", + channelData: { + slack: { + blocks: "{not json}", + }, + }, + }), + ).not.toThrow(); + }); +}); + function createDeferred() { let resolve!: (value: T) => void; let reject!: (reason?: unknown) => void; @@ -743,6 +1038,71 @@ describe("followup queue deduplication", () => { expect(calls).toHaveLength(1); }); + it("deduplicates same message_id across distinct enqueue module instances", async () => { + const enqueueA = await importFreshModule( + import.meta.url, + "./queue/enqueue.js?scope=dedupe-a", + ); + const enqueueB = await importFreshModule( + import.meta.url, + "./queue/enqueue.js?scope=dedupe-b", + ); + const { clearSessionQueues } = await import("./queue.js"); + const key = `test-dedup-cross-module-${Date.now()}`; + const calls: FollowupRun[] = []; + const done = createDeferred(); + const runFollowup = async (run: FollowupRun) => { + calls.push(run); + done.resolve(); + }; + const settings: QueueSettings = { + mode: "collect", + debounceMs: 0, + cap: 50, + dropPolicy: "summarize", + }; + + enqueueA.resetRecentQueuedMessageIdDedupe(); + enqueueB.resetRecentQueuedMessageIdDedupe(); + + try { + expect( + enqueueA.enqueueFollowupRun( + key, + createRun({ + prompt: "first", + messageId: "same-id", + originatingChannel: "signal", + originatingTo: 
"+10000000000", + }), + settings, + ), + ).toBe(true); + + scheduleFollowupDrain(key, runFollowup); + await done.promise; + await new Promise((resolve) => setImmediate(resolve)); + + expect( + enqueueB.enqueueFollowupRun( + key, + createRun({ + prompt: "first-redelivery", + messageId: "same-id", + originatingChannel: "signal", + originatingTo: "+10000000000", + }), + settings, + ), + ).toBe(false); + expect(calls).toHaveLength(1); + } finally { + clearSessionQueues([key]); + enqueueA.resetRecentQueuedMessageIdDedupe(); + enqueueB.resetRecentQueuedMessageIdDedupe(); + } + }); + it("does not collide recent message-id keys when routing contains delimiters", async () => { const key = `test-dedup-key-collision-${Date.now()}`; const calls: FollowupRun[] = []; @@ -1264,6 +1624,55 @@ describe("followup queue drain restart after idle window", () => { expect(calls[1]?.prompt).toBe("after-idle"); }); + it("restarts an idle drain across distinct enqueue and drain module instances", async () => { + const drainA = await importFreshModule( + import.meta.url, + "./queue/drain.js?scope=restart-a", + ); + const enqueueB = await importFreshModule( + import.meta.url, + "./queue/enqueue.js?scope=restart-b", + ); + const { clearSessionQueues } = await import("./queue.js"); + const key = `test-idle-window-cross-module-${Date.now()}`; + const calls: FollowupRun[] = []; + const settings: QueueSettings = { mode: "followup", debounceMs: 0, cap: 50 }; + const firstProcessed = createDeferred(); + + enqueueB.resetRecentQueuedMessageIdDedupe(); + + try { + const runFollowup = async (run: FollowupRun) => { + calls.push(run); + if (calls.length === 1) { + firstProcessed.resolve(); + } + }; + + enqueueB.enqueueFollowupRun(key, createRun({ prompt: "before-idle" }), settings); + drainA.scheduleFollowupDrain(key, runFollowup); + await firstProcessed.promise; + + await new Promise((resolve) => setImmediate(resolve)); + + enqueueB.enqueueFollowupRun(key, createRun({ prompt: "after-idle" }), settings); + 
+ await vi.waitFor( + () => { + expect(calls).toHaveLength(2); + }, + { timeout: 1_000 }, + ); + + expect(calls[0]?.prompt).toBe("before-idle"); + expect(calls[1]?.prompt).toBe("after-idle"); + } finally { + clearSessionQueues([key]); + drainA.clearFollowupDrainCallback(key); + enqueueB.resetRecentQueuedMessageIdDedupe(); + } + }); + it("does not double-drain when a message arrives while drain is still running", async () => { const key = `test-no-double-drain-${Date.now()}`; const calls: FollowupRun[] = []; @@ -1370,6 +1779,43 @@ describe("createReplyDispatcher", () => { expect(onHeartbeatStrip).toHaveBeenCalledTimes(2); }); + it("compiles Slack directives in dispatcher flows when enabled", async () => { + const deliver = vi.fn().mockResolvedValue(undefined); + const dispatcher = createReplyDispatcher({ + deliver, + enableSlackInteractiveReplies: true, + }); + + expect( + dispatcher.sendFinalReply({ + text: "Choose [[slack_buttons: Retry:retry]]", + }), + ).toBe(true); + await dispatcher.waitForIdle(); + + expect(deliver).toHaveBeenCalledTimes(1); + expect(deliver.mock.calls[0]?.[0]).toMatchObject({ + text: "Choose", + channelData: { + slack: { + blocks: [ + { + type: "section", + text: { + type: "mrkdwn", + text: "Choose", + }, + }, + { + type: "actions", + block_id: "openclaw_reply_buttons_1", + }, + ], + }, + }, + }); + }); + it("avoids double-prefixing and keeps media when heartbeat is the only text", async () => { const deliver = vi.fn().mockResolvedValue(undefined); const dispatcher = createReplyDispatcher({ diff --git a/src/auto-reply/reply/reply-state.test.ts b/src/auto-reply/reply/reply-state.test.ts index 56623fe6cfa..69dbad531e7 100644 --- a/src/auto-reply/reply/reply-state.test.ts +++ b/src/auto-reply/reply/reply-state.test.ts @@ -203,6 +203,10 @@ describe("memory flush settings", () => { expect(settings?.forceFlushTranscriptBytes).toBe(DEFAULT_MEMORY_FLUSH_FORCE_TRANSCRIPT_BYTES); expect(settings?.prompt.length).toBeGreaterThan(0); 
expect(settings?.systemPrompt.length).toBeGreaterThan(0); + expect(settings?.prompt).toContain("memory/YYYY-MM-DD.md"); + expect(settings?.prompt).toContain("MEMORY.md"); + expect(settings?.systemPrompt).toContain("memory/YYYY-MM-DD.md"); + expect(settings?.systemPrompt).toContain("MEMORY.md"); }); it("respects disable flag", () => { @@ -230,6 +234,10 @@ describe("memory flush settings", () => { }); expect(settings?.prompt).toContain("NO_REPLY"); expect(settings?.systemPrompt).toContain("NO_REPLY"); + expect(settings?.prompt).toContain("memory/YYYY-MM-DD.md"); + expect(settings?.prompt).toContain("MEMORY.md"); + expect(settings?.systemPrompt).toContain("memory/YYYY-MM-DD.md"); + expect(settings?.systemPrompt).toContain("MEMORY.md"); }); it("falls back to defaults when numeric values are invalid", () => { diff --git a/src/auto-reply/reply/reply-utils.test.ts b/src/auto-reply/reply/reply-utils.test.ts index c1e76e50403..88f092bf1e5 100644 --- a/src/auto-reply/reply/reply-utils.test.ts +++ b/src/auto-reply/reply/reply-utils.test.ts @@ -150,6 +150,67 @@ describe("normalizeReplyPayload", () => { expect(result!.text).toBe(""); expect(result!.mediaUrl).toBe("https://example.com/img.png"); }); + + it("does not compile Slack directives unless interactive replies are enabled", () => { + const result = normalizeReplyPayload({ + text: "hello [[slack_buttons: Retry:retry, Ignore:ignore]]", + }); + + expect(result).not.toBeNull(); + expect(result!.text).toBe("hello [[slack_buttons: Retry:retry, Ignore:ignore]]"); + expect(result!.channelData).toBeUndefined(); + }); + + it("applies responsePrefix before compiling Slack directives into blocks", () => { + const result = normalizeReplyPayload( + { + text: "hello [[slack_buttons: Retry:retry, Ignore:ignore]]", + }, + { responsePrefix: "[bot]", enableSlackInteractiveReplies: true }, + ); + + expect(result).not.toBeNull(); + expect(result!.text).toBe("[bot] hello"); + expect(result!.channelData).toEqual({ + slack: { + blocks: [ + { + 
type: "section", + text: { + type: "mrkdwn", + text: "[bot] hello", + }, + }, + { + type: "actions", + block_id: "openclaw_reply_buttons_1", + elements: [ + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Retry", + emoji: true, + }, + value: "reply_1_retry", + }, + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Ignore", + emoji: true, + }, + value: "reply_2_ignore", + }, + ], + }, + ], + }, + }); + }); }); describe("typing controller", () => { diff --git a/src/auto-reply/reply/route-reply.test.ts b/src/auto-reply/reply/route-reply.test.ts index 9b5d432149a..bfae51e63c2 100644 --- a/src/auto-reply/reply/route-reply.test.ts +++ b/src/auto-reply/reply/route-reply.test.ts @@ -1,4 +1,5 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { mattermostPlugin } from "../../../extensions/mattermost/src/channel.js"; import { discordOutbound } from "../../channels/plugins/outbound/discord.js"; import { imessageOutbound } from "../../channels/plugins/outbound/imessage.js"; import { signalOutbound } from "../../channels/plugins/outbound/signal.js"; @@ -24,6 +25,7 @@ const mocks = vi.hoisted(() => ({ sendMessageSlack: vi.fn(async () => ({ messageId: "m1", channelId: "c1" })), sendMessageTelegram: vi.fn(async () => ({ messageId: "m1", chatId: "c1" })), sendMessageWhatsApp: vi.fn(async () => ({ messageId: "m1", toJid: "jid" })), + sendMessageMattermost: vi.fn(async () => ({ messageId: "m1", channelId: "c1" })), deliverOutboundPayloads: vi.fn(), })); @@ -46,6 +48,9 @@ vi.mock("../../web/outbound.js", () => ({ sendMessageWhatsApp: mocks.sendMessageWhatsApp, sendPollWhatsApp: mocks.sendMessageWhatsApp, })); +vi.mock("../../../extensions/mattermost/src/mattermost/send.js", () => ({ + sendMessageMattermost: mocks.sendMessageMattermost, +})); vi.mock("../../infra/outbound/deliver.js", async () => { const actual = await vi.importActual( 
"../../infra/outbound/deliver.js", @@ -105,6 +110,23 @@ const createMSTeamsPlugin = (params: { outbound: ChannelOutboundAdapter }): Chan outbound: params.outbound, }); +async function expectSlackNoSend( + payload: Parameters[0]["payload"], + overrides: Partial[0]> = {}, +) { + mocks.sendMessageSlack.mockClear(); + const res = await routeReply({ + payload, + channel: "slack", + to: "channel:C123", + cfg: {} as never, + ...overrides, + }); + expect(res.ok).toBe(true); + expect(mocks.sendMessageSlack).not.toHaveBeenCalled(); + return res; +} + describe("routeReply", () => { beforeEach(() => { setActivePluginRegistry(defaultRegistry); @@ -132,39 +154,15 @@ describe("routeReply", () => { }); it("no-ops on empty payload", async () => { - mocks.sendMessageSlack.mockClear(); - const res = await routeReply({ - payload: {}, - channel: "slack", - to: "channel:C123", - cfg: {} as never, - }); - expect(res.ok).toBe(true); - expect(mocks.sendMessageSlack).not.toHaveBeenCalled(); + await expectSlackNoSend({}); }); it("suppresses reasoning payloads", async () => { - mocks.sendMessageSlack.mockClear(); - const res = await routeReply({ - payload: { text: "Reasoning:\n_step_", isReasoning: true }, - channel: "slack", - to: "channel:C123", - cfg: {} as never, - }); - expect(res.ok).toBe(true); - expect(mocks.sendMessageSlack).not.toHaveBeenCalled(); + await expectSlackNoSend({ text: "Reasoning:\n_step_", isReasoning: true }); }); it("drops silent token payloads", async () => { - mocks.sendMessageSlack.mockClear(); - const res = await routeReply({ - payload: { text: SILENT_REPLY_TOKEN }, - channel: "slack", - to: "channel:C123", - cfg: {} as never, - }); - expect(res.ok).toBe(true); - expect(mocks.sendMessageSlack).not.toHaveBeenCalled(); + await expectSlackNoSend({ text: SILENT_REPLY_TOKEN }); }); it("does not drop payloads that merely start with the silent token", async () => { @@ -201,6 +199,46 @@ describe("routeReply", () => { ); }); + it("routes directive-only Slack replies when 
interactive replies are enabled", async () => { + mocks.sendMessageSlack.mockClear(); + const cfg = { + channels: { + slack: { + capabilities: { interactiveReplies: true }, + }, + }, + } as unknown as OpenClawConfig; + await routeReply({ + payload: { text: "[[slack_select: Choose one | Alpha:alpha]]" }, + channel: "slack", + to: "channel:C123", + cfg, + }); + expect(mocks.sendMessageSlack).toHaveBeenCalledWith( + "channel:C123", + "", + expect.objectContaining({ + blocks: [ + expect.objectContaining({ + type: "actions", + block_id: "openclaw_reply_select_1", + }), + ], + }), + ); + }); + + it("does not bypass the empty-reply guard for invalid Slack blocks", async () => { + await expectSlackNoSend({ + text: " ", + channelData: { + slack: { + blocks: " ", + }, + }, + }); + }); + it("does not derive responsePrefix from agent identity when routing", async () => { mocks.sendMessageSlack.mockClear(); const cfg = { @@ -302,6 +340,33 @@ describe("routeReply", () => { ); }); + it("uses threadId as replyToId for Mattermost when replyToId is missing", async () => { + mocks.deliverOutboundPayloads.mockResolvedValue([]); + await routeReply({ + payload: { text: "hi" }, + channel: "mattermost", + to: "channel:CHAN1", + threadId: "post-root", + cfg: { + channels: { + mattermost: { + enabled: true, + botToken: "test-token", + baseUrl: "https://chat.example.com", + }, + }, + } as unknown as OpenClawConfig, + }); + expect(mocks.deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ + channel: "mattermost", + to: "channel:CHAN1", + replyToId: "post-root", + threadId: "post-root", + }), + ); + }); + it("sends multiple mediaUrls (caption only on first)", async () => { mocks.sendMessageSlack.mockClear(); await routeReply({ @@ -468,4 +533,9 @@ const defaultRegistry = createTestRegistry([ }), source: "test", }, + { + pluginId: "mattermost", + plugin: mattermostPlugin, + source: "test", + }, ]); diff --git a/src/auto-reply/reply/route-reply.ts 
b/src/auto-reply/reply/route-reply.ts index a489bedcbbf..a6f863d7d18 100644 --- a/src/auto-reply/reply/route-reply.ts +++ b/src/auto-reply/reply/route-reply.ts @@ -12,6 +12,8 @@ import { resolveEffectiveMessagesConfig } from "../../agents/identity.js"; import { normalizeChannelId } from "../../channels/plugins/index.js"; import type { OpenClawConfig } from "../../config/config.js"; import { buildOutboundSessionContext } from "../../infra/outbound/session-context.js"; +import { parseSlackBlocksInput } from "../../slack/blocks-input.js"; +import { isSlackInteractiveRepliesEnabled } from "../../slack/interactive-replies.js"; import { INTERNAL_MESSAGE_CHANNEL, normalizeMessageChannel } from "../../utils/message-channel.js"; import type { OriginatingChannelType } from "../templating.js"; import type { ReplyPayload } from "../types.js"; @@ -94,6 +96,8 @@ export async function routeReply(params: RouteReplyParams): Promise { it("includes the core session startup instruction", () => { const prompt = buildBareSessionResetPrompt(); - expect(prompt).toContain("Execute your Session Startup sequence now"); + expect(prompt).toContain("Run your Session Startup sequence"); expect(prompt).toContain("read the required files before responding to the user"); }); diff --git a/src/auto-reply/reply/session-reset-prompt.ts b/src/auto-reply/reply/session-reset-prompt.ts index 67e693f70b1..c903e3a688a 100644 --- a/src/auto-reply/reply/session-reset-prompt.ts +++ b/src/auto-reply/reply/session-reset-prompt.ts @@ -2,7 +2,7 @@ import { appendCronStyleCurrentTimeLine } from "../../agents/current-time.js"; import type { OpenClawConfig } from "../../config/config.js"; const BARE_SESSION_RESET_PROMPT_BASE = - "A new session was started via /new or /reset. Execute your Session Startup sequence now - read the required files before responding to the user. Then greet the user in your configured persona, if one is provided. Be yourself - use your defined voice, mannerisms, and mood. 
Keep it to 1-3 sentences and ask what they want to do. If the runtime model differs from default_model in the system prompt, mention the default model. Do not mention internal steps, files, tools, or reasoning."; + "A new session was started via /new or /reset. Run your Session Startup sequence - read the required files before responding to the user. Then greet the user in your configured persona, if one is provided. Be yourself - use your defined voice, mannerisms, and mood. Keep it to 1-3 sentences and ask what they want to do. If the runtime model differs from default_model in the system prompt, mention the default model. Do not mention internal steps, files, tools, or reasoning."; /** * Build the bare session reset prompt, appending the current date/time so agents diff --git a/src/auto-reply/reply/session-updates.ts b/src/auto-reply/reply/session-updates.ts index 96243e919bb..55b4d4eb15b 100644 --- a/src/auto-reply/reply/session-updates.ts +++ b/src/auto-reply/reply/session-updates.ts @@ -117,6 +117,27 @@ export async function drainFormattedSystemEvents(params: { .join("\n"); } +async function persistSessionEntryUpdate(params: { + sessionStore?: Record; + sessionKey?: string; + storePath?: string; + nextEntry: SessionEntry; +}) { + if (!params.sessionStore || !params.sessionKey) { + return; + } + params.sessionStore[params.sessionKey] = { + ...params.sessionStore[params.sessionKey], + ...params.nextEntry, + }; + if (!params.storePath) { + return; + } + await updateSessionStore(params.storePath, (store) => { + store[params.sessionKey!] 
= { ...store[params.sessionKey!], ...params.nextEntry }; + }); +} + export async function ensureSkillSnapshot(params: { sessionEntry?: SessionEntry; sessionStore?: Record; @@ -185,12 +206,7 @@ export async function ensureSkillSnapshot(params: { systemSent: true, skillsSnapshot: skillSnapshot, }; - sessionStore[sessionKey] = { ...sessionStore[sessionKey], ...nextEntry }; - if (storePath) { - await updateSessionStore(storePath, (store) => { - store[sessionKey] = { ...store[sessionKey], ...nextEntry }; - }); - } + await persistSessionEntryUpdate({ sessionStore, sessionKey, storePath, nextEntry }); systemSent = true; } @@ -227,12 +243,7 @@ export async function ensureSkillSnapshot(params: { updatedAt: Date.now(), skillsSnapshot, }; - sessionStore[sessionKey] = { ...sessionStore[sessionKey], ...nextEntry }; - if (storePath) { - await updateSessionStore(storePath, (store) => { - store[sessionKey] = { ...store[sessionKey], ...nextEntry }; - }); - } + await persistSessionEntryUpdate({ sessionStore, sessionKey, storePath, nextEntry }); } return { sessionEntry: nextEntry, skillsSnapshot, systemSent }; diff --git a/src/auto-reply/reply/session.ts b/src/auto-reply/reply/session.ts index 6db6b1708cb..a2c0b1c7cf4 100644 --- a/src/auto-reply/reply/session.ts +++ b/src/auto-reply/reply/session.ts @@ -2,6 +2,7 @@ import crypto from "node:crypto"; import path from "node:path"; import { buildTelegramTopicConversationId, + normalizeConversationText, parseTelegramChatIdFromTarget, } from "../../acp/conversation-id.js"; import { resolveSessionAgentId } from "../../agents/agent-scope.js"; @@ -33,11 +34,12 @@ import { resolveConversationIdFromTargets } from "../../infra/outbound/conversat import { deliverSessionMaintenanceWarning } from "../../infra/session-maintenance-warning.js"; import { createSubsystemLogger } from "../../logging/subsystem.js"; import { getGlobalHookRunner } from "../../plugins/hook-runner-global.js"; -import { normalizeMainKey, parseAgentSessionKey } from 
"../../routing/session-key.js"; +import { normalizeMainKey } from "../../routing/session-key.js"; import { normalizeSessionDeliveryFields } from "../../utils/delivery-context.js"; import { resolveCommandAuthorization } from "../command-auth.js"; import type { MsgContext, TemplateContext } from "../templating.js"; import { resolveEffectiveResetTargetSessionKey } from "./acp-reset-target.js"; +import { parseDiscordParentChannelFromSessionKey } from "./discord-parent-channel.js"; import { normalizeInboundTextNewlines } from "./inbound-text.js"; import { stripMentions, stripStructuralPrefixes } from "./mentions.js"; import { @@ -69,44 +71,21 @@ export type SessionInitResult = { triggerBodyNormalized: string; }; -function normalizeSessionText(value: unknown): string { - if (typeof value === "string") { - return value.trim(); - } - if (typeof value === "number" || typeof value === "bigint" || typeof value === "boolean") { - return `${value}`.trim(); - } - return ""; -} - -function parseDiscordParentChannelFromSessionKey(raw: unknown): string | undefined { - const sessionKey = normalizeSessionText(raw); - if (!sessionKey) { - return undefined; - } - const scoped = parseAgentSessionKey(sessionKey)?.rest ?? sessionKey.toLowerCase(); - const match = scoped.match(/(?:^|:)channel:([^:]+)$/); - if (!match?.[1]) { - return undefined; - } - return match[1]; -} - function resolveAcpResetBindingContext(ctx: MsgContext): { channel: string; accountId: string; conversationId: string; parentConversationId?: string; } | null { - const channelRaw = normalizeSessionText( + const channelRaw = normalizeConversationText( ctx.OriginatingChannel ?? ctx.Surface ?? ctx.Provider ?? "", ).toLowerCase(); if (!channelRaw) { return null; } - const accountId = normalizeSessionText(ctx.AccountId) || "default"; + const accountId = normalizeConversationText(ctx.AccountId) || "default"; const normalizedThreadId = - ctx.MessageThreadId != null ? 
normalizeSessionText(String(ctx.MessageThreadId)) : ""; + ctx.MessageThreadId != null ? normalizeConversationText(String(ctx.MessageThreadId)) : ""; if (channelRaw === "telegram") { const parentConversationId = @@ -143,7 +122,7 @@ function resolveAcpResetBindingContext(ctx: MsgContext): { } let parentConversationId: string | undefined; if (channelRaw === "discord" && normalizedThreadId) { - const fromContext = normalizeSessionText(ctx.ThreadParentId); + const fromContext = normalizeConversationText(ctx.ThreadParentId); if (fromContext && fromContext !== conversationId) { parentConversationId = fromContext; } else { @@ -172,7 +151,7 @@ function resolveBoundAcpSessionForReset(params: { cfg: OpenClawConfig; ctx: MsgContext; }): string | undefined { - const activeSessionKey = normalizeSessionText(params.ctx.SessionKey); + const activeSessionKey = normalizeConversationText(params.ctx.SessionKey); const bindingContext = resolveAcpResetBindingContext(params.ctx); return resolveEffectiveResetTargetSessionKey({ cfg: params.cfg, diff --git a/src/auto-reply/reply/slack-directives.ts b/src/auto-reply/reply/slack-directives.ts new file mode 100644 index 00000000000..fe58f0c5961 --- /dev/null +++ b/src/auto-reply/reply/slack-directives.ts @@ -0,0 +1,218 @@ +import { parseSlackBlocksInput } from "../../slack/blocks-input.js"; +import { truncateSlackText } from "../../slack/truncate.js"; +import type { ReplyPayload } from "../types.js"; + +const SLACK_REPLY_BUTTON_ACTION_ID = "openclaw:reply_button"; +const SLACK_REPLY_SELECT_ACTION_ID = "openclaw:reply_select"; +const SLACK_MAX_BLOCKS = 50; +const SLACK_BUTTON_MAX_ITEMS = 5; +const SLACK_SELECT_MAX_ITEMS = 100; +const SLACK_SECTION_TEXT_MAX = 3000; +const SLACK_PLAIN_TEXT_MAX = 75; +const SLACK_OPTION_VALUE_MAX = 75; +const SLACK_DIRECTIVE_RE = /\[\[(slack_buttons|slack_select):\s*([^\]]+)\]\]/gi; + +type SlackBlock = Record; +type SlackChannelData = { + blocks?: unknown; +}; + +type SlackChoice = { + label: string; + value: 
string; +}; + +function parseChoice(raw: string): SlackChoice | null { + const trimmed = raw.trim(); + if (!trimmed) { + return null; + } + const delimiter = trimmed.indexOf(":"); + if (delimiter === -1) { + return { + label: trimmed, + value: trimmed, + }; + } + const label = trimmed.slice(0, delimiter).trim(); + const value = trimmed.slice(delimiter + 1).trim(); + if (!label || !value) { + return null; + } + return { label, value }; +} + +function parseChoices(raw: string, maxItems: number): SlackChoice[] { + return raw + .split(",") + .map((entry) => parseChoice(entry)) + .filter((entry): entry is SlackChoice => Boolean(entry)) + .slice(0, maxItems); +} + +function buildSlackReplyChoiceToken(value: string, index: number): string { + const slug = value + .trim() + .toLowerCase() + .replace(/[^a-z0-9]+/g, "_") + .replace(/^_+|_+$/g, ""); + return truncateSlackText(`reply_${index}_${slug || "choice"}`, SLACK_OPTION_VALUE_MAX); +} + +function buildSectionBlock(text: string): SlackBlock | null { + const trimmed = text.trim(); + if (!trimmed) { + return null; + } + return { + type: "section", + text: { + type: "mrkdwn", + text: truncateSlackText(trimmed, SLACK_SECTION_TEXT_MAX), + }, + }; +} + +function buildButtonsBlock(raw: string, index: number): SlackBlock | null { + const choices = parseChoices(raw, SLACK_BUTTON_MAX_ITEMS); + if (choices.length === 0) { + return null; + } + return { + type: "actions", + block_id: `openclaw_reply_buttons_${index}`, + elements: choices.map((choice, choiceIndex) => ({ + type: "button", + action_id: SLACK_REPLY_BUTTON_ACTION_ID, + text: { + type: "plain_text", + text: truncateSlackText(choice.label, SLACK_PLAIN_TEXT_MAX), + emoji: true, + }, + value: buildSlackReplyChoiceToken(choice.value, choiceIndex + 1), + })), + }; +} + +function buildSelectBlock(raw: string, index: number): SlackBlock | null { + const parts = raw + .split("|") + .map((entry) => entry.trim()) + .filter(Boolean); + if (parts.length === 0) { + return null; + } + 
const [first, second] = parts; + const placeholder = parts.length >= 2 ? first : "Choose an option"; + const choices = parseChoices(parts.length >= 2 ? second : first, SLACK_SELECT_MAX_ITEMS); + if (choices.length === 0) { + return null; + } + return { + type: "actions", + block_id: `openclaw_reply_select_${index}`, + elements: [ + { + type: "static_select", + action_id: SLACK_REPLY_SELECT_ACTION_ID, + placeholder: { + type: "plain_text", + text: truncateSlackText(placeholder, SLACK_PLAIN_TEXT_MAX), + emoji: true, + }, + options: choices.map((choice, choiceIndex) => ({ + text: { + type: "plain_text", + text: truncateSlackText(choice.label, SLACK_PLAIN_TEXT_MAX), + emoji: true, + }, + value: buildSlackReplyChoiceToken(choice.value, choiceIndex + 1), + })), + }, + ], + }; +} + +function readExistingSlackBlocks(payload: ReplyPayload): SlackBlock[] { + const slackData = payload.channelData?.slack as SlackChannelData | undefined; + try { + const blocks = parseSlackBlocksInput(slackData?.blocks) as SlackBlock[] | undefined; + return blocks ?? []; + } catch { + return []; + } +} + +export function hasSlackDirectives(text: string): boolean { + SLACK_DIRECTIVE_RE.lastIndex = 0; + return SLACK_DIRECTIVE_RE.test(text); +} + +export function parseSlackDirectives(payload: ReplyPayload): ReplyPayload { + const text = payload.text; + if (!text) { + return payload; + } + + const generatedBlocks: SlackBlock[] = []; + const visibleTextParts: string[] = []; + let buttonIndex = 0; + let selectIndex = 0; + let cursor = 0; + let matchedDirective = false; + let generatedInteractiveBlock = false; + SLACK_DIRECTIVE_RE.lastIndex = 0; + + for (const match of text.matchAll(SLACK_DIRECTIVE_RE)) { + matchedDirective = true; + const matchText = match[0]; + const directiveType = match[1]; + const body = match[2]; + const index = match.index ?? 
0; + const precedingText = text.slice(cursor, index); + visibleTextParts.push(precedingText); + const section = buildSectionBlock(precedingText); + if (section) { + generatedBlocks.push(section); + } + const block = + directiveType.toLowerCase() === "slack_buttons" + ? buildButtonsBlock(body, ++buttonIndex) + : buildSelectBlock(body, ++selectIndex); + if (block) { + generatedInteractiveBlock = true; + generatedBlocks.push(block); + } + cursor = index + matchText.length; + } + + const trailingText = text.slice(cursor); + visibleTextParts.push(trailingText); + const trailingSection = buildSectionBlock(trailingText); + if (trailingSection) { + generatedBlocks.push(trailingSection); + } + const cleanedText = visibleTextParts.join(""); + + if (!matchedDirective || !generatedInteractiveBlock) { + return payload; + } + + const existingBlocks = readExistingSlackBlocks(payload); + if (existingBlocks.length + generatedBlocks.length > SLACK_MAX_BLOCKS) { + return payload; + } + const nextBlocks = [...existingBlocks, ...generatedBlocks]; + + return { + ...payload, + text: cleanedText.trim() || undefined, + channelData: { + ...payload.channelData, + slack: { + ...(payload.channelData?.slack as Record | undefined), + blocks: nextBlocks, + }, + }, + }; +} diff --git a/src/auto-reply/reply/stage-sandbox-media.ts b/src/auto-reply/reply/stage-sandbox-media.ts index d364fa6a554..3d3dec1738f 100644 --- a/src/auto-reply/reply/stage-sandbox-media.ts +++ b/src/auto-reply/reply/stage-sandbox-media.ts @@ -7,7 +7,7 @@ import { ensureSandboxWorkspaceForSession } from "../../agents/sandbox.js"; import type { OpenClawConfig } from "../../config/config.js"; import { logVerbose } from "../../globals.js"; import { copyFileWithinRoot, SafeOpenError } from "../../infra/fs-safe.js"; -import { normalizeScpRemoteHost } from "../../infra/scp-host.js"; +import { normalizeScpRemoteHost, normalizeScpRemotePath } from "../../infra/scp-host.js"; import { resolvePreferredOpenClawTmpDir } from 
"../../infra/tmp-openclaw-dir.js"; import { isInboundPathAllowed, @@ -293,6 +293,10 @@ async function scpFile(remoteHost: string, remotePath: string, localPath: string if (!safeRemoteHost) { throw new Error("invalid remote host for SCP"); } + const safeRemotePath = normalizeScpRemotePath(remotePath); + if (!safeRemotePath) { + throw new Error("invalid remote path for SCP"); + } return new Promise((resolve, reject) => { const child = spawn( "/usr/bin/scp", @@ -302,7 +306,7 @@ async function scpFile(remoteHost: string, remotePath: string, localPath: string "-o", "StrictHostKeyChecking=yes", "--", - `${safeRemoteHost}:${remotePath}`, + `${safeRemoteHost}:${safeRemotePath}`, localPath, ], { stdio: ["ignore", "ignore", "pipe"] }, diff --git a/src/auto-reply/reply/test-helpers.ts b/src/auto-reply/reply/test-helpers.ts index 4c30ae6756a..fe1913e723d 100644 --- a/src/auto-reply/reply/test-helpers.ts +++ b/src/auto-reply/reply/test-helpers.ts @@ -1,4 +1,5 @@ import { vi } from "vitest"; +import type { FollowupRun } from "./queue.js"; import type { TypingController } from "./typing.js"; export function createMockTypingController( @@ -16,3 +17,49 @@ export function createMockTypingController( ...overrides, }; } + +export function createMockFollowupRun( + overrides: Partial> & { run?: Partial } = {}, +): FollowupRun { + const base: FollowupRun = { + prompt: "hello", + summaryLine: "hello", + enqueuedAt: Date.now(), + originatingTo: "channel:C1", + run: { + agentId: "agent", + agentDir: "/tmp/agent", + sessionId: "session", + sessionKey: "main", + messageProvider: "whatsapp", + agentAccountId: "primary", + sessionFile: "/tmp/session.jsonl", + workspaceDir: "/tmp", + config: {}, + skillsSnapshot: { + prompt: "", + skills: [], + }, + provider: "anthropic", + model: "claude", + thinkLevel: "low", + verboseLevel: "off", + elevatedLevel: "off", + bashElevated: { + enabled: false, + allowed: false, + defaultLevel: "off", + }, + timeoutMs: 1_000, + blockReplyBreak: "message_end", + }, 
+ }; + return { + ...base, + ...overrides, + run: { + ...base.run, + ...overrides.run, + }, + }; +} diff --git a/src/auto-reply/status.test.ts b/src/auto-reply/status.test.ts index e58f03e0c13..b416c1e3ef7 100644 --- a/src/auto-reply/status.test.ts +++ b/src/auto-reply/status.test.ts @@ -113,6 +113,23 @@ describe("buildStatusMessage", () => { expect(normalized).toContain("Reasoning: on"); }); + it("shows fast mode when enabled", () => { + const text = buildStatusMessage({ + agent: { + model: "openai/gpt-5.4", + }, + sessionEntry: { + sessionId: "fast", + updatedAt: 0, + fastMode: true, + }, + sessionKey: "agent:main:main", + queue: { mode: "collect", depth: 0 }, + }); + + expect(normalizeTestText(text)).toContain("Fast: on"); + }); + it("notes channel model overrides in status output", () => { const text = buildStatusMessage({ config: { @@ -708,6 +725,10 @@ describe("buildHelpMessage", () => { expect(text).not.toContain("/config"); expect(text).not.toContain("/debug"); }); + + it("includes /fast in help output", () => { + expect(buildHelpMessage()).toContain("/fast on|off"); + }); }); describe("buildCommandsMessagePaginated", () => { diff --git a/src/auto-reply/status.ts b/src/auto-reply/status.ts index d4c5e0c18bb..1b7aa2a87ec 100644 --- a/src/auto-reply/status.ts +++ b/src/auto-reply/status.ts @@ -77,6 +77,7 @@ type StatusArgs = { sessionStorePath?: string; groupActivation?: "mention" | "always"; resolvedThink?: ThinkLevel; + resolvedFast?: boolean; resolvedVerbose?: VerboseLevel; resolvedReasoning?: ReasoningLevel; resolvedElevated?: ElevatedLevel; @@ -510,6 +511,7 @@ export function buildStatusMessage(args: StatusArgs): string { args.resolvedThink ?? args.sessionEntry?.thinkingLevel ?? args.agent?.thinkingDefault ?? "off"; const verboseLevel = args.resolvedVerbose ?? args.sessionEntry?.verboseLevel ?? args.agent?.verboseDefault ?? "off"; + const fastMode = args.resolvedFast ?? args.sessionEntry?.fastMode ?? 
false; const reasoningLevel = args.resolvedReasoning ?? args.sessionEntry?.reasoningLevel ?? "off"; const elevatedLevel = args.resolvedElevated ?? @@ -556,6 +558,7 @@ export function buildStatusMessage(args: StatusArgs): string { const optionParts = [ `Runtime: ${runtime.label}`, `Think: ${thinkLevel}`, + fastMode ? "Fast: on" : null, verboseLabel, reasoningLevel !== "off" ? `Reasoning: ${reasoningLevel}` : null, elevatedLabel, @@ -728,7 +731,7 @@ export function buildHelpMessage(cfg?: OpenClawConfig): string { lines.push(" /new | /reset | /compact [instructions] | /stop"); lines.push(""); - const optionParts = ["/think ", "/model ", "/verbose on|off"]; + const optionParts = ["/think ", "/model ", "/fast on|off", "/verbose on|off"]; if (isCommandFlagEnabled(cfg, "config")) { optionParts.push("/config"); } diff --git a/src/auto-reply/templating.ts b/src/auto-reply/templating.ts index cc4fc49e93f..8ca3c2389bc 100644 --- a/src/auto-reply/templating.ts +++ b/src/auto-reply/templating.ts @@ -132,6 +132,8 @@ export type MsgContext = { Provider?: string; /** Provider surface label (e.g. discord, slack). Prefer this over `Provider` when available. */ Surface?: string; + /** Platform bot username when command mentions should be normalized. 
*/ + BotUsername?: string; WasMentioned?: boolean; CommandAuthorized?: boolean; CommandSource?: "text" | "native"; diff --git a/src/auto-reply/thinking.test.ts b/src/auto-reply/thinking.test.ts index 359082c2616..d4814a263e9 100644 --- a/src/auto-reply/thinking.test.ts +++ b/src/auto-reply/thinking.test.ts @@ -4,6 +4,7 @@ import { listThinkingLevels, normalizeReasoningLevel, normalizeThinkLevel, + resolveThinkingDefaultForModel, } from "./thinking.js"; describe("normalizeThinkLevel", () => { @@ -84,6 +85,40 @@ describe("listThinkingLevelLabels", () => { }); }); +describe("resolveThinkingDefaultForModel", () => { + it("defaults Claude 4.6 models to adaptive", () => { + expect( + resolveThinkingDefaultForModel({ provider: "anthropic", model: "claude-opus-4-6" }), + ).toBe("adaptive"); + }); + + it("treats Bedrock Anthropic aliases as adaptive", () => { + expect( + resolveThinkingDefaultForModel({ provider: "aws-bedrock", model: "claude-sonnet-4-6" }), + ).toBe("adaptive"); + }); + + it("defaults reasoning-capable catalog models to low", () => { + expect( + resolveThinkingDefaultForModel({ + provider: "openai", + model: "gpt-5.4", + catalog: [{ provider: "openai", id: "gpt-5.4", reasoning: true }], + }), + ).toBe("low"); + }); + + it("defaults to off when no adaptive or reasoning hint is present", () => { + expect( + resolveThinkingDefaultForModel({ + provider: "openai", + model: "gpt-4.1-mini", + catalog: [{ provider: "openai", id: "gpt-4.1-mini", reasoning: false }], + }), + ).toBe("off"); + }); +}); + describe("normalizeReasoningLevel", () => { it("accepts on/off", () => { expect(normalizeReasoningLevel("on")).toBe("on"); diff --git a/src/auto-reply/thinking.ts b/src/auto-reply/thinking.ts index 0a0f87c16e7..639db68eafb 100644 --- a/src/auto-reply/thinking.ts +++ b/src/auto-reply/thinking.ts @@ -5,6 +5,13 @@ export type ElevatedLevel = "off" | "on" | "ask" | "full"; export type ElevatedMode = "off" | "ask" | "full"; export type ReasoningLevel = "off" | "on" | 
"stream"; export type UsageDisplayLevel = "off" | "tokens" | "full"; +export type ThinkingCatalogEntry = { + provider: string; + id: string; + reasoning?: boolean; +}; + +const CLAUDE_46_MODEL_RE = /claude-(?:opus|sonnet)-4(?:\.|-)6(?:$|[-.])/i; function normalizeProviderId(provider?: string | null): string { if (!provider) { @@ -14,6 +21,9 @@ function normalizeProviderId(provider?: string | null): string { if (normalized === "z.ai" || normalized === "z-ai") { return "zai"; } + if (normalized === "bedrock" || normalized === "aws-bedrock") { + return "amazon-bedrock"; + } return normalized; } @@ -130,6 +140,30 @@ export function formatXHighModelHint(): string { return `${refs.slice(0, -1).join(", ")} or ${refs[refs.length - 1]}`; } +export function resolveThinkingDefaultForModel(params: { + provider: string; + model: string; + catalog?: ThinkingCatalogEntry[]; +}): ThinkLevel { + const normalizedProvider = normalizeProviderId(params.provider); + const modelLower = params.model.trim().toLowerCase(); + const isAnthropicFamilyModel = + normalizedProvider === "anthropic" || + normalizedProvider === "amazon-bedrock" || + modelLower.includes("anthropic/") || + modelLower.includes(".anthropic."); + if (isAnthropicFamilyModel && CLAUDE_46_MODEL_RE.test(modelLower)) { + return "adaptive"; + } + const candidate = params.catalog?.find( + (entry) => entry.provider === params.provider && entry.id === params.model, + ); + if (candidate?.reasoning) { + return "low"; + } + return "off"; +} + type OnOffFullLevel = "off" | "on" | "full"; function normalizeOnOffFullLevel(raw?: string | null): OnOffFullLevel | undefined { @@ -184,6 +218,24 @@ export function resolveResponseUsageMode(raw?: string | null): UsageDisplayLevel return normalizeUsageDisplay(raw) ?? "off"; } +// Normalize fast-mode flags used to toggle low-latency model behavior. 
+export function normalizeFastMode(raw?: string | boolean | null): boolean | undefined { + if (typeof raw === "boolean") { + return raw; + } + if (!raw) { + return undefined; + } + const key = raw.toLowerCase(); + if (["off", "false", "no", "0", "disable", "disabled", "normal"].includes(key)) { + return false; + } + if (["on", "true", "yes", "1", "enable", "enabled", "fast"].includes(key)) { + return true; + } + return undefined; +} + // Normalize elevated flags used to toggle elevated bash permissions. export function normalizeElevatedLevel(raw?: string | null): ElevatedLevel | undefined { if (!raw) { diff --git a/src/auto-reply/types.ts b/src/auto-reply/types.ts index 4692d442ea5..be32e3635e1 100644 --- a/src/auto-reply/types.ts +++ b/src/auto-reply/types.ts @@ -54,6 +54,10 @@ export type GetReplyOptions = { onToolResult?: (payload: ReplyPayload) => Promise | void; /** Called when a tool phase starts/updates, before summary payloads are emitted. */ onToolStart?: (payload: { name?: string; phase?: string }) => Promise | void; + /** Called when context auto-compaction starts (allows UX feedback during the pause). */ + onCompactionStart?: () => Promise | void; + /** Called when context auto-compaction completes. */ + onCompactionEnd?: () => Promise | void; /** Called when the actual model is selected (including after fallback). * Use this to get model/provider/thinkLevel for responsePrefix template interpolation. 
*/ onModelSelected?: (ctx: ModelSelectedContext) => void; diff --git a/src/browser/cdp.helpers.ts b/src/browser/cdp.helpers.ts index 5749a591fd6..44f689e8706 100644 --- a/src/browser/cdp.helpers.ts +++ b/src/browser/cdp.helpers.ts @@ -3,6 +3,7 @@ import { isLoopbackHost } from "../gateway/net.js"; import { rawDataToString } from "../infra/ws.js"; import { getDirectAgentForCdp, withNoProxyForCdpUrl } from "./cdp-proxy-bypass.js"; import { CDP_HTTP_REQUEST_TIMEOUT_MS, CDP_WS_HANDSHAKE_TIMEOUT_MS } from "./cdp-timeouts.js"; +import { resolveBrowserRateLimitMessage } from "./client-fetch.js"; import { getChromeExtensionRelayAuthHeaders } from "./extension-relay.js"; export { isLoopbackHost }; @@ -172,6 +173,10 @@ export async function fetchCdpChecked( fetch(url, { ...init, headers, signal: ctrl.signal }), ); if (!res.ok) { + if (res.status === 429) { + // Do not reflect upstream response text into the error surface (log/agent injection risk) + throw new Error(`${resolveBrowserRateLimitMessage(url)} Do NOT retry the browser tool.`); + } throw new Error(`HTTP ${res.status}`); } return res; diff --git a/src/browser/chrome-mcp.snapshot.test.ts b/src/browser/chrome-mcp.snapshot.test.ts new file mode 100644 index 00000000000..3fe3288848f --- /dev/null +++ b/src/browser/chrome-mcp.snapshot.test.ts @@ -0,0 +1,68 @@ +import { describe, expect, it } from "vitest"; +import { + buildAiSnapshotFromChromeMcpSnapshot, + flattenChromeMcpSnapshotToAriaNodes, +} from "./chrome-mcp.snapshot.js"; + +const snapshot = { + id: "root", + role: "document", + name: "Example", + children: [ + { + id: "btn-1", + role: "button", + name: "Continue", + }, + { + id: "txt-1", + role: "textbox", + name: "Email", + value: "peter@example.com", + }, + ], +}; + +describe("chrome MCP snapshot conversion", () => { + it("flattens structured snapshots into aria-style nodes", () => { + const nodes = flattenChromeMcpSnapshotToAriaNodes(snapshot, 10); + expect(nodes).toEqual([ + { + ref: "root", + role: 
"document", + name: "Example", + value: undefined, + description: undefined, + depth: 0, + }, + { + ref: "btn-1", + role: "button", + name: "Continue", + value: undefined, + description: undefined, + depth: 1, + }, + { + ref: "txt-1", + role: "textbox", + name: "Email", + value: "peter@example.com", + description: undefined, + depth: 1, + }, + ]); + }); + + it("builds AI snapshots that preserve Chrome MCP uids as refs", () => { + const result = buildAiSnapshotFromChromeMcpSnapshot({ root: snapshot }); + + expect(result.snapshot).toContain('- button "Continue" [ref=btn-1]'); + expect(result.snapshot).toContain('- textbox "Email" [ref=txt-1] value="peter@example.com"'); + expect(result.refs).toEqual({ + "btn-1": { role: "button", name: "Continue" }, + "txt-1": { role: "textbox", name: "Email" }, + }); + expect(result.stats.refs).toBe(2); + }); +}); diff --git a/src/browser/chrome-mcp.snapshot.ts b/src/browser/chrome-mcp.snapshot.ts new file mode 100644 index 00000000000..f0a1413736a --- /dev/null +++ b/src/browser/chrome-mcp.snapshot.ts @@ -0,0 +1,193 @@ +import type { SnapshotAriaNode } from "./client.js"; +import { + getRoleSnapshotStats, + type RoleRefMap, + type RoleSnapshotOptions, +} from "./pw-role-snapshot.js"; +import { CONTENT_ROLES, INTERACTIVE_ROLES, STRUCTURAL_ROLES } from "./snapshot-roles.js"; + +export type ChromeMcpSnapshotNode = { + id?: string; + role?: string; + name?: string; + value?: string | number | boolean; + description?: string; + children?: ChromeMcpSnapshotNode[]; +}; + +function normalizeRole(node: ChromeMcpSnapshotNode): string { + const role = typeof node.role === "string" ? 
node.role.trim().toLowerCase() : ""; + return role || "generic"; +} + +function normalizeString(value: unknown): string | undefined { + if (typeof value === "string") { + const trimmed = value.trim(); + return trimmed || undefined; + } + if (typeof value === "number" || typeof value === "boolean") { + return String(value); + } + return undefined; +} + +function escapeQuoted(value: string): string { + return value.replaceAll("\\", "\\\\").replaceAll('"', '\\"'); +} + +function shouldIncludeNode(params: { + role: string; + name?: string; + options?: RoleSnapshotOptions; +}): boolean { + if (params.options?.interactive && !INTERACTIVE_ROLES.has(params.role)) { + return false; + } + if (params.options?.compact && STRUCTURAL_ROLES.has(params.role) && !params.name) { + return false; + } + return true; +} + +function shouldCreateRef(role: string, name?: string): boolean { + return INTERACTIVE_ROLES.has(role) || (CONTENT_ROLES.has(role) && Boolean(name)); +} + +type DuplicateTracker = { + counts: Map; + keysByRef: Map; + duplicates: Set; +}; + +function createDuplicateTracker(): DuplicateTracker { + return { + counts: new Map(), + keysByRef: new Map(), + duplicates: new Set(), + }; +} + +function registerRef( + tracker: DuplicateTracker, + ref: string, + role: string, + name?: string, +): number | undefined { + const key = `${role}:${name ?? ""}`; + const count = tracker.counts.get(key) ?? 
0; + tracker.counts.set(key, count + 1); + tracker.keysByRef.set(ref, key); + if (count > 0) { + tracker.duplicates.add(key); + return count; + } + return undefined; +} + +export function flattenChromeMcpSnapshotToAriaNodes( + root: ChromeMcpSnapshotNode, + limit = 500, +): SnapshotAriaNode[] { + const boundedLimit = Math.max(1, Math.min(2000, Math.floor(limit))); + const out: SnapshotAriaNode[] = []; + + const visit = (node: ChromeMcpSnapshotNode, depth: number) => { + if (out.length >= boundedLimit) { + return; + } + const ref = normalizeString(node.id); + if (ref) { + out.push({ + ref, + role: normalizeRole(node), + name: normalizeString(node.name) ?? "", + value: normalizeString(node.value), + description: normalizeString(node.description), + depth, + }); + } + for (const child of node.children ?? []) { + visit(child, depth + 1); + if (out.length >= boundedLimit) { + return; + } + } + }; + + visit(root, 0); + return out; +} + +export function buildAiSnapshotFromChromeMcpSnapshot(params: { + root: ChromeMcpSnapshotNode; + options?: RoleSnapshotOptions; + maxChars?: number; +}): { + snapshot: string; + truncated?: boolean; + refs: RoleRefMap; + stats: { lines: number; chars: number; refs: number; interactive: number }; +} { + const refs: RoleRefMap = {}; + const tracker = createDuplicateTracker(); + const lines: string[] = []; + + const visit = (node: ChromeMcpSnapshotNode, depth: number) => { + const role = normalizeRole(node); + const name = normalizeString(node.name); + const value = normalizeString(node.value); + const description = normalizeString(node.description); + const maxDepth = params.options?.maxDepth; + if (maxDepth !== undefined && depth > maxDepth) { + return; + } + + const includeNode = shouldIncludeNode({ role, name, options: params.options }); + if (includeNode) { + let line = `${" ".repeat(depth)}- ${role}`; + if (name) { + line += ` "${escapeQuoted(name)}"`; + } + const ref = normalizeString(node.id); + if (ref && shouldCreateRef(role, name)) 
{ + const nth = registerRef(tracker, ref, role, name); + refs[ref] = nth === undefined ? { role, name } : { role, name, nth }; + line += ` [ref=${ref}]`; + } + if (value) { + line += ` value="${escapeQuoted(value)}"`; + } + if (description) { + line += ` description="${escapeQuoted(description)}"`; + } + lines.push(line); + } + + for (const child of node.children ?? []) { + visit(child, depth + 1); + } + }; + + visit(params.root, 0); + + for (const [ref, data] of Object.entries(refs)) { + const key = tracker.keysByRef.get(ref); + if (key && !tracker.duplicates.has(key)) { + delete data.nth; + } + } + + let snapshot = lines.join("\n"); + let truncated = false; + const maxChars = + typeof params.maxChars === "number" && Number.isFinite(params.maxChars) && params.maxChars > 0 + ? Math.floor(params.maxChars) + : undefined; + if (maxChars && snapshot.length > maxChars) { + snapshot = `${snapshot.slice(0, maxChars)}\n\n[...TRUNCATED - page too large]`; + truncated = true; + } + + const stats = getRoleSnapshotStats(snapshot, refs); + return truncated ? 
{ snapshot, truncated, refs, stats } : { snapshot, refs, stats }; +} diff --git a/src/browser/chrome-mcp.test.ts b/src/browser/chrome-mcp.test.ts new file mode 100644 index 00000000000..a77149d7a72 --- /dev/null +++ b/src/browser/chrome-mcp.test.ts @@ -0,0 +1,270 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { + evaluateChromeMcpScript, + listChromeMcpTabs, + openChromeMcpTab, + resetChromeMcpSessionsForTest, + setChromeMcpSessionFactoryForTest, +} from "./chrome-mcp.js"; + +type ToolCall = { + name: string; + arguments?: Record; +}; + +type ChromeMcpSessionFactory = Exclude< + Parameters[0], + null +>; +type ChromeMcpSession = Awaited>; + +function createFakeSession(): ChromeMcpSession { + const callTool = vi.fn(async ({ name }: ToolCall) => { + if (name === "list_pages") { + return { + content: [ + { + type: "text", + text: [ + "## Pages", + "1: https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session [selected]", + "2: https://github.com/openclaw/openclaw/pull/45318", + ].join("\n"), + }, + ], + }; + } + if (name === "new_page") { + return { + content: [ + { + type: "text", + text: [ + "## Pages", + "1: https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session", + "2: https://github.com/openclaw/openclaw/pull/45318", + "3: https://example.com/ [selected]", + ].join("\n"), + }, + ], + }; + } + if (name === "evaluate_script") { + return { + content: [ + { + type: "text", + text: "```json\n123\n```", + }, + ], + }; + } + throw new Error(`unexpected tool ${name}`); + }); + + return { + client: { + callTool, + listTools: vi.fn().mockResolvedValue({ tools: [{ name: "list_pages" }] }), + close: vi.fn().mockResolvedValue(undefined), + connect: vi.fn().mockResolvedValue(undefined), + }, + transport: { + pid: 123, + }, + ready: Promise.resolve(), + } as unknown as ChromeMcpSession; +} + +describe("chrome MCP page parsing", () => { + beforeEach(async () => { + await resetChromeMcpSessionsForTest(); 
+ }); + + it("parses list_pages text responses when structuredContent is missing", async () => { + const factory: ChromeMcpSessionFactory = async () => createFakeSession(); + setChromeMcpSessionFactoryForTest(factory); + + const tabs = await listChromeMcpTabs("chrome-live"); + + expect(tabs).toEqual([ + { + targetId: "1", + title: "", + url: "https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session", + type: "page", + }, + { + targetId: "2", + title: "", + url: "https://github.com/openclaw/openclaw/pull/45318", + type: "page", + }, + ]); + }); + + it("parses new_page text responses and returns the created tab", async () => { + const factory: ChromeMcpSessionFactory = async () => createFakeSession(); + setChromeMcpSessionFactoryForTest(factory); + + const tab = await openChromeMcpTab("chrome-live", "https://example.com/"); + + expect(tab).toEqual({ + targetId: "3", + title: "", + url: "https://example.com/", + type: "page", + }); + }); + + it("parses evaluate_script text responses when structuredContent is missing", async () => { + const factory: ChromeMcpSessionFactory = async () => createFakeSession(); + setChromeMcpSessionFactoryForTest(factory); + + const result = await evaluateChromeMcpScript({ + profileName: "chrome-live", + targetId: "1", + fn: "() => 123", + }); + + expect(result).toBe(123); + }); + + it("surfaces MCP tool errors instead of JSON parse noise", async () => { + const factory: ChromeMcpSessionFactory = async () => { + const session = createFakeSession(); + const callTool = vi.fn(async ({ name }: ToolCall) => { + if (name === "evaluate_script") { + return { + content: [ + { + type: "text", + text: "Cannot read properties of null (reading 'value')", + }, + ], + isError: true, + }; + } + throw new Error(`unexpected tool ${name}`); + }); + session.client.callTool = callTool as typeof session.client.callTool; + return session; + }; + setChromeMcpSessionFactoryForTest(factory); + + await expect( + evaluateChromeMcpScript({ + 
profileName: "chrome-live", + targetId: "1", + fn: "() => document.getElementById('missing').value", + }), + ).rejects.toThrow(/Cannot read properties of null/); + }); + + it("reuses a single pending session for concurrent requests", async () => { + let factoryCalls = 0; + let releaseFactory!: () => void; + const factoryGate = new Promise((resolve) => { + releaseFactory = resolve; + }); + + const factory: ChromeMcpSessionFactory = async () => { + factoryCalls += 1; + await factoryGate; + return createFakeSession(); + }; + setChromeMcpSessionFactoryForTest(factory); + + const tabsPromise = listChromeMcpTabs("chrome-live"); + const evalPromise = evaluateChromeMcpScript({ + profileName: "chrome-live", + targetId: "1", + fn: "() => 123", + }); + + releaseFactory(); + const [tabs, result] = await Promise.all([tabsPromise, evalPromise]); + + expect(factoryCalls).toBe(1); + expect(tabs).toHaveLength(2); + expect(result).toBe(123); + }); + + it("preserves session after tool-level errors (isError)", async () => { + let factoryCalls = 0; + const factory: ChromeMcpSessionFactory = async () => { + factoryCalls += 1; + const session = createFakeSession(); + const callTool = vi.fn(async ({ name }: ToolCall) => { + if (name === "evaluate_script") { + return { + content: [{ type: "text", text: "element not found" }], + isError: true, + }; + } + if (name === "list_pages") { + return { + content: [{ type: "text", text: "## Pages\n1: https://example.com [selected]" }], + }; + } + throw new Error(`unexpected tool ${name}`); + }); + session.client.callTool = callTool as typeof session.client.callTool; + return session; + }; + setChromeMcpSessionFactoryForTest(factory); + + // First call: tool error (isError: true) — should NOT destroy session + await expect( + evaluateChromeMcpScript({ profileName: "chrome-live", targetId: "1", fn: "() => null" }), + ).rejects.toThrow(/element not found/); + + // Second call: should reuse the same session (factory called only once) + const tabs = await 
listChromeMcpTabs("chrome-live"); + expect(factoryCalls).toBe(1); + expect(tabs).toHaveLength(1); + }); + + it("destroys session on transport errors so next call reconnects", async () => { + let factoryCalls = 0; + const factory: ChromeMcpSessionFactory = async () => { + factoryCalls += 1; + const session = createFakeSession(); + if (factoryCalls === 1) { + // First session: transport error (callTool throws) + const callTool = vi.fn(async () => { + throw new Error("connection reset"); + }); + session.client.callTool = callTool as typeof session.client.callTool; + } + return session; + }; + setChromeMcpSessionFactoryForTest(factory); + + // First call: transport error — should destroy session + await expect(listChromeMcpTabs("chrome-live")).rejects.toThrow(/connection reset/); + + // Second call: should create a new session (factory called twice) + const tabs = await listChromeMcpTabs("chrome-live"); + expect(factoryCalls).toBe(2); + expect(tabs).toHaveLength(2); + }); + + it("clears failed pending sessions so the next call can retry", async () => { + let factoryCalls = 0; + const factory: ChromeMcpSessionFactory = async () => { + factoryCalls += 1; + if (factoryCalls === 1) { + throw new Error("attach failed"); + } + return createFakeSession(); + }; + setChromeMcpSessionFactoryForTest(factory); + + await expect(listChromeMcpTabs("chrome-live")).rejects.toThrow(/attach failed/); + + const tabs = await listChromeMcpTabs("chrome-live"); + expect(factoryCalls).toBe(2); + expect(tabs).toHaveLength(2); + }); +}); diff --git a/src/browser/chrome-mcp.ts b/src/browser/chrome-mcp.ts new file mode 100644 index 00000000000..25ae39b2293 --- /dev/null +++ b/src/browser/chrome-mcp.ts @@ -0,0 +1,542 @@ +import { randomUUID } from "node:crypto"; +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { Client } from "@modelcontextprotocol/sdk/client/index.js"; +import { StdioClientTransport } from 
"@modelcontextprotocol/sdk/client/stdio.js"; +import type { ChromeMcpSnapshotNode } from "./chrome-mcp.snapshot.js"; +import type { BrowserTab } from "./client.js"; +import { BrowserProfileUnavailableError, BrowserTabNotFoundError } from "./errors.js"; + +type ChromeMcpStructuredPage = { + id: number; + url?: string; + selected?: boolean; +}; + +type ChromeMcpToolResult = { + structuredContent?: Record; + content?: Array>; + isError?: boolean; +}; + +type ChromeMcpSession = { + client: Client; + transport: StdioClientTransport; + ready: Promise; +}; + +type ChromeMcpSessionFactory = (profileName: string) => Promise; + +const DEFAULT_CHROME_MCP_COMMAND = "npx"; +const DEFAULT_CHROME_MCP_ARGS = [ + "-y", + "chrome-devtools-mcp@latest", + "--autoConnect", + // Direct chrome-devtools-mcp launches do not enable structuredContent by default. + "--experimentalStructuredContent", + "--experimental-page-id-routing", +]; + +const sessions = new Map(); +const pendingSessions = new Map>(); +let sessionFactory: ChromeMcpSessionFactory | null = null; + +function asRecord(value: unknown): Record | null { + return value && typeof value === "object" && !Array.isArray(value) + ? (value as Record) + : null; +} + +function asPages(value: unknown): ChromeMcpStructuredPage[] { + if (!Array.isArray(value)) { + return []; + } + const out: ChromeMcpStructuredPage[] = []; + for (const entry of value) { + const record = asRecord(entry); + if (!record || typeof record.id !== "number") { + continue; + } + out.push({ + id: record.id, + url: typeof record.url === "string" ? 
record.url : undefined, + selected: record.selected === true, + }); + } + return out; +} + +function parsePageId(targetId: string): number { + const parsed = Number.parseInt(targetId.trim(), 10); + if (!Number.isFinite(parsed)) { + throw new BrowserTabNotFoundError(); + } + return parsed; +} + +function toBrowserTabs(pages: ChromeMcpStructuredPage[]): BrowserTab[] { + return pages.map((page) => ({ + targetId: String(page.id), + title: "", + url: page.url ?? "", + type: "page", + })); +} + +function extractStructuredContent(result: ChromeMcpToolResult): Record { + return asRecord(result.structuredContent) ?? {}; +} + +function extractTextContent(result: ChromeMcpToolResult): string[] { + const content = Array.isArray(result.content) ? result.content : []; + return content + .map((entry) => { + const record = asRecord(entry); + return record && typeof record.text === "string" ? record.text : ""; + }) + .filter(Boolean); +} + +function extractTextPages(result: ChromeMcpToolResult): ChromeMcpStructuredPage[] { + const pages: ChromeMcpStructuredPage[] = []; + for (const block of extractTextContent(result)) { + for (const line of block.split(/\r?\n/)) { + const match = line.match(/^\s*(\d+):\s+(.+?)(?:\s+\[(selected)\])?\s*$/i); + if (!match) { + continue; + } + pages.push({ + id: Number.parseInt(match[1] ?? "", 10), + url: match[2]?.trim() || undefined, + selected: Boolean(match[3]), + }); + } + } + return pages; +} + +function extractStructuredPages(result: ChromeMcpToolResult): ChromeMcpStructuredPage[] { + const structured = asPages(extractStructuredContent(result).pages); + return structured.length > 0 ? 
structured : extractTextPages(result); +} + +function extractSnapshot(result: ChromeMcpToolResult): ChromeMcpSnapshotNode { + const structured = extractStructuredContent(result); + const snapshot = asRecord(structured.snapshot); + if (!snapshot) { + throw new Error("Chrome MCP snapshot response was missing structured snapshot data."); + } + return snapshot as unknown as ChromeMcpSnapshotNode; +} + +function extractJsonBlock(text: string): unknown { + const match = text.match(/```json\s*([\s\S]*?)\s*```/i); + const raw = match?.[1]?.trim() || text.trim(); + return raw ? JSON.parse(raw) : null; +} + +function extractMessageText(result: ChromeMcpToolResult): string { + const message = extractStructuredContent(result).message; + if (typeof message === "string" && message.trim()) { + return message; + } + const blocks = extractTextContent(result); + return blocks.find((block) => block.trim()) ?? ""; +} + +function extractToolErrorMessage(result: ChromeMcpToolResult, name: string): string { + const message = extractMessageText(result).trim(); + return message || `Chrome MCP tool "${name}" failed.`; +} + +function extractJsonMessage(result: ChromeMcpToolResult): unknown { + const candidates = [extractMessageText(result), ...extractTextContent(result)].filter((text) => + text.trim(), + ); + let lastError: unknown; + for (const candidate of candidates) { + try { + return extractJsonBlock(candidate); + } catch (err) { + lastError = err; + } + } + if (lastError) { + throw lastError; + } + return null; +} + +async function createRealSession(profileName: string): Promise { + const transport = new StdioClientTransport({ + command: DEFAULT_CHROME_MCP_COMMAND, + args: DEFAULT_CHROME_MCP_ARGS, + stderr: "pipe", + }); + const client = new Client( + { + name: "openclaw-browser", + version: "0.0.0", + }, + {}, + ); + + const ready = (async () => { + try { + await client.connect(transport); + const tools = await client.listTools(); + if (!tools.tools.some((tool) => tool.name === 
"list_pages")) { + throw new Error("Chrome MCP server did not expose the expected navigation tools."); + } + } catch (err) { + await client.close().catch(() => {}); + throw new BrowserProfileUnavailableError( + `Chrome MCP existing-session attach failed for profile "${profileName}". ` + + `Make sure Chrome is running, enable chrome://inspect/#remote-debugging, and approve the connection. ` + + `Details: ${String(err)}`, + ); + } + })(); + + return { + client, + transport, + ready, + }; +} + +async function getSession(profileName: string): Promise { + let session = sessions.get(profileName); + if (session && session.transport.pid === null) { + sessions.delete(profileName); + session = undefined; + } + if (!session) { + let pending = pendingSessions.get(profileName); + if (!pending) { + pending = (async () => { + const created = await (sessionFactory ?? createRealSession)(profileName); + sessions.set(profileName, created); + return created; + })(); + pendingSessions.set(profileName, pending); + } + try { + session = await pending; + } finally { + if (pendingSessions.get(profileName) === pending) { + pendingSessions.delete(profileName); + } + } + } + try { + await session.ready; + return session; + } catch (err) { + const current = sessions.get(profileName); + if (current?.transport === session.transport) { + sessions.delete(profileName); + } + throw err; + } +} + +async function callTool( + profileName: string, + name: string, + args: Record = {}, +): Promise { + const session = await getSession(profileName); + let result: ChromeMcpToolResult; + try { + result = (await session.client.callTool({ + name, + arguments: args, + })) as ChromeMcpToolResult; + } catch (err) { + // Transport/connection error — tear down session so it reconnects on next call + sessions.delete(profileName); + await session.client.close().catch(() => {}); + throw err; + } + // Tool-level errors (element not found, script error, etc.) 
don't indicate a + // broken connection — don't tear down the session for these. + if (result.isError) { + throw new Error(extractToolErrorMessage(result, name)); + } + return result; +} + +async function withTempFile(fn: (filePath: string) => Promise): Promise { + const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-chrome-mcp-")); + const filePath = path.join(dir, randomUUID()); + try { + return await fn(filePath); + } finally { + await fs.rm(dir, { recursive: true, force: true }).catch(() => {}); + } +} + +async function findPageById(profileName: string, pageId: number): Promise { + const pages = await listChromeMcpPages(profileName); + const page = pages.find((entry) => entry.id === pageId); + if (!page) { + throw new BrowserTabNotFoundError(); + } + return page; +} + +export async function ensureChromeMcpAvailable(profileName: string): Promise { + await getSession(profileName); +} + +export function getChromeMcpPid(profileName: string): number | null { + return sessions.get(profileName)?.transport.pid ?? 
null; +} + +export async function closeChromeMcpSession(profileName: string): Promise { + pendingSessions.delete(profileName); + const session = sessions.get(profileName); + if (!session) { + return false; + } + sessions.delete(profileName); + await session.client.close().catch(() => {}); + return true; +} + +export async function stopAllChromeMcpSessions(): Promise { + const names = [...sessions.keys()]; + for (const name of names) { + await closeChromeMcpSession(name).catch(() => {}); + } +} + +export async function listChromeMcpPages(profileName: string): Promise { + const result = await callTool(profileName, "list_pages"); + return extractStructuredPages(result); +} + +export async function listChromeMcpTabs(profileName: string): Promise { + return toBrowserTabs(await listChromeMcpPages(profileName)); +} + +export async function openChromeMcpTab(profileName: string, url: string): Promise { + const result = await callTool(profileName, "new_page", { url }); + const pages = extractStructuredPages(result); + const chosen = pages.find((page) => page.selected) ?? pages.at(-1); + if (!chosen) { + throw new Error("Chrome MCP did not return the created page."); + } + return { + targetId: String(chosen.id), + title: "", + url: chosen.url ?? 
url, + type: "page", + }; +} + +export async function focusChromeMcpTab(profileName: string, targetId: string): Promise { + await callTool(profileName, "select_page", { + pageId: parsePageId(targetId), + bringToFront: true, + }); +} + +export async function closeChromeMcpTab(profileName: string, targetId: string): Promise { + await callTool(profileName, "close_page", { pageId: parsePageId(targetId) }); +} + +export async function navigateChromeMcpPage(params: { + profileName: string; + targetId: string; + url: string; + timeoutMs?: number; +}): Promise<{ url: string }> { + await callTool(params.profileName, "navigate_page", { + pageId: parsePageId(params.targetId), + type: "url", + url: params.url, + ...(typeof params.timeoutMs === "number" ? { timeout: params.timeoutMs } : {}), + }); + const page = await findPageById(params.profileName, parsePageId(params.targetId)); + return { url: page.url ?? params.url }; +} + +export async function takeChromeMcpSnapshot(params: { + profileName: string; + targetId: string; +}): Promise { + const result = await callTool(params.profileName, "take_snapshot", { + pageId: parsePageId(params.targetId), + }); + return extractSnapshot(result); +} + +export async function takeChromeMcpScreenshot(params: { + profileName: string; + targetId: string; + uid?: string; + fullPage?: boolean; + format?: "png" | "jpeg"; +}): Promise { + return await withTempFile(async (filePath) => { + await callTool(params.profileName, "take_screenshot", { + pageId: parsePageId(params.targetId), + filePath, + format: params.format ?? "png", + ...(params.uid ? { uid: params.uid } : {}), + ...(params.fullPage ? 
{ fullPage: true } : {}), + }); + return await fs.readFile(filePath); + }); +} + +export async function clickChromeMcpElement(params: { + profileName: string; + targetId: string; + uid: string; + doubleClick?: boolean; +}): Promise { + await callTool(params.profileName, "click", { + pageId: parsePageId(params.targetId), + uid: params.uid, + ...(params.doubleClick ? { dblClick: true } : {}), + }); +} + +export async function fillChromeMcpElement(params: { + profileName: string; + targetId: string; + uid: string; + value: string; +}): Promise { + await callTool(params.profileName, "fill", { + pageId: parsePageId(params.targetId), + uid: params.uid, + value: params.value, + }); +} + +export async function fillChromeMcpForm(params: { + profileName: string; + targetId: string; + elements: Array<{ uid: string; value: string }>; +}): Promise { + await callTool(params.profileName, "fill_form", { + pageId: parsePageId(params.targetId), + elements: params.elements, + }); +} + +export async function hoverChromeMcpElement(params: { + profileName: string; + targetId: string; + uid: string; +}): Promise { + await callTool(params.profileName, "hover", { + pageId: parsePageId(params.targetId), + uid: params.uid, + }); +} + +export async function dragChromeMcpElement(params: { + profileName: string; + targetId: string; + fromUid: string; + toUid: string; +}): Promise { + await callTool(params.profileName, "drag", { + pageId: parsePageId(params.targetId), + from_uid: params.fromUid, + to_uid: params.toUid, + }); +} + +export async function uploadChromeMcpFile(params: { + profileName: string; + targetId: string; + uid: string; + filePath: string; +}): Promise { + await callTool(params.profileName, "upload_file", { + pageId: parsePageId(params.targetId), + uid: params.uid, + filePath: params.filePath, + }); +} + +export async function pressChromeMcpKey(params: { + profileName: string; + targetId: string; + key: string; +}): Promise { + await callTool(params.profileName, "press_key", { 
+ pageId: parsePageId(params.targetId), + key: params.key, + }); +} + +export async function resizeChromeMcpPage(params: { + profileName: string; + targetId: string; + width: number; + height: number; +}): Promise { + await callTool(params.profileName, "resize_page", { + pageId: parsePageId(params.targetId), + width: params.width, + height: params.height, + }); +} + +export async function handleChromeMcpDialog(params: { + profileName: string; + targetId: string; + action: "accept" | "dismiss"; + promptText?: string; +}): Promise { + await callTool(params.profileName, "handle_dialog", { + pageId: parsePageId(params.targetId), + action: params.action, + ...(params.promptText ? { promptText: params.promptText } : {}), + }); +} + +export async function evaluateChromeMcpScript(params: { + profileName: string; + targetId: string; + fn: string; + args?: string[]; +}): Promise { + const result = await callTool(params.profileName, "evaluate_script", { + pageId: parsePageId(params.targetId), + function: params.fn, + ...(params.args?.length ? { args: params.args } : {}), + }); + return extractJsonMessage(result); +} + +export async function waitForChromeMcpText(params: { + profileName: string; + targetId: string; + text: string[]; + timeoutMs?: number; +}): Promise { + await callTool(params.profileName, "wait_for", { + pageId: parsePageId(params.targetId), + text: params.text, + ...(typeof params.timeoutMs === "number" ? 
{ timeout: params.timeoutMs } : {}), + }); +} + +export function setChromeMcpSessionFactoryForTest(factory: ChromeMcpSessionFactory | null): void { + sessionFactory = factory; +} + +export async function resetChromeMcpSessionsForTest(): Promise { + sessionFactory = null; + pendingSessions.clear(); + await stopAllChromeMcpSessions(); +} diff --git a/src/browser/client-actions-core.ts b/src/browser/client-actions-core.ts index 72e27cd9afa..149ca54fadf 100644 --- a/src/browser/client-actions-core.ts +++ b/src/browser/client-actions-core.ts @@ -15,16 +15,19 @@ export type BrowserFormField = { export type BrowserActRequest = | { kind: "click"; - ref: string; + ref?: string; + selector?: string; targetId?: string; doubleClick?: boolean; button?: string; modifiers?: string[]; + delayMs?: number; timeoutMs?: number; } | { kind: "type"; - ref: string; + ref?: string; + selector?: string; text: string; targetId?: string; submit?: boolean; @@ -32,23 +35,33 @@ export type BrowserActRequest = timeoutMs?: number; } | { kind: "press"; key: string; targetId?: string; delayMs?: number } - | { kind: "hover"; ref: string; targetId?: string; timeoutMs?: number } + | { + kind: "hover"; + ref?: string; + selector?: string; + targetId?: string; + timeoutMs?: number; + } | { kind: "scrollIntoView"; - ref: string; + ref?: string; + selector?: string; targetId?: string; timeoutMs?: number; } | { kind: "drag"; - startRef: string; - endRef: string; + startRef?: string; + startSelector?: string; + endRef?: string; + endSelector?: string; targetId?: string; timeoutMs?: number; } | { kind: "select"; - ref: string; + ref?: string; + selector?: string; values: string[]; targetId?: string; timeoutMs?: number; @@ -73,13 +86,20 @@ export type BrowserActRequest = timeoutMs?: number; } | { kind: "evaluate"; fn: string; ref?: string; targetId?: string; timeoutMs?: number } - | { kind: "close"; targetId?: string }; + | { kind: "close"; targetId?: string } + | { + kind: "batch"; + actions: 
BrowserActRequest[]; + targetId?: string; + stopOnError?: boolean; + }; export type BrowserActResponse = { ok: true; targetId: string; url?: string; result?: unknown; + results?: Array<{ ok: boolean; error?: string }>; }; export type BrowserDownloadPayload = { diff --git a/src/browser/client-fetch.loopback-auth.test.ts b/src/browser/client-fetch.loopback-auth.test.ts index cda6d29d4e3..bf982322027 100644 --- a/src/browser/client-fetch.loopback-auth.test.ts +++ b/src/browser/client-fetch.loopback-auth.test.ts @@ -1,4 +1,9 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import type { BrowserDispatchResponse } from "./routes/dispatcher.js"; + +function okDispatchResponse(): BrowserDispatchResponse { + return { status: 200, body: { ok: true } }; +} const mocks = vi.hoisted(() => ({ loadConfig: vi.fn(() => ({ @@ -9,7 +14,7 @@ const mocks = vi.hoisted(() => ({ }, })), startBrowserControlServiceFromConfig: vi.fn(async () => ({ ok: true })), - dispatch: vi.fn(async () => ({ status: 200, body: { ok: true } })), + dispatch: vi.fn(async (): Promise => okDispatchResponse()), })); vi.mock("../config/config.js", async (importOriginal) => { @@ -45,6 +50,27 @@ function stubJsonFetchOk() { return fetchMock; } +async function expectThrownBrowserFetchError( + request: () => Promise, + params: { + contains: string[]; + omits?: string[]; + }, +) { + const thrown = await request().catch((err: unknown) => err); + expect(thrown).toBeInstanceOf(Error); + if (!(thrown instanceof Error)) { + throw new Error(`Expected Error, got ${String(thrown)}`); + } + for (const snippet of params.contains) { + expect(thrown.message).toContain(snippet); + } + for (const snippet of params.omits ?? 
[]) { + expect(thrown.message).not.toContain(snippet); + } + return thrown; +} + describe("fetchBrowserJson loopback auth", () => { beforeEach(() => { vi.restoreAllMocks(); @@ -57,7 +83,7 @@ describe("fetchBrowserJson loopback auth", () => { }, }); mocks.startBrowserControlServiceFromConfig.mockReset().mockResolvedValue({ ok: true }); - mocks.dispatch.mockReset().mockResolvedValue({ status: 200, body: { ok: true } }); + mocks.dispatch.mockReset().mockResolvedValue(okDispatchResponse()); }); afterEach(() => { @@ -122,15 +148,86 @@ describe("fetchBrowserJson loopback auth", () => { it("preserves dispatcher error context while keeping no-retry hint", async () => { mocks.dispatch.mockRejectedValueOnce(new Error("Chrome CDP handshake timeout")); - const thrown = await fetchBrowserJson<{ ok: boolean }>("/tabs").catch((err: unknown) => err); + await expectThrownBrowserFetchError(() => fetchBrowserJson<{ ok: boolean }>("/tabs"), { + contains: ["Chrome CDP handshake timeout", "Do NOT retry the browser tool"], + omits: ["Can't reach the OpenClaw browser control service"], + }); + }); - expect(thrown).toBeInstanceOf(Error); - if (!(thrown instanceof Error)) { - throw new Error(`Expected Error, got ${String(thrown)}`); - } - expect(thrown.message).toContain("Chrome CDP handshake timeout"); - expect(thrown.message).toContain("Do NOT retry the browser tool"); - expect(thrown.message).not.toContain("Can't reach the OpenClaw browser control service"); + it("surfaces 429 from HTTP URL as rate-limit error with no-retry hint", async () => { + const response = new Response("max concurrent sessions exceeded", { status: 429 }); + const text = vi.spyOn(response, "text"); + const cancel = vi.spyOn(response.body!, "cancel").mockResolvedValue(undefined); + vi.stubGlobal( + "fetch", + vi.fn(async () => response), + ); + + await expectThrownBrowserFetchError( + () => fetchBrowserJson<{ ok: boolean }>("http://127.0.0.1:18888/"), + { + contains: ["Browser service rate limit reached", "Do NOT 
retry the browser tool"], + omits: ["max concurrent sessions exceeded"], + }, + ); + expect(text).not.toHaveBeenCalled(); + expect(cancel).toHaveBeenCalledOnce(); + }); + + it("surfaces 429 from HTTP URL without body detail when empty", async () => { + vi.stubGlobal( + "fetch", + vi.fn(async () => new Response("", { status: 429 })), + ); + + await expectThrownBrowserFetchError( + () => fetchBrowserJson<{ ok: boolean }>("http://127.0.0.1:18888/"), + { + contains: ["rate limit reached", "Do NOT retry the browser tool"], + }, + ); + }); + + it("keeps Browserbase-specific wording for Browserbase 429 responses", async () => { + vi.stubGlobal( + "fetch", + vi.fn(async () => new Response("max concurrent sessions exceeded", { status: 429 })), + ); + + await expectThrownBrowserFetchError( + () => fetchBrowserJson<{ ok: boolean }>("https://connect.browserbase.com/session"), + { + contains: ["Browserbase rate limit reached", "upgrade your plan"], + omits: ["max concurrent sessions exceeded"], + }, + ); + }); + + it("non-429 errors still produce generic messages", async () => { + vi.stubGlobal( + "fetch", + vi.fn(async () => new Response("internal error", { status: 500 })), + ); + + await expectThrownBrowserFetchError( + () => fetchBrowserJson<{ ok: boolean }>("http://127.0.0.1:18888/"), + { + contains: ["internal error"], + omits: ["rate limit"], + }, + ); + }); + + it("surfaces 429 from dispatcher path as rate-limit error", async () => { + mocks.dispatch.mockResolvedValueOnce({ + status: 429, + body: { error: "too many sessions" }, + }); + + await expectThrownBrowserFetchError(() => fetchBrowserJson<{ ok: boolean }>("/tabs"), { + contains: ["Browser service rate limit reached", "Do NOT retry the browser tool"], + omits: ["too many sessions"], + }); }); it("keeps absolute URL failures wrapped as reachability errors", async () => { @@ -141,15 +238,14 @@ describe("fetchBrowserJson loopback auth", () => { }), ); - const thrown = await fetchBrowserJson<{ ok: boolean 
}>("http://example.com/").catch( - (err: unknown) => err, + await expectThrownBrowserFetchError( + () => fetchBrowserJson<{ ok: boolean }>("http://example.com/"), + { + contains: [ + "Can't reach the OpenClaw browser control service", + "Do NOT retry the browser tool", + ], + }, ); - - expect(thrown).toBeInstanceOf(Error); - if (!(thrown instanceof Error)) { - throw new Error(`Expected Error, got ${String(thrown)}`); - } - expect(thrown.message).toContain("Can't reach the OpenClaw browser control service"); - expect(thrown.message).toContain("Do NOT retry the browser tool"); }); }); diff --git a/src/browser/client-fetch.ts b/src/browser/client-fetch.ts index 8f13da4e1aa..e321c5a1e62 100644 --- a/src/browser/client-fetch.ts +++ b/src/browser/client-fetch.ts @@ -102,6 +102,36 @@ const BROWSER_TOOL_MODEL_HINT = "Do NOT retry the browser tool — it will keep failing. " + "Use an alternative approach or inform the user that the browser is currently unavailable."; +const BROWSER_SERVICE_RATE_LIMIT_MESSAGE = + "Browser service rate limit reached. " + + "Wait for the current session to complete, or retry later."; + +const BROWSERBASE_RATE_LIMIT_MESSAGE = + "Browserbase rate limit reached (max concurrent sessions). " + + "Wait for the current session to complete, or upgrade your plan."; + +function isRateLimitStatus(status: number): boolean { + return status === 429; +} + +function isBrowserbaseUrl(url: string): boolean { + if (!isAbsoluteHttp(url)) { + return false; + } + try { + const host = new URL(url).hostname.toLowerCase(); + return host === "browserbase.com" || host.endsWith(".browserbase.com"); + } catch { + return false; + } +} + +export function resolveBrowserRateLimitMessage(url: string): string { + return isBrowserbaseUrl(url) + ? 
BROWSERBASE_RATE_LIMIT_MESSAGE + : BROWSER_SERVICE_RATE_LIMIT_MESSAGE; +} + function resolveBrowserFetchOperatorHint(url: string): string { const isLocal = !isAbsoluteHttp(url); return isLocal @@ -123,6 +153,14 @@ function appendBrowserToolModelHint(message: string): string { return `${message} ${BROWSER_TOOL_MODEL_HINT}`; } +async function discardResponseBody(res: Response): Promise { + try { + await res.body?.cancel(); + } catch { + // Best effort only; we're already returning a stable error message. + } +} + function enhanceDispatcherPathError(url: string, err: unknown): Error { const msg = normalizeErrorMessage(err); const suffix = `${resolveBrowserFetchOperatorHint(url)} ${BROWSER_TOOL_MODEL_HINT}`; @@ -175,6 +213,13 @@ async function fetchHttpJson( try { const res = await fetch(url, { ...init, signal: ctrl.signal }); if (!res.ok) { + if (isRateLimitStatus(res.status)) { + // Do not reflect upstream response text into the error surface (log/agent injection risk) + await discardResponseBody(res); + throw new BrowserServiceError( + `${resolveBrowserRateLimitMessage(url)} ${BROWSER_TOOL_MODEL_HINT}`, + ); + } const text = await res.text().catch(() => ""); throw new BrowserServiceError(text || `HTTP ${res.status}`); } @@ -269,6 +314,12 @@ export async function fetchBrowserJson( }); if (result.status >= 400) { + if (isRateLimitStatus(result.status)) { + // Do not reflect upstream response text into the error surface (log/agent injection risk) + throw new BrowserServiceError( + `${resolveBrowserRateLimitMessage(url)} ${BROWSER_TOOL_MODEL_HINT}`, + ); + } const message = result.body && typeof result.body === "object" && "error" in result.body ? 
String((result.body as { error?: unknown }).error) diff --git a/src/browser/client.test.ts b/src/browser/client.test.ts index a4f95c23007..64d37580e35 100644 --- a/src/browser/client.test.ts +++ b/src/browser/client.test.ts @@ -160,6 +160,7 @@ describe("browser client", () => { targetId: "t1", url: "https://x", result: 1, + results: [{ ok: true }], }), } as unknown as Response; } @@ -258,7 +259,7 @@ describe("browser client", () => { ).resolves.toMatchObject({ ok: true, targetId: "t1" }); await expect( browserAct("http://127.0.0.1:18791", { kind: "click", ref: "1" }), - ).resolves.toMatchObject({ ok: true, targetId: "t1" }); + ).resolves.toMatchObject({ ok: true, targetId: "t1", results: [{ ok: true }] }); await expect( browserArmFileChooser("http://127.0.0.1:18791", { paths: ["/tmp/a.txt"], diff --git a/src/browser/client.ts b/src/browser/client.ts index 953c9efcd11..8e30762bfb1 100644 --- a/src/browser/client.ts +++ b/src/browser/client.ts @@ -1,14 +1,18 @@ import { fetchBrowserJson } from "./client-fetch.js"; +export type BrowserTransport = "cdp" | "chrome-mcp"; + export type BrowserStatus = { enabled: boolean; profile?: string; + driver?: "openclaw" | "extension" | "existing-session"; + transport?: BrowserTransport; running: boolean; cdpReady?: boolean; cdpHttp?: boolean; pid: number | null; - cdpPort: number; - cdpUrl?: string; + cdpPort: number | null; + cdpUrl?: string | null; chosenBrowser: string | null; detectedBrowser?: string | null; detectedExecutablePath?: string | null; @@ -23,9 +27,11 @@ export type BrowserStatus = { export type ProfileStatus = { name: string; - cdpPort: number; - cdpUrl: string; + transport?: BrowserTransport; + cdpPort: number | null; + cdpUrl: string | null; color: string; + driver: "openclaw" | "extension" | "existing-session"; running: boolean; tabCount: number; isDefault: boolean; @@ -153,8 +159,9 @@ export async function browserResetProfile( export type BrowserCreateProfileResult = { ok: true; profile: string; - cdpPort: 
number; - cdpUrl: string; + transport?: BrowserTransport; + cdpPort: number | null; + cdpUrl: string | null; color: string; isRemote: boolean; }; @@ -165,7 +172,7 @@ export async function browserCreateProfile( name: string; color?: string; cdpUrl?: string; - driver?: "openclaw" | "extension"; + driver?: "openclaw" | "extension" | "existing-session"; }, ): Promise { return await fetchBrowserJson( diff --git a/src/browser/config.test.ts b/src/browser/config.test.ts index d2643a6784b..5c16dd54dc6 100644 --- a/src/browser/config.test.ts +++ b/src/browser/config.test.ts @@ -1,6 +1,7 @@ import { describe, expect, it } from "vitest"; import { withEnv } from "../test-utils/env.js"; import { resolveBrowserConfig, resolveProfile, shouldStartLocalBrowserServer } from "./config.js"; +import { getBrowserProfileCapabilities } from "./profile-capabilities.js"; describe("browser config", () => { it("defaults to enabled with loopback defaults and lobster-orange color", () => { @@ -21,10 +22,14 @@ describe("browser config", () => { expect(openclaw?.driver).toBe("openclaw"); expect(openclaw?.cdpPort).toBe(18800); expect(openclaw?.cdpUrl).toBe("http://127.0.0.1:18800"); - const chrome = resolveProfile(resolved, "chrome"); - expect(chrome?.driver).toBe("extension"); - expect(chrome?.cdpPort).toBe(18792); - expect(chrome?.cdpUrl).toBe("http://127.0.0.1:18792"); + const user = resolveProfile(resolved, "user"); + expect(user?.driver).toBe("existing-session"); + expect(user?.cdpPort).toBe(0); + expect(user?.cdpUrl).toBe(""); + const chromeRelay = resolveProfile(resolved, "chrome-relay"); + expect(chromeRelay?.driver).toBe("extension"); + expect(chromeRelay?.cdpPort).toBe(18792); + expect(chromeRelay?.cdpUrl).toBe("http://127.0.0.1:18792"); expect(resolved.remoteCdpTimeoutMs).toBe(1500); expect(resolved.remoteCdpHandshakeTimeoutMs).toBe(3000); }); @@ -33,10 +38,10 @@ describe("browser config", () => { withEnv({ OPENCLAW_GATEWAY_PORT: "19001" }, () => { const resolved = 
resolveBrowserConfig(undefined); expect(resolved.controlPort).toBe(19003); - const chrome = resolveProfile(resolved, "chrome"); - expect(chrome?.driver).toBe("extension"); - expect(chrome?.cdpPort).toBe(19004); - expect(chrome?.cdpUrl).toBe("http://127.0.0.1:19004"); + const chromeRelay = resolveProfile(resolved, "chrome-relay"); + expect(chromeRelay?.driver).toBe("extension"); + expect(chromeRelay?.cdpPort).toBe(19004); + expect(chromeRelay?.cdpUrl).toBe("http://127.0.0.1:19004"); const openclaw = resolveProfile(resolved, "openclaw"); expect(openclaw?.cdpPort).toBe(19012); @@ -48,10 +53,10 @@ describe("browser config", () => { withEnv({ OPENCLAW_GATEWAY_PORT: undefined }, () => { const resolved = resolveBrowserConfig(undefined, { gateway: { port: 19011 } }); expect(resolved.controlPort).toBe(19013); - const chrome = resolveProfile(resolved, "chrome"); - expect(chrome?.driver).toBe("extension"); - expect(chrome?.cdpPort).toBe(19014); - expect(chrome?.cdpUrl).toBe("http://127.0.0.1:19014"); + const chromeRelay = resolveProfile(resolved, "chrome-relay"); + expect(chromeRelay?.driver).toBe("extension"); + expect(chromeRelay?.cdpPort).toBe(19014); + expect(chromeRelay?.cdpUrl).toBe("http://127.0.0.1:19014"); const openclaw = resolveProfile(resolved, "openclaw"); expect(openclaw?.cdpPort).toBe(19022); @@ -204,13 +209,13 @@ describe("browser config", () => { ); }); - it("does not add the built-in chrome extension profile if the derived relay port is already used", () => { + it("does not add the built-in chrome-relay profile if the derived relay port is already used", () => { const resolved = resolveBrowserConfig({ profiles: { openclaw: { cdpPort: 18792, color: "#FF4500" }, }, }); - expect(resolveProfile(resolved, "chrome")).toBe(null); + expect(resolveProfile(resolved, "chrome-relay")).toBe(null); expect(resolved.defaultProfile).toBe("openclaw"); }); @@ -278,6 +283,47 @@ describe("browser config", () => { expect(resolved.ssrfPolicy).toEqual({}); }); + it("resolves 
existing-session profiles without cdpPort or cdpUrl", () => { + const resolved = resolveBrowserConfig({ + profiles: { + "chrome-live": { + driver: "existing-session", + attachOnly: true, + color: "#00AA00", + }, + }, + }); + const profile = resolveProfile(resolved, "chrome-live"); + expect(profile).not.toBeNull(); + expect(profile?.driver).toBe("existing-session"); + expect(profile?.attachOnly).toBe(true); + expect(profile?.cdpPort).toBe(0); + expect(profile?.cdpUrl).toBe(""); + expect(profile?.cdpIsLoopback).toBe(true); + expect(profile?.color).toBe("#00AA00"); + }); + + it("sets usesChromeMcp only for existing-session profiles", () => { + const resolved = resolveBrowserConfig({ + profiles: { + "chrome-live": { driver: "existing-session", attachOnly: true, color: "#00AA00" }, + work: { cdpPort: 18801, color: "#0066CC" }, + }, + }); + + const existingSession = resolveProfile(resolved, "chrome-live")!; + expect(getBrowserProfileCapabilities(existingSession).usesChromeMcp).toBe(true); + + const managed = resolveProfile(resolved, "openclaw")!; + expect(getBrowserProfileCapabilities(managed).usesChromeMcp).toBe(false); + + const extension = resolveProfile(resolved, "chrome-relay")!; + expect(getBrowserProfileCapabilities(extension).usesChromeMcp).toBe(false); + + const work = resolveProfile(resolved, "work")!; + expect(getBrowserProfileCapabilities(work).usesChromeMcp).toBe(false); + }); + describe("default profile preference", () => { it("defaults to openclaw profile when defaultProfile is not configured", () => { const resolved = resolveBrowserConfig({ @@ -312,17 +358,17 @@ describe("browser config", () => { it("explicit defaultProfile config overrides defaults in headless mode", () => { const resolved = resolveBrowserConfig({ headless: true, - defaultProfile: "chrome", + defaultProfile: "chrome-relay", }); - expect(resolved.defaultProfile).toBe("chrome"); + expect(resolved.defaultProfile).toBe("chrome-relay"); }); it("explicit defaultProfile config overrides 
defaults in noSandbox mode", () => { const resolved = resolveBrowserConfig({ noSandbox: true, - defaultProfile: "chrome", + defaultProfile: "chrome-relay", }); - expect(resolved.defaultProfile).toBe("chrome"); + expect(resolved.defaultProfile).toBe("chrome-relay"); }); it("allows custom profile as default even in headless mode", () => { diff --git a/src/browser/config.ts b/src/browser/config.ts index 6d24a07a287..8bcd51d0a68 100644 --- a/src/browser/config.ts +++ b/src/browser/config.ts @@ -46,7 +46,7 @@ export type ResolvedBrowserProfile = { cdpHost: string; cdpIsLoopback: boolean; color: string; - driver: "openclaw" | "extension"; + driver: "openclaw" | "extension" | "existing-session"; attachOnly: boolean; }; @@ -180,17 +180,35 @@ function ensureDefaultProfile( } /** - * Ensure a built-in "chrome" profile exists for the Chrome extension relay. + * Ensure a built-in "user" profile exists for Chrome's existing-session attach flow. + */ +function ensureDefaultUserBrowserProfile( + profiles: Record, +): Record { + const result = { ...profiles }; + if (result.user) { + return result; + } + result.user = { + driver: "existing-session", + attachOnly: true, + color: "#00AA00", + }; + return result; +} + +/** + * Ensure a built-in "chrome-relay" profile exists for the Chrome extension relay. * * Note: this is an OpenClaw browser profile (routing config), not a Chrome user profile. * It points at the local relay CDP endpoint (controlPort + 1). 
*/ -function ensureDefaultChromeExtensionProfile( +function ensureDefaultChromeRelayProfile( profiles: Record, controlPort: number, ): Record { const result = { ...profiles }; - if (result.chrome) { + if (result["chrome-relay"]) { return result; } const relayPort = controlPort + 1; @@ -202,7 +220,7 @@ function ensureDefaultChromeExtensionProfile( if (getUsedPorts(result).has(relayPort)) { return result; } - result.chrome = { + result["chrome-relay"] = { driver: "extension", cdpUrl: `http://127.0.0.1:${relayPort}`, color: "#00AA00", @@ -268,13 +286,15 @@ export function resolveBrowserConfig( const legacyCdpPort = rawCdpUrl ? cdpInfo.port : undefined; const isWsUrl = cdpInfo.parsed.protocol === "ws:" || cdpInfo.parsed.protocol === "wss:"; const legacyCdpUrl = rawCdpUrl && isWsUrl ? cdpInfo.normalized : undefined; - const profiles = ensureDefaultChromeExtensionProfile( - ensureDefaultProfile( - cfg?.profiles, - defaultColor, - legacyCdpPort, - cdpPortRangeStart, - legacyCdpUrl, + const profiles = ensureDefaultChromeRelayProfile( + ensureDefaultUserBrowserProfile( + ensureDefaultProfile( + cfg?.profiles, + defaultColor, + legacyCdpPort, + cdpPortRangeStart, + legacyCdpUrl, + ), ), controlPort, ); @@ -286,7 +306,7 @@ export function resolveBrowserConfig( ? DEFAULT_BROWSER_DEFAULT_PROFILE_NAME : profiles[DEFAULT_OPENCLAW_BROWSER_PROFILE_NAME] ? DEFAULT_OPENCLAW_BROWSER_PROFILE_NAME - : "chrome"); + : "user"); const extraArgs = Array.isArray(cfg?.extraArgs) ? cfg.extraArgs.filter((a): a is string => typeof a === "string" && a.trim().length > 0) @@ -335,7 +355,26 @@ export function resolveProfile( let cdpHost = resolved.cdpHost; let cdpPort = profile.cdpPort ?? 0; let cdpUrl = ""; - const driver = profile.driver === "extension" ? "extension" : "openclaw"; + const driver = + profile.driver === "extension" + ? "extension" + : profile.driver === "existing-session" + ? 
"existing-session" + : "openclaw"; + + if (driver === "existing-session") { + // existing-session uses Chrome MCP auto-connect; no CDP port/URL needed + return { + name: profileName, + cdpPort: 0, + cdpUrl: "", + cdpHost: "", + cdpIsLoopback: true, + color: profile.color, + driver, + attachOnly: true, + }; + } if (rawProfileUrl) { const parsed = parseHttpUrl(rawProfileUrl, `browser.profiles.${profileName}.cdpUrl`); diff --git a/src/browser/profile-capabilities.ts b/src/browser/profile-capabilities.ts index 07a70ba00c4..b736a77d943 100644 --- a/src/browser/profile-capabilities.ts +++ b/src/browser/profile-capabilities.ts @@ -1,10 +1,16 @@ import type { ResolvedBrowserProfile } from "./config.js"; -export type BrowserProfileMode = "local-managed" | "local-extension-relay" | "remote-cdp"; +export type BrowserProfileMode = + | "local-managed" + | "local-extension-relay" + | "local-existing-session" + | "remote-cdp"; export type BrowserProfileCapabilities = { mode: BrowserProfileMode; isRemote: boolean; + /** Profile uses the Chrome DevTools MCP server (existing-session driver). 
*/ + usesChromeMcp: boolean; requiresRelay: boolean; requiresAttachedTab: boolean; usesPersistentPlaywright: boolean; @@ -21,6 +27,7 @@ export function getBrowserProfileCapabilities( return { mode: "local-extension-relay", isRemote: false, + usesChromeMcp: false, requiresRelay: true, requiresAttachedTab: true, usesPersistentPlaywright: false, @@ -31,10 +38,26 @@ export function getBrowserProfileCapabilities( }; } + if (profile.driver === "existing-session") { + return { + mode: "local-existing-session", + isRemote: false, + usesChromeMcp: true, + requiresRelay: false, + requiresAttachedTab: false, + usesPersistentPlaywright: false, + supportsPerTabWs: false, + supportsJsonTabEndpoints: false, + supportsReset: false, + supportsManagedTabLimit: false, + }; + } + if (!profile.cdpIsLoopback) { return { mode: "remote-cdp", isRemote: true, + usesChromeMcp: false, requiresRelay: false, requiresAttachedTab: false, usesPersistentPlaywright: true, @@ -48,6 +71,7 @@ export function getBrowserProfileCapabilities( return { mode: "local-managed", isRemote: false, + usesChromeMcp: false, requiresRelay: false, requiresAttachedTab: false, usesPersistentPlaywright: false, @@ -75,6 +99,9 @@ export function resolveDefaultSnapshotFormat(params: { if (capabilities.mode === "local-extension-relay") { return "aria"; } + if (capabilities.mode === "local-existing-session") { + return "ai"; + } return params.hasPlaywright ? 
"ai" : "aria"; } diff --git a/src/browser/profiles-service.test.ts b/src/browser/profiles-service.test.ts index 3dc714d33f3..13bbdf27c49 100644 --- a/src/browser/profiles-service.test.ts +++ b/src/browser/profiles-service.test.ts @@ -1,6 +1,6 @@ import fs from "node:fs"; import path from "node:path"; -import { describe, expect, it, vi } from "vitest"; +import { beforeEach, describe, expect, it, vi } from "vitest"; import { resolveBrowserConfig } from "./config.js"; import { createBrowserProfilesService } from "./profiles-service.js"; import type { BrowserRouteContext, BrowserServerState } from "./server-context.js"; @@ -57,6 +57,10 @@ async function createWorkProfileWithConfig(params: { } describe("BrowserProfilesService", () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + it("allocates next local port for new profiles", async () => { const { result, state } = await createWorkProfileWithConfig({ resolved: resolveBrowserConfig({}), @@ -163,6 +167,56 @@ describe("BrowserProfilesService", () => { ).rejects.toThrow(/requires an explicit loopback cdpUrl/i); }); + it("creates existing-session profiles as attach-only local entries", async () => { + const resolved = resolveBrowserConfig({}); + const { ctx, state } = createCtx(resolved); + vi.mocked(loadConfig).mockReturnValue({ browser: { profiles: {} } }); + + const service = createBrowserProfilesService(ctx); + const result = await service.createProfile({ + name: "chrome-live", + driver: "existing-session", + }); + + expect(result.transport).toBe("chrome-mcp"); + expect(result.cdpPort).toBeNull(); + expect(result.cdpUrl).toBeNull(); + expect(result.isRemote).toBe(false); + expect(state.resolved.profiles["chrome-live"]).toEqual({ + driver: "existing-session", + attachOnly: true, + color: expect.any(String), + }); + expect(writeConfigFile).toHaveBeenCalledWith( + expect.objectContaining({ + browser: expect.objectContaining({ + profiles: expect.objectContaining({ + "chrome-live": expect.objectContaining({ + driver: 
"existing-session", + attachOnly: true, + }), + }), + }), + }), + ); + }); + + it("rejects driver=existing-session when cdpUrl is provided", async () => { + const resolved = resolveBrowserConfig({}); + const { ctx } = createCtx(resolved); + vi.mocked(loadConfig).mockReturnValue({ browser: { profiles: {} } }); + + const service = createBrowserProfilesService(ctx); + + await expect( + service.createProfile({ + name: "chrome-live", + driver: "existing-session", + cdpUrl: "http://127.0.0.1:9222", + }), + ).rejects.toThrow(/does not accept cdpUrl/i); + }); + it("deletes remote profiles without stopping or removing local data", async () => { const resolved = resolveBrowserConfig({ profiles: { @@ -218,4 +272,40 @@ describe("BrowserProfilesService", () => { expect(result.deleted).toBe(true); expect(movePathToTrash).toHaveBeenCalledWith(path.dirname(userDataDir)); }); + + it("deletes existing-session profiles without touching local browser data", async () => { + const resolved = resolveBrowserConfig({ + profiles: { + "chrome-live": { + cdpPort: 18801, + color: "#0066CC", + driver: "existing-session", + attachOnly: true, + }, + }, + }); + const { ctx } = createCtx(resolved); + + vi.mocked(loadConfig).mockReturnValue({ + browser: { + defaultProfile: "openclaw", + profiles: { + openclaw: { cdpPort: 18800, color: "#FF4500" }, + "chrome-live": { + cdpPort: 18801, + color: "#0066CC", + driver: "existing-session", + attachOnly: true, + }, + }, + }, + }); + + const service = createBrowserProfilesService(ctx); + const result = await service.deleteProfile("chrome-live"); + + expect(result.deleted).toBe(false); + expect(ctx.forProfile).not.toHaveBeenCalled(); + expect(movePathToTrash).not.toHaveBeenCalled(); + }); }); diff --git a/src/browser/profiles-service.ts b/src/browser/profiles-service.ts index 962c6408522..86321006e98 100644 --- a/src/browser/profiles-service.ts +++ b/src/browser/profiles-service.ts @@ -6,13 +6,13 @@ import { deriveDefaultBrowserCdpPortRange } from 
"../config/port-defaults.js"; import { isLoopbackHost } from "../gateway/net.js"; import { resolveOpenClawUserDataDir } from "./chrome.js"; import { parseHttpUrl, resolveProfile } from "./config.js"; -import { DEFAULT_BROWSER_DEFAULT_PROFILE_NAME } from "./constants.js"; import { BrowserConflictError, BrowserProfileNotFoundError, BrowserResourceExhaustedError, BrowserValidationError, } from "./errors.js"; +import { getBrowserProfileCapabilities } from "./profile-capabilities.js"; import { allocateCdpPort, allocateColor, @@ -27,14 +27,15 @@ export type CreateProfileParams = { name: string; color?: string; cdpUrl?: string; - driver?: "openclaw" | "extension"; + driver?: "openclaw" | "extension" | "existing-session"; }; export type CreateProfileResult = { ok: true; profile: string; - cdpPort: number; - cdpUrl: string; + transport: "cdp" | "chrome-mcp"; + cdpPort: number | null; + cdpUrl: string | null; color: string; isRemote: boolean; }; @@ -79,7 +80,12 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { const createProfile = async (params: CreateProfileParams): Promise => { const name = params.name.trim(); const rawCdpUrl = params.cdpUrl?.trim() || undefined; - const driver = params.driver === "extension" ? "extension" : undefined; + const driver = + params.driver === "extension" + ? "extension" + : params.driver === "existing-session" + ? 
"existing-session" + : undefined; if (!isValidProfileName(name)) { throw new BrowserValidationError( @@ -105,7 +111,12 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { let profileConfig: BrowserProfileConfig; if (rawCdpUrl) { - const parsed = parseHttpUrl(rawCdpUrl, "browser.profiles.cdpUrl"); + let parsed: ReturnType; + try { + parsed = parseHttpUrl(rawCdpUrl, "browser.profiles.cdpUrl"); + } catch (err) { + throw new BrowserValidationError(String(err)); + } if (driver === "extension") { if (!isLoopbackHost(parsed.parsed.hostname)) { throw new BrowserValidationError( @@ -118,6 +129,11 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { ); } } + if (driver === "existing-session") { + throw new BrowserValidationError( + "driver=existing-session does not accept cdpUrl; it attaches via the Chrome MCP auto-connect flow", + ); + } profileConfig = { cdpUrl: parsed.normalized, ...(driver ? { driver } : {}), @@ -127,17 +143,26 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { if (driver === "extension") { throw new BrowserValidationError("driver=extension requires an explicit loopback cdpUrl"); } - const usedPorts = getUsedPorts(resolvedProfiles); - const range = cdpPortRange(state.resolved); - const cdpPort = allocateCdpPort(usedPorts, range); - if (cdpPort === null) { - throw new BrowserResourceExhaustedError("no available CDP ports in range"); + if (driver === "existing-session") { + // existing-session uses Chrome MCP auto-connect; no CDP port needed + profileConfig = { + driver, + attachOnly: true, + color: profileColor, + }; + } else { + const usedPorts = getUsedPorts(resolvedProfiles); + const range = cdpPortRange(state.resolved); + const cdpPort = allocateCdpPort(usedPorts, range); + if (cdpPort === null) { + throw new BrowserResourceExhaustedError("no available CDP ports in range"); + } + profileConfig = { + cdpPort, + ...(driver ? 
{ driver } : {}), + color: profileColor, + }; } - profileConfig = { - cdpPort, - ...(driver ? { driver } : {}), - color: profileColor, - }; } const nextConfig: OpenClawConfig = { @@ -158,12 +183,14 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { if (!resolved) { throw new BrowserProfileNotFoundError(`profile "${name}" not found after creation`); } + const capabilities = getBrowserProfileCapabilities(resolved); return { ok: true, profile: name, - cdpPort: resolved.cdpPort, - cdpUrl: resolved.cdpUrl, + transport: capabilities.usesChromeMcp ? "chrome-mcp" : "cdp", + cdpPort: capabilities.usesChromeMcp ? null : resolved.cdpPort, + cdpUrl: capabilities.usesChromeMcp ? null : resolved.cdpUrl, color: resolved.color, isRemote: !resolved.cdpIsLoopback, }; @@ -178,24 +205,23 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { throw new BrowserValidationError("invalid profile name"); } + const state = ctx.state(); const cfg = loadConfig(); const profiles = cfg.browser?.profiles ?? {}; - if (!(name in profiles)) { - throw new BrowserProfileNotFoundError(`profile "${name}" not found`); - } - - const defaultProfile = cfg.browser?.defaultProfile ?? DEFAULT_BROWSER_DEFAULT_PROFILE_NAME; + const defaultProfile = cfg.browser?.defaultProfile ?? 
state.resolved.defaultProfile; if (name === defaultProfile) { throw new BrowserValidationError( `cannot delete the default profile "${name}"; change browser.defaultProfile first`, ); } + if (!(name in profiles)) { + throw new BrowserProfileNotFoundError(`profile "${name}" not found`); + } let deleted = false; - const state = ctx.state(); const resolved = resolveProfile(state.resolved, name); - if (resolved?.cdpIsLoopback) { + if (resolved?.cdpIsLoopback && resolved.driver === "openclaw") { try { await ctx.forProfile(name).stopRunningBrowser(); } catch { diff --git a/src/browser/proxy-files.test.ts b/src/browser/proxy-files.test.ts new file mode 100644 index 00000000000..1d7ea9566bb --- /dev/null +++ b/src/browser/proxy-files.test.ts @@ -0,0 +1,54 @@ +import fs from "node:fs/promises"; +import path from "node:path"; +import { afterEach, beforeEach, describe, expect, it } from "vitest"; +import { MEDIA_MAX_BYTES } from "../media/store.js"; +import { createTempHomeEnv, type TempHomeEnv } from "../test-utils/temp-home.js"; +import { persistBrowserProxyFiles } from "./proxy-files.js"; + +describe("persistBrowserProxyFiles", () => { + let tempHome: TempHomeEnv; + + beforeEach(async () => { + tempHome = await createTempHomeEnv("openclaw-browser-proxy-files-"); + }); + + afterEach(async () => { + await tempHome.restore(); + }); + + it("persists browser proxy files under the shared media store", async () => { + const sourcePath = "/tmp/proxy-file.txt"; + const mapping = await persistBrowserProxyFiles([ + { + path: sourcePath, + base64: Buffer.from("hello from browser proxy").toString("base64"), + mimeType: "text/plain", + }, + ]); + + const savedPath = mapping.get(sourcePath); + expect(typeof savedPath).toBe("string"); + expect(path.normalize(savedPath ?? "")).toContain( + `${path.sep}.openclaw${path.sep}media${path.sep}browser${path.sep}`, + ); + await expect(fs.readFile(savedPath ?? 
"", "utf8")).resolves.toBe("hello from browser proxy"); + }); + + it("rejects browser proxy files that exceed the shared media size limit", async () => { + const oversized = Buffer.alloc(MEDIA_MAX_BYTES + 1, 0x41); + + await expect( + persistBrowserProxyFiles([ + { + path: "/tmp/oversized.bin", + base64: oversized.toString("base64"), + mimeType: "application/octet-stream", + }, + ]), + ).rejects.toThrow("Media exceeds 5MB limit"); + + await expect( + fs.stat(path.join(tempHome.home, ".openclaw", "media", "browser")), + ).rejects.toThrow(); + }); +}); diff --git a/src/browser/proxy-files.ts b/src/browser/proxy-files.ts index b18820a4594..1d39d71a09e 100644 --- a/src/browser/proxy-files.ts +++ b/src/browser/proxy-files.ts @@ -13,7 +13,7 @@ export async function persistBrowserProxyFiles(files: BrowserProxyFile[] | undef const mapping = new Map(); for (const file of files) { const buffer = Buffer.from(file.base64, "base64"); - const saved = await saveMediaBuffer(buffer, file.mimeType, "browser", buffer.byteLength); + const saved = await saveMediaBuffer(buffer, file.mimeType, "browser"); mapping.set(file.path, saved.path); } return mapping; diff --git a/src/browser/pw-ai.ts b/src/browser/pw-ai.ts index 6da8b410c83..f8d538b5394 100644 --- a/src/browser/pw-ai.ts +++ b/src/browser/pw-ai.ts @@ -19,6 +19,7 @@ export { export { armDialogViaPlaywright, armFileUploadViaPlaywright, + batchViaPlaywright, clickViaPlaywright, closePageViaPlaywright, cookiesClearViaPlaywright, diff --git a/src/browser/pw-role-snapshot.ts b/src/browser/pw-role-snapshot.ts index 7a0b0ae70fe..312abcf872f 100644 --- a/src/browser/pw-role-snapshot.ts +++ b/src/browser/pw-role-snapshot.ts @@ -1,3 +1,5 @@ +import { CONTENT_ROLES, INTERACTIVE_ROLES, STRUCTURAL_ROLES } from "./snapshot-roles.js"; + export type RoleRef = { role: string; name?: string; @@ -23,60 +25,6 @@ export type RoleSnapshotOptions = { compact?: boolean; }; -const INTERACTIVE_ROLES = new Set([ - "button", - "link", - "textbox", - 
"checkbox", - "radio", - "combobox", - "listbox", - "menuitem", - "menuitemcheckbox", - "menuitemradio", - "option", - "searchbox", - "slider", - "spinbutton", - "switch", - "tab", - "treeitem", -]); - -const CONTENT_ROLES = new Set([ - "heading", - "cell", - "gridcell", - "columnheader", - "rowheader", - "listitem", - "article", - "region", - "main", - "navigation", -]); - -const STRUCTURAL_ROLES = new Set([ - "generic", - "group", - "list", - "table", - "row", - "rowgroup", - "grid", - "treegrid", - "menu", - "menubar", - "toolbar", - "tablist", - "tree", - "directory", - "document", - "application", - "presentation", - "none", -]); - export function getRoleSnapshotStats(snapshot: string, refs: RoleRefMap): RoleSnapshotStats { const interactive = Object.values(refs).filter((r) => INTERACTIVE_ROLES.has(r.role)).length; return { diff --git a/src/browser/pw-session.get-page-for-targetid.extension-fallback.test.ts b/src/browser/pw-session.get-page-for-targetid.extension-fallback.test.ts index 43f1a6c7e09..8f64b2bf575 100644 --- a/src/browser/pw-session.get-page-for-targetid.extension-fallback.test.ts +++ b/src/browser/pw-session.get-page-for-targetid.extension-fallback.test.ts @@ -12,40 +12,49 @@ afterEach(async () => { await closePlaywrightBrowserConnection().catch(() => {}); }); +function createExtensionFallbackBrowserHarness(options?: { + urls?: string[]; + newCDPSessionError?: string; +}) { + const pageOn = vi.fn(); + const contextOn = vi.fn(); + const browserOn = vi.fn(); + const browserClose = vi.fn(async () => {}); + const newCDPSession = vi.fn(async () => { + throw new Error(options?.newCDPSessionError ?? "Not allowed"); + }); + + const context = { + pages: () => [], + on: contextOn, + newCDPSession, + } as unknown as import("playwright-core").BrowserContext; + + const pages = (options?.urls ?? [undefined]).map( + (url) => + ({ + on: pageOn, + context: () => context, + ...(url ? 
{ url: () => url } : {}), + }) as unknown as import("playwright-core").Page, + ); + (context as unknown as { pages: () => unknown[] }).pages = () => pages; + + const browser = { + contexts: () => [context], + on: browserOn, + close: browserClose, + } as unknown as import("playwright-core").Browser; + + connectOverCdpSpy.mockResolvedValue(browser); + getChromeWebSocketUrlSpy.mockResolvedValue(null); + return { browserClose, newCDPSession, pages }; +} + describe("pw-session getPageForTargetId", () => { it("falls back to the only page when CDP session attachment is blocked (extension relays)", async () => { - connectOverCdpSpy.mockClear(); - getChromeWebSocketUrlSpy.mockClear(); - - const pageOn = vi.fn(); - const contextOn = vi.fn(); - const browserOn = vi.fn(); - const browserClose = vi.fn(async () => {}); - - const context = { - pages: () => [], - on: contextOn, - newCDPSession: vi.fn(async () => { - throw new Error("Not allowed"); - }), - } as unknown as import("playwright-core").BrowserContext; - - const page = { - on: pageOn, - context: () => context, - } as unknown as import("playwright-core").Page; - - // Fill pages() after page exists. 
- (context as unknown as { pages: () => unknown[] }).pages = () => [page]; - - const browser = { - contexts: () => [context], - on: browserOn, - close: browserClose, - } as unknown as import("playwright-core").Browser; - - connectOverCdpSpy.mockResolvedValue(browser); - getChromeWebSocketUrlSpy.mockResolvedValue(null); + const { browserClose, pages } = createExtensionFallbackBrowserHarness(); + const [page] = pages; const resolved = await getPageForTargetId({ cdpUrl: "http://127.0.0.1:18792", @@ -58,40 +67,9 @@ describe("pw-session getPageForTargetId", () => { }); it("uses the shared HTTP-base normalization when falling back to /json/list for direct WebSocket CDP URLs", async () => { - const pageOn = vi.fn(); - const contextOn = vi.fn(); - const browserOn = vi.fn(); - const browserClose = vi.fn(async () => {}); - - const context = { - pages: () => [], - on: contextOn, - newCDPSession: vi.fn(async () => { - throw new Error("Not allowed"); - }), - } as unknown as import("playwright-core").BrowserContext; - - const pageA = { - on: pageOn, - context: () => context, - url: () => "https://alpha.example", - } as unknown as import("playwright-core").Page; - const pageB = { - on: pageOn, - context: () => context, - url: () => "https://beta.example", - } as unknown as import("playwright-core").Page; - - (context as unknown as { pages: () => unknown[] }).pages = () => [pageA, pageB]; - - const browser = { - contexts: () => [context], - on: browserOn, - close: browserClose, - } as unknown as import("playwright-core").Browser; - - connectOverCdpSpy.mockResolvedValue(browser); - getChromeWebSocketUrlSpy.mockResolvedValue(null); + const [, pageB] = createExtensionFallbackBrowserHarness({ + urls: ["https://alpha.example", "https://beta.example"], + }).pages; const fetchSpy = vi.spyOn(globalThis, "fetch").mockResolvedValue({ ok: true, @@ -117,41 +95,11 @@ describe("pw-session getPageForTargetId", () => { }); it("resolves extension-relay pages from /json/list without probing page 
CDP sessions first", async () => { - const pageOn = vi.fn(); - const contextOn = vi.fn(); - const browserOn = vi.fn(); - const browserClose = vi.fn(async () => {}); - const newCDPSession = vi.fn(async () => { - throw new Error("Target.attachToBrowserTarget: Not allowed"); + const { newCDPSession, pages } = createExtensionFallbackBrowserHarness({ + urls: ["https://alpha.example", "https://beta.example"], + newCDPSessionError: "Target.attachToBrowserTarget: Not allowed", }); - - const context = { - pages: () => [], - on: contextOn, - newCDPSession, - } as unknown as import("playwright-core").BrowserContext; - - const pageA = { - on: pageOn, - context: () => context, - url: () => "https://alpha.example", - } as unknown as import("playwright-core").Page; - const pageB = { - on: pageOn, - context: () => context, - url: () => "https://beta.example", - } as unknown as import("playwright-core").Page; - - (context as unknown as { pages: () => unknown[] }).pages = () => [pageA, pageB]; - - const browser = { - contexts: () => [context], - on: browserOn, - close: browserClose, - } as unknown as import("playwright-core").Browser; - - connectOverCdpSpy.mockResolvedValue(browser); - getChromeWebSocketUrlSpy.mockResolvedValue(null); + const [, pageB] = pages; const fetchSpy = vi.spyOn(globalThis, "fetch"); fetchSpy diff --git a/src/browser/pw-session.ts b/src/browser/pw-session.ts index a7103c1174c..2e63d190dea 100644 --- a/src/browser/pw-session.ts +++ b/src/browser/pw-session.ts @@ -365,6 +365,11 @@ async function connectBrowser(cdpUrl: string): Promise { return connected; } catch (err) { lastErr = err; + // Don't retry rate-limit errors; retrying worsens the 429. + const errMsg = err instanceof Error ? 
err.message : String(err); + if (errMsg.includes("rate limit")) { + break; + } const delay = 250 + attempt * 250; await new Promise((r) => setTimeout(r, delay)); } diff --git a/src/browser/pw-tools-core.interactions.batch.test.ts b/src/browser/pw-tools-core.interactions.batch.test.ts new file mode 100644 index 00000000000..2801ebe8190 --- /dev/null +++ b/src/browser/pw-tools-core.interactions.batch.test.ts @@ -0,0 +1,85 @@ +import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; + +let page: { evaluate: ReturnType } | null = null; + +const getPageForTargetId = vi.fn(async () => { + if (!page) { + throw new Error("test: page not set"); + } + return page; +}); +const ensurePageState = vi.fn(() => {}); +const forceDisconnectPlaywrightForTarget = vi.fn(async () => {}); +const refLocator = vi.fn(() => { + throw new Error("test: refLocator should not be called"); +}); +const restoreRoleRefsForTarget = vi.fn(() => {}); + +const closePageViaPlaywright = vi.fn(async () => {}); +const resizeViewportViaPlaywright = vi.fn(async () => {}); + +vi.mock("./pw-session.js", () => ({ + ensurePageState, + forceDisconnectPlaywrightForTarget, + getPageForTargetId, + refLocator, + restoreRoleRefsForTarget, +})); + +vi.mock("./pw-tools-core.snapshot.js", () => ({ + closePageViaPlaywright, + resizeViewportViaPlaywright, +})); + +let batchViaPlaywright: typeof import("./pw-tools-core.interactions.js").batchViaPlaywright; + +describe("batchViaPlaywright", () => { + beforeAll(async () => { + ({ batchViaPlaywright } = await import("./pw-tools-core.interactions.js")); + }); + + beforeEach(() => { + vi.clearAllMocks(); + page = { + evaluate: vi.fn(async () => "ok"), + }; + }); + + it("propagates evaluate timeouts through batched execution", async () => { + const result = await batchViaPlaywright({ + cdpUrl: "http://127.0.0.1:9222", + targetId: "tab-1", + evaluateEnabled: true, + actions: [{ kind: "evaluate", fn: "() => 1", timeoutMs: 5000 }], + }); + + expect(result).toEqual({ 
results: [{ ok: true }] }); + expect(page?.evaluate).toHaveBeenCalledWith( + expect.any(Function), + expect.objectContaining({ + fnBody: "() => 1", + timeoutMs: 4500, + }), + ); + }); + + it("supports resize and close inside a batch", async () => { + const result = await batchViaPlaywright({ + cdpUrl: "http://127.0.0.1:9222", + targetId: "tab-1", + actions: [{ kind: "resize", width: 800, height: 600 }, { kind: "close" }], + }); + + expect(result).toEqual({ results: [{ ok: true }, { ok: true }] }); + expect(resizeViewportViaPlaywright).toHaveBeenCalledWith({ + cdpUrl: "http://127.0.0.1:9222", + targetId: "tab-1", + width: 800, + height: 600, + }); + expect(closePageViaPlaywright).toHaveBeenCalledWith({ + cdpUrl: "http://127.0.0.1:9222", + targetId: "tab-1", + }); + }); +}); diff --git a/src/browser/pw-tools-core.interactions.ts b/src/browser/pw-tools-core.interactions.ts index 852b11bb6dc..01abc5338f0 100644 --- a/src/browser/pw-tools-core.interactions.ts +++ b/src/browser/pw-tools-core.interactions.ts @@ -1,4 +1,4 @@ -import type { BrowserFormField } from "./client-actions-core.js"; +import type { BrowserActRequest, BrowserFormField } from "./client-actions-core.js"; import { DEFAULT_FILL_FIELD_TYPE } from "./form-fields.js"; import { DEFAULT_UPLOAD_DIR, resolveStrictExistingPathsWithinRoot } from "./paths.js"; import { @@ -8,13 +8,34 @@ import { refLocator, restoreRoleRefsForTarget, } from "./pw-session.js"; -import { normalizeTimeoutMs, requireRef, toAIFriendlyError } from "./pw-tools-core.shared.js"; +import { + normalizeTimeoutMs, + requireRef, + requireRefOrSelector, + toAIFriendlyError, +} from "./pw-tools-core.shared.js"; +import { closePageViaPlaywright, resizeViewportViaPlaywright } from "./pw-tools-core.snapshot.js"; type TargetOpts = { cdpUrl: string; targetId?: string; }; +const MAX_CLICK_DELAY_MS = 5_000; +const MAX_WAIT_TIME_MS = 30_000; +const MAX_BATCH_ACTIONS = 100; + +function resolveBoundedDelayMs(value: number | undefined, label: string, maxMs: 
number): number { + const normalized = Math.floor(value ?? 0); + if (!Number.isFinite(normalized) || normalized < 0) { + throw new Error(`${label} must be >= 0`); + } + if (normalized > maxMs) { + throw new Error(`${label} exceeds maximum of ${maxMs}ms`); + } + return normalized; +} + async function getRestoredPageForTarget(opts: TargetOpts) { const page = await getPageForTargetId(opts); ensurePageState(page); @@ -59,17 +80,27 @@ export async function highlightViaPlaywright(opts: { export async function clickViaPlaywright(opts: { cdpUrl: string; targetId?: string; - ref: string; + ref?: string; + selector?: string; doubleClick?: boolean; button?: "left" | "right" | "middle"; modifiers?: Array<"Alt" | "Control" | "ControlOrMeta" | "Meta" | "Shift">; + delayMs?: number; timeoutMs?: number; }): Promise { + const resolved = requireRefOrSelector(opts.ref, opts.selector); const page = await getRestoredPageForTarget(opts); - const ref = requireRef(opts.ref); - const locator = refLocator(page, ref); + const label = resolved.ref ?? resolved.selector!; + const locator = resolved.ref + ? 
refLocator(page, requireRef(resolved.ref)) + : page.locator(resolved.selector!); const timeout = resolveInteractionTimeoutMs(opts.timeoutMs); try { + const delayMs = resolveBoundedDelayMs(opts.delayMs, "click delayMs", MAX_CLICK_DELAY_MS); + if (delayMs > 0) { + await locator.hover({ timeout }); + await new Promise((resolve) => setTimeout(resolve, delayMs)); + } if (opts.doubleClick) { await locator.dblclick({ timeout, @@ -84,67 +115,84 @@ export async function clickViaPlaywright(opts: { }); } } catch (err) { - throw toAIFriendlyError(err, ref); + throw toAIFriendlyError(err, label); } } export async function hoverViaPlaywright(opts: { cdpUrl: string; targetId?: string; - ref: string; + ref?: string; + selector?: string; timeoutMs?: number; }): Promise { - const ref = requireRef(opts.ref); + const resolved = requireRefOrSelector(opts.ref, opts.selector); const page = await getRestoredPageForTarget(opts); + const label = resolved.ref ?? resolved.selector!; + const locator = resolved.ref + ? refLocator(page, requireRef(resolved.ref)) + : page.locator(resolved.selector!); try { - await refLocator(page, ref).hover({ + await locator.hover({ timeout: resolveInteractionTimeoutMs(opts.timeoutMs), }); } catch (err) { - throw toAIFriendlyError(err, ref); + throw toAIFriendlyError(err, label); } } export async function dragViaPlaywright(opts: { cdpUrl: string; targetId?: string; - startRef: string; - endRef: string; + startRef?: string; + startSelector?: string; + endRef?: string; + endSelector?: string; timeoutMs?: number; }): Promise { - const startRef = requireRef(opts.startRef); - const endRef = requireRef(opts.endRef); - if (!startRef || !endRef) { - throw new Error("startRef and endRef are required"); - } + const resolvedStart = requireRefOrSelector(opts.startRef, opts.startSelector); + const resolvedEnd = requireRefOrSelector(opts.endRef, opts.endSelector); const page = await getRestoredPageForTarget(opts); + const startLocator = resolvedStart.ref + ? 
refLocator(page, requireRef(resolvedStart.ref)) + : page.locator(resolvedStart.selector!); + const endLocator = resolvedEnd.ref + ? refLocator(page, requireRef(resolvedEnd.ref)) + : page.locator(resolvedEnd.selector!); + const startLabel = resolvedStart.ref ?? resolvedStart.selector!; + const endLabel = resolvedEnd.ref ?? resolvedEnd.selector!; try { - await refLocator(page, startRef).dragTo(refLocator(page, endRef), { + await startLocator.dragTo(endLocator, { timeout: resolveInteractionTimeoutMs(opts.timeoutMs), }); } catch (err) { - throw toAIFriendlyError(err, `${startRef} -> ${endRef}`); + throw toAIFriendlyError(err, `${startLabel} -> ${endLabel}`); } } export async function selectOptionViaPlaywright(opts: { cdpUrl: string; targetId?: string; - ref: string; + ref?: string; + selector?: string; values: string[]; timeoutMs?: number; }): Promise { - const ref = requireRef(opts.ref); + const resolved = requireRefOrSelector(opts.ref, opts.selector); if (!opts.values?.length) { throw new Error("values are required"); } const page = await getRestoredPageForTarget(opts); + const label = resolved.ref ?? resolved.selector!; + const locator = resolved.ref + ? refLocator(page, requireRef(resolved.ref)) + : page.locator(resolved.selector!); try { - await refLocator(page, ref).selectOption(opts.values, { + await locator.selectOption(opts.values, { timeout: resolveInteractionTimeoutMs(opts.timeoutMs), }); } catch (err) { - throw toAIFriendlyError(err, ref); + throw toAIFriendlyError(err, label); } } @@ -168,16 +216,20 @@ export async function pressKeyViaPlaywright(opts: { export async function typeViaPlaywright(opts: { cdpUrl: string; targetId?: string; - ref: string; + ref?: string; + selector?: string; text: string; submit?: boolean; slowly?: boolean; timeoutMs?: number; }): Promise { + const resolved = requireRefOrSelector(opts.ref, opts.selector); const text = String(opts.text ?? 
""); const page = await getRestoredPageForTarget(opts); - const ref = requireRef(opts.ref); - const locator = refLocator(page, ref); + const label = resolved.ref ?? resolved.selector!; + const locator = resolved.ref + ? refLocator(page, requireRef(resolved.ref)) + : page.locator(resolved.selector!); const timeout = resolveInteractionTimeoutMs(opts.timeoutMs); try { if (opts.slowly) { @@ -190,7 +242,7 @@ export async function typeViaPlaywright(opts: { await locator.press("Enter", { timeout }); } } catch (err) { - throw toAIFriendlyError(err, ref); + throw toAIFriendlyError(err, label); } } @@ -367,18 +419,22 @@ export async function evaluateViaPlaywright(opts: { export async function scrollIntoViewViaPlaywright(opts: { cdpUrl: string; targetId?: string; - ref: string; + ref?: string; + selector?: string; timeoutMs?: number; }): Promise { + const resolved = requireRefOrSelector(opts.ref, opts.selector); const page = await getRestoredPageForTarget(opts); const timeout = normalizeTimeoutMs(opts.timeoutMs, 20_000); - const ref = requireRef(opts.ref); - const locator = refLocator(page, ref); + const label = resolved.ref ?? resolved.selector!; + const locator = resolved.ref + ? refLocator(page, requireRef(resolved.ref)) + : page.locator(resolved.selector!); try { await locator.scrollIntoViewIfNeeded({ timeout }); } catch (err) { - throw toAIFriendlyError(err, ref); + throw toAIFriendlyError(err, label); } } @@ -399,7 +455,7 @@ export async function waitForViaPlaywright(opts: { const timeout = normalizeTimeoutMs(opts.timeoutMs, 20_000); if (typeof opts.timeMs === "number" && Number.isFinite(opts.timeMs)) { - await page.waitForTimeout(Math.max(0, opts.timeMs)); + await page.waitForTimeout(resolveBoundedDelayMs(opts.timeMs, "wait timeMs", MAX_WAIT_TIME_MS)); } if (opts.text) { await page.getByText(opts.text).first().waitFor({ @@ -648,3 +704,188 @@ export async function setInputFilesViaPlaywright(opts: { // Best-effort for sites that don't react to setInputFiles alone. 
} } + +const MAX_BATCH_DEPTH = 5; + +async function executeSingleAction( + action: BrowserActRequest, + cdpUrl: string, + targetId?: string, + evaluateEnabled?: boolean, + depth = 0, +): Promise { + if (depth > MAX_BATCH_DEPTH) { + throw new Error(`Batch nesting depth exceeds maximum of ${MAX_BATCH_DEPTH}`); + } + const effectiveTargetId = action.targetId ?? targetId; + switch (action.kind) { + case "click": + await clickViaPlaywright({ + cdpUrl, + targetId: effectiveTargetId, + ref: action.ref, + selector: action.selector, + doubleClick: action.doubleClick, + button: action.button as "left" | "right" | "middle" | undefined, + modifiers: action.modifiers as Array< + "Alt" | "Control" | "ControlOrMeta" | "Meta" | "Shift" + >, + delayMs: action.delayMs, + timeoutMs: action.timeoutMs, + }); + break; + case "type": + await typeViaPlaywright({ + cdpUrl, + targetId: effectiveTargetId, + ref: action.ref, + selector: action.selector, + text: action.text, + submit: action.submit, + slowly: action.slowly, + timeoutMs: action.timeoutMs, + }); + break; + case "press": + await pressKeyViaPlaywright({ + cdpUrl, + targetId: effectiveTargetId, + key: action.key, + delayMs: action.delayMs, + }); + break; + case "hover": + await hoverViaPlaywright({ + cdpUrl, + targetId: effectiveTargetId, + ref: action.ref, + selector: action.selector, + timeoutMs: action.timeoutMs, + }); + break; + case "scrollIntoView": + await scrollIntoViewViaPlaywright({ + cdpUrl, + targetId: effectiveTargetId, + ref: action.ref, + selector: action.selector, + timeoutMs: action.timeoutMs, + }); + break; + case "drag": + await dragViaPlaywright({ + cdpUrl, + targetId: effectiveTargetId, + startRef: action.startRef, + startSelector: action.startSelector, + endRef: action.endRef, + endSelector: action.endSelector, + timeoutMs: action.timeoutMs, + }); + break; + case "select": + await selectOptionViaPlaywright({ + cdpUrl, + targetId: effectiveTargetId, + ref: action.ref, + selector: action.selector, + values: 
action.values, + timeoutMs: action.timeoutMs, + }); + break; + case "fill": + await fillFormViaPlaywright({ + cdpUrl, + targetId: effectiveTargetId, + fields: action.fields, + timeoutMs: action.timeoutMs, + }); + break; + case "resize": + await resizeViewportViaPlaywright({ + cdpUrl, + targetId: effectiveTargetId, + width: action.width, + height: action.height, + }); + break; + case "wait": + if (action.fn && !evaluateEnabled) { + throw new Error("wait --fn is disabled by config (browser.evaluateEnabled=false)"); + } + await waitForViaPlaywright({ + cdpUrl, + targetId: effectiveTargetId, + timeMs: action.timeMs, + text: action.text, + textGone: action.textGone, + selector: action.selector, + url: action.url, + loadState: action.loadState, + fn: action.fn, + timeoutMs: action.timeoutMs, + }); + break; + case "evaluate": + if (!evaluateEnabled) { + throw new Error("act:evaluate is disabled by config (browser.evaluateEnabled=false)"); + } + await evaluateViaPlaywright({ + cdpUrl, + targetId: effectiveTargetId, + fn: action.fn, + ref: action.ref, + timeoutMs: action.timeoutMs, + }); + break; + case "close": + await closePageViaPlaywright({ + cdpUrl, + targetId: effectiveTargetId, + }); + break; + case "batch": + await batchViaPlaywright({ + cdpUrl, + targetId: effectiveTargetId, + actions: action.actions, + stopOnError: action.stopOnError, + evaluateEnabled, + depth: depth + 1, + }); + break; + default: + throw new Error(`Unsupported batch action kind: ${(action as { kind: string }).kind}`); + } +} + +export async function batchViaPlaywright(opts: { + cdpUrl: string; + targetId?: string; + actions: BrowserActRequest[]; + stopOnError?: boolean; + evaluateEnabled?: boolean; + depth?: number; +}): Promise<{ results: Array<{ ok: boolean; error?: string }> }> { + const depth = opts.depth ?? 
0; + if (depth > MAX_BATCH_DEPTH) { + throw new Error(`Batch nesting depth exceeds maximum of ${MAX_BATCH_DEPTH}`); + } + if (opts.actions.length > MAX_BATCH_ACTIONS) { + throw new Error(`Batch exceeds maximum of ${MAX_BATCH_ACTIONS} actions`); + } + const results: Array<{ ok: boolean; error?: string }> = []; + for (const action of opts.actions) { + try { + await executeSingleAction(action, opts.cdpUrl, opts.targetId, opts.evaluateEnabled, depth); + results.push({ ok: true }); + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + results.push({ ok: false, error: message }); + if (opts.stopOnError !== false) { + break; + } + } + } + return { results }; +} diff --git a/src/browser/pw-tools-core.responses.ts b/src/browser/pw-tools-core.responses.ts index 5a6ddc1818c..4b153692a20 100644 --- a/src/browser/pw-tools-core.responses.ts +++ b/src/browser/pw-tools-core.responses.ts @@ -1,22 +1,7 @@ import { formatCliCommand } from "../cli/command-format.js"; import { ensurePageState, getPageForTargetId } from "./pw-session.js"; import { normalizeTimeoutMs } from "./pw-tools-core.shared.js"; - -function matchUrlPattern(pattern: string, url: string): boolean { - const p = pattern.trim(); - if (!p) { - return false; - } - if (p === url) { - return true; - } - if (p.includes("*")) { - const escaped = p.replace(/[|\\{}()[\]^$+?.]/g, "\\$&"); - const regex = new RegExp(`^${escaped.replace(/\*\*/g, ".*").replace(/\*/g, ".*")}$`); - return regex.test(url); - } - return url.includes(p); -} +import { matchBrowserUrlPattern } from "./url-pattern.js"; export async function responseBodyViaPlaywright(opts: { cdpUrl: string; @@ -65,7 +50,7 @@ export async function responseBodyViaPlaywright(opts: { } const r = resp as { url?: () => string }; const u = r.url?.() || ""; - if (!matchUrlPattern(pattern, u)) { + if (!matchBrowserUrlPattern(pattern, u)) { return; } done = true; diff --git a/src/browser/pw-tools-core.shared.ts b/src/browser/pw-tools-core.shared.ts 
index d5ad74477d4..b6132de92bf 100644 --- a/src/browser/pw-tools-core.shared.ts +++ b/src/browser/pw-tools-core.shared.ts @@ -29,6 +29,21 @@ export function requireRef(value: unknown): string { return ref; } +export function requireRefOrSelector( + ref: string | undefined, + selector: string | undefined, +): { ref?: string; selector?: string } { + const trimmedRef = typeof ref === "string" ? ref.trim() : ""; + const trimmedSelector = typeof selector === "string" ? selector.trim() : ""; + if (!trimmedRef && !trimmedSelector) { + throw new Error("ref or selector is required"); + } + return { + ref: trimmedRef || undefined, + selector: trimmedSelector || undefined, + }; +} + export function normalizeTimeoutMs(timeoutMs: number | undefined, fallback: number) { return Math.max(500, Math.min(120_000, timeoutMs ?? fallback)); } diff --git a/src/browser/pw-tools-core.waits-next-download-saves-it.test.ts b/src/browser/pw-tools-core.waits-next-download-saves-it.test.ts index d976f7d7fb8..e5aa5bac2e0 100644 --- a/src/browser/pw-tools-core.waits-next-download-saves-it.test.ts +++ b/src/browser/pw-tools-core.waits-next-download-saves-it.test.ts @@ -291,6 +291,6 @@ describe("pw-tools-core", () => { targetId: "T1", ref: " ", }), - ).rejects.toThrow(/ref is required/i); + ).rejects.toThrow(/ref or selector is required/i); }); }); diff --git a/src/browser/resolved-config-refresh.ts b/src/browser/resolved-config-refresh.ts index fe934069a80..999a7ca1229 100644 --- a/src/browser/resolved-config-refresh.ts +++ b/src/browser/resolved-config-refresh.ts @@ -1,4 +1,4 @@ -import { createConfigIO, loadConfig } from "../config/config.js"; +import { createConfigIO, getRuntimeConfigSnapshot } from "../config/config.js"; import { resolveBrowserConfig, resolveProfile, type ResolvedBrowserProfile } from "./config.js"; import type { BrowserServerState } from "./server-context.types.js"; @@ -29,7 +29,13 @@ function applyResolvedConfig( current: BrowserServerState, freshResolved: 
BrowserServerState["resolved"], ) { - current.resolved = freshResolved; + current.resolved = { + ...freshResolved, + // Keep the runtime evaluate gate stable across request-time profile refreshes. + // Security-sensitive behavior should only change via full runtime config reload, + // not as a side effect of resolving profiles/tabs during a request. + evaluateEnabled: current.resolved.evaluateEnabled, + }; for (const [name, runtime] of current.profiles) { const nextProfile = resolveProfile(freshResolved, name); if (nextProfile) { @@ -63,7 +69,11 @@ export function refreshResolvedBrowserConfigFromDisk(params: { if (!params.refreshConfigFromDisk) { return; } - const cfg = params.mode === "fresh" ? createConfigIO().loadConfig() : loadConfig(); + + // Route-level browser config hot reload should observe on-disk changes immediately. + // The shared loadConfig() helper may return a cached snapshot for the configured TTL, + // which can leave request-time browser guards stale (for example evaluateEnabled). + const cfg = getRuntimeConfigSnapshot() ?? 
createConfigIO().loadConfig(); const freshResolved = resolveBrowserConfig(cfg.browser, cfg); applyResolvedConfig(params.current, freshResolved); } diff --git a/src/browser/routes/agent.act.download.ts b/src/browser/routes/agent.act.download.ts index d08287fea59..cfdf1362797 100644 --- a/src/browser/routes/agent.act.download.ts +++ b/src/browser/routes/agent.act.download.ts @@ -1,5 +1,11 @@ +import { getBrowserProfileCapabilities } from "../profile-capabilities.js"; import type { BrowserRouteContext } from "../server-context.js"; -import { readBody, resolveTargetIdFromBody, withPlaywrightRouteContext } from "./agent.shared.js"; +import { + readBody, + requirePwAi, + resolveTargetIdFromBody, + withRouteTabContext, +} from "./agent.shared.js"; import { ensureOutputRootDir, resolveWritableOutputPathOrRespond } from "./output-paths.js"; import { DEFAULT_DOWNLOAD_DIR } from "./path-output.js"; import type { BrowserRouteRegistrar } from "./types.js"; @@ -23,13 +29,23 @@ export function registerBrowserAgentActDownloadRoutes( const out = toStringOrEmpty(body.path) || ""; const timeoutMs = toNumber(body.timeoutMs); - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "wait for download", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + return jsonError( + res, + 501, + "download waiting is not supported for existing-session profiles yet.", + ); + } + const pw = await requirePwAi(res, "wait for download"); + if (!pw) { + return; + } await ensureOutputRootDir(DEFAULT_DOWNLOAD_DIR); let downloadPath: string | undefined; if (out.trim()) { @@ -67,13 +83,23 @@ export function registerBrowserAgentActDownloadRoutes( return jsonError(res, 400, "path is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "download", - run: async ({ cdpUrl, tab, pw }) => { + 
run: async ({ profileCtx, cdpUrl, tab }) => { + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + return jsonError( + res, + 501, + "downloads are not supported for existing-session profiles yet.", + ); + } + const pw = await requirePwAi(res, "download"); + if (!pw) { + return; + } await ensureOutputRootDir(DEFAULT_DOWNLOAD_DIR); const downloadPath = await resolveWritableOutputPathOrRespond({ res, diff --git a/src/browser/routes/agent.act.hooks.ts b/src/browser/routes/agent.act.hooks.ts index 56d97bb03d3..a141a9cbe5a 100644 --- a/src/browser/routes/agent.act.hooks.ts +++ b/src/browser/routes/agent.act.hooks.ts @@ -1,5 +1,12 @@ +import { evaluateChromeMcpScript, uploadChromeMcpFile } from "../chrome-mcp.js"; +import { getBrowserProfileCapabilities } from "../profile-capabilities.js"; import type { BrowserRouteContext } from "../server-context.js"; -import { readBody, resolveTargetIdFromBody, withPlaywrightRouteContext } from "./agent.shared.js"; +import { + readBody, + requirePwAi, + resolveTargetIdFromBody, + withRouteTabContext, +} from "./agent.shared.js"; import { DEFAULT_UPLOAD_DIR, resolveExistingPathsWithinRoot } from "./path-output.js"; import type { BrowserRouteRegistrar } from "./types.js"; import { jsonError, toBoolean, toNumber, toStringArray, toStringOrEmpty } from "./utils.js"; @@ -20,13 +27,12 @@ export function registerBrowserAgentActHookRoutes( return jsonError(res, 400, "paths are required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "file chooser hook", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { const uploadPathsResult = await resolveExistingPathsWithinRoot({ rootDir: DEFAULT_UPLOAD_DIR, requestedPaths: paths, @@ -38,6 +44,39 @@ export function registerBrowserAgentActHookRoutes( } const resolvedPaths = uploadPathsResult.paths; + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + if (element) { + 
return jsonError( + res, + 501, + "existing-session file uploads do not support element selectors; use ref/inputRef.", + ); + } + if (resolvedPaths.length !== 1) { + return jsonError( + res, + 501, + "existing-session file uploads currently support one file at a time.", + ); + } + const uid = inputRef || ref; + if (!uid) { + return jsonError(res, 501, "existing-session file uploads require ref or inputRef."); + } + await uploadChromeMcpFile({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + uid, + filePath: resolvedPaths[0] ?? "", + }); + return res.json({ ok: true }); + } + + const pw = await requirePwAi(res, "file chooser hook"); + if (!pw) { + return; + } + if (inputRef || element) { if (ref) { return jsonError(res, 400, "ref cannot be combined with inputRef/element"); @@ -79,13 +118,69 @@ export function registerBrowserAgentActHookRoutes( return jsonError(res, 400, "accept is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "dialog hook", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session dialog handling does not support timeoutMs.", + ); + } + await evaluateChromeMcpScript({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + fn: `() => { + const state = (window.__openclawDialogHook ??= {}); + if (!state.originals) { + state.originals = { + alert: window.alert.bind(window), + confirm: window.confirm.bind(window), + prompt: window.prompt.bind(window), + }; + } + const originals = state.originals; + const restore = () => { + window.alert = originals.alert; + window.confirm = originals.confirm; + window.prompt = originals.prompt; + delete window.__openclawDialogHook; + }; + window.alert = (...args) => { + try { + return undefined; + } finally { + restore(); + } + }; + window.confirm 
= (...args) => { + try { + return ${accept ? "true" : "false"}; + } finally { + restore(); + } + }; + window.prompt = (...args) => { + try { + return ${accept ? JSON.stringify(promptText ?? "") : "null"}; + } finally { + restore(); + } + }; + return true; + }`, + }); + return res.json({ ok: true }); + } + const pw = await requirePwAi(res, "dialog hook"); + if (!pw) { + return; + } await pw.armDialogViaPlaywright({ cdpUrl, targetId: tab.targetId, diff --git a/src/browser/routes/agent.act.shared.ts b/src/browser/routes/agent.act.shared.ts index 81ca8caab71..b22f35e7ef2 100644 --- a/src/browser/routes/agent.act.shared.ts +++ b/src/browser/routes/agent.act.shared.ts @@ -1,4 +1,5 @@ export const ACT_KINDS = [ + "batch", "click", "close", "drag", diff --git a/src/browser/routes/agent.act.ts b/src/browser/routes/agent.act.ts index 2ae6073c7cf..1b444d1b963 100644 --- a/src/browser/routes/agent.act.ts +++ b/src/browser/routes/agent.act.ts @@ -1,6 +1,19 @@ -import type { BrowserFormField } from "../client-actions-core.js"; +import { + clickChromeMcpElement, + closeChromeMcpTab, + dragChromeMcpElement, + evaluateChromeMcpScript, + fillChromeMcpElement, + fillChromeMcpForm, + hoverChromeMcpElement, + pressChromeMcpKey, + resizeChromeMcpPage, +} from "../chrome-mcp.js"; +import type { BrowserActRequest, BrowserFormField } from "../client-actions-core.js"; import { normalizeBrowserFormField } from "../form-fields.js"; +import { getBrowserProfileCapabilities } from "../profile-capabilities.js"; import type { BrowserRouteContext } from "../server-context.js"; +import { matchBrowserUrlPattern } from "../url-pattern.js"; import { registerBrowserAgentActDownloadRoutes } from "./agent.act.download.js"; import { registerBrowserAgentActHookRoutes } from "./agent.act.hooks.js"; import { @@ -11,13 +24,426 @@ import { } from "./agent.act.shared.js"; import { readBody, + requirePwAi, resolveTargetIdFromBody, - withPlaywrightRouteContext, + withRouteTabContext, SELECTOR_UNSUPPORTED_MESSAGE, 
} from "./agent.shared.js"; import type { BrowserRouteRegistrar } from "./types.js"; import { jsonError, toBoolean, toNumber, toStringArray, toStringOrEmpty } from "./utils.js"; +function sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); +} + +function browserEvaluateDisabledMessage(action: "wait" | "evaluate"): string { + return [ + action === "wait" + ? "wait --fn is disabled by config (browser.evaluateEnabled=false)." + : "act:evaluate is disabled by config (browser.evaluateEnabled=false).", + "Docs: /gateway/configuration#browser-openclaw-managed-browser", + ].join("\n"); +} + +function buildExistingSessionWaitPredicate(params: { + text?: string; + textGone?: string; + selector?: string; + loadState?: "load" | "domcontentloaded" | "networkidle"; + fn?: string; +}): string | null { + const checks: string[] = []; + if (params.text) { + checks.push(`Boolean(document.body?.innerText?.includes(${JSON.stringify(params.text)}))`); + } + if (params.textGone) { + checks.push(`!document.body?.innerText?.includes(${JSON.stringify(params.textGone)})`); + } + if (params.selector) { + checks.push(`Boolean(document.querySelector(${JSON.stringify(params.selector)}))`); + } + if (params.loadState === "domcontentloaded") { + checks.push(`document.readyState === "interactive" || document.readyState === "complete"`); + } else if (params.loadState === "load") { + checks.push(`document.readyState === "complete"`); + } + if (params.fn) { + checks.push(`Boolean(await (${params.fn})())`); + } + if (checks.length === 0) { + return null; + } + return checks.length === 1 ? 
checks[0] : checks.map((check) => `(${check})`).join(" && "); +} + +async function waitForExistingSessionCondition(params: { + profileName: string; + targetId: string; + timeMs?: number; + text?: string; + textGone?: string; + selector?: string; + url?: string; + loadState?: "load" | "domcontentloaded" | "networkidle"; + fn?: string; + timeoutMs?: number; +}): Promise { + if (params.timeMs && params.timeMs > 0) { + await sleep(params.timeMs); + } + const predicate = buildExistingSessionWaitPredicate(params); + if (!predicate && !params.url) { + return; + } + const timeoutMs = Math.max(250, params.timeoutMs ?? 10_000); + const deadline = Date.now() + timeoutMs; + while (Date.now() < deadline) { + let ready = true; + if (predicate) { + ready = Boolean( + await evaluateChromeMcpScript({ + profileName: params.profileName, + targetId: params.targetId, + fn: `async () => ${predicate}`, + }), + ); + } + if (ready && params.url) { + const currentUrl = await evaluateChromeMcpScript({ + profileName: params.profileName, + targetId: params.targetId, + fn: "() => window.location.href", + }); + ready = typeof currentUrl === "string" && matchBrowserUrlPattern(params.url, currentUrl); + } + if (ready) { + return; + } + await sleep(250); + } + throw new Error("Timed out waiting for condition"); +} + +const SELECTOR_ALLOWED_KINDS: ReadonlySet = new Set([ + "batch", + "click", + "drag", + "hover", + "scrollIntoView", + "select", + "type", + "wait", +]); +const MAX_BATCH_ACTIONS = 100; +const MAX_BATCH_CLICK_DELAY_MS = 5_000; +const MAX_BATCH_WAIT_TIME_MS = 30_000; + +function normalizeBoundedNonNegativeMs( + value: unknown, + fieldName: string, + maxMs: number, +): number | undefined { + const ms = toNumber(value); + if (ms === undefined) { + return undefined; + } + if (ms < 0) { + throw new Error(`${fieldName} must be >= 0`); + } + const normalized = Math.floor(ms); + if (normalized > maxMs) { + throw new Error(`${fieldName} exceeds maximum of ${maxMs}ms`); + } + return normalized; 
+} + +function countBatchActions(actions: BrowserActRequest[]): number { + let count = 0; + for (const action of actions) { + count += 1; + if (action.kind === "batch") { + count += countBatchActions(action.actions); + } + } + return count; +} + +function validateBatchTargetIds(actions: BrowserActRequest[], targetId: string): string | null { + for (const action of actions) { + if (action.targetId && action.targetId !== targetId) { + return "batched action targetId must match request targetId"; + } + if (action.kind === "batch") { + const nestedError = validateBatchTargetIds(action.actions, targetId); + if (nestedError) { + return nestedError; + } + } + } + return null; +} + +function normalizeBatchAction(value: unknown): BrowserActRequest { + if (!value || typeof value !== "object" || Array.isArray(value)) { + throw new Error("batch actions must be objects"); + } + const raw = value as Record; + const kind = toStringOrEmpty(raw.kind); + if (!isActKind(kind)) { + throw new Error("batch actions must use a supported kind"); + } + + switch (kind) { + case "click": { + const ref = toStringOrEmpty(raw.ref) || undefined; + const selector = toStringOrEmpty(raw.selector) || undefined; + if (!ref && !selector) { + throw new Error("click requires ref or selector"); + } + const buttonRaw = toStringOrEmpty(raw.button); + const button = buttonRaw ? parseClickButton(buttonRaw) : undefined; + if (buttonRaw && !button) { + throw new Error("click button must be left|right|middle"); + } + const modifiersRaw = toStringArray(raw.modifiers) ?? []; + const parsedModifiers = parseClickModifiers(modifiersRaw); + if (parsedModifiers.error) { + throw new Error(parsedModifiers.error); + } + const doubleClick = toBoolean(raw.doubleClick); + const delayMs = normalizeBoundedNonNegativeMs( + raw.delayMs, + "click delayMs", + MAX_BATCH_CLICK_DELAY_MS, + ); + const timeoutMs = toNumber(raw.timeoutMs); + const targetId = toStringOrEmpty(raw.targetId) || undefined; + return { + kind, + ...(ref ? 
{ ref } : {}), + ...(selector ? { selector } : {}), + ...(targetId ? { targetId } : {}), + ...(doubleClick !== undefined ? { doubleClick } : {}), + ...(button ? { button } : {}), + ...(parsedModifiers.modifiers ? { modifiers: parsedModifiers.modifiers } : {}), + ...(delayMs !== undefined ? { delayMs } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + }; + } + case "type": { + const ref = toStringOrEmpty(raw.ref) || undefined; + const selector = toStringOrEmpty(raw.selector) || undefined; + const text = raw.text; + if (!ref && !selector) { + throw new Error("type requires ref or selector"); + } + if (typeof text !== "string") { + throw new Error("type requires text"); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const submit = toBoolean(raw.submit); + const slowly = toBoolean(raw.slowly); + const timeoutMs = toNumber(raw.timeoutMs); + return { + kind, + ...(ref ? { ref } : {}), + ...(selector ? { selector } : {}), + text, + ...(targetId ? { targetId } : {}), + ...(submit !== undefined ? { submit } : {}), + ...(slowly !== undefined ? { slowly } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + }; + } + case "press": { + const key = toStringOrEmpty(raw.key); + if (!key) { + throw new Error("press requires key"); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const delayMs = toNumber(raw.delayMs); + return { + kind, + key, + ...(targetId ? { targetId } : {}), + ...(delayMs !== undefined ? { delayMs } : {}), + }; + } + case "hover": + case "scrollIntoView": { + const ref = toStringOrEmpty(raw.ref) || undefined; + const selector = toStringOrEmpty(raw.selector) || undefined; + if (!ref && !selector) { + throw new Error(`${kind} requires ref or selector`); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const timeoutMs = toNumber(raw.timeoutMs); + return { + kind, + ...(ref ? { ref } : {}), + ...(selector ? { selector } : {}), + ...(targetId ? 
{ targetId } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + }; + } + case "drag": { + const startRef = toStringOrEmpty(raw.startRef) || undefined; + const startSelector = toStringOrEmpty(raw.startSelector) || undefined; + const endRef = toStringOrEmpty(raw.endRef) || undefined; + const endSelector = toStringOrEmpty(raw.endSelector) || undefined; + if (!startRef && !startSelector) { + throw new Error("drag requires startRef or startSelector"); + } + if (!endRef && !endSelector) { + throw new Error("drag requires endRef or endSelector"); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const timeoutMs = toNumber(raw.timeoutMs); + return { + kind, + ...(startRef ? { startRef } : {}), + ...(startSelector ? { startSelector } : {}), + ...(endRef ? { endRef } : {}), + ...(endSelector ? { endSelector } : {}), + ...(targetId ? { targetId } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + }; + } + case "select": { + const ref = toStringOrEmpty(raw.ref) || undefined; + const selector = toStringOrEmpty(raw.selector) || undefined; + const values = toStringArray(raw.values); + if ((!ref && !selector) || !values?.length) { + throw new Error("select requires ref/selector and values"); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const timeoutMs = toNumber(raw.timeoutMs); + return { + kind, + ...(ref ? { ref } : {}), + ...(selector ? { selector } : {}), + values, + ...(targetId ? { targetId } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + }; + } + case "fill": { + const rawFields = Array.isArray(raw.fields) ? 
raw.fields : []; + const fields = rawFields + .map((field) => { + if (!field || typeof field !== "object") { + return null; + } + return normalizeBrowserFormField(field as Record); + }) + .filter((field): field is BrowserFormField => field !== null); + if (!fields.length) { + throw new Error("fill requires fields"); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const timeoutMs = toNumber(raw.timeoutMs); + return { + kind, + fields, + ...(targetId ? { targetId } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + }; + } + case "resize": { + const width = toNumber(raw.width); + const height = toNumber(raw.height); + if (width === undefined || height === undefined) { + throw new Error("resize requires width and height"); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + return { + kind, + width, + height, + ...(targetId ? { targetId } : {}), + }; + } + case "wait": { + const loadStateRaw = toStringOrEmpty(raw.loadState); + const loadState = + loadStateRaw === "load" || + loadStateRaw === "domcontentloaded" || + loadStateRaw === "networkidle" + ? loadStateRaw + : undefined; + const timeMs = normalizeBoundedNonNegativeMs( + raw.timeMs, + "wait timeMs", + MAX_BATCH_WAIT_TIME_MS, + ); + const text = toStringOrEmpty(raw.text) || undefined; + const textGone = toStringOrEmpty(raw.textGone) || undefined; + const selector = toStringOrEmpty(raw.selector) || undefined; + const url = toStringOrEmpty(raw.url) || undefined; + const fn = toStringOrEmpty(raw.fn) || undefined; + if (timeMs === undefined && !text && !textGone && !selector && !url && !loadState && !fn) { + throw new Error( + "wait requires at least one of: timeMs, text, textGone, selector, url, loadState, fn", + ); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const timeoutMs = toNumber(raw.timeoutMs); + return { + kind, + ...(timeMs !== undefined ? { timeMs } : {}), + ...(text ? { text } : {}), + ...(textGone ? 
{ textGone } : {}), + ...(selector ? { selector } : {}), + ...(url ? { url } : {}), + ...(loadState ? { loadState } : {}), + ...(fn ? { fn } : {}), + ...(targetId ? { targetId } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + }; + } + case "evaluate": { + const fn = toStringOrEmpty(raw.fn); + if (!fn) { + throw new Error("evaluate requires fn"); + } + const ref = toStringOrEmpty(raw.ref) || undefined; + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const timeoutMs = toNumber(raw.timeoutMs); + return { + kind, + fn, + ...(ref ? { ref } : {}), + ...(targetId ? { targetId } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + }; + } + case "close": { + const targetId = toStringOrEmpty(raw.targetId) || undefined; + return { + kind, + ...(targetId ? { targetId } : {}), + }; + } + case "batch": { + const actions = Array.isArray(raw.actions) ? raw.actions.map(normalizeBatchAction) : []; + if (!actions.length) { + throw new Error("batch requires actions"); + } + if (countBatchActions(actions) > MAX_BATCH_ACTIONS) { + throw new Error(`batch exceeds maximum of ${MAX_BATCH_ACTIONS} actions`); + } + const targetId = toStringOrEmpty(raw.targetId) || undefined; + const stopOnError = toBoolean(raw.stopOnError); + return { + kind, + actions, + ...(targetId ? { targetId } : {}), + ...(stopOnError !== undefined ? { stopOnError } : {}), + }; + } + } +} + export function registerBrowserAgentActRoutes( app: BrowserRouteRegistrar, ctx: BrowserRouteContext, @@ -30,27 +456,41 @@ export function registerBrowserAgentActRoutes( } const kind: ActKind = kindRaw; const targetId = resolveTargetIdFromBody(body); - if (Object.hasOwn(body, "selector") && kind !== "wait") { + if (Object.hasOwn(body, "selector") && !SELECTOR_ALLOWED_KINDS.has(kind)) { return jsonError(res, 400, SELECTOR_UNSUPPORTED_MESSAGE); } + const earlyFn = kind === "wait" || kind === "evaluate" ? 
toStringOrEmpty(body.fn) : ""; + if ( + (kind === "evaluate" || (kind === "wait" && earlyFn)) && + !ctx.state().resolved.evaluateEnabled + ) { + return jsonError( + res, + 403, + browserEvaluateDisabledMessage(kind === "evaluate" ? "evaluate" : "wait"), + ); + } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: `act:${kind}`, - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { const evaluateEnabled = ctx.state().resolved.evaluateEnabled; + const isExistingSession = getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp; + const profileName = profileCtx.profile.name; switch (kind) { case "click": { - const ref = toStringOrEmpty(body.ref); - if (!ref) { - return jsonError(res, 400, "ref is required"); + const ref = toStringOrEmpty(body.ref) || undefined; + const selector = toStringOrEmpty(body.selector) || undefined; + if (!ref && !selector) { + return jsonError(res, 400, "ref or selector is required"); } const doubleClick = toBoolean(body.doubleClick) ?? false; const timeoutMs = toNumber(body.timeoutMs); + const delayMs = toNumber(body.delayMs); const buttonRaw = toStringOrEmpty(body.button) || ""; const button = buttonRaw ? 
parseClickButton(buttonRaw) : undefined; if (buttonRaw && !button) { @@ -63,18 +503,53 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, parsedModifiers.error); } const modifiers = parsedModifiers.modifiers; + if (isExistingSession) { + if (selector) { + return jsonError( + res, + 501, + "existing-session click does not support selector targeting yet; use ref.", + ); + } + if ((button && button !== "left") || (modifiers && modifiers.length > 0)) { + return jsonError( + res, + 501, + "existing-session click currently supports left-click only (no button overrides/modifiers).", + ); + } + await clickChromeMcpElement({ + profileName, + targetId: tab.targetId, + uid: ref!, + doubleClick, + }); + return res.json({ ok: true, targetId: tab.targetId, url: tab.url }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } const clickRequest: Parameters[0] = { cdpUrl, targetId: tab.targetId, - ref, doubleClick, }; + if (ref) { + clickRequest.ref = ref; + } + if (selector) { + clickRequest.selector = selector; + } if (button) { clickRequest.button = button; } if (modifiers) { clickRequest.modifiers = modifiers; } + if (delayMs) { + clickRequest.delayMs = delayMs; + } if (timeoutMs) { clickRequest.timeoutMs = timeoutMs; } @@ -82,9 +557,10 @@ export function registerBrowserAgentActRoutes( return res.json({ ok: true, targetId: tab.targetId, url: tab.url }); } case "type": { - const ref = toStringOrEmpty(body.ref); - if (!ref) { - return jsonError(res, 400, "ref is required"); + const ref = toStringOrEmpty(body.ref) || undefined; + const selector = toStringOrEmpty(body.selector) || undefined; + if (!ref && !selector) { + return jsonError(res, 400, "ref or selector is required"); } if (typeof body.text !== "string") { return jsonError(res, 400, "text is required"); @@ -93,14 +569,53 @@ export function registerBrowserAgentActRoutes( const submit = toBoolean(body.submit) ?? false; const slowly = toBoolean(body.slowly) ?? 
false; const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (selector) { + return jsonError( + res, + 501, + "existing-session type does not support selector targeting yet; use ref.", + ); + } + if (slowly) { + return jsonError( + res, + 501, + "existing-session type does not support slowly=true; use fill/press instead.", + ); + } + await fillChromeMcpElement({ + profileName, + targetId: tab.targetId, + uid: ref!, + value: text, + }); + if (submit) { + await pressChromeMcpKey({ + profileName, + targetId: tab.targetId, + key: "Enter", + }); + } + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } const typeRequest: Parameters[0] = { cdpUrl, targetId: tab.targetId, - ref, text, submit, slowly, }; + if (ref) { + typeRequest.ref = ref; + } + if (selector) { + typeRequest.selector = selector; + } if (timeoutMs) { typeRequest.timeoutMs = timeoutMs; } @@ -113,6 +628,17 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "key is required"); } const delayMs = toNumber(body.delayMs); + if (isExistingSession) { + if (delayMs) { + return jsonError(res, 501, "existing-session press does not support delayMs."); + } + await pressChromeMcpKey({ profileName, targetId: tab.targetId, key }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.pressKeyViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -122,30 +648,87 @@ export function registerBrowserAgentActRoutes( return res.json({ ok: true, targetId: tab.targetId }); } case "hover": { - const ref = toStringOrEmpty(body.ref); - if (!ref) { - return jsonError(res, 400, "ref is required"); + const ref = toStringOrEmpty(body.ref) || undefined; + const selector = toStringOrEmpty(body.selector) || undefined; + if (!ref && !selector) { + return jsonError(res, 400, "ref or selector is required"); } const 
timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (selector) { + return jsonError( + res, + 501, + "existing-session hover does not support selector targeting yet; use ref.", + ); + } + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session hover does not support timeoutMs overrides.", + ); + } + await hoverChromeMcpElement({ profileName, targetId: tab.targetId, uid: ref! }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.hoverViaPlaywright({ cdpUrl, targetId: tab.targetId, ref, + selector, timeoutMs: timeoutMs ?? undefined, }); return res.json({ ok: true, targetId: tab.targetId }); } case "scrollIntoView": { - const ref = toStringOrEmpty(body.ref); - if (!ref) { - return jsonError(res, 400, "ref is required"); + const ref = toStringOrEmpty(body.ref) || undefined; + const selector = toStringOrEmpty(body.selector) || undefined; + if (!ref && !selector) { + return jsonError(res, 400, "ref or selector is required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (selector) { + return jsonError( + res, + 501, + "existing-session scrollIntoView does not support selector targeting yet; use ref.", + ); + } + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session scrollIntoView does not support timeoutMs overrides.", + ); + } + await evaluateChromeMcpScript({ + profileName, + targetId: tab.targetId, + fn: `(el) => { el.scrollIntoView({ block: "center", inline: "center" }); return true; }`, + args: [ref!], + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } const scrollRequest: Parameters[0] = { cdpUrl, targetId: tab.targetId, - ref, }; + if (ref) { + scrollRequest.ref = ref; + } + if (selector) { + scrollRequest.selector = selector; + } if (timeoutMs) { scrollRequest.timeoutMs = timeoutMs; } @@ 
-153,32 +736,102 @@ export function registerBrowserAgentActRoutes( return res.json({ ok: true, targetId: tab.targetId }); } case "drag": { - const startRef = toStringOrEmpty(body.startRef); - const endRef = toStringOrEmpty(body.endRef); - if (!startRef || !endRef) { - return jsonError(res, 400, "startRef and endRef are required"); + const startRef = toStringOrEmpty(body.startRef) || undefined; + const startSelector = toStringOrEmpty(body.startSelector) || undefined; + const endRef = toStringOrEmpty(body.endRef) || undefined; + const endSelector = toStringOrEmpty(body.endSelector) || undefined; + if (!startRef && !startSelector) { + return jsonError(res, 400, "startRef or startSelector is required"); + } + if (!endRef && !endSelector) { + return jsonError(res, 400, "endRef or endSelector is required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (startSelector || endSelector) { + return jsonError( + res, + 501, + "existing-session drag does not support selector targeting yet; use startRef/endRef.", + ); + } + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session drag does not support timeoutMs overrides.", + ); + } + await dragChromeMcpElement({ + profileName, + targetId: tab.targetId, + fromUid: startRef!, + toUid: endRef!, + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.dragViaPlaywright({ cdpUrl, targetId: tab.targetId, startRef, + startSelector, endRef, + endSelector, timeoutMs: timeoutMs ?? 
undefined, }); return res.json({ ok: true, targetId: tab.targetId }); } case "select": { - const ref = toStringOrEmpty(body.ref); + const ref = toStringOrEmpty(body.ref) || undefined; + const selector = toStringOrEmpty(body.selector) || undefined; const values = toStringArray(body.values); - if (!ref || !values?.length) { - return jsonError(res, 400, "ref and values are required"); + if ((!ref && !selector) || !values?.length) { + return jsonError(res, 400, "ref/selector and values are required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (selector) { + return jsonError( + res, + 501, + "existing-session select does not support selector targeting yet; use ref.", + ); + } + if (values.length !== 1) { + return jsonError( + res, + 501, + "existing-session select currently supports a single value only.", + ); + } + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session select does not support timeoutMs overrides.", + ); + } + await fillChromeMcpElement({ + profileName, + targetId: tab.targetId, + uid: ref!, + value: values[0] ?? "", + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.selectOptionViaPlaywright({ cdpUrl, targetId: tab.targetId, ref, + selector, values, timeoutMs: timeoutMs ?? undefined, }); @@ -198,6 +851,28 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "fields are required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session fill does not support timeoutMs overrides.", + ); + } + await fillChromeMcpForm({ + profileName, + targetId: tab.targetId, + elements: fields.map((field) => ({ + uid: field.ref, + value: String(field.value ?? 
""), + })), + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.fillFormViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -212,6 +887,19 @@ export function registerBrowserAgentActRoutes( if (!width || !height) { return jsonError(res, 400, "width and height are required"); } + if (isExistingSession) { + await resizeChromeMcpPage({ + profileName, + targetId: tab.targetId, + width, + height, + }); + return res.json({ ok: true, targetId: tab.targetId, url: tab.url }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.resizeViewportViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -236,14 +924,7 @@ export function registerBrowserAgentActRoutes( const fn = toStringOrEmpty(body.fn) || undefined; const timeoutMs = toNumber(body.timeoutMs) ?? undefined; if (fn && !evaluateEnabled) { - return jsonError( - res, - 403, - [ - "wait --fn is disabled by config (browser.evaluateEnabled=false).", - "Docs: /gateway/configuration#browser-openclaw-managed-browser", - ].join("\n"), - ); + return jsonError(res, 403, browserEvaluateDisabledMessage("wait")); } if ( timeMs === undefined && @@ -260,6 +941,32 @@ export function registerBrowserAgentActRoutes( "wait requires at least one of: timeMs, text, textGone, selector, url, loadState, fn", ); } + if (isExistingSession) { + if (loadState === "networkidle") { + return jsonError( + res, + 501, + "existing-session wait does not support loadState=networkidle yet.", + ); + } + await waitForExistingSessionCondition({ + profileName, + targetId: tab.targetId, + timeMs, + text, + textGone, + selector, + url, + loadState, + fn, + timeoutMs, + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.waitForViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -276,14 +983,7 @@ export function 
registerBrowserAgentActRoutes( } case "evaluate": { if (!evaluateEnabled) { - return jsonError( - res, - 403, - [ - "act:evaluate is disabled by config (browser.evaluateEnabled=false).", - "Docs: /gateway/configuration#browser-openclaw-managed-browser", - ].join("\n"), - ); + return jsonError(res, 403, browserEvaluateDisabledMessage("evaluate")); } const fn = toStringOrEmpty(body.fn); if (!fn) { @@ -291,6 +991,31 @@ export function registerBrowserAgentActRoutes( } const ref = toStringOrEmpty(body.ref) || undefined; const evalTimeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (evalTimeoutMs !== undefined) { + return jsonError( + res, + 501, + "existing-session evaluate does not support timeoutMs overrides.", + ); + } + const result = await evaluateChromeMcpScript({ + profileName, + targetId: tab.targetId, + fn, + args: ref ? [ref] : undefined, + }); + return res.json({ + ok: true, + targetId: tab.targetId, + url: tab.url, + result, + }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } const evalRequest: Parameters[0] = { cdpUrl, targetId: tab.targetId, @@ -310,9 +1035,55 @@ export function registerBrowserAgentActRoutes( }); } case "close": { + if (isExistingSession) { + await closeChromeMcpTab(profileName, tab.targetId); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.closePageViaPlaywright({ cdpUrl, targetId: tab.targetId }); return res.json({ ok: true, targetId: tab.targetId }); } + case "batch": { + if (isExistingSession) { + return jsonError( + res, + 501, + "existing-session batch is not supported yet; send actions individually.", + ); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } + let actions: BrowserActRequest[]; + try { + actions = Array.isArray(body.actions) ? 
body.actions.map(normalizeBatchAction) : []; + } catch (err) { + return jsonError(res, 400, err instanceof Error ? err.message : String(err)); + } + if (!actions.length) { + return jsonError(res, 400, "actions are required"); + } + if (countBatchActions(actions) > MAX_BATCH_ACTIONS) { + return jsonError(res, 400, `batch exceeds maximum of ${MAX_BATCH_ACTIONS} actions`); + } + const targetIdError = validateBatchTargetIds(actions, tab.targetId); + if (targetIdError) { + return jsonError(res, 403, targetIdError); + } + const stopOnError = toBoolean(body.stopOnError) ?? true; + const result = await pw.batchViaPlaywright({ + cdpUrl, + targetId: tab.targetId, + actions, + stopOnError, + evaluateEnabled, + }); + return res.json({ ok: true, targetId: tab.targetId, results: result.results }); + } default: { return jsonError(res, 400, "unsupported kind"); } @@ -334,13 +1105,23 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "url is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "response body", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + return jsonError( + res, + 501, + "response body is not supported for existing-session profiles yet.", + ); + } + const pw = await requirePwAi(res, "response body"); + if (!pw) { + return; + } const result = await pw.responseBodyViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -361,13 +1142,39 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "ref is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "highlight", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + await evaluateChromeMcpScript({ + profileName: 
profileCtx.profile.name, + targetId: tab.targetId, + args: [ref], + fn: `(el) => { + if (!(el instanceof Element)) { + return false; + } + el.scrollIntoView({ block: "center", inline: "center" }); + const previousOutline = el.style.outline; + const previousOffset = el.style.outlineOffset; + el.style.outline = "3px solid #FF4500"; + el.style.outlineOffset = "2px"; + setTimeout(() => { + el.style.outline = previousOutline; + el.style.outlineOffset = previousOffset; + }, 2000); + return true; + }`, + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, "highlight"); + if (!pw) { + return; + } await pw.highlightViaPlaywright({ cdpUrl, targetId: tab.targetId, diff --git a/src/browser/routes/agent.existing-session.test.ts b/src/browser/routes/agent.existing-session.test.ts new file mode 100644 index 00000000000..4f8211114ea --- /dev/null +++ b/src/browser/routes/agent.existing-session.test.ts @@ -0,0 +1,252 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { registerBrowserAgentActRoutes } from "./agent.act.js"; +import { registerBrowserAgentSnapshotRoutes } from "./agent.snapshot.js"; +import { createBrowserRouteApp, createBrowserRouteResponse } from "./test-helpers.js"; +import type { BrowserRequest } from "./types.js"; + +const routeState = vi.hoisted(() => ({ + profileCtx: { + profile: { + driver: "existing-session" as const, + name: "chrome-live", + }, + ensureTabAvailable: vi.fn(async () => ({ + targetId: "7", + url: "https://example.com", + })), + }, + tab: { + targetId: "7", + url: "https://example.com", + }, +})); + +const chromeMcpMocks = vi.hoisted(() => ({ + evaluateChromeMcpScript: vi.fn( + async (_params: { profileName: string; targetId: string; fn: string }) => true, + ), + navigateChromeMcpPage: vi.fn(async ({ url }: { url: string }) => ({ url })), + takeChromeMcpScreenshot: vi.fn(async () => Buffer.from("png")), + takeChromeMcpSnapshot: vi.fn(async () => ({ + id: "root", + role: 
"document", + name: "Example", + children: [{ id: "btn-1", role: "button", name: "Continue" }], + })), +})); + +vi.mock("../chrome-mcp.js", () => ({ + clickChromeMcpElement: vi.fn(async () => {}), + closeChromeMcpTab: vi.fn(async () => {}), + dragChromeMcpElement: vi.fn(async () => {}), + evaluateChromeMcpScript: chromeMcpMocks.evaluateChromeMcpScript, + fillChromeMcpElement: vi.fn(async () => {}), + fillChromeMcpForm: vi.fn(async () => {}), + hoverChromeMcpElement: vi.fn(async () => {}), + navigateChromeMcpPage: chromeMcpMocks.navigateChromeMcpPage, + pressChromeMcpKey: vi.fn(async () => {}), + resizeChromeMcpPage: vi.fn(async () => {}), + takeChromeMcpScreenshot: chromeMcpMocks.takeChromeMcpScreenshot, + takeChromeMcpSnapshot: chromeMcpMocks.takeChromeMcpSnapshot, +})); + +vi.mock("../cdp.js", () => ({ + captureScreenshot: vi.fn(), + snapshotAria: vi.fn(), +})); + +vi.mock("../navigation-guard.js", () => ({ + assertBrowserNavigationAllowed: vi.fn(async () => {}), + assertBrowserNavigationResultAllowed: vi.fn(async () => {}), + withBrowserNavigationPolicy: vi.fn(() => ({})), +})); + +vi.mock("../screenshot.js", () => ({ + DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES: 128, + DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE: 64, + normalizeBrowserScreenshot: vi.fn(async (buffer: Buffer) => ({ + buffer, + contentType: "image/png", + })), +})); + +vi.mock("../../media/store.js", () => ({ + ensureMediaDir: vi.fn(async () => {}), + saveMediaBuffer: vi.fn(async () => ({ path: "/tmp/fake.png" })), +})); + +vi.mock("./agent.shared.js", () => ({ + getPwAiModule: vi.fn(async () => null), + handleRouteError: vi.fn(), + readBody: vi.fn((req: BrowserRequest) => req.body ?? {}), + requirePwAi: vi.fn(async () => { + throw new Error("Playwright should not be used for existing-session tests"); + }), + resolveProfileContext: vi.fn(() => routeState.profileCtx), + resolveTargetIdFromBody: vi.fn((body: Record) => + typeof body.targetId === "string" ? 
body.targetId : undefined, + ), + withPlaywrightRouteContext: vi.fn(), + withRouteTabContext: vi.fn(async ({ run }: { run: (args: unknown) => Promise }) => { + await run({ + profileCtx: routeState.profileCtx, + cdpUrl: "http://127.0.0.1:18800", + tab: routeState.tab, + }); + }), +})); + +function getSnapshotGetHandler() { + const { app, getHandlers } = createBrowserRouteApp(); + registerBrowserAgentSnapshotRoutes(app, { + state: () => ({ resolved: { ssrfPolicy: undefined } }), + } as never); + const handler = getHandlers.get("/snapshot"); + expect(handler).toBeTypeOf("function"); + return handler; +} + +function getSnapshotPostHandler() { + const { app, postHandlers } = createBrowserRouteApp(); + registerBrowserAgentSnapshotRoutes(app, { + state: () => ({ resolved: { ssrfPolicy: undefined } }), + } as never); + const handler = postHandlers.get("/screenshot"); + expect(handler).toBeTypeOf("function"); + return handler; +} + +function getActPostHandler() { + const { app, postHandlers } = createBrowserRouteApp(); + registerBrowserAgentActRoutes(app, { + state: () => ({ resolved: { evaluateEnabled: true } }), + } as never); + const handler = postHandlers.get("/act"); + expect(handler).toBeTypeOf("function"); + return handler; +} + +describe("existing-session browser routes", () => { + beforeEach(() => { + routeState.profileCtx.ensureTabAvailable.mockClear(); + chromeMcpMocks.evaluateChromeMcpScript.mockReset(); + chromeMcpMocks.navigateChromeMcpPage.mockClear(); + chromeMcpMocks.takeChromeMcpScreenshot.mockClear(); + chromeMcpMocks.takeChromeMcpSnapshot.mockClear(); + chromeMcpMocks.evaluateChromeMcpScript + .mockResolvedValueOnce({ labels: 1, skipped: 0 } as never) + .mockResolvedValueOnce(true); + }); + + it("allows labeled AI snapshots for existing-session profiles", async () => { + const handler = getSnapshotGetHandler(); + const response = createBrowserRouteResponse(); + await handler?.({ params: {}, query: { format: "ai", labels: "1" } }, response.res); + + 
expect(response.statusCode).toBe(200); + expect(response.body).toMatchObject({ + ok: true, + format: "ai", + labels: true, + labelsCount: 1, + labelsSkipped: 0, + }); + expect(chromeMcpMocks.takeChromeMcpSnapshot).toHaveBeenCalledWith({ + profileName: "chrome-live", + targetId: "7", + }); + expect(chromeMcpMocks.takeChromeMcpScreenshot).toHaveBeenCalled(); + }); + + it("allows ref screenshots for existing-session profiles", async () => { + const handler = getSnapshotPostHandler(); + const response = createBrowserRouteResponse(); + await handler?.( + { + params: {}, + query: {}, + body: { ref: "btn-1", type: "jpeg" }, + }, + response.res, + ); + + expect(response.statusCode).toBe(200); + expect(response.body).toMatchObject({ + ok: true, + path: "/tmp/fake.png", + targetId: "7", + }); + expect(chromeMcpMocks.takeChromeMcpScreenshot).toHaveBeenCalledWith({ + profileName: "chrome-live", + targetId: "7", + uid: "btn-1", + fullPage: false, + format: "jpeg", + }); + }); + + it("rejects selector-based element screenshots for existing-session profiles", async () => { + const handler = getSnapshotPostHandler(); + const response = createBrowserRouteResponse(); + await handler?.( + { + params: {}, + query: {}, + body: { element: "#submit" }, + }, + response.res, + ); + + expect(response.statusCode).toBe(400); + expect(response.body).toMatchObject({ + error: expect.stringContaining("element screenshots are not supported"), + }); + expect(chromeMcpMocks.takeChromeMcpScreenshot).not.toHaveBeenCalled(); + }); + + it("fails closed for existing-session networkidle waits", async () => { + const handler = getActPostHandler(); + const response = createBrowserRouteResponse(); + await handler?.( + { + params: {}, + query: {}, + body: { kind: "wait", loadState: "networkidle" }, + }, + response.res, + ); + + expect(response.statusCode).toBe(501); + expect(response.body).toMatchObject({ + error: expect.stringContaining("loadState=networkidle"), + }); + 
expect(chromeMcpMocks.evaluateChromeMcpScript).not.toHaveBeenCalled(); + }); + + it("supports glob URL waits for existing-session profiles", async () => { + chromeMcpMocks.evaluateChromeMcpScript.mockReset(); + chromeMcpMocks.evaluateChromeMcpScript.mockImplementation( + async ({ fn }: { fn: string }) => + (fn === "() => window.location.href" ? "https://example.com/" : true) as never, + ); + + const handler = getActPostHandler(); + const response = createBrowserRouteResponse(); + await handler?.( + { + params: {}, + query: {}, + body: { kind: "wait", url: "**/example.com/" }, + }, + response.res, + ); + + expect(response.statusCode).toBe(200); + expect(response.body).toMatchObject({ ok: true, targetId: "7" }); + expect(chromeMcpMocks.evaluateChromeMcpScript).toHaveBeenCalledWith({ + profileName: "chrome-live", + targetId: "7", + fn: "() => window.location.href", + }); + }); +}); diff --git a/src/browser/routes/agent.snapshot.plan.test.ts b/src/browser/routes/agent.snapshot.plan.test.ts index 493fbcdfbad..71870aa1a6d 100644 --- a/src/browser/routes/agent.snapshot.plan.test.ts +++ b/src/browser/routes/agent.snapshot.plan.test.ts @@ -3,9 +3,9 @@ import { resolveBrowserConfig, resolveProfile } from "../config.js"; import { resolveSnapshotPlan } from "./agent.snapshot.plan.js"; describe("resolveSnapshotPlan", () => { - it("defaults chrome extension relay snapshots to aria when format is omitted", () => { + it("defaults chrome-relay snapshots to aria when format is omitted", () => { const resolved = resolveBrowserConfig({}); - const profile = resolveProfile(resolved, "chrome"); + const profile = resolveProfile(resolved, "chrome-relay"); expect(profile).toBeTruthy(); const plan = resolveSnapshotPlan({ diff --git a/src/browser/routes/agent.snapshot.ts b/src/browser/routes/agent.snapshot.ts index c750cafe723..80c11693a11 100644 --- a/src/browser/routes/agent.snapshot.ts +++ b/src/browser/routes/agent.snapshot.ts @@ -1,7 +1,22 @@ import path from "node:path"; import { 
ensureMediaDir, saveMediaBuffer } from "../../media/store.js"; import { captureScreenshot, snapshotAria } from "../cdp.js"; +import { + evaluateChromeMcpScript, + navigateChromeMcpPage, + takeChromeMcpScreenshot, + takeChromeMcpSnapshot, +} from "../chrome-mcp.js"; +import { + buildAiSnapshotFromChromeMcpSnapshot, + flattenChromeMcpSnapshotToAriaNodes, +} from "../chrome-mcp.snapshot.js"; +import { + assertBrowserNavigationAllowed, + assertBrowserNavigationResultAllowed, +} from "../navigation-guard.js"; import { withBrowserNavigationPolicy } from "../navigation-guard.js"; +import { getBrowserProfileCapabilities } from "../profile-capabilities.js"; import { DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE, @@ -25,6 +40,110 @@ import { import type { BrowserResponse, BrowserRouteRegistrar } from "./types.js"; import { jsonError, toBoolean, toStringOrEmpty } from "./utils.js"; +const CHROME_MCP_OVERLAY_ATTR = "data-openclaw-mcp-overlay"; + +async function clearChromeMcpOverlay(params: { + profileName: string; + targetId: string; +}): Promise { + await evaluateChromeMcpScript({ + profileName: params.profileName, + targetId: params.targetId, + fn: `() => { + document.querySelectorAll("[${CHROME_MCP_OVERLAY_ATTR}]").forEach((node) => node.remove()); + return true; + }`, + }).catch(() => {}); +} + +async function renderChromeMcpLabels(params: { + profileName: string; + targetId: string; + refs: string[]; +}): Promise<{ labels: number; skipped: number }> { + const refList = JSON.stringify(params.refs); + const result = await evaluateChromeMcpScript({ + profileName: params.profileName, + targetId: params.targetId, + args: params.refs, + fn: `(...elements) => { + const refs = ${refList}; + document.querySelectorAll("[${CHROME_MCP_OVERLAY_ATTR}]").forEach((node) => node.remove()); + const root = document.createElement("div"); + root.setAttribute("${CHROME_MCP_OVERLAY_ATTR}", "labels"); + root.style.position = "fixed"; + root.style.inset = "0"; + 
root.style.pointerEvents = "none"; + root.style.zIndex = "2147483647"; + let labels = 0; + let skipped = 0; + elements.forEach((el, index) => { + if (!(el instanceof Element)) { + skipped += 1; + return; + } + const rect = el.getBoundingClientRect(); + if (rect.width <= 0 && rect.height <= 0) { + skipped += 1; + return; + } + labels += 1; + const badge = document.createElement("div"); + badge.setAttribute("${CHROME_MCP_OVERLAY_ATTR}", "label"); + badge.textContent = refs[index] || String(labels); + badge.style.position = "fixed"; + badge.style.left = \`\${Math.max(0, rect.left)}px\`; + badge.style.top = \`\${Math.max(0, rect.top)}px\`; + badge.style.transform = "translateY(-100%)"; + badge.style.padding = "2px 6px"; + badge.style.borderRadius = "999px"; + badge.style.background = "#FF4500"; + badge.style.color = "#fff"; + badge.style.font = "600 12px ui-monospace, SFMono-Regular, Menlo, monospace"; + badge.style.boxShadow = "0 2px 6px rgba(0,0,0,0.35)"; + badge.style.whiteSpace = "nowrap"; + root.appendChild(badge); + }); + document.documentElement.appendChild(root); + return { labels, skipped }; + }`, + }); + const labels = + result && + typeof result === "object" && + typeof (result as { labels?: unknown }).labels === "number" + ? (result as { labels: number }).labels + : 0; + const skipped = + result && + typeof result === "object" && + typeof (result as { skipped?: unknown }).skipped === "number" + ? (result as { skipped: number }).skipped + : 0; + return { labels, skipped }; +} + +async function saveNormalizedScreenshotResponse(params: { + res: BrowserResponse; + buffer: Buffer; + type: "png" | "jpeg"; + targetId: string; + url: string; +}) { + const normalized = await normalizeBrowserScreenshot(params.buffer, { + maxSide: DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE, + maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + }); + await saveBrowserMediaResponse({ + res: params.res, + buffer: normalized.buffer, + contentType: normalized.contentType ?? 
`image/${params.type}`, + maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + targetId: params.targetId, + url: params.url, + }); +} + async function saveBrowserMediaResponse(params: { res: BrowserResponse; buffer: Buffer; @@ -56,7 +175,10 @@ export async function resolveTargetIdAfterNavigate(opts: { }): Promise { let currentTargetId = opts.oldTargetId; try { - const pickReplacement = (tabs: Array<{ targetId: string; url: string }>) => { + const pickReplacement = ( + tabs: Array<{ targetId: string; url: string }>, + options?: { allowSingleTabFallback?: boolean }, + ) => { if (tabs.some((tab) => tab.targetId === opts.oldTargetId)) { return opts.oldTargetId; } @@ -68,7 +190,7 @@ export async function resolveTargetIdAfterNavigate(opts: { if (uniqueReplacement.length === 1) { return uniqueReplacement[0]?.targetId ?? opts.oldTargetId; } - if (tabs.length === 1) { + if (options?.allowSingleTabFallback && tabs.length === 1) { return tabs[0]?.targetId ?? opts.oldTargetId; } return opts.oldTargetId; @@ -77,7 +199,9 @@ export async function resolveTargetIdAfterNavigate(opts: { currentTargetId = pickReplacement(await opts.listTabs()); if (currentTargetId === opts.oldTargetId) { await new Promise((r) => setTimeout(r, 800)); - currentTargetId = pickReplacement(await opts.listTabs()); + currentTargetId = pickReplacement(await opts.listTabs(), { + allowSingleTabFallback: true, + }); } } catch { // Best-effort: fall back to pre-navigation targetId @@ -96,13 +220,27 @@ export function registerBrowserAgentSnapshotRoutes( if (!url) { return jsonError(res, 400, "url is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "navigate", - run: async ({ cdpUrl, tab, pw, profileCtx }) => { + run: async ({ profileCtx, tab, cdpUrl }) => { + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + const ssrfPolicyOpts = withBrowserNavigationPolicy(ctx.state().resolved.ssrfPolicy); + await 
assertBrowserNavigationAllowed({ url, ...ssrfPolicyOpts }); + const result = await navigateChromeMcpPage({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + url, + }); + await assertBrowserNavigationResultAllowed({ url: result.url, ...ssrfPolicyOpts }); + return res.json({ ok: true, targetId: tab.targetId, ...result }); + } + const pw = await requirePwAi(res, "navigate"); + if (!pw) { + return; + } const result = await pw.navigateViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -122,6 +260,17 @@ export function registerBrowserAgentSnapshotRoutes( app.post("/pdf", async (req, res) => { const body = readBody(req); const targetId = toStringOrEmpty(body.targetId) || undefined; + const profileCtx = resolveProfileContext(req, res, ctx); + if (!profileCtx) { + return; + } + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + return jsonError( + res, + 501, + "pdf is not supported for existing-session profiles yet; use screenshot/snapshot instead.", + ); + } await withPlaywrightRouteContext({ req, res, @@ -163,6 +312,31 @@ export function registerBrowserAgentSnapshotRoutes( ctx, targetId, run: async ({ profileCtx, tab, cdpUrl }) => { + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + if (element) { + return jsonError( + res, + 400, + "element screenshots are not supported for existing-session profiles; use ref from snapshot.", + ); + } + const buffer = await takeChromeMcpScreenshot({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + uid: ref, + fullPage, + format: type, + }); + await saveNormalizedScreenshotResponse({ + res, + buffer, + type, + targetId: tab.targetId, + url: tab.url, + }); + return; + } + let buffer: Buffer; const shouldUsePlaywright = shouldUsePlaywrightForScreenshot({ profile: profileCtx.profile, @@ -193,15 +367,10 @@ export function registerBrowserAgentSnapshotRoutes( }); } - const normalized = await normalizeBrowserScreenshot(buffer, { - maxSide: 
DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE, - maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, - }); - await saveBrowserMediaResponse({ + await saveNormalizedScreenshotResponse({ res, - buffer: normalized.buffer, - contentType: normalized.contentType ?? `image/${type}`, - maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + buffer, + type, targetId: tab.targetId, url: tab.url, }); @@ -227,6 +396,87 @@ export function registerBrowserAgentSnapshotRoutes( if ((plan.labels || plan.mode === "efficient") && plan.format === "aria") { return jsonError(res, 400, "labels/mode=efficient require format=ai"); } + if (getBrowserProfileCapabilities(profileCtx.profile).usesChromeMcp) { + if (plan.selectorValue || plan.frameSelectorValue) { + return jsonError( + res, + 400, + "selector/frame snapshots are not supported for existing-session profiles; snapshot the whole page and use refs.", + ); + } + const snapshot = await takeChromeMcpSnapshot({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + }); + if (plan.format === "aria") { + return res.json({ + ok: true, + format: "aria", + targetId: tab.targetId, + url: tab.url, + nodes: flattenChromeMcpSnapshotToAriaNodes(snapshot, plan.limit), + }); + } + const built = buildAiSnapshotFromChromeMcpSnapshot({ + root: snapshot, + options: { + interactive: plan.interactive ?? undefined, + compact: plan.compact ?? undefined, + maxDepth: plan.depth ?? 
undefined, + }, + maxChars: plan.resolvedMaxChars, + }); + if (plan.labels) { + const refs = Object.keys(built.refs); + const labelResult = await renderChromeMcpLabels({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + refs, + }); + try { + const labeled = await takeChromeMcpScreenshot({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + format: "png", + }); + const normalized = await normalizeBrowserScreenshot(labeled, { + maxSide: DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE, + maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + }); + await ensureMediaDir(); + const saved = await saveMediaBuffer( + normalized.buffer, + normalized.contentType ?? "image/png", + "browser", + DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + ); + return res.json({ + ok: true, + format: "ai", + targetId: tab.targetId, + url: tab.url, + labels: true, + labelsCount: labelResult.labels, + labelsSkipped: labelResult.skipped, + imagePath: path.resolve(saved.path), + imageType: normalized.contentType?.includes("jpeg") ? 
"jpeg" : "png", + ...built, + }); + } finally { + await clearChromeMcpOverlay({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + }); + } + } + return res.json({ + ok: true, + format: "ai", + targetId: tab.targetId, + url: tab.url, + ...built, + }); + } if (plan.format === "ai") { const pw = await requirePwAi(res, "ai snapshot"); if (!pw) { diff --git a/src/browser/routes/basic.existing-session.test.ts b/src/browser/routes/basic.existing-session.test.ts new file mode 100644 index 00000000000..34bcd9ee00b --- /dev/null +++ b/src/browser/routes/basic.existing-session.test.ts @@ -0,0 +1,94 @@ +import { describe, expect, it, vi } from "vitest"; +import { BrowserProfileUnavailableError } from "../errors.js"; +import { registerBrowserBasicRoutes } from "./basic.js"; +import { createBrowserRouteApp, createBrowserRouteResponse } from "./test-helpers.js"; + +vi.mock("../chrome-mcp.js", () => ({ + getChromeMcpPid: vi.fn(() => 4321), +})); + +describe("basic browser routes", () => { + it("maps existing-session status failures to JSON browser errors", async () => { + const { app, getHandlers } = createBrowserRouteApp(); + registerBrowserBasicRoutes(app, { + state: () => ({ + resolved: { + enabled: true, + headless: false, + noSandbox: false, + executablePath: undefined, + }, + profiles: new Map(), + }), + forProfile: () => + ({ + profile: { + name: "chrome-live", + driver: "existing-session", + cdpPort: 0, + cdpUrl: "", + color: "#00AA00", + attachOnly: true, + }, + isHttpReachable: async () => { + throw new BrowserProfileUnavailableError("attach failed"); + }, + isReachable: async () => true, + }) as never, + } as never); + + const handler = getHandlers.get("/"); + expect(handler).toBeTypeOf("function"); + + const response = createBrowserRouteResponse(); + await handler?.({ params: {}, query: { profile: "chrome-live" } }, response.res); + + expect(response.statusCode).toBe(409); + expect(response.body).toMatchObject({ error: "attach failed" }); + }); + + 
it("reports Chrome MCP transport without fake CDP fields", async () => { + const { app, getHandlers } = createBrowserRouteApp(); + registerBrowserBasicRoutes(app, { + state: () => ({ + resolved: { + enabled: true, + headless: false, + noSandbox: false, + executablePath: undefined, + }, + profiles: new Map(), + }), + forProfile: () => + ({ + profile: { + name: "chrome-live", + driver: "existing-session", + cdpPort: 0, + cdpUrl: "", + color: "#00AA00", + attachOnly: true, + }, + isHttpReachable: async () => true, + isReachable: async () => true, + }) as never, + } as never); + + const handler = getHandlers.get("/"); + expect(handler).toBeTypeOf("function"); + + const response = createBrowserRouteResponse(); + await handler?.({ params: {}, query: { profile: "chrome-live" } }, response.res); + + expect(response.statusCode).toBe(200); + expect(response.body).toMatchObject({ + profile: "chrome-live", + driver: "existing-session", + transport: "chrome-mcp", + running: true, + cdpPort: null, + cdpUrl: null, + pid: 4321, + }); + }); +}); diff --git a/src/browser/routes/basic.ts b/src/browser/routes/basic.ts index 5f32c86729b..f6123ac4cf0 100644 --- a/src/browser/routes/basic.ts +++ b/src/browser/routes/basic.ts @@ -1,11 +1,21 @@ +import { getChromeMcpPid } from "../chrome-mcp.js"; import { resolveBrowserExecutableForPlatform } from "../chrome.executables.js"; import { toBrowserErrorResponse } from "../errors.js"; +import { getBrowserProfileCapabilities } from "../profile-capabilities.js"; import { createBrowserProfilesService } from "../profiles-service.js"; import type { BrowserRouteContext, ProfileContext } from "../server-context.js"; import { resolveProfileContext } from "./agent.shared.js"; import type { BrowserRequest, BrowserResponse, BrowserRouteRegistrar } from "./types.js"; import { getProfileContext, jsonError, toStringOrEmpty } from "./utils.js"; +function handleBrowserRouteError(res: BrowserResponse, err: unknown) { + const mapped = toBrowserErrorResponse(err); 
+ if (mapped) { + return jsonError(res, mapped.status, mapped.message); + } + jsonError(res, 500, String(err)); +} + async function withBasicProfileRoute(params: { req: BrowserRequest; res: BrowserResponse; @@ -19,11 +29,21 @@ async function withBasicProfileRoute(params: { try { await params.run(profileCtx); } catch (err) { - const mapped = toBrowserErrorResponse(err); - if (mapped) { - return jsonError(params.res, mapped.status, mapped.message); - } - jsonError(params.res, 500, String(err)); + return handleBrowserRouteError(params.res, err); + } +} + +async function withProfilesServiceMutation(params: { + res: BrowserResponse; + ctx: BrowserRouteContext; + run: (service: ReturnType) => Promise; +}) { + try { + const service = createBrowserProfilesService(params.ctx); + const result = await params.run(service); + params.res.json(result); + } catch (err) { + return handleBrowserRouteError(params.res, err); } } @@ -53,46 +73,59 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow return jsonError(res, profileCtx.status, profileCtx.error); } - const [cdpHttp, cdpReady] = await Promise.all([ - profileCtx.isHttpReachable(300), - profileCtx.isReachable(600), - ]); - - const profileState = current.profiles.get(profileCtx.profile.name); - let detectedBrowser: string | null = null; - let detectedExecutablePath: string | null = null; - let detectError: string | null = null; - try { - const detected = resolveBrowserExecutableForPlatform(current.resolved, process.platform); - if (detected) { - detectedBrowser = detected.kind; - detectedExecutablePath = detected.path; - } - } catch (err) { - detectError = String(err); - } + const [cdpHttp, cdpReady] = await Promise.all([ + profileCtx.isHttpReachable(300), + profileCtx.isReachable(600), + ]); - res.json({ - enabled: current.resolved.enabled, - profile: profileCtx.profile.name, - running: cdpReady, - cdpReady, - cdpHttp, - pid: profileState?.running?.pid ?? 
null, - cdpPort: profileCtx.profile.cdpPort, - cdpUrl: profileCtx.profile.cdpUrl, - chosenBrowser: profileState?.running?.exe.kind ?? null, - detectedBrowser, - detectedExecutablePath, - detectError, - userDataDir: profileState?.running?.userDataDir ?? null, - color: profileCtx.profile.color, - headless: current.resolved.headless, - noSandbox: current.resolved.noSandbox, - executablePath: current.resolved.executablePath ?? null, - attachOnly: profileCtx.profile.attachOnly, - }); + const profileState = current.profiles.get(profileCtx.profile.name); + const capabilities = getBrowserProfileCapabilities(profileCtx.profile); + let detectedBrowser: string | null = null; + let detectedExecutablePath: string | null = null; + let detectError: string | null = null; + + try { + const detected = resolveBrowserExecutableForPlatform(current.resolved, process.platform); + if (detected) { + detectedBrowser = detected.kind; + detectedExecutablePath = detected.path; + } + } catch (err) { + detectError = String(err); + } + + res.json({ + enabled: current.resolved.enabled, + profile: profileCtx.profile.name, + driver: profileCtx.profile.driver, + transport: capabilities.usesChromeMcp ? "chrome-mcp" : "cdp", + running: cdpReady, + cdpReady, + cdpHttp, + pid: capabilities.usesChromeMcp + ? getChromeMcpPid(profileCtx.profile.name) + : (profileState?.running?.pid ?? null), + cdpPort: capabilities.usesChromeMcp ? null : profileCtx.profile.cdpPort, + cdpUrl: capabilities.usesChromeMcp ? null : profileCtx.profile.cdpUrl, + chosenBrowser: profileState?.running?.exe.kind ?? null, + detectedBrowser, + detectedExecutablePath, + detectError, + userDataDir: profileState?.running?.userDataDir ?? null, + color: profileCtx.profile.color, + headless: current.resolved.headless, + noSandbox: current.resolved.noSandbox, + executablePath: current.resolved.executablePath ?? 
null, + attachOnly: profileCtx.profile.attachOnly, + }); + } catch (err) { + const mapped = toBrowserErrorResponse(err); + if (mapped) { + return jsonError(res, mapped.status, mapped.message); + } + jsonError(res, 500, String(err)); + } }); // Start browser (profile-aware) @@ -146,28 +179,29 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow const driver = toStringOrEmpty((req.body as { driver?: unknown })?.driver) as | "openclaw" | "extension" + | "existing-session" | ""; if (!name) { return jsonError(res, 400, "name is required"); } - try { - const service = createBrowserProfilesService(ctx); - const result = await service.createProfile({ - name, - color: color || undefined, - cdpUrl: cdpUrl || undefined, - driver: driver === "extension" ? "extension" : undefined, - }); - res.json(result); - } catch (err) { - const mapped = toBrowserErrorResponse(err); - if (mapped) { - return jsonError(res, mapped.status, mapped.message); - } - jsonError(res, 500, String(err)); - } + await withProfilesServiceMutation({ + res, + ctx, + run: async (service) => + await service.createProfile({ + name, + color: color || undefined, + cdpUrl: cdpUrl || undefined, + driver: + driver === "extension" + ? "extension" + : driver === "existing-session" + ? 
"existing-session" + : undefined, + }), + }); }); // Delete a profile @@ -177,16 +211,10 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow return jsonError(res, 400, "profile name is required"); } - try { - const service = createBrowserProfilesService(ctx); - const result = await service.deleteProfile(name); - res.json(result); - } catch (err) { - const mapped = toBrowserErrorResponse(err); - if (mapped) { - return jsonError(res, mapped.status, mapped.message); - } - jsonError(res, 500, String(err)); - } + await withProfilesServiceMutation({ + res, + ctx, + run: async (service) => await service.deleteProfile(name), + }); }); } diff --git a/src/browser/routes/test-helpers.ts b/src/browser/routes/test-helpers.ts new file mode 100644 index 00000000000..e6b046a9878 --- /dev/null +++ b/src/browser/routes/test-helpers.ts @@ -0,0 +1,36 @@ +import type { BrowserResponse, BrowserRouteHandler, BrowserRouteRegistrar } from "./types.js"; + +export function createBrowserRouteApp() { + const getHandlers = new Map(); + const postHandlers = new Map(); + const deleteHandlers = new Map(); + const app: BrowserRouteRegistrar = { + get: (path, handler) => void getHandlers.set(path, handler), + post: (path, handler) => void postHandlers.set(path, handler), + delete: (path, handler) => void deleteHandlers.set(path, handler), + }; + return { app, getHandlers, postHandlers, deleteHandlers }; +} + +export function createBrowserRouteResponse() { + let statusCode = 200; + let jsonBody: unknown; + const res: BrowserResponse = { + status(code) { + statusCode = code; + return res; + }, + json(body) { + jsonBody = body; + }, + }; + return { + res, + get statusCode() { + return statusCode; + }, + get body() { + return jsonBody; + }, + }; +} diff --git a/src/browser/server-context.availability.ts b/src/browser/server-context.availability.ts index 3b00ff99dff..3b991bbbdfe 100644 --- a/src/browser/server-context.availability.ts +++ 
b/src/browser/server-context.availability.ts @@ -3,6 +3,11 @@ import { PROFILE_POST_RESTART_WS_TIMEOUT_MS, resolveCdpReachabilityTimeouts, } from "./cdp-timeouts.js"; +import { + closeChromeMcpSession, + ensureChromeMcpAvailable, + listChromeMcpTabs, +} from "./chrome-mcp.js"; import { isChromeCdpReady, isChromeReachable, @@ -60,11 +65,19 @@ export function createProfileAvailability({ }); const isReachable = async (timeoutMs?: number) => { + if (capabilities.usesChromeMcp) { + // listChromeMcpTabs creates the session if needed — no separate ensureChromeMcpAvailable call required + await listChromeMcpTabs(profile.name); + return true; + } const { httpTimeoutMs, wsTimeoutMs } = resolveTimeouts(timeoutMs); return await isChromeCdpReady(profile.cdpUrl, httpTimeoutMs, wsTimeoutMs); }; const isHttpReachable = async (timeoutMs?: number) => { + if (capabilities.usesChromeMcp) { + return await isReachable(timeoutMs); + } const { httpTimeoutMs } = resolveTimeouts(timeoutMs); return await isChromeReachable(profile.cdpUrl, httpTimeoutMs); }; @@ -109,6 +122,9 @@ export function createProfileAvailability({ if (previousProfile.driver === "extension") { await stopChromeExtensionRelayServer({ cdpUrl: previousProfile.cdpUrl }).catch(() => false); } + if (getBrowserProfileCapabilities(previousProfile).usesChromeMcp) { + await closeChromeMcpSession(previousProfile.name).catch(() => false); + } await closePlaywrightBrowserConnectionForProfile(previousProfile.cdpUrl); if (previousProfile.cdpUrl !== profile.cdpUrl) { await closePlaywrightBrowserConnectionForProfile(profile.cdpUrl); @@ -138,6 +154,10 @@ export function createProfileAvailability({ const ensureBrowserAvailable = async (): Promise => { await reconcileProfileRuntime(); + if (capabilities.usesChromeMcp) { + await ensureChromeMcpAvailable(profile.name); + return; + } const current = state(); const remoteCdp = capabilities.isRemote; const attachOnly = profile.attachOnly; @@ -238,6 +258,10 @@ export function 
createProfileAvailability({ const stopRunningBrowser = async (): Promise<{ stopped: boolean }> => { await reconcileProfileRuntime(); + if (capabilities.usesChromeMcp) { + const stopped = await closeChromeMcpSession(profile.name); + return { stopped }; + } if (capabilities.requiresRelay) { const stopped = await stopChromeExtensionRelayServer({ cdpUrl: profile.cdpUrl, diff --git a/src/browser/server-context.ensure-tab-available.prefers-last-target.test.ts b/src/browser/server-context.ensure-tab-available.prefers-last-target.test.ts index 13c5f82e31d..ceaafc46d41 100644 --- a/src/browser/server-context.ensure-tab-available.prefers-last-target.test.ts +++ b/src/browser/server-context.ensure-tab-available.prefers-last-target.test.ts @@ -25,9 +25,9 @@ function makeBrowserState(): BrowserServerState { headless: true, noSandbox: false, attachOnly: false, - defaultProfile: "chrome", + defaultProfile: "chrome-relay", profiles: { - chrome: { + "chrome-relay": { driver: "extension", cdpUrl: "http://127.0.0.1:18792", cdpPort: 18792, diff --git a/src/browser/server-context.existing-session.test.ts b/src/browser/server-context.existing-session.test.ts new file mode 100644 index 00000000000..abbd222342e --- /dev/null +++ b/src/browser/server-context.existing-session.test.ts @@ -0,0 +1,102 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { createBrowserRouteContext } from "./server-context.js"; +import type { BrowserServerState } from "./server-context.js"; + +vi.mock("./chrome-mcp.js", () => ({ + closeChromeMcpSession: vi.fn(async () => true), + ensureChromeMcpAvailable: vi.fn(async () => {}), + focusChromeMcpTab: vi.fn(async () => {}), + listChromeMcpTabs: vi.fn(async () => [ + { targetId: "7", title: "", url: "https://example.com", type: "page" }, + ]), + openChromeMcpTab: vi.fn(async () => ({ + targetId: "8", + title: "", + url: "https://openclaw.ai", + type: "page", + })), + closeChromeMcpTab: vi.fn(async () => {}), + getChromeMcpPid: vi.fn(() => 
4321), +})); + +import * as chromeMcp from "./chrome-mcp.js"; + +function makeState(): BrowserServerState { + return { + server: null, + port: 0, + resolved: { + enabled: true, + evaluateEnabled: true, + controlPort: 18791, + cdpPortRangeStart: 18800, + cdpPortRangeEnd: 18899, + cdpProtocol: "http", + cdpHost: "127.0.0.1", + cdpIsLoopback: true, + remoteCdpTimeoutMs: 1500, + remoteCdpHandshakeTimeoutMs: 3000, + color: "#FF4500", + headless: false, + noSandbox: false, + attachOnly: false, + defaultProfile: "chrome-live", + profiles: { + "chrome-live": { + cdpPort: 18801, + color: "#0066CC", + driver: "existing-session", + attachOnly: true, + }, + }, + extraArgs: [], + ssrfPolicy: { dangerouslyAllowPrivateNetwork: true }, + }, + profiles: new Map(), + }; +} + +afterEach(() => { + vi.clearAllMocks(); +}); + +describe("browser server-context existing-session profile", () => { + it("routes tab operations through the Chrome MCP backend", async () => { + const state = makeState(); + const ctx = createBrowserRouteContext({ getState: () => state }); + const live = ctx.forProfile("chrome-live"); + + vi.mocked(chromeMcp.listChromeMcpTabs) + .mockResolvedValueOnce([ + { targetId: "7", title: "", url: "https://example.com", type: "page" }, + ]) + .mockResolvedValueOnce([ + { targetId: "8", title: "", url: "https://openclaw.ai", type: "page" }, + ]) + .mockResolvedValueOnce([ + { targetId: "8", title: "", url: "https://openclaw.ai", type: "page" }, + ]) + .mockResolvedValueOnce([ + { targetId: "7", title: "", url: "https://example.com", type: "page" }, + ]); + + await live.ensureBrowserAvailable(); + const tabs = await live.listTabs(); + expect(tabs.map((tab) => tab.targetId)).toEqual(["7"]); + + const opened = await live.openTab("https://openclaw.ai"); + expect(opened.targetId).toBe("8"); + + const selected = await live.ensureTabAvailable(); + expect(selected.targetId).toBe("8"); + + await live.focusTab("7"); + await live.stopRunningBrowser(); + + 
expect(chromeMcp.ensureChromeMcpAvailable).toHaveBeenCalledWith("chrome-live"); + expect(chromeMcp.listChromeMcpTabs).toHaveBeenCalledWith("chrome-live"); + expect(chromeMcp.openChromeMcpTab).toHaveBeenCalledWith("chrome-live", "https://openclaw.ai"); + expect(chromeMcp.focusChromeMcpTab).toHaveBeenCalledWith("chrome-live", "7"); + expect(chromeMcp.closeChromeMcpSession).toHaveBeenCalledWith("chrome-live"); + }); +}); diff --git a/src/browser/server-context.hot-reload-profiles.test.ts b/src/browser/server-context.hot-reload-profiles.test.ts index ec0c7e072aa..f9eb2452ce2 100644 --- a/src/browser/server-context.hot-reload-profiles.test.ts +++ b/src/browser/server-context.hot-reload-profiles.test.ts @@ -30,6 +30,7 @@ vi.mock("../config/config.js", () => ({ return buildConfig(); }, }), + getRuntimeConfigSnapshot: () => null, loadConfig: () => { // simulate stale loadConfig that doesn't see updates unless cache cleared if (!cachedConfig) { diff --git a/src/browser/server-context.selection.ts b/src/browser/server-context.selection.ts index 8a9cfa19c42..f0ce3e25e06 100644 --- a/src/browser/server-context.selection.ts +++ b/src/browser/server-context.selection.ts @@ -1,5 +1,6 @@ import { fetchOk, normalizeCdpHttpBaseForJsonEndpoints } from "./cdp.helpers.js"; import { appendCdpPath } from "./cdp.js"; +import { closeChromeMcpTab, focusChromeMcpTab } from "./chrome-mcp.js"; import type { ResolvedBrowserProfile } from "./config.js"; import { BrowserTabNotFoundError, BrowserTargetAmbiguousError } from "./errors.js"; import { getBrowserProfileCapabilities } from "./profile-capabilities.js"; @@ -111,6 +112,13 @@ export function createProfileSelectionOps({ const focusTab = async (targetId: string): Promise => { const resolvedTargetId = await resolveTargetIdOrThrow(targetId); + if (capabilities.usesChromeMcp) { + await focusChromeMcpTab(profile.name, resolvedTargetId); + const profileState = getProfileState(); + profileState.lastTargetId = resolvedTargetId; + return; + } + if 
(capabilities.usesPersistentPlaywright) { const mod = await getPwAiModule({ mode: "strict" }); const focusPageByTargetIdViaPlaywright = (mod as Partial | null) @@ -134,6 +142,11 @@ export function createProfileSelectionOps({ const closeTab = async (targetId: string): Promise => { const resolvedTargetId = await resolveTargetIdOrThrow(targetId); + if (capabilities.usesChromeMcp) { + await closeChromeMcpTab(profile.name, resolvedTargetId); + return; + } + // For remote profiles, use Playwright's persistent connection to close tabs if (capabilities.usesPersistentPlaywright) { const mod = await getPwAiModule({ mode: "strict" }); diff --git a/src/browser/server-context.tab-ops.ts b/src/browser/server-context.tab-ops.ts index 24985430bdc..66a134564c6 100644 --- a/src/browser/server-context.tab-ops.ts +++ b/src/browser/server-context.tab-ops.ts @@ -1,6 +1,7 @@ import { CDP_JSON_NEW_TIMEOUT_MS } from "./cdp-timeouts.js"; import { fetchJson, fetchOk, normalizeCdpHttpBaseForJsonEndpoints } from "./cdp.helpers.js"; import { appendCdpPath, createTargetViaCdp, normalizeCdpWsUrl } from "./cdp.js"; +import { listChromeMcpTabs, openChromeMcpTab } from "./chrome-mcp.js"; import type { ResolvedBrowserProfile } from "./config.js"; import { assertBrowserNavigationAllowed, @@ -65,6 +66,10 @@ export function createProfileTabOps({ const capabilities = getBrowserProfileCapabilities(profile); const listTabs = async (): Promise => { + if (capabilities.usesChromeMcp) { + return await listChromeMcpTabs(profile.name); + } + if (capabilities.usesPersistentPlaywright) { const mod = await getPwAiModule({ mode: "strict" }); const listPagesViaPlaywright = (mod as Partial | null)?.listPagesViaPlaywright; @@ -134,6 +139,15 @@ export function createProfileTabOps({ const openTab = async (url: string): Promise => { const ssrfPolicyOpts = withBrowserNavigationPolicy(state().resolved.ssrfPolicy); + if (capabilities.usesChromeMcp) { + await assertBrowserNavigationAllowed({ url, ...ssrfPolicyOpts }); + const 
page = await openChromeMcpTab(profile.name, url); + const profileState = getProfileState(); + profileState.lastTargetId = page.targetId; + await assertBrowserNavigationResultAllowed({ url: page.url, ...ssrfPolicyOpts }); + return page; + } + if (capabilities.usesPersistentPlaywright) { const mod = await getPwAiModule({ mode: "strict" }); const createPageViaPlaywright = (mod as Partial | null)?.createPageViaPlaywright; diff --git a/src/browser/server-context.ts b/src/browser/server-context.ts index d75b14c2471..0ba29ad38cf 100644 --- a/src/browser/server-context.ts +++ b/src/browser/server-context.ts @@ -4,6 +4,7 @@ import type { ResolvedBrowserProfile } from "./config.js"; import { resolveProfile } from "./config.js"; import { BrowserProfileNotFoundError, toBrowserErrorResponse } from "./errors.js"; import { InvalidBrowserNavigationUrlError } from "./navigation-guard.js"; +import { getBrowserProfileCapabilities } from "./profile-capabilities.js"; import { refreshResolvedBrowserConfigFromDisk, resolveBrowserProfileWithHotReload, @@ -159,15 +160,26 @@ export function createBrowserRouteContext(opts: ContextOptions): BrowserRouteCon if (!profile) { continue; } + const capabilities = getBrowserProfileCapabilities(profile); let tabCount = 0; let running = false; + const profileCtx = createProfileContext(opts, profile); - if (profileState?.running) { + if (capabilities.usesChromeMcp) { + try { + running = await profileCtx.isReachable(300); + if (running) { + const tabs = await profileCtx.listTabs(); + tabCount = tabs.filter((t) => t.type === "page").length; + } + } catch { + // Chrome MCP not available + } + } else if (profileState?.running) { running = true; try { - const ctx = createProfileContext(opts, profile); - const tabs = await ctx.listTabs(); + const tabs = await profileCtx.listTabs(); tabCount = tabs.filter((t) => t.type === "page").length; } catch { // Browser might not be responsive @@ -178,8 +190,7 @@ export function createBrowserRouteContext(opts: 
ContextOptions): BrowserRouteCon const reachable = await isChromeReachable(profile.cdpUrl, 200); if (reachable) { running = true; - const ctx = createProfileContext(opts, profile); - const tabs = await ctx.listTabs().catch(() => []); + const tabs = await profileCtx.listTabs().catch(() => []); tabCount = tabs.filter((t) => t.type === "page").length; } } catch { @@ -189,9 +200,11 @@ export function createBrowserRouteContext(opts: ContextOptions): BrowserRouteCon result.push({ name, - cdpPort: profile.cdpPort, - cdpUrl: profile.cdpUrl, + transport: capabilities.usesChromeMcp ? "chrome-mcp" : "cdp", + cdpPort: capabilities.usesChromeMcp ? null : profile.cdpPort, + cdpUrl: capabilities.usesChromeMcp ? null : profile.cdpUrl, color: profile.color, + driver: profile.driver, running, tabCount, isDefault: name === current.resolved.defaultProfile, diff --git a/src/browser/server-context.types.ts b/src/browser/server-context.types.ts index f05e90e9e77..b8ad7aa329d 100644 --- a/src/browser/server-context.types.ts +++ b/src/browser/server-context.types.ts @@ -1,5 +1,6 @@ import type { Server } from "node:http"; import type { RunningChrome } from "./chrome.js"; +import type { BrowserTransport } from "./client.js"; import type { BrowserTab } from "./client.js"; import type { ResolvedBrowserConfig, ResolvedBrowserProfile } from "./config.js"; @@ -53,9 +54,11 @@ export type ProfileContext = { export type ProfileStatus = { name: string; - cdpPort: number; - cdpUrl: string; + transport: BrowserTransport; + cdpPort: number | null; + cdpUrl: string | null; color: string; + driver: ResolvedBrowserProfile["driver"]; running: boolean; tabCount: number; isDefault: boolean; diff --git a/src/browser/server-lifecycle.test.ts b/src/browser/server-lifecycle.test.ts index e2395f99f04..5ef331f1784 100644 --- a/src/browser/server-lifecycle.test.ts +++ b/src/browser/server-lifecycle.test.ts @@ -43,7 +43,7 @@ describe("ensureExtensionRelayForProfiles", () => { it("starts relay only for extension 
profiles", async () => { resolveProfileMock.mockImplementation((_resolved: unknown, name: string) => { - if (name === "chrome") { + if (name === "chrome-relay") { return { driver: "extension", cdpUrl: "http://127.0.0.1:18888" }; } return { driver: "openclaw", cdpUrl: "http://127.0.0.1:18889" }; @@ -53,7 +53,7 @@ describe("ensureExtensionRelayForProfiles", () => { await ensureExtensionRelayForProfiles({ resolved: { profiles: { - chrome: {}, + "chrome-relay": {}, openclaw: {}, }, } as never, @@ -72,12 +72,12 @@ describe("ensureExtensionRelayForProfiles", () => { const onWarn = vi.fn(); await ensureExtensionRelayForProfiles({ - resolved: { profiles: { chrome: {} } } as never, + resolved: { profiles: { "chrome-relay": {} } } as never, onWarn, }); expect(onWarn).toHaveBeenCalledWith( - 'Chrome extension relay init failed for profile "chrome": Error: boom', + 'Chrome extension relay init failed for profile "chrome-relay": Error: boom', ); }); }); @@ -91,10 +91,10 @@ describe("stopKnownBrowserProfiles", () => { }); it("stops all known profiles and ignores per-profile failures", async () => { - listKnownProfileNamesMock.mockReturnValue(["openclaw", "chrome"]); + listKnownProfileNamesMock.mockReturnValue(["openclaw", "chrome-relay"]); const stopMap: Record> = { openclaw: vi.fn(async () => {}), - chrome: vi.fn(async () => { + "chrome-relay": vi.fn(async () => { throw new Error("profile stop failed"); }), }; @@ -112,7 +112,7 @@ describe("stopKnownBrowserProfiles", () => { }); expect(stopMap.openclaw).toHaveBeenCalledTimes(1); - expect(stopMap.chrome).toHaveBeenCalledTimes(1); + expect(stopMap["chrome-relay"]).toHaveBeenCalledTimes(1); expect(onWarn).not.toHaveBeenCalled(); }); diff --git a/src/browser/server.agent-contract-form-layout-act-commands.test.ts b/src/browser/server.agent-contract-form-layout-act-commands.test.ts index 738bf8b7e2d..c8b76c4b886 100644 --- a/src/browser/server.agent-contract-form-layout-act-commands.test.ts +++ 
b/src/browser/server.agent-contract-form-layout-act-commands.test.ts @@ -51,12 +51,14 @@ describe("browser control server", () => { values: ["a", "b"], }); expect(select.ok).toBe(true); - expect(pwMocks.selectOptionViaPlaywright).toHaveBeenCalledWith({ - cdpUrl: state.cdpBaseUrl, - targetId: "abcd1234", - ref: "5", - values: ["a", "b"], - }); + expect(pwMocks.selectOptionViaPlaywright).toHaveBeenCalledWith( + expect.objectContaining({ + cdpUrl: expect.any(String), + targetId: "abcd1234", + ref: "5", + values: ["a", "b"], + }), + ); const fillCases: Array<{ input: Record; @@ -81,11 +83,13 @@ describe("browser control server", () => { fields: [input], }); expect(fill.ok).toBe(true); - expect(pwMocks.fillFormViaPlaywright).toHaveBeenCalledWith({ - cdpUrl: state.cdpBaseUrl, - targetId: "abcd1234", - fields: [expected], - }); + expect(pwMocks.fillFormViaPlaywright).toHaveBeenCalledWith( + expect.objectContaining({ + cdpUrl: expect.any(String), + targetId: "abcd1234", + fields: [expected], + }), + ); } const resize = await postJson<{ ok: boolean }>(`${base}/act`, { @@ -94,12 +98,14 @@ describe("browser control server", () => { height: 600, }); expect(resize.ok).toBe(true); - expect(pwMocks.resizeViewportViaPlaywright).toHaveBeenCalledWith({ - cdpUrl: state.cdpBaseUrl, - targetId: "abcd1234", - width: 800, - height: 600, - }); + expect(pwMocks.resizeViewportViaPlaywright).toHaveBeenCalledWith( + expect.objectContaining({ + cdpUrl: expect.any(String), + targetId: "abcd1234", + width: 800, + height: 600, + }), + ); const wait = await postJson<{ ok: boolean }>(`${base}/act`, { kind: "wait", @@ -150,13 +156,152 @@ describe("browser control server", () => { kind: "evaluate", fn: "() => 1", }); - expect(res.error).toContain("browser.evaluateEnabled=false"); expect(pwMocks.evaluateViaPlaywright).not.toHaveBeenCalled(); }, slowTimeoutMs, ); + it( + "normalizes batch actions and threads evaluateEnabled into the batch executor", + async () => { + const base = await 
startServerAndBase(); + + const batchRes = await postJson<{ ok: boolean; results?: Array<{ ok: boolean }> }>( + `${base}/act`, + { + kind: "batch", + stopOnError: "false", + actions: [ + { kind: "click", selector: "button.save", doubleClick: "true", delayMs: "25" }, + { kind: "wait", fn: " () => window.ready === true " }, + ], + }, + ); + + expect(batchRes.ok).toBe(true); + expect(pwMocks.batchViaPlaywright).toHaveBeenCalledWith( + expect.objectContaining({ + cdpUrl: expect.any(String), + targetId: "abcd1234", + stopOnError: false, + evaluateEnabled: true, + actions: [ + { + kind: "click", + selector: "button.save", + doubleClick: true, + delayMs: 25, + }, + { + kind: "wait", + fn: "() => window.ready === true", + }, + ], + }), + ); + }, + slowTimeoutMs, + ); + + it( + "preserves exact type text in batch normalization", + async () => { + const base = await startServerAndBase(); + + const batchRes = await postJson<{ ok: boolean }>(`${base}/act`, { + kind: "batch", + actions: [ + { kind: "type", selector: "input.name", text: " padded " }, + { kind: "type", selector: "input.clearable", text: "" }, + ], + }); + + expect(batchRes.ok).toBe(true); + expect(pwMocks.batchViaPlaywright).toHaveBeenCalledWith( + expect.objectContaining({ + actions: [ + { + kind: "type", + selector: "input.name", + text: " padded ", + }, + { + kind: "type", + selector: "input.clearable", + text: "", + }, + ], + }), + ); + }, + slowTimeoutMs, + ); + + it( + "rejects malformed batch actions before dispatch", + async () => { + const base = await startServerAndBase(); + + const batchRes = await postJson<{ error?: string }>(`${base}/act`, { + kind: "batch", + actions: [{ kind: "click", ref: {} }], + }); + + expect(batchRes.error).toContain("click requires ref or selector"); + expect(pwMocks.batchViaPlaywright).not.toHaveBeenCalled(); + }, + slowTimeoutMs, + ); + + it( + "rejects batched action targetId overrides before dispatch", + async () => { + const base = await startServerAndBase(); + + const 
batchRes = await postJson<{ error?: string }>(`${base}/act`, { + kind: "batch", + actions: [{ kind: "click", ref: "5", targetId: "other-tab" }], + }); + + expect(batchRes.error).toContain("batched action targetId must match request targetId"); + expect(pwMocks.batchViaPlaywright).not.toHaveBeenCalled(); + }, + slowTimeoutMs, + ); + + it( + "rejects oversized batch delays before dispatch", + async () => { + const base = await startServerAndBase(); + + const batchRes = await postJson<{ error?: string }>(`${base}/act`, { + kind: "batch", + actions: [{ kind: "click", selector: "button.save", delayMs: 5001 }], + }); + + expect(batchRes.error).toContain("click delayMs exceeds maximum of 5000ms"); + expect(pwMocks.batchViaPlaywright).not.toHaveBeenCalled(); + }, + slowTimeoutMs, + ); + + it( + "rejects oversized top-level batches before dispatch", + async () => { + const base = await startServerAndBase(); + + const batchRes = await postJson<{ error?: string }>(`${base}/act`, { + kind: "batch", + actions: Array.from({ length: 101 }, () => ({ kind: "press", key: "Enter" })), + }); + + expect(batchRes.error).toContain("batch exceeds maximum of 100 actions"); + expect(pwMocks.batchViaPlaywright).not.toHaveBeenCalled(); + }, + slowTimeoutMs, + ); + it("agent contract: hooks + response + downloads + screenshot", async () => { const base = await startServerAndBase(); @@ -165,13 +310,15 @@ describe("browser control server", () => { timeoutMs: 1234, }); expect(upload).toMatchObject({ ok: true }); - expect(pwMocks.armFileUploadViaPlaywright).toHaveBeenCalledWith({ - cdpUrl: state.cdpBaseUrl, - targetId: "abcd1234", - // The server resolves paths (which adds a drive letter on Windows for `\\tmp\\...` style roots). 
- paths: [path.resolve(DEFAULT_UPLOAD_DIR, "a.txt")], - timeoutMs: 1234, - }); + expect(pwMocks.armFileUploadViaPlaywright).toHaveBeenCalledWith( + expect.objectContaining({ + cdpUrl: expect.any(String), + targetId: "abcd1234", + // The server resolves paths (which adds a drive letter on Windows for `\\tmp\\...` style roots). + paths: [path.resolve(DEFAULT_UPLOAD_DIR, "a.txt")], + timeoutMs: 1234, + }), + ); const uploadWithRef = await postJson(`${base}/hooks/file-chooser`, { paths: ["b.txt"], @@ -280,7 +427,7 @@ describe("browser control server", () => { expect(res.path).toContain("safe-trace.zip"); expect(pwMocks.traceStopViaPlaywright).toHaveBeenCalledWith( expect.objectContaining({ - cdpUrl: state.cdpBaseUrl, + cdpUrl: expect.any(String), targetId: "abcd1234", path: expect.stringContaining("safe-trace.zip"), }), @@ -369,7 +516,7 @@ describe("browser control server", () => { expect(res.ok).toBe(true); expect(pwMocks.waitForDownloadViaPlaywright).toHaveBeenCalledWith( expect.objectContaining({ - cdpUrl: state.cdpBaseUrl, + cdpUrl: expect.any(String), targetId: "abcd1234", path: expect.stringContaining("safe-wait.pdf"), }), @@ -385,7 +532,7 @@ describe("browser control server", () => { expect(res.ok).toBe(true); expect(pwMocks.downloadViaPlaywright).toHaveBeenCalledWith( expect.objectContaining({ - cdpUrl: state.cdpBaseUrl, + cdpUrl: expect.any(String), targetId: "abcd1234", ref: "e12", path: expect.stringContaining("safe-download.pdf"), diff --git a/src/browser/server.agent-contract-snapshot-endpoints.test.ts b/src/browser/server.agent-contract-snapshot-endpoints.test.ts index 7e300fe5aee..837a122becd 100644 --- a/src/browser/server.agent-contract-snapshot-endpoints.test.ts +++ b/src/browser/server.agent-contract-snapshot-endpoints.test.ts @@ -96,10 +96,14 @@ describe("browser control server", () => { headers: { "Content-Type": "application/json" }, body: JSON.stringify({ kind: "click", selector: "button.save" }), }); - expect(clickSelector.status).toBe(400); - 
expect(((await clickSelector.json()) as { error?: string }).error).toMatch( - /'selector' is not supported/i, - ); + expect(clickSelector.status).toBe(200); + expect(((await clickSelector.json()) as { ok?: boolean }).ok).toBe(true); + expect(pwMocks.clickViaPlaywright).toHaveBeenNthCalledWith(2, { + cdpUrl: state.cdpBaseUrl, + targetId: "abcd1234", + selector: "button.save", + doubleClick: false, + }); const type = await postJson<{ ok: boolean }>(`${base}/act`, { kind: "type", diff --git a/src/browser/server.control-server.test-harness.ts b/src/browser/server.control-server.test-harness.ts index 5721d9eb17b..118c83dbb73 100644 --- a/src/browser/server.control-server.test-harness.ts +++ b/src/browser/server.control-server.test-harness.ts @@ -11,6 +11,17 @@ type HarnessState = { reachable: boolean; cfgAttachOnly: boolean; cfgEvaluateEnabled: boolean; + cfgDefaultProfile: string; + cfgProfiles: Record< + string, + { + cdpPort?: number; + cdpUrl?: string; + color: string; + driver?: "openclaw" | "extension" | "existing-session"; + attachOnly?: boolean; + } + >; createTargetId: string | null; prevGatewayPort: string | undefined; prevGatewayToken: string | undefined; @@ -23,6 +34,8 @@ const state: HarnessState = { reachable: false, cfgAttachOnly: false, cfgEvaluateEnabled: true, + cfgDefaultProfile: "openclaw", + cfgProfiles: {}, createTargetId: null, prevGatewayPort: undefined, prevGatewayToken: undefined, @@ -61,6 +74,14 @@ export function setBrowserControlServerReachable(reachable: boolean): void { state.reachable = reachable; } +export function setBrowserControlServerProfiles( + profiles: HarnessState["cfgProfiles"], + defaultProfile = Object.keys(profiles)[0] ?? 
"openclaw", +): void { + state.cfgProfiles = profiles; + state.cfgDefaultProfile = defaultProfile; +} + const cdpMocks = vi.hoisted(() => ({ createTargetViaCdp: vi.fn<() => Promise<{ targetId: string }>>(async () => { throw new Error("cdp disabled"); @@ -77,6 +98,7 @@ export function getCdpMocks(): { createTargetViaCdp: MockFn; snapshotAria: MockF const pwMocks = vi.hoisted(() => ({ armDialogViaPlaywright: vi.fn(async () => {}), armFileUploadViaPlaywright: vi.fn(async () => {}), + batchViaPlaywright: vi.fn(async () => ({ results: [] })), clickViaPlaywright: vi.fn(async () => {}), closePageViaPlaywright: vi.fn(async () => {}), closePlaywrightBrowserConnection: vi.fn(async () => {}), @@ -121,6 +143,44 @@ export function getPwMocks(): Record { return pwMocks as unknown as Record; } +const chromeMcpMocks = vi.hoisted(() => ({ + clickChromeMcpElement: vi.fn(async () => {}), + closeChromeMcpSession: vi.fn(async () => true), + closeChromeMcpTab: vi.fn(async () => {}), + dragChromeMcpElement: vi.fn(async () => {}), + ensureChromeMcpAvailable: vi.fn(async () => {}), + evaluateChromeMcpScript: vi.fn(async () => true), + fillChromeMcpElement: vi.fn(async () => {}), + fillChromeMcpForm: vi.fn(async () => {}), + focusChromeMcpTab: vi.fn(async () => {}), + getChromeMcpPid: vi.fn(() => 4321), + hoverChromeMcpElement: vi.fn(async () => {}), + listChromeMcpTabs: vi.fn(async () => [ + { targetId: "7", title: "", url: "https://example.com", type: "page" }, + ]), + navigateChromeMcpPage: vi.fn(async ({ url }: { url: string }) => ({ url })), + openChromeMcpTab: vi.fn(async (_profile: string, url: string) => ({ + targetId: "8", + title: "", + url, + type: "page", + })), + pressChromeMcpKey: vi.fn(async () => {}), + resizeChromeMcpPage: vi.fn(async () => {}), + takeChromeMcpScreenshot: vi.fn(async () => Buffer.from("png")), + takeChromeMcpSnapshot: vi.fn(async () => ({ + id: "root", + role: "document", + name: "Example", + children: [{ id: "btn-1", role: "button", name: "Continue" }], + 
})), + uploadChromeMcpFile: vi.fn(async () => {}), +})); + +export function getChromeMcpMocks(): Record { + return chromeMcpMocks as unknown as Record; +} + const chromeUserDataDir = vi.hoisted(() => ({ dir: "/tmp/openclaw" })); installChromeUserDataDirHooks(chromeUserDataDir); @@ -147,24 +207,40 @@ function makeProc(pid = 123) { const proc = makeProc(); +function defaultProfilesForState(testPort: number): HarnessState["cfgProfiles"] { + return { + openclaw: { cdpPort: testPort + 9, color: "#FF4500" }, + }; +} + vi.mock("../config/config.js", async (importOriginal) => { const actual = await importOriginal(); - return { - ...actual, - loadConfig: () => ({ + const loadConfig = () => { + return { browser: { enabled: true, evaluateEnabled: state.cfgEvaluateEnabled, color: "#FF4500", attachOnly: state.cfgAttachOnly, headless: true, - defaultProfile: "openclaw", - profiles: { - openclaw: { cdpPort: state.testPort + 1, color: "#FF4500" }, - }, + defaultProfile: state.cfgDefaultProfile, + profiles: + Object.keys(state.cfgProfiles).length > 0 + ? 
state.cfgProfiles + : defaultProfilesForState(state.testPort), }, - }), - writeConfigFile: vi.fn(async () => {}), + }; + }; + const writeConfigFile = vi.fn(async () => {}); + return { + ...actual, + createConfigIO: vi.fn(() => ({ + loadConfig, + writeConfigFile, + })), + getRuntimeConfigSnapshot: vi.fn(() => null), + loadConfig, + writeConfigFile, }; }); @@ -209,8 +285,12 @@ vi.mock("./cdp.js", () => ({ vi.mock("./pw-ai.js", () => pwMocks); +vi.mock("./chrome-mcp.js", () => chromeMcpMocks); + vi.mock("../media/store.js", () => ({ + MEDIA_MAX_BYTES: 5 * 1024 * 1024, ensureMediaDir: vi.fn(async () => {}), + getMediaDir: vi.fn(() => "/tmp"), saveMediaBuffer: vi.fn(async () => ({ path: "/tmp/fake.png" })), })); @@ -251,13 +331,18 @@ function mockClearAll(obj: Record unknown }>) { export async function resetBrowserControlServerTestContext(): Promise { state.reachable = false; state.cfgAttachOnly = false; + state.cfgEvaluateEnabled = true; + state.cfgDefaultProfile = "openclaw"; + state.cfgProfiles = defaultProfilesForState(state.testPort); state.createTargetId = null; mockClearAll(pwMocks); mockClearAll(cdpMocks); + mockClearAll(chromeMcpMocks); state.testPort = await getFreePort(); - state.cdpBaseUrl = `http://127.0.0.1:${state.testPort + 1}`; + state.cdpBaseUrl = `http://127.0.0.1:${state.testPort + 9}`; + state.cfgProfiles = defaultProfilesForState(state.testPort); state.prevGatewayPort = process.env.OPENCLAW_GATEWAY_PORT; process.env.OPENCLAW_GATEWAY_PORT = String(state.testPort - 2); // Avoid flaky auth coupling: some suites temporarily set gateway env auth diff --git a/src/browser/snapshot-roles.ts b/src/browser/snapshot-roles.ts new file mode 100644 index 00000000000..8e5d873e557 --- /dev/null +++ b/src/browser/snapshot-roles.ts @@ -0,0 +1,63 @@ +/** + * Shared ARIA role classification sets used by both the Playwright and Chrome MCP + * snapshot paths. 
Keep these in sync — divergence causes the two drivers to produce + * different snapshot output for the same page. + */ + +/** Roles that represent user-interactive elements and always get a ref. */ +export const INTERACTIVE_ROLES = new Set([ + "button", + "checkbox", + "combobox", + "link", + "listbox", + "menuitem", + "menuitemcheckbox", + "menuitemradio", + "option", + "radio", + "searchbox", + "slider", + "spinbutton", + "switch", + "tab", + "textbox", + "treeitem", +]); + +/** Roles that carry meaningful content and get a ref when named. */ +export const CONTENT_ROLES = new Set([ + "article", + "cell", + "columnheader", + "gridcell", + "heading", + "listitem", + "main", + "navigation", + "region", + "rowheader", +]); + +/** Structural/container roles — typically skipped in compact mode. */ +export const STRUCTURAL_ROLES = new Set([ + "application", + "directory", + "document", + "generic", + "grid", + "group", + "ignored", + "list", + "menu", + "menubar", + "none", + "presentation", + "row", + "rowgroup", + "table", + "tablist", + "toolbar", + "tree", + "treegrid", +]); diff --git a/src/browser/url-pattern.test.ts b/src/browser/url-pattern.test.ts new file mode 100644 index 00000000000..1cfdc06c36f --- /dev/null +++ b/src/browser/url-pattern.test.ts @@ -0,0 +1,26 @@ +import { describe, expect, it } from "vitest"; +import { matchBrowserUrlPattern } from "./url-pattern.js"; + +describe("browser url pattern matching", () => { + it("matches exact URLs", () => { + expect(matchBrowserUrlPattern("https://example.com/a", "https://example.com/a")).toBe(true); + expect(matchBrowserUrlPattern("https://example.com/a", "https://example.com/b")).toBe(false); + }); + + it("matches substring patterns without wildcards", () => { + expect(matchBrowserUrlPattern("example.com", "https://example.com/a")).toBe(true); + expect(matchBrowserUrlPattern("/dash", "https://example.com/app/dash")).toBe(true); + expect(matchBrowserUrlPattern("nope", "https://example.com/a")).toBe(false); + 
}); + + it("matches glob patterns", () => { + expect(matchBrowserUrlPattern("**/dash", "https://example.com/app/dash")).toBe(true); + expect(matchBrowserUrlPattern("https://example.com/*", "https://example.com/a")).toBe(true); + expect(matchBrowserUrlPattern("https://example.com/*", "https://other.com/a")).toBe(false); + }); + + it("rejects empty patterns", () => { + expect(matchBrowserUrlPattern("", "https://example.com")).toBe(false); + expect(matchBrowserUrlPattern(" ", "https://example.com")).toBe(false); + }); +}); diff --git a/src/browser/url-pattern.ts b/src/browser/url-pattern.ts new file mode 100644 index 00000000000..2ff99657d26 --- /dev/null +++ b/src/browser/url-pattern.ts @@ -0,0 +1,15 @@ +export function matchBrowserUrlPattern(pattern: string, url: string): boolean { + const trimmedPattern = pattern.trim(); + if (!trimmedPattern) { + return false; + } + if (trimmedPattern === url) { + return true; + } + if (trimmedPattern.includes("*")) { + const escaped = trimmedPattern.replace(/[|\\{}()[\]^$+?.]/g, "\\$&"); + const regex = new RegExp(`^${escaped.replace(/\*\*/g, ".*").replace(/\*/g, ".*")}$`); + return regex.test(url); + } + return url.includes(trimmedPattern); +} diff --git a/src/channels/allowlist-match.test.ts b/src/channels/allowlist-match.test.ts new file mode 100644 index 00000000000..9a55e593e57 --- /dev/null +++ b/src/channels/allowlist-match.test.ts @@ -0,0 +1,85 @@ +import { describe, expect, it } from "vitest"; +import { + resolveAllowlistMatchByCandidates, + resolveAllowlistMatchSimple, +} from "./allowlist-match.js"; + +describe("channels/allowlist-match", () => { + it("reflects in-place allowFrom edits even when array length stays the same", () => { + const allowFrom = ["alice", "bob"]; + + expect(resolveAllowlistMatchSimple({ allowFrom, senderId: "bob" })).toEqual({ + allowed: true, + matchKey: "bob", + matchSource: "id", + }); + + allowFrom[1] = "mallory"; + + expect(resolveAllowlistMatchSimple({ allowFrom, senderId: "bob" 
})).toEqual({ + allowed: false, + }); + expect(resolveAllowlistMatchSimple({ allowFrom, senderId: "mallory" })).toEqual({ + allowed: true, + matchKey: "mallory", + matchSource: "id", + }); + }); + + it("drops wildcard access after in-place wildcard replacement", () => { + const allowFrom = ["*"]; + + expect(resolveAllowlistMatchSimple({ allowFrom, senderId: "eve" })).toEqual({ + allowed: true, + matchKey: "*", + matchSource: "wildcard", + }); + + allowFrom[0] = "alice"; + + expect(resolveAllowlistMatchSimple({ allowFrom, senderId: "eve" })).toEqual({ + allowed: false, + }); + expect(resolveAllowlistMatchSimple({ allowFrom, senderId: "alice" })).toEqual({ + allowed: true, + matchKey: "alice", + matchSource: "id", + }); + }); + + it("recomputes candidate allowlist sets after in-place replacement", () => { + const allowList = ["user:alice", "user:bob"]; + + expect( + resolveAllowlistMatchByCandidates({ + allowList, + candidates: [{ value: "user:bob", source: "prefixed-user" }], + }), + ).toEqual({ + allowed: true, + matchKey: "user:bob", + matchSource: "prefixed-user", + }); + + allowList[1] = "user:mallory"; + + expect( + resolveAllowlistMatchByCandidates({ + allowList, + candidates: [{ value: "user:bob", source: "prefixed-user" }], + }), + ).toEqual({ + allowed: false, + }); + expect( + resolveAllowlistMatchByCandidates({ + allowList, + candidates: [{ value: "user:mallory", source: "prefixed-user" }], + }), + ).toEqual({ + allowed: true, + matchKey: "user:mallory", + matchSource: "prefixed-user", + }); + }); +}); diff --git a/src/channels/allowlist-match.ts b/src/channels/allowlist-match.ts index b30ef119c84..8c105f1e51b 100644 --- a/src/channels/allowlist-match.ts +++ b/src/channels/allowlist-match.ts @@ -16,33 +16,40 @@ export type AllowlistMatch = { matchSource?: TSource; }; -type CachedAllowListSet = { - size: number; - set: Set; +export type CompiledAllowlist = { + set: ReadonlySet; + wildcard: boolean; }; -const ALLOWLIST_SET_CACHE = new WeakMap(); -const 
SIMPLE_ALLOWLIST_CACHE = new WeakMap< - Array, - { normalized: string[]; size: number; wildcard: boolean; set: Set } ->(); - export function formatAllowlistMatchMeta( match?: { matchKey?: string; matchSource?: string } | null, ): string { return `matchKey=${match?.matchKey ?? "none"} matchSource=${match?.matchSource ?? "none"}`; } -export function resolveAllowlistMatchByCandidates(params: { - allowList: string[]; +export function compileAllowlist(entries: ReadonlyArray): CompiledAllowlist { + const set = new Set(entries.filter(Boolean)); + return { + set, + wildcard: set.has("*"), + }; +} + +function compileSimpleAllowlist(entries: ReadonlyArray): CompiledAllowlist { + return compileAllowlist( + entries.map((entry) => String(entry).trim().toLowerCase()).filter(Boolean), + ); +} + +export function resolveAllowlistCandidates(params: { + compiledAllowlist: CompiledAllowlist; candidates: Array<{ value?: string; source: TSource }>; }): AllowlistMatch { - const allowSet = resolveAllowListSet(params.allowList); for (const candidate of params.candidates) { if (!candidate.value) { continue; } - if (allowSet.has(candidate.value)) { + if (params.compiledAllowlist.set.has(candidate.value)) { return { allowed: true, matchKey: candidate.value, @@ -53,15 +60,38 @@ export function resolveAllowlistMatchByCandidates(params return { allowed: false }; } +export function resolveCompiledAllowlistMatch(params: { + compiledAllowlist: CompiledAllowlist; + candidates: Array<{ value?: string; source: TSource }>; +}): AllowlistMatch { + if (params.compiledAllowlist.set.size === 0) { + return { allowed: false }; + } + if (params.compiledAllowlist.wildcard) { + return { allowed: true, matchKey: "*", matchSource: "wildcard" as TSource }; + } + return resolveAllowlistCandidates(params); +} + +export function resolveAllowlistMatchByCandidates(params: { + allowList: ReadonlyArray; + candidates: Array<{ value?: string; source: TSource }>; +}): AllowlistMatch { + return 
resolveCompiledAllowlistMatch({ + compiledAllowlist: compileAllowlist(params.allowList), + candidates: params.candidates, + }); +} + export function resolveAllowlistMatchSimple(params: { - allowFrom: Array; + allowFrom: ReadonlyArray; senderId: string; senderName?: string | null; allowNameMatching?: boolean; }): AllowlistMatch<"wildcard" | "id" | "name"> { - const allowFrom = resolveSimpleAllowFrom(params.allowFrom); + const allowFrom = compileSimpleAllowlist(params.allowFrom); - if (allowFrom.size === 0) { + if (allowFrom.set.size === 0) { return { allowed: false }; } if (allowFrom.wildcard) { @@ -69,47 +99,17 @@ export function resolveAllowlistMatchSimple(params: { } const senderId = params.senderId.toLowerCase(); - if (allowFrom.set.has(senderId)) { - return { allowed: true, matchKey: senderId, matchSource: "id" }; - } - const senderName = params.senderName?.toLowerCase(); - if (params.allowNameMatching === true && senderName && allowFrom.set.has(senderName)) { - return { allowed: true, matchKey: senderName, matchSource: "name" }; - } - - return { allowed: false }; -} - -function resolveAllowListSet(allowList: string[]): Set { - const cached = ALLOWLIST_SET_CACHE.get(allowList); - if (cached && cached.size === allowList.length) { - return cached.set; - } - const set = new Set(allowList); - ALLOWLIST_SET_CACHE.set(allowList, { size: allowList.length, set }); - return set; -} - -function resolveSimpleAllowFrom(allowFrom: Array): { - normalized: string[]; - size: number; - wildcard: boolean; - set: Set; -} { - const cached = SIMPLE_ALLOWLIST_CACHE.get(allowFrom); - if (cached && cached.size === allowFrom.length) { - return cached; - } - - const normalized = allowFrom.map((entry) => String(entry).trim().toLowerCase()).filter(Boolean); - const set = new Set(normalized); - const built = { - normalized, - size: allowFrom.length, - wildcard: set.has("*"), - set, - }; - SIMPLE_ALLOWLIST_CACHE.set(allowFrom, built); - return built; + return resolveAllowlistCandidates({ + 
compiledAllowlist: allowFrom, + candidates: [ + { value: senderId, source: "id" }, + ...(params.allowNameMatching === true && senderName + ? ([{ value: senderName, source: "name" as const }] satisfies Array<{ + value?: string; + source: "id" | "name"; + }>) + : []), + ], + }); } diff --git a/src/channels/command-gating.test.ts b/src/channels/command-gating.test.ts index 5ea0614e287..9b3f645e515 100644 --- a/src/channels/command-gating.test.ts +++ b/src/channels/command-gating.test.ts @@ -2,6 +2,7 @@ import { describe, expect, it } from "vitest"; import { resolveCommandAuthorizedFromAuthorizers, resolveControlCommandGate, + resolveDualTextControlCommandGate, } from "./command-gating.js"; describe("resolveCommandAuthorizedFromAuthorizers", () => { @@ -94,4 +95,17 @@ describe("resolveControlCommandGate", () => { }); expect(result.shouldBlock).toBe(false); }); + + it("supports the dual-authorizer text gate helper", () => { + const result = resolveDualTextControlCommandGate({ + useAccessGroups: true, + primaryConfigured: true, + primaryAllowed: false, + secondaryConfigured: true, + secondaryAllowed: true, + hasControlCommand: true, + }); + expect(result.commandAuthorized).toBe(true); + expect(result.shouldBlock).toBe(false); + }); }); diff --git a/src/channels/command-gating.ts b/src/channels/command-gating.ts index 1492d4760a4..068db8328be 100644 --- a/src/channels/command-gating.ts +++ b/src/channels/command-gating.ts @@ -43,3 +43,24 @@ export function resolveControlCommandGate(params: { const shouldBlock = params.allowTextCommands && params.hasControlCommand && !commandAuthorized; return { commandAuthorized, shouldBlock }; } + +export function resolveDualTextControlCommandGate(params: { + useAccessGroups: boolean; + primaryConfigured: boolean; + primaryAllowed: boolean; + secondaryConfigured: boolean; + secondaryAllowed: boolean; + hasControlCommand: boolean; + modeWhenAccessGroupsOff?: CommandGatingModeWhenAccessGroupsOff; +}): { commandAuthorized: boolean; 
shouldBlock: boolean } { + return resolveControlCommandGate({ + useAccessGroups: params.useAccessGroups, + authorizers: [ + { configured: params.primaryConfigured, allowed: params.primaryAllowed }, + { configured: params.secondaryConfigured, allowed: params.secondaryAllowed }, + ], + allowTextCommands: true, + hasControlCommand: params.hasControlCommand, + modeWhenAccessGroupsOff: params.modeWhenAccessGroupsOff, + }); +} diff --git a/src/channels/plugins/catalog.ts b/src/channels/plugins/catalog.ts index fe2208765e3..a853dcdf805 100644 --- a/src/channels/plugins/catalog.ts +++ b/src/channels/plugins/catalog.ts @@ -4,7 +4,7 @@ import { MANIFEST_KEY } from "../../compat/legacy-names.js"; import { discoverOpenClawPlugins } from "../../plugins/discovery.js"; import type { OpenClawPackageManifest } from "../../plugins/manifest.js"; import type { PluginOrigin } from "../../plugins/types.js"; -import { CONFIG_DIR, isRecord, resolveUserPath } from "../../utils.js"; +import { isRecord, resolveConfigDir, resolveUserPath } from "../../utils.js"; import type { ChannelMeta } from "./types.js"; export type ChannelUiMetaEntry = { @@ -36,6 +36,7 @@ export type ChannelPluginCatalogEntry = { type CatalogOptions = { workspaceDir?: string; catalogPaths?: string[]; + env?: NodeJS.ProcessEnv; }; const ORIGIN_PRIORITY: Record = { @@ -51,12 +52,6 @@ type ExternalCatalogEntry = { description?: string; } & Partial>; -const DEFAULT_CATALOG_PATHS = [ - path.join(CONFIG_DIR, "mpm", "plugins.json"), - path.join(CONFIG_DIR, "mpm", "catalog.json"), - path.join(CONFIG_DIR, "plugins", "catalog.json"), -]; - const ENV_CATALOG_PATHS = ["OPENCLAW_PLUGIN_CATALOG_PATHS", "OPENCLAW_MPM_CATALOG_PATHS"]; type ManifestKey = typeof MANIFEST_KEY; @@ -87,24 +82,35 @@ function splitEnvPaths(value: string): string[] { .filter(Boolean); } +function resolveDefaultCatalogPaths(env: NodeJS.ProcessEnv): string[] { + const configDir = resolveConfigDir(env); + return [ + path.join(configDir, "mpm", "plugins.json"), + 
path.join(configDir, "mpm", "catalog.json"), + path.join(configDir, "plugins", "catalog.json"), + ]; +} + function resolveExternalCatalogPaths(options: CatalogOptions): string[] { if (options.catalogPaths && options.catalogPaths.length > 0) { return options.catalogPaths.map((entry) => entry.trim()).filter(Boolean); } + const env = options.env ?? process.env; for (const key of ENV_CATALOG_PATHS) { - const raw = process.env[key]; + const raw = env[key]; if (raw && raw.trim()) { return splitEnvPaths(raw); } } - return DEFAULT_CATALOG_PATHS; + return resolveDefaultCatalogPaths(env); } function loadExternalCatalogEntries(options: CatalogOptions): ExternalCatalogEntry[] { const paths = resolveExternalCatalogPaths(options); + const env = options.env ?? process.env; const entries: ExternalCatalogEntry[] = []; for (const rawPath of paths) { - const resolved = resolveUserPath(rawPath); + const resolved = resolveUserPath(rawPath, env); if (!fs.existsSync(resolved)) { continue; } @@ -259,7 +265,10 @@ export function buildChannelUiCatalog( export function listChannelPluginCatalogEntries( options: CatalogOptions = {}, ): ChannelPluginCatalogEntry[] { - const discovery = discoverOpenClawPlugins({ workspaceDir: options.workspaceDir }); + const discovery = discoverOpenClawPlugins({ + workspaceDir: options.workspaceDir, + env: options.env, + }); const resolved = new Map(); for (const candidate of discovery.candidates) { diff --git a/src/channels/plugins/config-schema.ts b/src/channels/plugins/config-schema.ts index 35be4c9d388..5ae166aa5a7 100644 --- a/src/channels/plugins/config-schema.ts +++ b/src/channels/plugins/config-schema.ts @@ -1,4 +1,5 @@ import { z, type ZodTypeAny } from "zod"; +import { DmPolicySchema } from "../../config/zod-schema.core.js"; import type { ChannelConfigSchema } from "./types.plugin.js"; type ZodSchemaWithToJsonSchema = ZodTypeAny & { @@ -10,6 +11,17 @@ type ExtendableZodObject = ZodTypeAny & { }; export const AllowFromEntrySchema = z.union([z.string(), 
z.number()]); +export const AllowFromListSchema = z.array(AllowFromEntrySchema).optional(); + +export function buildNestedDmConfigSchema() { + return z + .object({ + enabled: z.boolean().optional(), + policy: DmPolicySchema.optional(), + allowFrom: AllowFromListSchema, + }) + .optional(); +} export function buildCatchallMultiAccountChannelSchema( accountSchema: T, diff --git a/src/channels/plugins/config-writes.ts b/src/channels/plugins/config-writes.ts index 87e220d7029..3e3ef36ed04 100644 --- a/src/channels/plugins/config-writes.ts +++ b/src/channels/plugins/config-writes.ts @@ -1,6 +1,8 @@ import type { OpenClawConfig } from "../../config/config.js"; import { resolveAccountEntry } from "../../routing/account-lookup.js"; +import { DEFAULT_ACCOUNT_ID } from "../../routing/session-key.js"; import { normalizeAccountId } from "../../routing/session-key.js"; +import { isInternalMessageChannel } from "../../utils/message-channel.js"; import type { ChannelId } from "./types.js"; type ChannelConfigWithAccounts = { @@ -12,6 +14,25 @@ function resolveAccountConfig(accounts: ChannelConfigWithAccounts["accounts"], a return resolveAccountEntry(accounts, accountId); } +export type ConfigWriteScope = { + channelId?: ChannelId | null; + accountId?: string | null; +}; + +export type ConfigWriteTarget = + | { kind: "global" } + | { kind: "channel"; scope: { channelId: ChannelId } } + | { kind: "account"; scope: { channelId: ChannelId; accountId: string } } + | { kind: "ambiguous"; scopes: ConfigWriteScope[] }; + +export type ConfigWriteAuthorizationResult = + | { allowed: true } + | { + allowed: false; + reason: "ambiguous-target" | "origin-disabled" | "target-disabled"; + blockedScope?: { kind: "origin" | "target"; scope: ConfigWriteScope }; + }; + export function resolveChannelConfigWrites(params: { cfg: OpenClawConfig; channelId?: ChannelId | null; @@ -30,3 +51,133 @@ export function resolveChannelConfigWrites(params: { const value = accountConfig?.configWrites ?? 
channelConfig.configWrites; return value !== false; } + +export function authorizeConfigWrite(params: { + cfg: OpenClawConfig; + origin?: ConfigWriteScope; + target?: ConfigWriteTarget; + allowBypass?: boolean; +}): ConfigWriteAuthorizationResult { + if (params.allowBypass) { + return { allowed: true }; + } + if (params.target?.kind === "ambiguous") { + return { allowed: false, reason: "ambiguous-target" }; + } + if ( + params.origin?.channelId && + !resolveChannelConfigWrites({ + cfg: params.cfg, + channelId: params.origin.channelId, + accountId: params.origin.accountId, + }) + ) { + return { + allowed: false, + reason: "origin-disabled", + blockedScope: { kind: "origin", scope: params.origin }, + }; + } + const seen = new Set(); + for (const target of listConfigWriteTargetScopes(params.target)) { + if (!target.channelId) { + continue; + } + const key = `${target.channelId}:${normalizeAccountId(target.accountId)}`; + if (seen.has(key)) { + continue; + } + seen.add(key); + if ( + !resolveChannelConfigWrites({ + cfg: params.cfg, + channelId: target.channelId, + accountId: target.accountId, + }) + ) { + return { + allowed: false, + reason: "target-disabled", + blockedScope: { kind: "target", scope: target }, + }; + } + } + return { allowed: true }; +} + +export function resolveExplicitConfigWriteTarget(scope: ConfigWriteScope): ConfigWriteTarget { + if (!scope.channelId) { + return { kind: "global" }; + } + const accountId = normalizeAccountId(scope.accountId); + if (!accountId || accountId === DEFAULT_ACCOUNT_ID) { + return { kind: "channel", scope: { channelId: scope.channelId } }; + } + return { kind: "account", scope: { channelId: scope.channelId, accountId } }; +} + +export function resolveConfigWriteTargetFromPath(path: string[]): ConfigWriteTarget { + if (path[0] !== "channels") { + return { kind: "global" }; + } + if (path.length < 2) { + return { kind: "ambiguous", scopes: [] }; + } + const channelId = path[1].trim().toLowerCase() as ChannelId; + if 
(!channelId) { + return { kind: "ambiguous", scopes: [] }; + } + if (path.length === 2) { + return { kind: "ambiguous", scopes: [{ channelId }] }; + } + if (path[2] !== "accounts") { + return { kind: "channel", scope: { channelId } }; + } + if (path.length < 4) { + return { kind: "ambiguous", scopes: [{ channelId }] }; + } + return resolveExplicitConfigWriteTarget({ + channelId, + accountId: normalizeAccountId(path[3]), + }); +} + +export function canBypassConfigWritePolicy(params: { + channel?: string | null; + gatewayClientScopes?: string[] | null; +}): boolean { + return ( + isInternalMessageChannel(params.channel) && + params.gatewayClientScopes?.includes("operator.admin") === true + ); +} + +export function formatConfigWriteDeniedMessage(params: { + result: Exclude; + fallbackChannelId?: ChannelId | null; +}): string { + if (params.result.reason === "ambiguous-target") { + return "⚠️ Channel-initiated /config writes cannot replace channels, channel roots, or accounts collections. Use a more specific path or gateway operator.admin."; + } + + const blocked = params.result.blockedScope?.scope; + const channelLabel = blocked?.channelId ?? params.fallbackChannelId ?? "this channel"; + const hint = blocked?.channelId + ? blocked.accountId + ? `channels.${blocked.channelId}.accounts.${blocked.accountId}.configWrites=true` + : `channels.${blocked.channelId}.configWrites=true` + : params.fallbackChannelId + ? `channels.${params.fallbackChannelId}.configWrites=true` + : "channels..configWrites=true"; + return `⚠️ Config writes are disabled for ${channelLabel}. 
Set ${hint} to enable.`; +} + +function listConfigWriteTargetScopes(target?: ConfigWriteTarget): ConfigWriteScope[] { + if (!target || target.kind === "global") { + return []; + } + if (target.kind === "ambiguous") { + return target.scopes; + } + return [target.scope]; +} diff --git a/src/channels/plugins/directory-config-helpers.test.ts b/src/channels/plugins/directory-config-helpers.test.ts index c9ba1429791..15aa8f0d298 100644 --- a/src/channels/plugins/directory-config-helpers.test.ts +++ b/src/channels/plugins/directory-config-helpers.test.ts @@ -6,6 +6,13 @@ import { listDirectoryUserEntriesFromAllowFrom, } from "./directory-config-helpers.js"; +function expectUserDirectoryEntries(entries: unknown) { + expect(entries).toEqual([ + { kind: "user", id: "alice" }, + { kind: "user", id: "carla" }, + ]); +} + describe("listDirectoryUserEntriesFromAllowFrom", () => { it("normalizes, deduplicates, filters, and limits user ids", () => { const entries = listDirectoryUserEntriesFromAllowFrom({ @@ -15,10 +22,7 @@ describe("listDirectoryUserEntriesFromAllowFrom", () => { limit: 2, }); - expect(entries).toEqual([ - { kind: "user", id: "alice" }, - { kind: "user", id: "carla" }, - ]); + expectUserDirectoryEntries(entries); }); }); @@ -54,10 +58,7 @@ describe("listDirectoryUserEntriesFromAllowFromAndMapKeys", () => { limit: 2, }); - expect(entries).toEqual([ - { kind: "user", id: "alice" }, - { kind: "user", id: "carla" }, - ]); + expectUserDirectoryEntries(entries); }); }); diff --git a/src/channels/plugins/directory-config-helpers.ts b/src/channels/plugins/directory-config-helpers.ts index 13cd05d65c3..edfab553677 100644 --- a/src/channels/plugins/directory-config-helpers.ts +++ b/src/channels/plugins/directory-config-helpers.ts @@ -8,7 +8,7 @@ function resolveDirectoryLimit(limit?: number | null): number | undefined { return typeof limit === "number" && limit > 0 ? 
limit : undefined; } -function applyDirectoryQueryAndLimit( +export function applyDirectoryQueryAndLimit( ids: string[], params: { query?: string | null; limit?: number | null }, ): string[] { @@ -18,29 +18,15 @@ function applyDirectoryQueryAndLimit( return typeof limit === "number" ? filtered.slice(0, limit) : filtered; } -function toDirectoryEntries(kind: "user" | "group", ids: string[]): ChannelDirectoryEntry[] { +export function toDirectoryEntries(kind: "user" | "group", ids: string[]): ChannelDirectoryEntry[] { return ids.map((id) => ({ kind, id }) as const); } -function collectDirectoryIdsFromEntries(params: { - entries?: readonly unknown[]; +function normalizeDirectoryIds(params: { + rawIds: readonly string[]; normalizeId?: (entry: string) => string | null | undefined; }): string[] { - return (params.entries ?? []) - .map((entry) => String(entry).trim()) - .filter((entry) => Boolean(entry) && entry !== "*") - .map((entry) => { - const normalized = params.normalizeId ? params.normalizeId(entry) : entry; - return typeof normalized === "string" ? normalized.trim() : ""; - }) - .filter(Boolean); -} - -function collectDirectoryIdsFromMapKeys(params: { - groups?: Record; - normalizeId?: (entry: string) => string | null | undefined; -}): string[] { - return Object.keys(params.groups ?? {}) + return params.rawIds .map((entry) => entry.trim()) .filter((entry) => Boolean(entry) && entry !== "*") .map((entry) => { @@ -50,6 +36,26 @@ function collectDirectoryIdsFromMapKeys(params: { .filter(Boolean); } +function collectDirectoryIdsFromEntries(params: { + entries?: readonly unknown[]; + normalizeId?: (entry: string) => string | null | undefined; +}): string[] { + return normalizeDirectoryIds({ + rawIds: (params.entries ?? 
[]).map((entry) => String(entry)), + normalizeId: params.normalizeId, + }); +} + +function collectDirectoryIdsFromMapKeys(params: { + groups?: Record; + normalizeId?: (entry: string) => string | null | undefined; +}): string[] { + return normalizeDirectoryIds({ + rawIds: Object.keys(params.groups ?? {}), + normalizeId: params.normalizeId, + }); +} + function dedupeDirectoryIds(ids: string[]): string[] { return Array.from(new Set(ids)); } diff --git a/src/channels/plugins/directory-config.ts b/src/channels/plugins/directory-config.ts index eaf35fa33ef..e1270a9ceed 100644 --- a/src/channels/plugins/directory-config.ts +++ b/src/channels/plugins/directory-config.ts @@ -5,6 +5,7 @@ import { inspectSlackAccount } from "../../slack/account-inspect.js"; import { inspectTelegramAccount } from "../../telegram/account-inspect.js"; import { resolveWhatsAppAccount } from "../../web/accounts.js"; import { isWhatsAppGroupJid, normalizeWhatsAppTarget } from "../../whatsapp/normalize.js"; +import { applyDirectoryQueryAndLimit, toDirectoryEntries } from "./directory-config-helpers.js"; import { normalizeSlackMessagingTarget } from "./normalize/slack.js"; import type { ChannelDirectoryEntry } from "./types.js"; @@ -54,25 +55,6 @@ function normalizeTrimmedSet( .filter((id): id is string => Boolean(id)); } -function resolveDirectoryQuery(query?: string | null): string { - return query?.trim().toLowerCase() || ""; -} - -function resolveDirectoryLimit(limit?: number | null): number | undefined { - return typeof limit === "number" && limit > 0 ? limit : undefined; -} - -function applyDirectoryQueryAndLimit(ids: string[], params: DirectoryConfigParams): string[] { - const q = resolveDirectoryQuery(params.query); - const limit = resolveDirectoryLimit(params.limit); - const filtered = ids.filter((id) => (q ? id.toLowerCase().includes(q) : true)); - return typeof limit === "number" ? 
filtered.slice(0, limit) : filtered; -} - -function toDirectoryEntries(kind: "user" | "group", ids: string[]): ChannelDirectoryEntry[] { - return ids.map((id) => ({ kind, id }) as const); -} - export async function listSlackDirectoryPeersFromConfig( params: DirectoryConfigParams, ): Promise { diff --git a/src/channels/plugins/helpers.test.ts b/src/channels/plugins/helpers.test.ts index 2b85d7fea06..6b5f56c2ca3 100644 --- a/src/channels/plugins/helpers.test.ts +++ b/src/channels/plugins/helpers.test.ts @@ -1,6 +1,10 @@ import { describe, expect, it } from "vitest"; import type { OpenClawConfig } from "../../config/config.js"; -import { buildAccountScopedDmSecurityPolicy, formatPairingApproveHint } from "./helpers.js"; +import { + buildAccountScopedDmSecurityPolicy, + formatPairingApproveHint, + parseOptionalDelimitedEntries, +} from "./helpers.js"; function cfgWithChannel(channelKey: string, accounts?: Record): OpenClawConfig { return { @@ -93,3 +97,18 @@ describe("buildAccountScopedDmSecurityPolicy", () => { }); }); }); + +describe("parseOptionalDelimitedEntries", () => { + it("returns undefined for empty input", () => { + expect(parseOptionalDelimitedEntries(" ")).toBeUndefined(); + }); + + it("splits comma, newline, and semicolon separated entries", () => { + expect(parseOptionalDelimitedEntries("alpha, beta\ngamma; delta")).toEqual([ + "alpha", + "beta", + "gamma", + "delta", + ]); + }); +}); diff --git a/src/channels/plugins/helpers.ts b/src/channels/plugins/helpers.ts index 135547d6e9a..40b01beb4d8 100644 --- a/src/channels/plugins/helpers.ts +++ b/src/channels/plugins/helpers.ts @@ -20,6 +20,17 @@ export function formatPairingApproveHint(channelId: string): string { return `Approve via: ${listCmd} / ${approveCmd}`; } +export function parseOptionalDelimitedEntries(value?: string): string[] | undefined { + if (!value?.trim()) { + return undefined; + } + const parsed = value + .split(/[\n,;]+/g) + .map((entry) => entry.trim()) + .filter(Boolean); + return 
parsed.length > 0 ? parsed : undefined; +} + export function buildAccountScopedDmSecurityPolicy(params: { cfg: OpenClawConfig; channelKey: string; diff --git a/src/channels/plugins/onboarding/discord.ts b/src/channels/plugins/onboarding/discord.ts index 52f0d2b1373..d6a8c8df370 100644 --- a/src/channels/plugins/onboarding/discord.ts +++ b/src/channels/plugins/onboarding/discord.ts @@ -20,15 +20,14 @@ import type { ChannelOnboardingAdapter, ChannelOnboardingDmPolicy } from "../onb import { configureChannelAccessWithAllowlist } from "./channel-access-configure.js"; import { applySingleTokenPromptResult, - buildSingleChannelSecretPromptState, parseMentionOrPrefixedId, noteChannelLookupFailure, noteChannelLookupSummary, patchChannelConfigForAccount, promptLegacyChannelAllowFrom, - promptSingleChannelSecretInput, resolveAccountIdForConfigure, resolveOnboardingAccountId, + runSingleChannelSecretStep, setAccountGroupPolicyForChannel, setLegacyChannelDmPolicyWithAllowFrom, setOnboardingChannelEnabled, @@ -179,52 +178,39 @@ export const discordOnboardingAdapter: ChannelOnboardingAdapter = { accountId: discordAccountId, }); const allowEnv = discordAccountId === DEFAULT_ACCOUNT_ID; - const tokenPromptState = buildSingleChannelSecretPromptState({ - accountConfigured: Boolean(resolvedAccount.token), - hasConfigToken: hasConfiguredSecretInput(resolvedAccount.config.token), - allowEnv, - envValue: process.env.DISCORD_BOT_TOKEN, - }); - - if (!tokenPromptState.accountConfigured) { - await noteDiscordTokenHelp(prompter); - } - - const tokenResult = await promptSingleChannelSecretInput({ + const tokenStep = await runSingleChannelSecretStep({ cfg: next, prompter, providerHint: "discord", credentialLabel: "Discord bot token", secretInputMode: options?.secretInputMode, - accountConfigured: tokenPromptState.accountConfigured, - canUseEnv: tokenPromptState.canUseEnv, - hasConfigToken: tokenPromptState.hasConfigToken, + accountConfigured: Boolean(resolvedAccount.token), + hasConfigToken: 
hasConfiguredSecretInput(resolvedAccount.config.token), + allowEnv, + envValue: process.env.DISCORD_BOT_TOKEN, envPrompt: "DISCORD_BOT_TOKEN detected. Use env var?", keepPrompt: "Discord token already configured. Keep it?", inputPrompt: "Enter Discord bot token", preferredEnvVar: allowEnv ? "DISCORD_BOT_TOKEN" : undefined, + onMissingConfigured: async () => await noteDiscordTokenHelp(prompter), + applyUseEnv: async (cfg) => + applySingleTokenPromptResult({ + cfg, + channel: "discord", + accountId: discordAccountId, + tokenPatchKey: "token", + tokenResult: { useEnv: true, token: null }, + }), + applySet: async (cfg, value) => + applySingleTokenPromptResult({ + cfg, + channel: "discord", + accountId: discordAccountId, + tokenPatchKey: "token", + tokenResult: { useEnv: false, token: value }, + }), }); - - let resolvedTokenForAllowlist: string | undefined; - if (tokenResult.action === "use-env") { - next = applySingleTokenPromptResult({ - cfg: next, - channel: "discord", - accountId: discordAccountId, - tokenPatchKey: "token", - tokenResult: { useEnv: true, token: null }, - }); - resolvedTokenForAllowlist = process.env.DISCORD_BOT_TOKEN?.trim() || undefined; - } else if (tokenResult.action === "set") { - next = applySingleTokenPromptResult({ - cfg: next, - channel: "discord", - accountId: discordAccountId, - tokenPatchKey: "token", - tokenResult: { useEnv: false, token: tokenResult.value }, - }); - resolvedTokenForAllowlist = tokenResult.resolvedValue; - } + next = tokenStep.cfg; const currentEntries = Object.entries(resolvedAccount.config.guilds ?? 
{}).flatMap( ([guildKey, value]) => { @@ -261,7 +247,7 @@ export const discordOnboardingAdapter: ChannelOnboardingAdapter = { input, resolved: false, })); - const activeToken = accountWithTokens.token || resolvedTokenForAllowlist || ""; + const activeToken = accountWithTokens.token || tokenStep.resolvedValue || ""; if (activeToken && entries.length > 0) { try { resolved = await resolveDiscordChannelAllowlist({ diff --git a/src/channels/plugins/onboarding/helpers.ts b/src/channels/plugins/onboarding/helpers.ts index 31ba023ba2f..77d03a4127a 100644 --- a/src/channels/plugins/onboarding/helpers.ts +++ b/src/channels/plugins/onboarding/helpers.ts @@ -9,7 +9,10 @@ import { promptAccountId as promptAccountIdSdk } from "../../../plugin-sdk/onboa import { DEFAULT_ACCOUNT_ID, normalizeAccountId } from "../../../routing/session-key.js"; import type { WizardPrompter } from "../../../wizard/prompts.js"; import type { PromptAccountId, PromptAccountIdParams } from "../onboarding-types.js"; -import { moveSingleAccountChannelSectionToDefaultAccount } from "../setup-helpers.js"; +import { + moveSingleAccountChannelSectionToDefaultAccount, + patchScopedAccountConfig, +} from "../setup-helpers.js"; export const promptAccountId: PromptAccountId = async (params: PromptAccountIdParams) => { return await promptAccountIdSdk(params); @@ -161,11 +164,11 @@ export function setAccountAllowFromForChannel(params: { }); } -export function setTopLevelChannelAllowFrom(params: { +function patchTopLevelChannelConfig(params: { cfg: OpenClawConfig; channel: string; - allowFrom: string[]; enabled?: boolean; + patch: Record; }): OpenClawConfig { const channelConfig = (params.cfg.channels?.[params.channel] as Record | undefined) ?? {}; @@ -176,12 +179,26 @@ export function setTopLevelChannelAllowFrom(params: { [params.channel]: { ...channelConfig, ...(params.enabled ? 
{ enabled: true } : {}), - allowFrom: params.allowFrom, + ...params.patch, }, }, }; } +export function setTopLevelChannelAllowFrom(params: { + cfg: OpenClawConfig; + channel: string; + allowFrom: string[]; + enabled?: boolean; +}): OpenClawConfig { + return patchTopLevelChannelConfig({ + cfg: params.cfg, + channel: params.channel, + enabled: params.enabled, + patch: { allowFrom: params.allowFrom }, + }); +} + export function setTopLevelChannelDmPolicyWithAllowFrom(params: { cfg: OpenClawConfig; channel: string; @@ -196,17 +213,14 @@ export function setTopLevelChannelDmPolicyWithAllowFrom(params: { undefined; const allowFrom = params.dmPolicy === "open" ? addWildcardAllowFrom(existingAllowFrom) : undefined; - return { - ...params.cfg, - channels: { - ...params.cfg.channels, - [params.channel]: { - ...channelConfig, - dmPolicy: params.dmPolicy, - ...(allowFrom ? { allowFrom } : {}), - }, + return patchTopLevelChannelConfig({ + cfg: params.cfg, + channel: params.channel, + patch: { + dmPolicy: params.dmPolicy, + ...(allowFrom ? { allowFrom } : {}), }, - }; + }); } export function setTopLevelChannelGroupPolicy(params: { @@ -215,19 +229,12 @@ export function setTopLevelChannelGroupPolicy(params: { groupPolicy: GroupPolicy; enabled?: boolean; }): OpenClawConfig { - const channelConfig = - (params.cfg.channels?.[params.channel] as Record | undefined) ?? {}; - return { - ...params.cfg, - channels: { - ...params.cfg.channels, - [params.channel]: { - ...channelConfig, - ...(params.enabled ? { enabled: true } : {}), - groupPolicy: params.groupPolicy, - }, - }, - }; + return patchTopLevelChannelConfig({ + cfg: params.cfg, + channel: params.channel, + enabled: params.enabled, + patch: { groupPolicy: params.groupPolicy }, + }); } export function setChannelDmPolicyWithAllowFrom(params: { @@ -364,50 +371,14 @@ function patchConfigForScopedAccount(params: { cfg, channelKey: channel, }); - const channelConfig = - (seededCfg.channels?.[channel] as Record | undefined) ?? 
{}; - - if (accountId === DEFAULT_ACCOUNT_ID) { - return { - ...seededCfg, - channels: { - ...seededCfg.channels, - [channel]: { - ...channelConfig, - ...(ensureEnabled ? { enabled: true } : {}), - ...patch, - }, - }, - }; - } - - const accounts = - (channelConfig.accounts as Record> | undefined) ?? {}; - const existingAccount = accounts[accountId] ?? {}; - - return { - ...seededCfg, - channels: { - ...seededCfg.channels, - [channel]: { - ...channelConfig, - ...(ensureEnabled ? { enabled: true } : {}), - accounts: { - ...accounts, - [accountId]: { - ...existingAccount, - ...(ensureEnabled - ? { - enabled: - typeof existingAccount.enabled === "boolean" ? existingAccount.enabled : true, - } - : {}), - ...patch, - }, - }, - }, - }, - }; + return patchScopedAccountConfig({ + cfg: seededCfg, + channelKey: channel, + accountId, + patch, + ensureChannelEnabled: ensureEnabled, + ensureAccountEnabled: ensureEnabled, + }); } export function patchChannelConfigForAccount(params: { @@ -515,6 +486,82 @@ export type SingleChannelSecretInputPromptResult = | { action: "use-env" } | { action: "set"; value: SecretInput; resolvedValue: string }; +export async function runSingleChannelSecretStep(params: { + cfg: OpenClawConfig; + prompter: Pick; + providerHint: string; + credentialLabel: string; + secretInputMode?: "plaintext" | "ref"; + accountConfigured: boolean; + hasConfigToken: boolean; + allowEnv: boolean; + envValue?: string; + envPrompt: string; + keepPrompt: string; + inputPrompt: string; + preferredEnvVar?: string; + onMissingConfigured?: () => Promise; + applyUseEnv?: (cfg: OpenClawConfig) => OpenClawConfig | Promise; + applySet?: ( + cfg: OpenClawConfig, + value: SecretInput, + resolvedValue: string, + ) => OpenClawConfig | Promise; +}): Promise<{ + cfg: OpenClawConfig; + action: SingleChannelSecretInputPromptResult["action"]; + resolvedValue?: string; +}> { + const promptState = buildSingleChannelSecretPromptState({ + accountConfigured: params.accountConfigured, + 
hasConfigToken: params.hasConfigToken, + allowEnv: params.allowEnv, + envValue: params.envValue, + }); + + if (!promptState.accountConfigured && params.onMissingConfigured) { + await params.onMissingConfigured(); + } + + const result = await promptSingleChannelSecretInput({ + cfg: params.cfg, + prompter: params.prompter, + providerHint: params.providerHint, + credentialLabel: params.credentialLabel, + secretInputMode: params.secretInputMode, + accountConfigured: promptState.accountConfigured, + canUseEnv: promptState.canUseEnv, + hasConfigToken: promptState.hasConfigToken, + envPrompt: params.envPrompt, + keepPrompt: params.keepPrompt, + inputPrompt: params.inputPrompt, + preferredEnvVar: params.preferredEnvVar, + }); + + if (result.action === "use-env") { + return { + cfg: params.applyUseEnv ? await params.applyUseEnv(params.cfg) : params.cfg, + action: result.action, + resolvedValue: params.envValue?.trim() || undefined, + }; + } + + if (result.action === "set") { + return { + cfg: params.applySet + ? 
await params.applySet(params.cfg, result.value, result.resolvedValue) + : params.cfg, + action: result.action, + resolvedValue: result.resolvedValue, + }; + } + + return { + cfg: params.cfg, + action: result.action, + }; +} + export async function promptSingleChannelSecretInput(params: { cfg: OpenClawConfig; prompter: Pick; diff --git a/src/channels/plugins/onboarding/slack.ts b/src/channels/plugins/onboarding/slack.ts index cc683477c09..0cceb859e4d 100644 --- a/src/channels/plugins/onboarding/slack.ts +++ b/src/channels/plugins/onboarding/slack.ts @@ -14,15 +14,14 @@ import type { WizardPrompter } from "../../../wizard/prompts.js"; import type { ChannelOnboardingAdapter, ChannelOnboardingDmPolicy } from "../onboarding-types.js"; import { configureChannelAccessWithAllowlist } from "./channel-access-configure.js"; import { - buildSingleChannelSecretPromptState, parseMentionOrPrefixedId, noteChannelLookupFailure, noteChannelLookupSummary, patchChannelConfigForAccount, promptLegacyChannelAllowFrom, - promptSingleChannelSecretInput, resolveAccountIdForConfigure, resolveOnboardingAccountId, + runSingleChannelSecretStep, setAccountGroupPolicyForChannel, setLegacyChannelDmPolicyWithAllowFrom, setOnboardingChannelEnabled, @@ -235,18 +234,6 @@ export const slackOnboardingAdapter: ChannelOnboardingAdapter = { const accountConfigured = Boolean(resolvedAccount.botToken && resolvedAccount.appToken) || hasConfigTokens; const allowEnv = slackAccountId === DEFAULT_ACCOUNT_ID; - const botPromptState = buildSingleChannelSecretPromptState({ - accountConfigured: Boolean(resolvedAccount.botToken) || hasConfiguredBotToken, - hasConfigToken: hasConfiguredBotToken, - allowEnv, - envValue: process.env.SLACK_BOT_TOKEN, - }); - const appPromptState = buildSingleChannelSecretPromptState({ - accountConfigured: Boolean(resolvedAccount.appToken) || hasConfiguredAppToken, - hasConfigToken: hasConfiguredAppToken, - allowEnv, - envValue: process.env.SLACK_APP_TOKEN, - }); let 
resolvedBotTokenForAllowlist = resolvedAccount.botToken; const slackBotName = String( await prompter.text({ @@ -257,54 +244,56 @@ export const slackOnboardingAdapter: ChannelOnboardingAdapter = { if (!accountConfigured) { await noteSlackTokenHelp(prompter, slackBotName); } - const botTokenResult = await promptSingleChannelSecretInput({ + const botTokenStep = await runSingleChannelSecretStep({ cfg: next, prompter, providerHint: "slack-bot", credentialLabel: "Slack bot token", secretInputMode: options?.secretInputMode, - accountConfigured: botPromptState.accountConfigured, - canUseEnv: botPromptState.canUseEnv, - hasConfigToken: botPromptState.hasConfigToken, + accountConfigured: Boolean(resolvedAccount.botToken) || hasConfiguredBotToken, + hasConfigToken: hasConfiguredBotToken, + allowEnv, + envValue: process.env.SLACK_BOT_TOKEN, envPrompt: "SLACK_BOT_TOKEN detected. Use env var?", keepPrompt: "Slack bot token already configured. Keep it?", inputPrompt: "Enter Slack bot token (xoxb-...)", preferredEnvVar: allowEnv ? 
"SLACK_BOT_TOKEN" : undefined, + applySet: async (cfg, value) => + patchChannelConfigForAccount({ + cfg, + channel: "slack", + accountId: slackAccountId, + patch: { botToken: value }, + }), }); - if (botTokenResult.action === "use-env") { - resolvedBotTokenForAllowlist = process.env.SLACK_BOT_TOKEN?.trim() || undefined; - } else if (botTokenResult.action === "set") { - next = patchChannelConfigForAccount({ - cfg: next, - channel: "slack", - accountId: slackAccountId, - patch: { botToken: botTokenResult.value }, - }); - resolvedBotTokenForAllowlist = botTokenResult.resolvedValue; + next = botTokenStep.cfg; + if (botTokenStep.resolvedValue) { + resolvedBotTokenForAllowlist = botTokenStep.resolvedValue; } - const appTokenResult = await promptSingleChannelSecretInput({ + const appTokenStep = await runSingleChannelSecretStep({ cfg: next, prompter, providerHint: "slack-app", credentialLabel: "Slack app token", secretInputMode: options?.secretInputMode, - accountConfigured: appPromptState.accountConfigured, - canUseEnv: appPromptState.canUseEnv, - hasConfigToken: appPromptState.hasConfigToken, + accountConfigured: Boolean(resolvedAccount.appToken) || hasConfiguredAppToken, + hasConfigToken: hasConfiguredAppToken, + allowEnv, + envValue: process.env.SLACK_APP_TOKEN, envPrompt: "SLACK_APP_TOKEN detected. Use env var?", keepPrompt: "Slack app token already configured. Keep it?", inputPrompt: "Enter Slack app token (xapp-...)", preferredEnvVar: allowEnv ? 
"SLACK_APP_TOKEN" : undefined, + applySet: async (cfg, value) => + patchChannelConfigForAccount({ + cfg, + channel: "slack", + accountId: slackAccountId, + patch: { appToken: value }, + }), }); - if (appTokenResult.action === "set") { - next = patchChannelConfigForAccount({ - cfg: next, - channel: "slack", - accountId: slackAccountId, - patch: { appToken: appTokenResult.value }, - }); - } + next = appTokenStep.cfg; next = await configureChannelAccessWithAllowlist({ cfg: next, diff --git a/src/channels/plugins/onboarding/telegram.ts b/src/channels/plugins/onboarding/telegram.ts index 22a173d47fe..2c37c24bcee 100644 --- a/src/channels/plugins/onboarding/telegram.ts +++ b/src/channels/plugins/onboarding/telegram.ts @@ -14,12 +14,11 @@ import { fetchTelegramChatId } from "../../telegram/api.js"; import type { ChannelOnboardingAdapter, ChannelOnboardingDmPolicy } from "../onboarding-types.js"; import { applySingleTokenPromptResult, - buildSingleChannelSecretPromptState, patchChannelConfigForAccount, - promptSingleChannelSecretInput, promptResolvedAllowFrom, resolveAccountIdForConfigure, resolveOnboardingAccountId, + runSingleChannelSecretStep, setChannelDmPolicyWithAllowFrom, setOnboardingChannelEnabled, splitOnboardingEntries, @@ -194,59 +193,46 @@ export const telegramOnboardingAdapter: ChannelOnboardingAdapter = { const hasConfigToken = hasConfiguredBotToken || Boolean(resolvedAccount.config.tokenFile?.trim()); const allowEnv = telegramAccountId === DEFAULT_ACCOUNT_ID; - const tokenPromptState = buildSingleChannelSecretPromptState({ - accountConfigured: Boolean(resolvedAccount.token) || hasConfigToken, - hasConfigToken, - allowEnv, - envValue: process.env.TELEGRAM_BOT_TOKEN, - }); - - if (!tokenPromptState.accountConfigured) { - await noteTelegramTokenHelp(prompter); - } - - const tokenResult = await promptSingleChannelSecretInput({ + const tokenStep = await runSingleChannelSecretStep({ cfg: next, prompter, providerHint: "telegram", credentialLabel: "Telegram bot 
token", secretInputMode: options?.secretInputMode, - accountConfigured: tokenPromptState.accountConfigured, - canUseEnv: tokenPromptState.canUseEnv, - hasConfigToken: tokenPromptState.hasConfigToken, + accountConfigured: Boolean(resolvedAccount.token) || hasConfigToken, + hasConfigToken, + allowEnv, + envValue: process.env.TELEGRAM_BOT_TOKEN, envPrompt: "TELEGRAM_BOT_TOKEN detected. Use env var?", keepPrompt: "Telegram token already configured. Keep it?", inputPrompt: "Enter Telegram bot token", preferredEnvVar: allowEnv ? "TELEGRAM_BOT_TOKEN" : undefined, + onMissingConfigured: async () => await noteTelegramTokenHelp(prompter), + applyUseEnv: async (cfg) => + applySingleTokenPromptResult({ + cfg, + channel: "telegram", + accountId: telegramAccountId, + tokenPatchKey: "botToken", + tokenResult: { useEnv: true, token: null }, + }), + applySet: async (cfg, value) => + applySingleTokenPromptResult({ + cfg, + channel: "telegram", + accountId: telegramAccountId, + tokenPatchKey: "botToken", + tokenResult: { useEnv: false, token: value }, + }), }); - - let resolvedTokenForAllowFrom: string | undefined; - if (tokenResult.action === "use-env") { - next = applySingleTokenPromptResult({ - cfg: next, - channel: "telegram", - accountId: telegramAccountId, - tokenPatchKey: "botToken", - tokenResult: { useEnv: true, token: null }, - }); - resolvedTokenForAllowFrom = process.env.TELEGRAM_BOT_TOKEN?.trim() || undefined; - } else if (tokenResult.action === "set") { - next = applySingleTokenPromptResult({ - cfg: next, - channel: "telegram", - accountId: telegramAccountId, - tokenPatchKey: "botToken", - tokenResult: { useEnv: false, token: tokenResult.value }, - }); - resolvedTokenForAllowFrom = tokenResult.resolvedValue; - } + next = tokenStep.cfg; if (forceAllowFrom) { next = await promptTelegramAllowFrom({ cfg: next, prompter, accountId: telegramAccountId, - tokenOverride: resolvedTokenForAllowFrom, + tokenOverride: tokenStep.resolvedValue, }); } diff --git 
a/src/channels/plugins/outbound/direct-text-media.sendpayload.test.ts b/src/channels/plugins/outbound/direct-text-media.sendpayload.test.ts index 0e5c2ba01db..42971f1e89c 100644 --- a/src/channels/plugins/outbound/direct-text-media.sendpayload.test.ts +++ b/src/channels/plugins/outbound/direct-text-media.sendpayload.test.ts @@ -1,9 +1,17 @@ -import { describe, expect, it, vi } from "vitest"; +import { describe, vi } from "vitest"; import type { ReplyPayload } from "../../../auto-reply/types.js"; +import { + installSendPayloadContractSuite, + primeSendMock, +} from "../../../test-utils/send-payload-contract.js"; import { createDirectTextMediaOutbound } from "./direct-text-media.js"; -function makeOutbound() { - const sendFn = vi.fn().mockResolvedValue({ messageId: "m1" }); +function createDirectHarness(params: { + payload: ReplyPayload; + sendResults?: Array<{ messageId: string }>; +}) { + const sendFn = vi.fn(); + primeSendMock(sendFn, { messageId: "m1" }, params.sendResults); const outbound = createDirectTextMediaOutbound({ channel: "imessage", resolveSender: () => sendFn, @@ -24,94 +32,16 @@ function baseCtx(payload: ReplyPayload) { } describe("createDirectTextMediaOutbound sendPayload", () => { - it("text-only delegates to sendText", async () => { - const { outbound, sendFn } = makeOutbound(); - const result = await outbound.sendPayload!(baseCtx({ text: "hello" })); - - expect(sendFn).toHaveBeenCalledTimes(1); - expect(sendFn).toHaveBeenCalledWith("user1", "hello", expect.any(Object)); - expect(result).toMatchObject({ channel: "imessage", messageId: "m1" }); - }); - - it("single media delegates to sendMedia", async () => { - const { outbound, sendFn } = makeOutbound(); - const result = await outbound.sendPayload!( - baseCtx({ text: "cap", mediaUrl: "https://example.com/a.jpg" }), - ); - - expect(sendFn).toHaveBeenCalledTimes(1); - expect(sendFn).toHaveBeenCalledWith( - "user1", - "cap", - expect.objectContaining({ mediaUrl: "https://example.com/a.jpg" }), - ); - 
expect(result).toMatchObject({ channel: "imessage", messageId: "m1" }); - }); - - it("multi-media iterates URLs with caption on first", async () => { - const sendFn = vi - .fn() - .mockResolvedValueOnce({ messageId: "m1" }) - .mockResolvedValueOnce({ messageId: "m2" }); - const outbound = createDirectTextMediaOutbound({ - channel: "imessage", - resolveSender: () => sendFn, - resolveMaxBytes: () => undefined, - buildTextOptions: (opts) => opts as never, - buildMediaOptions: (opts) => opts as never, - }); - const result = await outbound.sendPayload!( - baseCtx({ - text: "caption", - mediaUrls: ["https://example.com/1.jpg", "https://example.com/2.jpg"], - }), - ); - - expect(sendFn).toHaveBeenCalledTimes(2); - expect(sendFn).toHaveBeenNthCalledWith( - 1, - "user1", - "caption", - expect.objectContaining({ mediaUrl: "https://example.com/1.jpg" }), - ); - expect(sendFn).toHaveBeenNthCalledWith( - 2, - "user1", - "", - expect.objectContaining({ mediaUrl: "https://example.com/2.jpg" }), - ); - expect(result).toMatchObject({ channel: "imessage", messageId: "m2" }); - }); - - it("empty payload returns no-op", async () => { - const { outbound, sendFn } = makeOutbound(); - const result = await outbound.sendPayload!(baseCtx({})); - - expect(sendFn).not.toHaveBeenCalled(); - expect(result).toEqual({ channel: "imessage", messageId: "" }); - }); - - it("chunking splits long text", async () => { - const sendFn = vi - .fn() - .mockResolvedValueOnce({ messageId: "c1" }) - .mockResolvedValueOnce({ messageId: "c2" }); - const outbound = createDirectTextMediaOutbound({ - channel: "signal", - resolveSender: () => sendFn, - resolveMaxBytes: () => undefined, - buildTextOptions: (opts) => opts as never, - buildMediaOptions: (opts) => opts as never, - }); - // textChunkLimit is 4000; generate text exceeding that - const longText = "a".repeat(5000); - const result = await outbound.sendPayload!(baseCtx({ text: longText })); - - expect(sendFn.mock.calls.length).toBeGreaterThanOrEqual(2); - // 
Each chunk should be within the limit - for (const call of sendFn.mock.calls) { - expect((call[1] as string).length).toBeLessThanOrEqual(4000); - } - expect(result).toMatchObject({ channel: "signal" }); + installSendPayloadContractSuite({ + channel: "imessage", + chunking: { mode: "split", longTextLength: 5000, maxChunkLength: 4000 }, + createHarness: ({ payload, sendResults }) => { + const { outbound, sendFn } = createDirectHarness({ payload, sendResults }); + return { + run: async () => await outbound.sendPayload!(baseCtx(payload)), + sendMock: sendFn, + to: "user1", + }; + }, }); }); diff --git a/src/channels/plugins/outbound/direct-text-media.ts b/src/channels/plugins/outbound/direct-text-media.ts index 9617798325d..ea813fcf75b 100644 --- a/src/channels/plugins/outbound/direct-text-media.ts +++ b/src/channels/plugins/outbound/direct-text-media.ts @@ -28,34 +28,58 @@ type SendPayloadAdapter = Pick< "sendMedia" | "sendText" | "chunker" | "textChunkLimit" >; +export function resolvePayloadMediaUrls(payload: SendPayloadContext["payload"]): string[] { + return payload.mediaUrls?.length ? payload.mediaUrls : payload.mediaUrl ? [payload.mediaUrl] : []; +} + +export async function sendPayloadMediaSequence(params: { + text: string; + mediaUrls: readonly string[]; + send: (input: { + text: string; + mediaUrl: string; + index: number; + isFirst: boolean; + }) => Promise; +}): Promise { + let lastResult: TResult | undefined; + for (let i = 0; i < params.mediaUrls.length; i += 1) { + const mediaUrl = params.mediaUrls[i]; + if (!mediaUrl) { + continue; + } + lastResult = await params.send({ + text: i === 0 ? params.text : "", + mediaUrl, + index: i, + isFirst: i === 0, + }); + } + return lastResult; +} + export async function sendTextMediaPayload(params: { channel: string; ctx: SendPayloadContext; adapter: SendPayloadAdapter; }): Promise { const text = params.ctx.payload.text ?? ""; - const urls = params.ctx.payload.mediaUrls?.length - ? 
params.ctx.payload.mediaUrls - : params.ctx.payload.mediaUrl - ? [params.ctx.payload.mediaUrl] - : []; + const urls = resolvePayloadMediaUrls(params.ctx.payload); if (!text && urls.length === 0) { return { channel: params.channel, messageId: "" }; } if (urls.length > 0) { - let lastResult = await params.adapter.sendMedia!({ - ...params.ctx, + const lastResult = await sendPayloadMediaSequence({ text, - mediaUrl: urls[0], + mediaUrls: urls, + send: async ({ text, mediaUrl }) => + await params.adapter.sendMedia!({ + ...params.ctx, + text, + mediaUrl, + }), }); - for (let i = 1; i < urls.length; i++) { - lastResult = await params.adapter.sendMedia!({ - ...params.ctx, - text: "", - mediaUrl: urls[i], - }); - } - return lastResult; + return lastResult ?? { channel: params.channel, messageId: "" }; } const limit = params.adapter.textChunkLimit; const chunks = limit && params.adapter.chunker ? params.adapter.chunker(text, limit) : [text]; diff --git a/src/channels/plugins/outbound/discord.sendpayload.test.ts b/src/channels/plugins/outbound/discord.sendpayload.test.ts index 07c821d6e79..168f8d8d927 100644 --- a/src/channels/plugins/outbound/discord.sendpayload.test.ts +++ b/src/channels/plugins/outbound/discord.sendpayload.test.ts @@ -1,98 +1,37 @@ -import { describe, expect, it, vi } from "vitest"; +import { describe, vi } from "vitest"; import type { ReplyPayload } from "../../../auto-reply/types.js"; +import { + installSendPayloadContractSuite, + primeSendMock, +} from "../../../test-utils/send-payload-contract.js"; import { discordOutbound } from "./discord.js"; -function baseCtx(payload: ReplyPayload) { - return { +function createHarness(params: { + payload: ReplyPayload; + sendResults?: Array<{ messageId: string }>; +}) { + const sendDiscord = vi.fn(); + primeSendMock(sendDiscord, { messageId: "dc-1", channelId: "123456" }, params.sendResults); + const ctx = { cfg: {}, to: "channel:123456", text: "", - payload, + payload: params.payload, deps: { - sendDiscord: 
vi.fn().mockResolvedValue({ messageId: "dc-1", channelId: "123456" }), + sendDiscord, }, }; + return { + run: async () => await discordOutbound.sendPayload!(ctx), + sendMock: sendDiscord, + to: ctx.to, + }; } describe("discordOutbound sendPayload", () => { - it("text-only delegates to sendText", async () => { - const ctx = baseCtx({ text: "hello" }); - const result = await discordOutbound.sendPayload!(ctx); - - expect(ctx.deps.sendDiscord).toHaveBeenCalledTimes(1); - expect(ctx.deps.sendDiscord).toHaveBeenCalledWith( - "channel:123456", - "hello", - expect.any(Object), - ); - expect(result).toMatchObject({ channel: "discord" }); - }); - - it("single media delegates to sendMedia", async () => { - const ctx = baseCtx({ text: "cap", mediaUrl: "https://example.com/a.jpg" }); - const result = await discordOutbound.sendPayload!(ctx); - - expect(ctx.deps.sendDiscord).toHaveBeenCalledTimes(1); - expect(ctx.deps.sendDiscord).toHaveBeenCalledWith( - "channel:123456", - "cap", - expect.objectContaining({ mediaUrl: "https://example.com/a.jpg" }), - ); - expect(result).toMatchObject({ channel: "discord" }); - }); - - it("multi-media iterates URLs with caption on first", async () => { - const sendDiscord = vi - .fn() - .mockResolvedValueOnce({ messageId: "dc-1", channelId: "123456" }) - .mockResolvedValueOnce({ messageId: "dc-2", channelId: "123456" }); - const ctx = { - cfg: {}, - to: "channel:123456", - text: "", - payload: { - text: "caption", - mediaUrls: ["https://example.com/1.jpg", "https://example.com/2.jpg"], - } as ReplyPayload, - deps: { sendDiscord }, - }; - const result = await discordOutbound.sendPayload!(ctx); - - expect(sendDiscord).toHaveBeenCalledTimes(2); - expect(sendDiscord).toHaveBeenNthCalledWith( - 1, - "channel:123456", - "caption", - expect.objectContaining({ mediaUrl: "https://example.com/1.jpg" }), - ); - expect(sendDiscord).toHaveBeenNthCalledWith( - 2, - "channel:123456", - "", - expect.objectContaining({ mediaUrl: "https://example.com/2.jpg" }), - 
); - expect(result).toMatchObject({ channel: "discord", messageId: "dc-2" }); - }); - - it("empty payload returns no-op", async () => { - const ctx = baseCtx({}); - const result = await discordOutbound.sendPayload!(ctx); - - expect(ctx.deps.sendDiscord).not.toHaveBeenCalled(); - expect(result).toEqual({ channel: "discord", messageId: "" }); - }); - - it("text exceeding chunk limit is sent as-is when chunker is null", async () => { - // Discord has chunker: null, so long text should be sent as a single message - const ctx = baseCtx({ text: "a".repeat(3000) }); - const result = await discordOutbound.sendPayload!(ctx); - - expect(ctx.deps.sendDiscord).toHaveBeenCalledTimes(1); - expect(ctx.deps.sendDiscord).toHaveBeenCalledWith( - "channel:123456", - "a".repeat(3000), - expect.any(Object), - ); - expect(result).toMatchObject({ channel: "discord" }); + installSendPayloadContractSuite({ + channel: "discord", + chunking: { mode: "passthrough", longTextLength: 3000 }, + createHarness, }); }); diff --git a/src/channels/plugins/outbound/slack.sendpayload.test.ts b/src/channels/plugins/outbound/slack.sendpayload.test.ts index c6df264df12..8c6b0806254 100644 --- a/src/channels/plugins/outbound/slack.sendpayload.test.ts +++ b/src/channels/plugins/outbound/slack.sendpayload.test.ts @@ -1,92 +1,103 @@ import { describe, expect, it, vi } from "vitest"; import type { ReplyPayload } from "../../../auto-reply/types.js"; +import { + installSendPayloadContractSuite, + primeSendMock, +} from "../../../test-utils/send-payload-contract.js"; import { slackOutbound } from "./slack.js"; -function baseCtx(payload: ReplyPayload) { - return { +function createHarness(params: { + payload: ReplyPayload; + sendResults?: Array<{ messageId: string }>; +}) { + const sendSlack = vi.fn(); + primeSendMock( + sendSlack, + { messageId: "sl-1", channelId: "C12345", ts: "1234.5678" }, + params.sendResults, + ); + const ctx = { cfg: {}, to: "C12345", text: "", - payload, + payload: params.payload, deps: { - 
sendSlack: vi - .fn() - .mockResolvedValue({ messageId: "sl-1", channelId: "C12345", ts: "1234.5678" }), + sendSlack, }, }; + return { + run: async () => await slackOutbound.sendPayload!(ctx), + sendMock: sendSlack, + to: ctx.to, + }; } describe("slackOutbound sendPayload", () => { - it("text-only delegates to sendText", async () => { - const ctx = baseCtx({ text: "hello" }); - const result = await slackOutbound.sendPayload!(ctx); - - expect(ctx.deps.sendSlack).toHaveBeenCalledTimes(1); - expect(ctx.deps.sendSlack).toHaveBeenCalledWith("C12345", "hello", expect.any(Object)); - expect(result).toMatchObject({ channel: "slack" }); + installSendPayloadContractSuite({ + channel: "slack", + chunking: { mode: "passthrough", longTextLength: 5000 }, + createHarness, }); - it("single media delegates to sendMedia", async () => { - const ctx = baseCtx({ text: "cap", mediaUrl: "https://example.com/a.jpg" }); - const result = await slackOutbound.sendPayload!(ctx); - - expect(ctx.deps.sendSlack).toHaveBeenCalledTimes(1); - expect(ctx.deps.sendSlack).toHaveBeenCalledWith( - "C12345", - "cap", - expect.objectContaining({ mediaUrl: "https://example.com/a.jpg" }), - ); - expect(result).toMatchObject({ channel: "slack" }); - }); - - it("multi-media iterates URLs with caption on first", async () => { - const sendSlack = vi - .fn() - .mockResolvedValueOnce({ messageId: "sl-1", channelId: "C12345" }) - .mockResolvedValueOnce({ messageId: "sl-2", channelId: "C12345" }); - const ctx = { - cfg: {}, - to: "C12345", - text: "", + it("forwards Slack blocks from channelData", async () => { + const { run, sendMock, to } = createHarness({ payload: { - text: "caption", - mediaUrls: ["https://example.com/1.jpg", "https://example.com/2.jpg"], - } as ReplyPayload, - deps: { sendSlack }, - }; - const result = await slackOutbound.sendPayload!(ctx); + text: "Fallback summary", + channelData: { + slack: { + blocks: [{ type: "divider" }], + }, + }, + }, + }); - expect(sendSlack).toHaveBeenCalledTimes(2); 
- expect(sendSlack).toHaveBeenNthCalledWith( - 1, - "C12345", - "caption", - expect.objectContaining({ mediaUrl: "https://example.com/1.jpg" }), + const result = await run(); + + expect(sendMock).toHaveBeenCalledTimes(1); + expect(sendMock).toHaveBeenCalledWith( + to, + "Fallback summary", + expect.objectContaining({ + blocks: [{ type: "divider" }], + }), ); - expect(sendSlack).toHaveBeenNthCalledWith( - 2, - "C12345", + expect(result).toMatchObject({ channel: "slack", messageId: "sl-1" }); + }); + + it("accepts blocks encoded as JSON strings in Slack channelData", async () => { + const { run, sendMock, to } = createHarness({ + payload: { + channelData: { + slack: { + blocks: '[{"type":"section","text":{"type":"mrkdwn","text":"hello"}}]', + }, + }, + }, + }); + + await run(); + + expect(sendMock).toHaveBeenCalledWith( + to, "", - expect.objectContaining({ mediaUrl: "https://example.com/2.jpg" }), + expect.objectContaining({ + blocks: [{ type: "section", text: { type: "mrkdwn", text: "hello" } }], + }), ); - expect(result).toMatchObject({ channel: "slack", messageId: "sl-2" }); }); - it("empty payload returns no-op", async () => { - const ctx = baseCtx({}); - const result = await slackOutbound.sendPayload!(ctx); + it("rejects invalid Slack blocks from channelData", async () => { + const { run, sendMock } = createHarness({ + payload: { + channelData: { + slack: { + blocks: {}, + }, + }, + }, + }); - expect(ctx.deps.sendSlack).not.toHaveBeenCalled(); - expect(result).toEqual({ channel: "slack", messageId: "" }); - }); - - it("text exceeding chunk limit is sent as-is when chunker is null", async () => { - // Slack has chunker: null, so long text should be sent as a single message - const ctx = baseCtx({ text: "a".repeat(5000) }); - const result = await slackOutbound.sendPayload!(ctx); - - expect(ctx.deps.sendSlack).toHaveBeenCalledTimes(1); - expect(ctx.deps.sendSlack).toHaveBeenCalledWith("C12345", "a".repeat(5000), expect.any(Object)); - 
expect(result).toMatchObject({ channel: "slack" }); + await expect(run()).rejects.toThrow(/blocks must be an array/i); + expect(sendMock).not.toHaveBeenCalled(); }); }); diff --git a/src/channels/plugins/outbound/slack.ts b/src/channels/plugins/outbound/slack.ts index 1c14cc3743d..96ff7b1b0cb 100644 --- a/src/channels/plugins/outbound/slack.ts +++ b/src/channels/plugins/outbound/slack.ts @@ -1,5 +1,6 @@ import type { OutboundIdentity } from "../../../infra/outbound/identity.js"; import { getGlobalHookRunner } from "../../../plugins/hook-runner-global.js"; +import { parseSlackBlocksInput } from "../../../slack/blocks-input.js"; import { sendMessageSlack, type SlackSendIdentity } from "../../../slack/send.js"; import type { ChannelOutboundAdapter } from "../types.js"; import { sendTextMediaPayload } from "./direct-text-media.js"; @@ -53,6 +54,7 @@ async function sendSlackOutboundMessage(params: { text: string; mediaUrl?: string; mediaLocalRoots?: readonly string[]; + blocks?: NonNullable[2]>["blocks"]; accountId?: string | null; deps?: { sendSlack?: typeof sendMessageSlack } | null; replyToId?: string | null; @@ -87,17 +89,43 @@ async function sendSlackOutboundMessage(params: { ...(params.mediaUrl ? { mediaUrl: params.mediaUrl, mediaLocalRoots: params.mediaLocalRoots } : {}), + ...(params.blocks ? { blocks: params.blocks } : {}), ...(slackIdentity ? 
{ identity: slackIdentity } : {}), }); return { channel: "slack" as const, ...result }; } +function resolveSlackBlocks(channelData: Record | undefined) { + const slackData = channelData?.slack; + if (!slackData || typeof slackData !== "object" || Array.isArray(slackData)) { + return undefined; + } + return parseSlackBlocksInput((slackData as { blocks?: unknown }).blocks); +} + export const slackOutbound: ChannelOutboundAdapter = { deliveryMode: "direct", chunker: null, textChunkLimit: 4000, - sendPayload: async (ctx) => - await sendTextMediaPayload({ channel: "slack", ctx, adapter: slackOutbound }), + sendPayload: async (ctx) => { + const blocks = resolveSlackBlocks(ctx.payload.channelData); + if (!blocks) { + return await sendTextMediaPayload({ channel: "slack", ctx, adapter: slackOutbound }); + } + return await sendSlackOutboundMessage({ + cfg: ctx.cfg, + to: ctx.to, + text: ctx.payload.text ?? "", + mediaUrl: ctx.payload.mediaUrl, + mediaLocalRoots: ctx.mediaLocalRoots, + blocks, + accountId: ctx.accountId, + deps: ctx.deps, + replyToId: ctx.replyToId, + threadId: ctx.threadId, + identity: ctx.identity, + }); + }, sendText: async ({ cfg, to, text, accountId, deps, replyToId, threadId, identity }) => { return await sendSlackOutboundMessage({ cfg, diff --git a/src/channels/plugins/outbound/telegram.ts b/src/channels/plugins/outbound/telegram.ts index 2a079a6014e..c96a44a7047 100644 --- a/src/channels/plugins/outbound/telegram.ts +++ b/src/channels/plugins/outbound/telegram.ts @@ -1,3 +1,4 @@ +import type { ReplyPayload } from "../../../auto-reply/types.js"; import type { OutboundSendDeps } from "../../../infra/outbound/deliver.js"; import type { TelegramInlineButtons } from "../../../telegram/button-types.js"; import { markdownToTelegramHtmlChunks } from "../../../telegram/format.js"; @@ -7,17 +8,21 @@ import { } from "../../../telegram/outbound-params.js"; import { sendMessageTelegram } from "../../../telegram/send.js"; import type { ChannelOutboundAdapter } from 
"../types.js"; +import { resolvePayloadMediaUrls, sendPayloadMediaSequence } from "./direct-text-media.js"; + +type TelegramSendFn = typeof sendMessageTelegram; +type TelegramSendOpts = Parameters[2]; function resolveTelegramSendContext(params: { - cfg: NonNullable[2]>["cfg"]; + cfg: NonNullable["cfg"]; deps?: OutboundSendDeps; accountId?: string | null; replyToId?: string | null; threadId?: string | number | null; }): { - send: typeof sendMessageTelegram; + send: TelegramSendFn; baseOpts: { - cfg: NonNullable[2]>["cfg"]; + cfg: NonNullable["cfg"]; verbose: false; textMode: "html"; messageThreadId?: number; @@ -39,6 +44,45 @@ function resolveTelegramSendContext(params: { }; } +export async function sendTelegramPayloadMessages(params: { + send: TelegramSendFn; + to: string; + payload: ReplyPayload; + baseOpts: Omit, "buttons" | "mediaUrl" | "quoteText">; +}): Promise>> { + const telegramData = params.payload.channelData?.telegram as + | { buttons?: TelegramInlineButtons; quoteText?: string } + | undefined; + const quoteText = + typeof telegramData?.quoteText === "string" ? telegramData.quoteText : undefined; + const text = params.payload.text ?? ""; + const mediaUrls = resolvePayloadMediaUrls(params.payload); + const payloadOpts = { + ...params.baseOpts, + quoteText, + }; + + if (mediaUrls.length === 0) { + return await params.send(params.to, text, { + ...payloadOpts, + buttons: telegramData?.buttons, + }); + } + + // Telegram allows reply_markup on media; attach buttons only to the first send. + const finalResult = await sendPayloadMediaSequence({ + text, + mediaUrls, + send: async ({ text, mediaUrl, isFirst }) => + await params.send(params.to, text, { + ...payloadOpts, + mediaUrl, + ...(isFirst ? { buttons: telegramData?.buttons } : {}), + }), + }); + return finalResult ?? 
{ messageId: "unknown", chatId: params.to }; +} + export const telegramOutbound: ChannelOutboundAdapter = { deliveryMode: "direct", chunker: markdownToTelegramHtmlChunks, @@ -92,49 +136,22 @@ export const telegramOutbound: ChannelOutboundAdapter = { replyToId, threadId, }) => { - const { send, baseOpts: contextOpts } = resolveTelegramSendContext({ + const { send, baseOpts } = resolveTelegramSendContext({ cfg, deps, accountId, replyToId, threadId, }); - const telegramData = payload.channelData?.telegram as - | { buttons?: TelegramInlineButtons; quoteText?: string } - | undefined; - const quoteText = - typeof telegramData?.quoteText === "string" ? telegramData.quoteText : undefined; - const text = payload.text ?? ""; - const mediaUrls = payload.mediaUrls?.length - ? payload.mediaUrls - : payload.mediaUrl - ? [payload.mediaUrl] - : []; - const payloadOpts = { - ...contextOpts, - quoteText, - mediaLocalRoots, - }; - - if (mediaUrls.length === 0) { - const result = await send(to, text, { - ...payloadOpts, - buttons: telegramData?.buttons, - }); - return { channel: "telegram", ...result }; - } - - // Telegram allows reply_markup on media; attach buttons only to first send. - let finalResult: Awaited> | undefined; - for (let i = 0; i < mediaUrls.length; i += 1) { - const mediaUrl = mediaUrls[i]; - const isFirst = i === 0; - finalResult = await send(to, isFirst ? text : "", { - ...payloadOpts, - mediaUrl, - ...(isFirst ? { buttons: telegramData?.buttons } : {}), - }); - } - return { channel: "telegram", ...(finalResult ?? 
{ messageId: "unknown", chatId: to }) }; + const result = await sendTelegramPayloadMessages({ + send, + to, + payload, + baseOpts: { + ...baseOpts, + mediaLocalRoots, + }, + }); + return { channel: "telegram", ...result }; }, }; diff --git a/src/channels/plugins/outbound/whatsapp.poll.test.ts b/src/channels/plugins/outbound/whatsapp.poll.test.ts index 7164a6b152e..6474322264a 100644 --- a/src/channels/plugins/outbound/whatsapp.poll.test.ts +++ b/src/channels/plugins/outbound/whatsapp.poll.test.ts @@ -1,5 +1,8 @@ import { describe, expect, it, vi } from "vitest"; -import type { OpenClawConfig } from "../../../config/config.js"; +import { + createWhatsAppPollFixture, + expectWhatsAppPollSent, +} from "../../../test-helpers/whatsapp-outbound.js"; const hoisted = vi.hoisted(() => ({ sendPollWhatsApp: vi.fn(async () => ({ messageId: "poll-1", toJid: "1555@s.whatsapp.net" })), @@ -17,25 +20,16 @@ import { whatsappOutbound } from "./whatsapp.js"; describe("whatsappOutbound sendPoll", () => { it("threads cfg through poll send options", async () => { - const cfg = { marker: "resolved-cfg" } as OpenClawConfig; - const poll = { - question: "Lunch?", - options: ["Pizza", "Sushi"], - maxSelections: 1, - }; + const { cfg, poll, to, accountId } = createWhatsAppPollFixture(); const result = await whatsappOutbound.sendPoll!({ cfg, - to: "+1555", + to, poll, - accountId: "work", + accountId, }); - expect(hoisted.sendPollWhatsApp).toHaveBeenCalledWith("+1555", poll, { - verbose: false, - accountId: "work", - cfg, - }); + expectWhatsAppPollSent(hoisted.sendPollWhatsApp, { cfg, poll, to, accountId }); expect(result).toEqual({ messageId: "poll-1", toJid: "1555@s.whatsapp.net" }); }); }); diff --git a/src/channels/plugins/outbound/whatsapp.sendpayload.test.ts b/src/channels/plugins/outbound/whatsapp.sendpayload.test.ts index 3eb6f7467dc..943c8a8ba9b 100644 --- a/src/channels/plugins/outbound/whatsapp.sendpayload.test.ts +++ b/src/channels/plugins/outbound/whatsapp.sendpayload.test.ts @@ 
-1,106 +1,125 @@ import { describe, expect, it, vi } from "vitest"; import type { ReplyPayload } from "../../../auto-reply/types.js"; +import { + installSendPayloadContractSuite, + primeSendMock, +} from "../../../test-utils/send-payload-contract.js"; import { whatsappOutbound } from "./whatsapp.js"; -function baseCtx(payload: ReplyPayload) { - return { +function createHarness(params: { + payload: ReplyPayload; + sendResults?: Array<{ messageId: string }>; +}) { + const sendWhatsApp = vi.fn(); + primeSendMock(sendWhatsApp, { messageId: "wa-1" }, params.sendResults); + const ctx = { cfg: {}, to: "5511999999999@c.us", text: "", - payload, + payload: params.payload, deps: { - sendWhatsApp: vi.fn().mockResolvedValue({ messageId: "wa-1" }), + sendWhatsApp, }, }; + return { + run: async () => await whatsappOutbound.sendPayload!(ctx), + sendMock: sendWhatsApp, + to: ctx.to, + }; } describe("whatsappOutbound sendPayload", () => { - it("text-only delegates to sendText", async () => { - const ctx = baseCtx({ text: "hello" }); - const result = await whatsappOutbound.sendPayload!(ctx); - - expect(ctx.deps.sendWhatsApp).toHaveBeenCalledTimes(1); - expect(ctx.deps.sendWhatsApp).toHaveBeenCalledWith( - "5511999999999@c.us", - "hello", - expect.any(Object), - ); - expect(result).toMatchObject({ channel: "whatsapp", messageId: "wa-1" }); + installSendPayloadContractSuite({ + channel: "whatsapp", + chunking: { mode: "split", longTextLength: 5000, maxChunkLength: 4000 }, + createHarness, }); - it("single media delegates to sendMedia", async () => { - const ctx = baseCtx({ text: "cap", mediaUrl: "https://example.com/a.jpg" }); - const result = await whatsappOutbound.sendPayload!(ctx); + it("trims leading whitespace for direct text sends", async () => { + const sendWhatsApp = vi.fn(async () => ({ messageId: "wa-1", toJid: "jid" })); - expect(ctx.deps.sendWhatsApp).toHaveBeenCalledTimes(1); - expect(ctx.deps.sendWhatsApp).toHaveBeenCalledWith( - "5511999999999@c.us", - "cap", - 
expect.objectContaining({ mediaUrl: "https://example.com/a.jpg" }), - ); - expect(result).toMatchObject({ channel: "whatsapp" }); + await whatsappOutbound.sendText!({ + cfg: {}, + to: "5511999999999@c.us", + text: "\n \thello", + deps: { sendWhatsApp }, + }); + + expect(sendWhatsApp).toHaveBeenCalledWith("5511999999999@c.us", "hello", { + verbose: false, + cfg: {}, + accountId: undefined, + gifPlayback: undefined, + }); }); - it("multi-media iterates URLs with caption on first", async () => { - const sendWhatsApp = vi - .fn() - .mockResolvedValueOnce({ messageId: "wa-1" }) - .mockResolvedValueOnce({ messageId: "wa-2" }); - const ctx = { + it("trims leading whitespace for direct media captions", async () => { + const sendWhatsApp = vi.fn(async () => ({ messageId: "wa-1", toJid: "jid" })); + + await whatsappOutbound.sendMedia!({ + cfg: {}, + to: "5511999999999@c.us", + text: "\n \tcaption", + mediaUrl: "/tmp/test.png", + deps: { sendWhatsApp }, + }); + + expect(sendWhatsApp).toHaveBeenCalledWith("5511999999999@c.us", "caption", { + verbose: false, + cfg: {}, + mediaUrl: "/tmp/test.png", + mediaLocalRoots: undefined, + accountId: undefined, + gifPlayback: undefined, + }); + }); + + it("trims leading whitespace for sendPayload text and caption delivery", async () => { + const sendWhatsApp = vi.fn(async () => ({ messageId: "wa-1", toJid: "jid" })); + + await whatsappOutbound.sendPayload!({ cfg: {}, to: "5511999999999@c.us", text: "", - payload: { - text: "caption", - mediaUrls: ["https://example.com/1.jpg", "https://example.com/2.jpg"], - } as ReplyPayload, + payload: { text: "\n\nhello" }, deps: { sendWhatsApp }, - }; - const result = await whatsappOutbound.sendPayload!(ctx); + }); + await whatsappOutbound.sendPayload!({ + cfg: {}, + to: "5511999999999@c.us", + text: "", + payload: { text: "\n\ncaption", mediaUrl: "/tmp/test.png" }, + deps: { sendWhatsApp }, + }); - expect(sendWhatsApp).toHaveBeenCalledTimes(2); - expect(sendWhatsApp).toHaveBeenNthCalledWith( - 1, - 
"5511999999999@c.us", - "caption", - expect.objectContaining({ mediaUrl: "https://example.com/1.jpg" }), - ); - expect(sendWhatsApp).toHaveBeenNthCalledWith( - 2, - "5511999999999@c.us", - "", - expect.objectContaining({ mediaUrl: "https://example.com/2.jpg" }), - ); - expect(result).toMatchObject({ channel: "whatsapp", messageId: "wa-2" }); + expect(sendWhatsApp).toHaveBeenNthCalledWith(1, "5511999999999@c.us", "hello", { + verbose: false, + cfg: {}, + accountId: undefined, + gifPlayback: undefined, + }); + expect(sendWhatsApp).toHaveBeenNthCalledWith(2, "5511999999999@c.us", "caption", { + verbose: false, + cfg: {}, + mediaUrl: "/tmp/test.png", + mediaLocalRoots: undefined, + accountId: undefined, + gifPlayback: undefined, + }); }); - it("empty payload returns no-op", async () => { - const ctx = baseCtx({}); - const result = await whatsappOutbound.sendPayload!(ctx); + it("skips whitespace-only text payloads", async () => { + const sendWhatsApp = vi.fn(); + + const result = await whatsappOutbound.sendPayload!({ + cfg: {}, + to: "5511999999999@c.us", + text: "", + payload: { text: "\n \t" }, + deps: { sendWhatsApp }, + }); - expect(ctx.deps.sendWhatsApp).not.toHaveBeenCalled(); expect(result).toEqual({ channel: "whatsapp", messageId: "" }); - }); - - it("chunking splits long text", async () => { - const sendWhatsApp = vi - .fn() - .mockResolvedValueOnce({ messageId: "wa-c1" }) - .mockResolvedValueOnce({ messageId: "wa-c2" }); - const longText = "a".repeat(5000); - const ctx = { - cfg: {}, - to: "5511999999999@c.us", - text: "", - payload: { text: longText } as ReplyPayload, - deps: { sendWhatsApp }, - }; - const result = await whatsappOutbound.sendPayload!(ctx); - - expect(sendWhatsApp.mock.calls.length).toBeGreaterThanOrEqual(2); - for (const call of sendWhatsApp.mock.calls) { - expect((call[1] as string).length).toBeLessThanOrEqual(4000); - } - expect(result).toMatchObject({ channel: "whatsapp" }); + expect(sendWhatsApp).not.toHaveBeenCalled(); }); }); diff --git 
a/src/channels/plugins/outbound/whatsapp.ts b/src/channels/plugins/outbound/whatsapp.ts index e5de15241ae..0cd797c6c10 100644 --- a/src/channels/plugins/outbound/whatsapp.ts +++ b/src/channels/plugins/outbound/whatsapp.ts @@ -1,48 +1,40 @@ import { chunkText } from "../../../auto-reply/chunk.js"; import { shouldLogVerbose } from "../../../globals.js"; import { sendPollWhatsApp } from "../../../web/outbound.js"; -import { resolveWhatsAppOutboundTarget } from "../../../whatsapp/resolve-outbound-target.js"; import type { ChannelOutboundAdapter } from "../types.js"; +import { createWhatsAppOutboundBase } from "../whatsapp-shared.js"; import { sendTextMediaPayload } from "./direct-text-media.js"; +function trimLeadingWhitespace(text: string | undefined): string { + return text?.trimStart() ?? ""; +} + export const whatsappOutbound: ChannelOutboundAdapter = { - deliveryMode: "gateway", - chunker: chunkText, - chunkerMode: "text", - textChunkLimit: 4000, - pollMaxOptions: 12, - resolveTarget: ({ to, allowFrom, mode }) => - resolveWhatsAppOutboundTarget({ to, allowFrom, mode }), - sendPayload: async (ctx) => - await sendTextMediaPayload({ channel: "whatsapp", ctx, adapter: whatsappOutbound }), - sendText: async ({ cfg, to, text, accountId, deps, gifPlayback }) => { - const send = - deps?.sendWhatsApp ?? (await import("../../../web/outbound.js")).sendMessageWhatsApp; - const result = await send(to, text, { - verbose: false, - cfg, - accountId: accountId ?? undefined, - gifPlayback, + ...createWhatsAppOutboundBase({ + chunker: chunkText, + sendMessageWhatsApp: async (...args) => + (await import("../../../web/outbound.js")).sendMessageWhatsApp(...args), + sendPollWhatsApp, + shouldLogVerbose, + normalizeText: trimLeadingWhitespace, + skipEmptyText: true, + }), + sendPayload: async (ctx) => { + const text = trimLeadingWhitespace(ctx.payload.text); + const hasMedia = Boolean(ctx.payload.mediaUrl) || (ctx.payload.mediaUrls?.length ?? 
0) > 0; + if (!text && !hasMedia) { + return { channel: "whatsapp", messageId: "" }; + } + return await sendTextMediaPayload({ + channel: "whatsapp", + ctx: { + ...ctx, + payload: { + ...ctx.payload, + text, + }, + }, + adapter: whatsappOutbound, }); - return { channel: "whatsapp", ...result }; }, - sendMedia: async ({ cfg, to, text, mediaUrl, mediaLocalRoots, accountId, deps, gifPlayback }) => { - const send = - deps?.sendWhatsApp ?? (await import("../../../web/outbound.js")).sendMessageWhatsApp; - const result = await send(to, text, { - verbose: false, - cfg, - mediaUrl, - mediaLocalRoots, - accountId: accountId ?? undefined, - gifPlayback, - }); - return { channel: "whatsapp", ...result }; - }, - sendPoll: async ({ cfg, to, poll, accountId }) => - await sendPollWhatsApp(to, poll, { - verbose: shouldLogVerbose(), - accountId: accountId ?? undefined, - cfg, - }), }; diff --git a/src/channels/plugins/plugins-core.test.ts b/src/channels/plugins/plugins-core.test.ts index 49012222982..30ed835873d 100644 --- a/src/channels/plugins/plugins-core.test.ts +++ b/src/channels/plugins/plugins-core.test.ts @@ -19,8 +19,16 @@ import { createTestRegistry, } from "../../test-utils/channel-plugins.js"; import { withEnvAsync } from "../../test-utils/env.js"; +import { INTERNAL_MESSAGE_CHANNEL } from "../../utils/message-channel.js"; import { getChannelPluginCatalogEntry, listChannelPluginCatalogEntries } from "./catalog.js"; -import { resolveChannelConfigWrites } from "./config-writes.js"; +import { + authorizeConfigWrite, + canBypassConfigWritePolicy, + formatConfigWriteDeniedMessage, + resolveExplicitConfigWriteTarget, + resolveChannelConfigWrites, + resolveConfigWriteTargetFromPath, +} from "./config-writes.js"; import { listDiscordDirectoryGroupsFromConfig, listDiscordDirectoryPeersFromConfig, @@ -145,6 +153,82 @@ describe("channel plugin catalog", () => { ); expect(ids).toContain("demo-channel"); }); + + it("uses the provided env for external catalog path resolution", () => { 
+ const home = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-catalog-home-")); + const catalogPath = path.join(home, "catalog.json"); + fs.writeFileSync( + catalogPath, + JSON.stringify({ + entries: [ + { + name: "@openclaw/env-demo-channel", + openclaw: { + channel: { + id: "env-demo-channel", + label: "Env Demo Channel", + selectionLabel: "Env Demo Channel", + docsPath: "/channels/env-demo-channel", + blurb: "Env demo entry", + order: 1000, + }, + install: { + npmSpec: "@openclaw/env-demo-channel", + }, + }, + }, + ], + }), + ); + + const ids = listChannelPluginCatalogEntries({ + env: { + ...process.env, + OPENCLAW_PLUGIN_CATALOG_PATHS: "~/catalog.json", + HOME: home, + }, + }).map((entry) => entry.id); + + expect(ids).toContain("env-demo-channel"); + }); + + it("uses the provided env for default catalog paths", () => { + const stateDir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-catalog-state-")); + const catalogPath = path.join(stateDir, "plugins", "catalog.json"); + fs.mkdirSync(path.dirname(catalogPath), { recursive: true }); + fs.writeFileSync( + catalogPath, + JSON.stringify({ + entries: [ + { + name: "@openclaw/default-env-demo", + openclaw: { + channel: { + id: "default-env-demo", + label: "Default Env Demo", + selectionLabel: "Default Env Demo", + docsPath: "/channels/default-env-demo", + blurb: "Default env demo entry", + }, + install: { + npmSpec: "@openclaw/default-env-demo", + }, + }, + }, + ], + }), + ); + + const ids = listChannelPluginCatalogEntries({ + env: { + ...process.env, + OPENCLAW_STATE_DIR: stateDir, + CLAWDBOT_STATE_DIR: undefined, + }, + }).map((entry) => entry.id); + + expect(ids).toContain("default-env-demo"); + }); }); const emptyRegistry = createTestRegistry([]); @@ -325,6 +409,108 @@ describe("resolveChannelConfigWrites", () => { }); }); +describe("authorizeConfigWrite", () => { + function expectConfigWriteBlocked(params: { + disabledAccountId: string; + reason: "target-disabled" | "origin-disabled"; + blockedScope: 
"target" | "origin"; + }) { + expect( + authorizeConfigWrite({ + cfg: makeSlackConfigWritesCfg(params.disabledAccountId), + origin: { channelId: "slack", accountId: "default" }, + target: resolveExplicitConfigWriteTarget({ channelId: "slack", accountId: "work" }), + }), + ).toEqual({ + allowed: false, + reason: params.reason, + blockedScope: { + kind: params.blockedScope, + scope: { + channelId: "slack", + accountId: params.blockedScope === "target" ? "work" : "default", + }, + }, + }); + } + + it("blocks when a target account disables writes", () => { + expectConfigWriteBlocked({ + disabledAccountId: "work", + reason: "target-disabled", + blockedScope: "target", + }); + }); + + it("blocks when the origin account disables writes", () => { + expectConfigWriteBlocked({ + disabledAccountId: "default", + reason: "origin-disabled", + blockedScope: "origin", + }); + }); + + it("allows bypass for internal operator.admin writes", () => { + const cfg = makeSlackConfigWritesCfg("work"); + expect( + authorizeConfigWrite({ + cfg, + origin: { channelId: "slack", accountId: "default" }, + target: resolveExplicitConfigWriteTarget({ channelId: "slack", accountId: "work" }), + allowBypass: canBypassConfigWritePolicy({ + channel: INTERNAL_MESSAGE_CHANNEL, + gatewayClientScopes: ["operator.admin"], + }), + }), + ).toEqual({ allowed: true }); + }); + + it("treats non-channel config paths as global writes", () => { + const cfg = makeSlackConfigWritesCfg("work"); + expect( + authorizeConfigWrite({ + cfg, + origin: { channelId: "slack", accountId: "default" }, + target: resolveConfigWriteTargetFromPath(["messages", "ackReaction"]), + }), + ).toEqual({ allowed: true }); + }); + + it("rejects ambiguous channel collection writes", () => { + expect(resolveConfigWriteTargetFromPath(["channels", "telegram"])).toEqual({ + kind: "ambiguous", + scopes: [{ channelId: "telegram" }], + }); + expect(resolveConfigWriteTargetFromPath(["channels", "telegram", "accounts"])).toEqual({ + kind: "ambiguous", 
+ scopes: [{ channelId: "telegram" }], + }); + }); + + it("resolves explicit channel and account targets", () => { + expect(resolveExplicitConfigWriteTarget({ channelId: "slack" })).toEqual({ + kind: "channel", + scope: { channelId: "slack" }, + }); + expect(resolveExplicitConfigWriteTarget({ channelId: "slack", accountId: "work" })).toEqual({ + kind: "account", + scope: { channelId: "slack", accountId: "work" }, + }); + }); + + it("formats denied messages consistently", () => { + expect( + formatConfigWriteDeniedMessage({ + result: { + allowed: false, + reason: "target-disabled", + blockedScope: { kind: "target", scope: { channelId: "slack", accountId: "work" } }, + }, + }), + ).toContain("channels.slack.accounts.work.configWrites=true"); + }); +}); + describe("directory (config-backed)", () => { it("lists Slack peers/groups from config", async () => { const cfg = { diff --git a/src/channels/plugins/setup-helpers.test.ts b/src/channels/plugins/setup-helpers.test.ts index df4609fc76f..10069c0b9f4 100644 --- a/src/channels/plugins/setup-helpers.test.ts +++ b/src/channels/plugins/setup-helpers.test.ts @@ -30,7 +30,7 @@ describe("applySetupAccountConfigPatch", () => { }); }); - it("patches named account config and enables both channel and account", () => { + it("patches named account config and preserves existing account enabled flag", () => { const next = applySetupAccountConfigPatch({ cfg: asConfig({ channels: { @@ -50,7 +50,7 @@ describe("applySetupAccountConfigPatch", () => { expect(next.channels?.zalo).toMatchObject({ enabled: true, accounts: { - work: { enabled: true, botToken: "new" }, + work: { enabled: false, botToken: "new" }, }, }); }); diff --git a/src/channels/plugins/setup-helpers.ts b/src/channels/plugins/setup-helpers.ts index 5045c431d60..d592a56e475 100644 --- a/src/channels/plugins/setup-helpers.ts +++ b/src/channels/plugins/setup-helpers.ts @@ -125,6 +125,23 @@ export function applySetupAccountConfigPatch(params: { channelKey: string; accountId: 
string; patch: Record; +}): OpenClawConfig { + return patchScopedAccountConfig({ + cfg: params.cfg, + channelKey: params.channelKey, + accountId: params.accountId, + patch: params.patch, + }); +} + +export function patchScopedAccountConfig(params: { + cfg: OpenClawConfig; + channelKey: string; + accountId: string; + patch: Record; + accountPatch?: Record; + ensureChannelEnabled?: boolean; + ensureAccountEnabled?: boolean; }): OpenClawConfig { const accountId = normalizeAccountId(params.accountId); const channels = params.cfg.channels as Record | undefined; @@ -135,6 +152,10 @@ export function applySetupAccountConfigPatch(params: { accounts?: Record>; }) : undefined; + const ensureChannelEnabled = params.ensureChannelEnabled ?? true; + const ensureAccountEnabled = params.ensureAccountEnabled ?? ensureChannelEnabled; + const patch = params.patch; + const accountPatch = params.accountPatch ?? patch; if (accountId === DEFAULT_ACCOUNT_ID) { return { ...params.cfg, @@ -142,27 +163,33 @@ export function applySetupAccountConfigPatch(params: { ...params.cfg.channels, [params.channelKey]: { ...base, - enabled: true, - ...params.patch, + ...(ensureChannelEnabled ? { enabled: true } : {}), + ...patch, }, }, } as OpenClawConfig; } const accounts = base?.accounts ?? {}; + const existingAccount = accounts[accountId] ?? {}; return { ...params.cfg, channels: { ...params.cfg.channels, [params.channelKey]: { ...base, - enabled: true, + ...(ensureChannelEnabled ? { enabled: true } : {}), accounts: { ...accounts, [accountId]: { - ...accounts[accountId], - enabled: true, - ...params.patch, + ...existingAccount, + ...(ensureAccountEnabled + ? { + enabled: + typeof existingAccount.enabled === "boolean" ? 
existingAccount.enabled : true, + } + : {}), + ...accountPatch, }, }, }, diff --git a/src/channels/plugins/status.ts b/src/channels/plugins/status.ts index cc7de671a3a..689c50c6710 100644 --- a/src/channels/plugins/status.ts +++ b/src/channels/plugins/status.ts @@ -41,6 +41,19 @@ async function buildSnapshotFromAccount(params: { }; } +function inspectChannelAccount(params: { + plugin: ChannelPlugin; + cfg: OpenClawConfig; + accountId: string; +}): ResolvedAccount | null { + return (params.plugin.config.inspectAccount?.(params.cfg, params.accountId) ?? + inspectReadOnlyChannelAccount({ + channelId: params.plugin.id, + cfg: params.cfg, + accountId: params.accountId, + })) as ResolvedAccount | null; +} + export async function buildReadOnlySourceChannelAccountSnapshot(params: { plugin: ChannelPlugin; cfg: OpenClawConfig; @@ -49,13 +62,7 @@ export async function buildReadOnlySourceChannelAccountSnapshot probe?: unknown; audit?: unknown; }): Promise { - const inspectedAccount = - params.plugin.config.inspectAccount?.(params.cfg, params.accountId) ?? - inspectReadOnlyChannelAccount({ - channelId: params.plugin.id, - cfg: params.cfg, - accountId: params.accountId, - }); + const inspectedAccount = inspectChannelAccount(params); if (!inspectedAccount) { return null; } @@ -73,15 +80,9 @@ export async function buildChannelAccountSnapshot(params: { probe?: unknown; audit?: unknown; }): Promise { - const inspectedAccount = - params.plugin.config.inspectAccount?.(params.cfg, params.accountId) ?? - inspectReadOnlyChannelAccount({ - channelId: params.plugin.id, - cfg: params.cfg, - accountId: params.accountId, - }); - const account = (inspectedAccount ?? - params.plugin.config.resolveAccount(params.cfg, params.accountId)) as ResolvedAccount; + const inspectedAccount = inspectChannelAccount(params); + const account = + inspectedAccount ?? 
params.plugin.config.resolveAccount(params.cfg, params.accountId); return await buildSnapshotFromAccount({ ...params, account, diff --git a/src/channels/plugins/types.core.ts b/src/channels/plugins/types.core.ts index c1ee5c815ef..1662ad2b0e0 100644 --- a/src/channels/plugins/types.core.ts +++ b/src/channels/plugins/types.core.ts @@ -290,6 +290,18 @@ export type ChannelMessagingAdapter = { targetResolver?: { looksLikeId?: (raw: string, normalized?: string) => boolean; hint?: string; + resolveTarget?: (params: { + cfg: OpenClawConfig; + accountId?: string | null; + input: string; + normalized: string; + preferredKind?: ChannelDirectoryEntryKind | "channel"; + }) => Promise<{ + to: string; + kind: ChannelDirectoryEntryKind | "channel"; + display?: string; + source?: "normalized" | "directory"; + } | null>; }; formatTargetDisplay?: (params: { target: string; diff --git a/src/channels/plugins/whatsapp-shared.ts b/src/channels/plugins/whatsapp-shared.ts index 368b58454fb..1174dff7c73 100644 --- a/src/channels/plugins/whatsapp-shared.ts +++ b/src/channels/plugins/whatsapp-shared.ts @@ -1,4 +1,7 @@ +import type { PluginRuntimeChannel } from "../../plugins/runtime/types-channel.js"; import { escapeRegExp } from "../../utils.js"; +import { resolveWhatsAppOutboundTarget } from "../../whatsapp/resolve-outbound-target.js"; +import type { ChannelOutboundAdapter } from "./types.js"; export const WHATSAPP_GROUP_INTRO_HINT = "WhatsApp IDs: SenderId is the participant JID (group participant id)."; @@ -15,3 +18,89 @@ export function resolveWhatsAppMentionStripPatterns(ctx: { To?: string | null }) const escaped = escapeRegExp(selfE164); return [escaped, `@${escaped}`]; } + +type WhatsAppChunker = NonNullable; +type WhatsAppSendMessage = PluginRuntimeChannel["whatsapp"]["sendMessageWhatsApp"]; +type WhatsAppSendPoll = PluginRuntimeChannel["whatsapp"]["sendPollWhatsApp"]; + +type CreateWhatsAppOutboundBaseParams = { + chunker: WhatsAppChunker; + sendMessageWhatsApp: 
WhatsAppSendMessage; + sendPollWhatsApp: WhatsAppSendPoll; + shouldLogVerbose: () => boolean; + resolveTarget?: ChannelOutboundAdapter["resolveTarget"]; + normalizeText?: (text: string | undefined) => string; + skipEmptyText?: boolean; +}; + +export function createWhatsAppOutboundBase({ + chunker, + sendMessageWhatsApp, + sendPollWhatsApp, + shouldLogVerbose, + resolveTarget = ({ to, allowFrom, mode }) => + resolveWhatsAppOutboundTarget({ to, allowFrom, mode }), + normalizeText = (text) => text ?? "", + skipEmptyText = false, +}: CreateWhatsAppOutboundBaseParams): Pick< + ChannelOutboundAdapter, + | "deliveryMode" + | "chunker" + | "chunkerMode" + | "textChunkLimit" + | "pollMaxOptions" + | "resolveTarget" + | "sendText" + | "sendMedia" + | "sendPoll" +> { + return { + deliveryMode: "gateway", + chunker, + chunkerMode: "text", + textChunkLimit: 4000, + pollMaxOptions: 12, + resolveTarget, + sendText: async ({ cfg, to, text, accountId, deps, gifPlayback }) => { + const normalizedText = normalizeText(text); + if (skipEmptyText && !normalizedText) { + return { channel: "whatsapp", messageId: "" }; + } + const send = deps?.sendWhatsApp ?? sendMessageWhatsApp; + const result = await send(to, normalizedText, { + verbose: false, + cfg, + accountId: accountId ?? undefined, + gifPlayback, + }); + return { channel: "whatsapp", ...result }; + }, + sendMedia: async ({ + cfg, + to, + text, + mediaUrl, + mediaLocalRoots, + accountId, + deps, + gifPlayback, + }) => { + const send = deps?.sendWhatsApp ?? sendMessageWhatsApp; + const result = await send(to, normalizeText(text), { + verbose: false, + cfg, + mediaUrl, + mediaLocalRoots, + accountId: accountId ?? undefined, + gifPlayback, + }); + return { channel: "whatsapp", ...result }; + }, + sendPoll: async ({ cfg, to, poll, accountId }) => + await sendPollWhatsApp(to, poll, { + verbose: shouldLogVerbose(), + accountId: accountId ?? 
undefined, + cfg, + }), + }; +} diff --git a/src/channels/reply-prefix.ts b/src/channels/reply-prefix.ts index 2ae6f3d221a..59f0a29381d 100644 --- a/src/channels/reply-prefix.ts +++ b/src/channels/reply-prefix.ts @@ -5,19 +5,24 @@ import { } from "../auto-reply/reply/response-prefix-template.js"; import type { GetReplyOptions } from "../auto-reply/types.js"; import type { OpenClawConfig } from "../config/config.js"; +import { isSlackInteractiveRepliesEnabled } from "../slack/interactive-replies.js"; type ModelSelectionContext = Parameters>[0]; export type ReplyPrefixContextBundle = { prefixContext: ResponsePrefixContext; responsePrefix?: string; + enableSlackInteractiveReplies?: boolean; responsePrefixContextProvider: () => ResponsePrefixContext; onModelSelected: (ctx: ModelSelectionContext) => void; }; export type ReplyPrefixOptions = Pick< ReplyPrefixContextBundle, - "responsePrefix" | "responsePrefixContextProvider" | "onModelSelected" + | "responsePrefix" + | "enableSlackInteractiveReplies" + | "responsePrefixContextProvider" + | "onModelSelected" >; export function createReplyPrefixContext(params: { @@ -45,6 +50,10 @@ export function createReplyPrefixContext(params: { channel: params.channel, accountId: params.accountId, }).responsePrefix, + enableSlackInteractiveReplies: + params.channel === "slack" + ? 
isSlackInteractiveRepliesEnabled({ cfg, accountId: params.accountId }) + : undefined, responsePrefixContextProvider: () => prefixContext, onModelSelected, }; @@ -56,7 +65,16 @@ export function createReplyPrefixOptions(params: { channel?: string; accountId?: string; }): ReplyPrefixOptions { - const { responsePrefix, responsePrefixContextProvider, onModelSelected } = - createReplyPrefixContext(params); - return { responsePrefix, responsePrefixContextProvider, onModelSelected }; + const { + responsePrefix, + enableSlackInteractiveReplies, + responsePrefixContextProvider, + onModelSelected, + } = createReplyPrefixContext(params); + return { + responsePrefix, + enableSlackInteractiveReplies, + responsePrefixContextProvider, + onModelSelected, + }; } diff --git a/src/channels/status-reactions.test.ts b/src/channels/status-reactions.test.ts index 9b61946d64e..41611c22b1a 100644 --- a/src/channels/status-reactions.test.ts +++ b/src/channels/status-reactions.test.ts @@ -148,6 +148,15 @@ describe("createStatusReactionController", () => { expect(calls).toContainEqual({ method: "set", emoji: DEFAULT_EMOJIS.thinking }); }); + it("should debounce setCompacting and eventually call adapter", async () => { + const { calls, controller } = createEnabledController(); + + void controller.setCompacting(); + await vi.advanceTimersByTimeAsync(DEFAULT_TIMING.debounceMs); + + expect(calls).toContainEqual({ method: "set", emoji: DEFAULT_EMOJIS.compacting }); + }); + it("should classify tool name and debounce", async () => { const { calls, controller } = createEnabledController(); @@ -245,6 +254,19 @@ describe("createStatusReactionController", () => { expect(calls.length).toBe(callsAfterFirst); }); + it("should cancel a pending compacting emoji before resuming thinking", async () => { + const { calls, controller } = createEnabledController(); + + void controller.setCompacting(); + await vi.advanceTimersByTimeAsync(DEFAULT_TIMING.debounceMs - 1); + controller.cancelPending(); + void 
controller.setThinking(); + await vi.advanceTimersByTimeAsync(DEFAULT_TIMING.debounceMs); + + const setEmojis = calls.filter((call) => call.method === "set").map((call) => call.emoji); + expect(setEmojis).toEqual([DEFAULT_EMOJIS.thinking]); + }); + it("should call removeReaction when adapter supports it and emoji changes", async () => { const { calls, controller } = createEnabledController(); @@ -446,6 +468,7 @@ describe("constants", () => { const emojiKeys = [ "queued", "thinking", + "compacting", "tool", "coding", "web", diff --git a/src/channels/status-reactions.ts b/src/channels/status-reactions.ts index 4b0651232c8..060555a997c 100644 --- a/src/channels/status-reactions.ts +++ b/src/channels/status-reactions.ts @@ -24,6 +24,7 @@ export type StatusReactionEmojis = { error?: string; // Default: "❌" stallSoft?: string; // Default: "⏳" stallHard?: string; // Default: "⚠️" + compacting?: string; // Default: "✍" }; export type StatusReactionTiming = { @@ -38,6 +39,9 @@ export type StatusReactionController = { setQueued: () => Promise | void; setThinking: () => Promise | void; setTool: (toolName?: string) => Promise | void; + setCompacting: () => Promise | void; + /** Cancel any pending debounced emoji (useful before forcing a state transition). 
*/ + cancelPending: () => void; setDone: () => Promise; setError: () => Promise; clear: () => Promise; @@ -58,6 +62,7 @@ export const DEFAULT_EMOJIS: Required = { error: "😱", stallSoft: "🥱", stallHard: "😨", + compacting: "✍", }; export const DEFAULT_TIMING: Required = { @@ -162,6 +167,7 @@ export function createStatusReactionController(params: { emojis.error, emojis.stallSoft, emojis.stallHard, + emojis.compacting, ]); /** @@ -306,6 +312,15 @@ export function createStatusReactionController(params: { scheduleEmoji(emoji); } + function setCompacting(): void { + scheduleEmoji(emojis.compacting); + } + + function cancelPending(): void { + clearDebounceTimer(); + pendingEmoji = ""; + } + function finishWithEmoji(emoji: string): Promise { if (!enabled) { return Promise.resolve(); @@ -375,6 +390,8 @@ export function createStatusReactionController(params: { setQueued, setThinking, setTool, + setCompacting, + cancelPending, setDone, setError, clear, diff --git a/src/cli/acp-cli.option-collisions.test.ts b/src/cli/acp-cli.option-collisions.test.ts index 131db6a67cb..068f415de79 100644 --- a/src/cli/acp-cli.option-collisions.test.ts +++ b/src/cli/acp-cli.option-collisions.test.ts @@ -1,9 +1,7 @@ -import fs from "node:fs/promises"; -import os from "node:os"; -import path from "node:path"; import { Command } from "commander"; import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; import { runRegisteredCli } from "../test-utils/command-runner.js"; +import { withTempSecretFiles } from "../test-utils/secret-file-fixture.js"; const runAcpClientInteractive = vi.fn(async (_opts: unknown) => {}); const serveAcpGateway = vi.fn(async (_opts: unknown) => {}); @@ -30,27 +28,6 @@ vi.mock("../runtime.js", () => ({ describe("acp cli option collisions", () => { let registerAcpCli: typeof import("./acp-cli.js").registerAcpCli; - async function withSecretFiles( - secrets: { token?: string; password?: string }, - run: (files: { tokenFile?: string; passwordFile?: string }) => 
Promise, - ): Promise { - const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-acp-cli-")); - try { - const files: { tokenFile?: string; passwordFile?: string } = {}; - if (secrets.token !== undefined) { - files.tokenFile = path.join(dir, "token.txt"); - await fs.writeFile(files.tokenFile, secrets.token, "utf8"); - } - if (secrets.password !== undefined) { - files.passwordFile = path.join(dir, "password.txt"); - await fs.writeFile(files.passwordFile, secrets.password, "utf8"); - } - return await run(files); - } finally { - await fs.rm(dir, { recursive: true, force: true }); - } - } - function createAcpProgram() { const program = new Command(); registerAcpCli(program); @@ -93,15 +70,19 @@ describe("acp cli option collisions", () => { }); it("loads gateway token/password from files", async () => { - await withSecretFiles({ token: "tok_file\n", [passwordKey()]: "pw_file\n" }, async (files) => { - // pragma: allowlist secret - await parseAcp([ - "--token-file", - files.tokenFile ?? "", - "--password-file", - files.passwordFile ?? "", - ]); - }); + await withTempSecretFiles( + "openclaw-acp-cli-", + { token: "tok_file\n", [passwordKey()]: "pw_file\n" }, + async (files) => { + // pragma: allowlist secret + await parseAcp([ + "--token-file", + files.tokenFile ?? "", + "--password-file", + files.passwordFile ?? "", + ]); + }, + ); expect(serveAcpGateway).toHaveBeenCalledWith( expect.objectContaining({ @@ -111,21 +92,30 @@ describe("acp cli option collisions", () => { ); }); - it("rejects mixed secret flags and file flags", async () => { - await withSecretFiles({ token: "tok_file\n" }, async (files) => { - await parseAcp(["--token", "tok_inline", "--token-file", files.tokenFile ?? 
""]); + it.each([ + { + name: "rejects mixed secret flags and file flags", + files: { token: "tok_file\n" }, + args: (tokenFile: string) => ["--token", "tok_inline", "--token-file", tokenFile], + expected: /Use either --token or --token-file/, + }, + { + name: "rejects mixed password flags and file flags", + files: { password: "pw_file\n" }, // pragma: allowlist secret + args: (_tokenFile: string, passwordFile: string) => [ + "--password", + "pw_inline", + "--password-file", + passwordFile, + ], + expected: /Use either --password or --password-file/, + }, + ])("$name", async ({ files, args, expected }) => { + await withTempSecretFiles("openclaw-acp-cli-", files, async ({ tokenFile, passwordFile }) => { + await parseAcp(args(tokenFile ?? "", passwordFile ?? "")); }); - expectCliError(/Use either --token or --token-file/); - }); - - it("rejects mixed password flags and file flags", async () => { - const passwordFileValue = "pw_file\n"; // pragma: allowlist secret - await withSecretFiles({ password: passwordFileValue }, async (files) => { - await parseAcp(["--password", "pw_inline", "--password-file", files.passwordFile ?? ""]); - }); - - expectCliError(/Use either --password or --password-file/); + expectCliError(expected); }); it("warns when inline secret flags are used", async () => { @@ -140,7 +130,7 @@ describe("acp cli option collisions", () => { }); it("trims token file path before reading", async () => { - await withSecretFiles({ token: "tok_file\n" }, async (files) => { + await withTempSecretFiles("openclaw-acp-cli-", { token: "tok_file\n" }, async (files) => { await parseAcp(["--token-file", ` ${files.tokenFile ?? 
""} `]); }); diff --git a/src/cli/browser-cli-manage.test.ts b/src/cli/browser-cli-manage.test.ts new file mode 100644 index 00000000000..e1d01132be3 --- /dev/null +++ b/src/cli/browser-cli-manage.test.ts @@ -0,0 +1,151 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { registerBrowserManageCommands } from "./browser-cli-manage.js"; +import { createBrowserProgram } from "./browser-cli-test-helpers.js"; + +const mocks = vi.hoisted(() => { + const runtimeLog = vi.fn(); + const runtimeError = vi.fn(); + const runtimeExit = vi.fn(); + return { + callBrowserRequest: vi.fn< + ( + opts: unknown, + req: { path?: string }, + runtimeOpts?: { timeoutMs?: number }, + ) => Promise> + >(async () => ({})), + runtimeLog, + runtimeError, + runtimeExit, + runtime: { + log: runtimeLog, + error: runtimeError, + exit: runtimeExit, + }, + }; +}); + +vi.mock("./browser-cli-shared.js", () => ({ + callBrowserRequest: mocks.callBrowserRequest, +})); + +vi.mock("./cli-utils.js", () => ({ + runCommandWithRuntime: async ( + _runtime: unknown, + action: () => Promise, + onError: (err: unknown) => void, + ) => await action().catch(onError), +})); + +vi.mock("../runtime.js", () => ({ + defaultRuntime: mocks.runtime, +})); + +function createProgram() { + const { program, browser, parentOpts } = createBrowserProgram(); + registerBrowserManageCommands(browser, parentOpts); + return program; +} + +describe("browser manage output", () => { + beforeEach(() => { + mocks.callBrowserRequest.mockClear(); + mocks.runtimeLog.mockClear(); + mocks.runtimeError.mockClear(); + mocks.runtimeExit.mockClear(); + }); + + it("shows chrome-mcp transport for existing-session status without fake CDP fields", async () => { + mocks.callBrowserRequest.mockImplementation(async (_opts: unknown, req: { path?: string }) => + req.path === "/" + ? 
{ + enabled: true, + profile: "chrome-live", + driver: "existing-session", + transport: "chrome-mcp", + running: true, + cdpReady: true, + cdpHttp: true, + pid: 4321, + cdpPort: null, + cdpUrl: null, + chosenBrowser: null, + userDataDir: null, + color: "#00AA00", + headless: false, + noSandbox: false, + executablePath: null, + attachOnly: true, + } + : {}, + ); + + const program = createProgram(); + await program.parseAsync(["browser", "--browser-profile", "chrome-live", "status"], { + from: "user", + }); + + const output = mocks.runtimeLog.mock.calls.at(-1)?.[0] as string; + expect(output).toContain("transport: chrome-mcp"); + expect(output).not.toContain("cdpPort:"); + expect(output).not.toContain("cdpUrl:"); + }); + + it("shows chrome-mcp transport in browser profiles output", async () => { + mocks.callBrowserRequest.mockImplementation(async (_opts: unknown, req: { path?: string }) => + req.path === "/profiles" + ? { + profiles: [ + { + name: "chrome-live", + driver: "existing-session", + transport: "chrome-mcp", + running: true, + tabCount: 2, + isDefault: false, + isRemote: false, + cdpPort: null, + cdpUrl: null, + color: "#00AA00", + }, + ], + } + : {}, + ); + + const program = createProgram(); + await program.parseAsync(["browser", "profiles"], { from: "user" }); + + const output = mocks.runtimeLog.mock.calls.at(-1)?.[0] as string; + expect(output).toContain("chrome-live: running (2 tabs) [existing-session]"); + expect(output).toContain("transport: chrome-mcp"); + expect(output).not.toContain("port: 0"); + }); + + it("shows chrome-mcp transport after creating an existing-session profile", async () => { + mocks.callBrowserRequest.mockImplementation(async (_opts: unknown, req: { path?: string }) => + req.path === "/profiles/create" + ? 
{ + ok: true, + profile: "chrome-live", + transport: "chrome-mcp", + cdpPort: null, + cdpUrl: null, + color: "#00AA00", + isRemote: false, + } + : {}, + ); + + const program = createProgram(); + await program.parseAsync( + ["browser", "create-profile", "--name", "chrome-live", "--driver", "existing-session"], + { from: "user" }, + ); + + const output = mocks.runtimeLog.mock.calls.at(-1)?.[0] as string; + expect(output).toContain('Created profile "chrome-live"'); + expect(output).toContain("transport: chrome-mcp"); + expect(output).not.toContain("port: 0"); + }); +}); diff --git a/src/cli/browser-cli-manage.timeout-option.test.ts b/src/cli/browser-cli-manage.timeout-option.test.ts index 134f13bc3c3..7338d97701e 100644 --- a/src/cli/browser-cli-manage.timeout-option.test.ts +++ b/src/cli/browser-cli-manage.timeout-option.test.ts @@ -76,4 +76,48 @@ describe("browser manage start timeout option", () => { expect(startCall?.[0]).toMatchObject({ timeout: "60000" }); expect(startCall?.[2]).toBeUndefined(); }); + + it("uses a longer built-in timeout for browser status", async () => { + const program = createProgram(); + await program.parseAsync(["browser", "status"], { from: "user" }); + + const statusCall = mocks.callBrowserRequest.mock.calls.find( + (call) => ((call[1] ?? {}) as { path?: string }).path === "/", + ) as [Record, { path?: string }, { timeoutMs?: number }] | undefined; + + expect(statusCall?.[2]).toEqual({ timeoutMs: 45_000 }); + }); + + it("uses a longer built-in timeout for browser tabs", async () => { + const program = createProgram(); + await program.parseAsync(["browser", "tabs"], { from: "user" }); + + const tabsCall = mocks.callBrowserRequest.mock.calls.find( + (call) => ((call[1] ?? 
{}) as { path?: string }).path === "/tabs", + ) as [Record, { path?: string }, { timeoutMs?: number }] | undefined; + + expect(tabsCall?.[2]).toEqual({ timeoutMs: 45_000 }); + }); + + it("uses a longer built-in timeout for browser profiles", async () => { + const program = createProgram(); + await program.parseAsync(["browser", "profiles"], { from: "user" }); + + const profilesCall = mocks.callBrowserRequest.mock.calls.find( + (call) => ((call[1] ?? {}) as { path?: string }).path === "/profiles", + ) as [Record, { path?: string }, { timeoutMs?: number }] | undefined; + + expect(profilesCall?.[2]).toEqual({ timeoutMs: 45_000 }); + }); + + it("uses a longer built-in timeout for browser open", async () => { + const program = createProgram(); + await program.parseAsync(["browser", "open", "https://example.com"], { from: "user" }); + + const openCall = mocks.callBrowserRequest.mock.calls.find( + (call) => ((call[1] ?? {}) as { path?: string }).path === "/tabs/open", + ) as [Record, { path?: string }, { timeoutMs?: number }] | undefined; + + expect(openCall?.[2]).toEqual({ timeoutMs: 45_000 }); + }); }); diff --git a/src/cli/browser-cli-manage.ts b/src/cli/browser-cli-manage.ts index 53b83ca3f97..5bac9b621bf 100644 --- a/src/cli/browser-cli-manage.ts +++ b/src/cli/browser-cli-manage.ts @@ -1,5 +1,6 @@ import type { Command } from "commander"; import type { + BrowserTransport, BrowserCreateProfileResult, BrowserDeleteProfileResult, BrowserResetProfileResult, @@ -13,6 +14,8 @@ import { shortenHomePath } from "../utils.js"; import { callBrowserRequest, type BrowserParentOpts } from "./browser-cli-shared.js"; import { runCommandWithRuntime } from "./cli-utils.js"; +const BROWSER_MANAGE_REQUEST_TIMEOUT_MS = 45_000; + function resolveProfileQuery(profile?: string) { return profile ? 
{ profile } : undefined; } @@ -38,7 +41,7 @@ async function callTabAction( query: resolveProfileQuery(profile), body, }, - { timeoutMs: 10_000 }, + { timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS }, ); } @@ -54,7 +57,7 @@ async function fetchBrowserStatus( query: resolveProfileQuery(profile), }, { - timeoutMs: 1500, + timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS, }, ); } @@ -99,6 +102,29 @@ function logBrowserTabs(tabs: BrowserTab[], json?: boolean) { ); } +function usesChromeMcpTransport(params: { + transport?: BrowserTransport; + driver?: "openclaw" | "extension" | "existing-session"; +}): boolean { + return params.transport === "chrome-mcp" || params.driver === "existing-session"; +} + +function formatBrowserConnectionSummary(params: { + transport?: BrowserTransport; + driver?: "openclaw" | "extension" | "existing-session"; + isRemote?: boolean; + cdpPort?: number | null; + cdpUrl?: string | null; +}): string { + if (usesChromeMcpTransport(params)) { + return "transport: chrome-mcp"; + } + if (params.isRemote) { + return `cdpUrl: ${params.cdpUrl ?? "(unset)"}`; + } + return `port: ${params.cdpPort ?? "(unset)"}`; +} + export function registerBrowserManageCommands( browser: Command, parentOpts: (cmd: Command) => BrowserParentOpts, @@ -120,8 +146,15 @@ export function registerBrowserManageCommands( `profile: ${status.profile ?? "openclaw"}`, `enabled: ${status.enabled}`, `running: ${status.running}`, - `cdpPort: ${status.cdpPort}`, - `cdpUrl: ${status.cdpUrl ?? `http://127.0.0.1:${status.cdpPort}`}`, + `transport: ${ + usesChromeMcpTransport(status) ? "chrome-mcp" : (status.transport ?? "cdp") + }`, + ...(!usesChromeMcpTransport(status) + ? [ + `cdpPort: ${status.cdpPort ?? "(unset)"}`, + `cdpUrl: ${status.cdpUrl ?? `http://127.0.0.1:${status.cdpPort}`}`, + ] + : []), `browser: ${status.chosenBrowser ?? "unknown"}`, `detectedBrowser: ${status.detectedBrowser ?? 
"unknown"}`, `detectedPath: ${detectedDisplay}`, @@ -196,7 +229,7 @@ export function registerBrowserManageCommands( path: "/tabs", query: resolveProfileQuery(profile), }, - { timeoutMs: 3000 }, + { timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS }, ); const tabs = result.tabs ?? []; logBrowserTabs(tabs, parent?.json); @@ -220,7 +253,7 @@ export function registerBrowserManageCommands( action: "list", }, }, - { timeoutMs: 10_000 }, + { timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS }, ); const tabs = result.tabs ?? []; logBrowserTabs(tabs, parent?.json); @@ -305,7 +338,7 @@ export function registerBrowserManageCommands( query: resolveProfileQuery(profile), body: { url }, }, - { timeoutMs: 15000 }, + { timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS }, ); if (printJsonResult(parent, tab)) { return; @@ -330,7 +363,7 @@ export function registerBrowserManageCommands( query: resolveProfileQuery(profile), body: { targetId }, }, - { timeoutMs: 5000 }, + { timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS }, ); if (printJsonResult(parent, { ok: true })) { return; @@ -355,7 +388,7 @@ export function registerBrowserManageCommands( path: `/tabs/${encodeURIComponent(targetId.trim())}`, query: resolveProfileQuery(profile), }, - { timeoutMs: 5000 }, + { timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS }, ); } else { await callBrowserRequest( @@ -366,7 +399,7 @@ export function registerBrowserManageCommands( query: resolveProfileQuery(profile), body: { kind: "close" }, }, - { timeoutMs: 20000 }, + { timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS }, ); } if (printJsonResult(parent, { ok: true })) { @@ -389,7 +422,7 @@ export function registerBrowserManageCommands( method: "GET", path: "/profiles", }, - { timeoutMs: 3000 }, + { timeoutMs: BROWSER_MANAGE_REQUEST_TIMEOUT_MS }, ); const profiles = result.profiles ?? []; if (printJsonResult(parent, { profiles })) { @@ -405,9 +438,10 @@ export function registerBrowserManageCommands( const status = p.running ? "running" : "stopped"; const tabs = p.running ? 
` (${p.tabCount} tabs)` : ""; const def = p.isDefault ? " [default]" : ""; - const loc = p.isRemote ? `cdpUrl: ${p.cdpUrl}` : `port: ${p.cdpPort}`; + const loc = formatBrowserConnectionSummary(p); const remote = p.isRemote ? " [remote]" : ""; - return `${p.name}: ${status}${tabs}${def}${remote}\n ${loc}, color: ${p.color}`; + const driver = p.driver !== "openclaw" ? ` [${p.driver}]` : ""; + return `${p.name}: ${status}${tabs}${def}${remote}${driver}\n ${loc}, color: ${p.color}`; }) .join("\n"), ); @@ -420,7 +454,10 @@ export function registerBrowserManageCommands( .requiredOption("--name ", "Profile name (lowercase, numbers, hyphens)") .option("--color ", "Profile color (hex format, e.g. #0066CC)") .option("--cdp-url ", "CDP URL for remote Chrome (http/https)") - .option("--driver ", "Profile driver (openclaw|extension). Default: openclaw") + .option( + "--driver ", + "Profile driver (openclaw|extension|existing-session). Default: openclaw", + ) .action( async (opts: { name: string; color?: string; cdpUrl?: string; driver?: string }, cmd) => { const parent = parentOpts(cmd); @@ -434,7 +471,12 @@ export function registerBrowserManageCommands( name: opts.name, color: opts.color, cdpUrl: opts.cdpUrl, - driver: opts.driver === "extension" ? "extension" : undefined, + driver: + opts.driver === "extension" + ? "extension" + : opts.driver === "existing-session" + ? "existing-session" + : undefined, }, }, { timeoutMs: 10_000 }, @@ -442,11 +484,15 @@ export function registerBrowserManageCommands( if (printJsonResult(parent, result)) { return; } - const loc = result.isRemote ? ` cdpUrl: ${result.cdpUrl}` : ` port: ${result.cdpPort}`; + const loc = ` ${formatBrowserConnectionSummary(result)}`; defaultRuntime.log( info( `🦞 Created profile "${result.profile}"\n${loc}\n color: ${result.color}${ - opts.driver === "extension" ? "\n driver: extension" : "" + opts.driver === "extension" + ? "\n driver: extension" + : opts.driver === "existing-session" + ? 
"\n driver: existing-session" + : "" }`, ), ); diff --git a/src/cli/command-secret-gateway.test.ts b/src/cli/command-secret-gateway.test.ts index 7929cdbdafc..74c47f637e9 100644 --- a/src/cli/command-secret-gateway.test.ts +++ b/src/cli/command-secret-gateway.test.ts @@ -64,6 +64,17 @@ describe("resolveCommandSecretRefsViaGateway", () => { }); } + function expectGatewayUnavailableLocalFallbackDiagnostics( + result: Awaited>, + ) { + expect( + result.diagnostics.some((entry) => entry.includes("gateway secrets.resolve unavailable")), + ).toBe(true); + expect( + result.diagnostics.some((entry) => entry.includes("resolved command secrets locally")), + ).toBe(true); + } + it("returns config unchanged when no target SecretRefs are configured", async () => { const config = { talk: { @@ -206,6 +217,91 @@ describe("resolveCommandSecretRefsViaGateway", () => { } }); + it("falls back to local resolution for web search SecretRefs when gateway is unavailable", async () => { + const envKey = "WEB_SEARCH_GEMINI_API_KEY_LOCAL_FALLBACK"; + await withEnvValue(envKey, "gemini-local-fallback-key", async () => { + callGateway.mockRejectedValueOnce(new Error("gateway closed")); + const result = await resolveCommandSecretRefsViaGateway({ + config: { + tools: { + web: { + search: { + provider: "gemini", + gemini: { + apiKey: { source: "env", provider: "default", id: envKey }, + }, + }, + }, + }, + } as OpenClawConfig, + commandName: "agent", + targetIds: new Set(["tools.web.search.gemini.apiKey"]), + }); + + expect(result.resolvedConfig.tools?.web?.search?.gemini?.apiKey).toBe( + "gemini-local-fallback-key", + ); + expect(result.targetStatesByPath["tools.web.search.gemini.apiKey"]).toBe("resolved_local"); + expectGatewayUnavailableLocalFallbackDiagnostics(result); + }); + }); + + it("falls back to local resolution for Firecrawl SecretRefs when gateway is unavailable", async () => { + const envKey = "WEB_FETCH_FIRECRAWL_API_KEY_LOCAL_FALLBACK"; + await withEnvValue(envKey, 
"firecrawl-local-fallback-key", async () => { + callGateway.mockRejectedValueOnce(new Error("gateway closed")); + const result = await resolveCommandSecretRefsViaGateway({ + config: { + tools: { + web: { + fetch: { + firecrawl: { + apiKey: { source: "env", provider: "default", id: envKey }, + }, + }, + }, + }, + } as OpenClawConfig, + commandName: "agent", + targetIds: new Set(["tools.web.fetch.firecrawl.apiKey"]), + }); + + expect(result.resolvedConfig.tools?.web?.fetch?.firecrawl?.apiKey).toBe( + "firecrawl-local-fallback-key", + ); + expect(result.targetStatesByPath["tools.web.fetch.firecrawl.apiKey"]).toBe("resolved_local"); + expectGatewayUnavailableLocalFallbackDiagnostics(result); + }); + }); + + it("marks web SecretRefs inactive when the web surface is disabled during local fallback", async () => { + callGateway.mockRejectedValueOnce(new Error("gateway closed")); + const result = await resolveCommandSecretRefsViaGateway({ + config: { + tools: { + web: { + search: { + enabled: false, + gemini: { + apiKey: { source: "env", provider: "default", id: "WEB_SEARCH_DISABLED_KEY" }, + }, + }, + }, + }, + } as OpenClawConfig, + commandName: "agent", + targetIds: new Set(["tools.web.search.gemini.apiKey"]), + }); + + expect(result.hadUnresolvedTargets).toBe(false); + expect(result.targetStatesByPath["tools.web.search.gemini.apiKey"]).toBe("inactive_surface"); + expect( + result.diagnostics.some((entry) => + entry.includes("tools.web.search.gemini.apiKey: tools.web.search is disabled."), + ), + ).toBe(true); + }); + it("returns a version-skew hint when gateway does not support secrets.resolve", async () => { const envKey = "TALK_API_KEY_UNSUPPORTED"; callGateway.mockRejectedValueOnce(new Error("unknown method: secrets.resolve")); diff --git a/src/cli/command-secret-gateway.ts b/src/cli/command-secret-gateway.ts index 89b8c78a3e3..03e578b642c 100644 --- a/src/cli/command-secret-gateway.ts +++ b/src/cli/command-secret-gateway.ts @@ -10,6 +10,7 @@ import { getPath, 
setPathExistingStrict } from "../secrets/path-utils.js"; import { resolveSecretRefValue } from "../secrets/resolve.js"; import { collectConfigAssignments } from "../secrets/runtime-config-collectors.js"; import { createResolverContext } from "../secrets/runtime-shared.js"; +import { resolveRuntimeWebTools } from "../secrets/runtime-web-tools.js"; import { assertExpectedResolvedSecretValue } from "../secrets/secret-value.js"; import { describeUnknownError } from "../secrets/shared.js"; import { @@ -44,6 +45,15 @@ type GatewaySecretsResolveResult = { inactiveRefPaths?: string[]; }; +const WEB_RUNTIME_SECRET_TARGET_ID_PREFIXES = [ + "tools.web.search", + "tools.web.fetch.firecrawl", +] as const; +const WEB_RUNTIME_SECRET_PATH_PREFIXES = [ + "tools.web.search.", + "tools.web.fetch.firecrawl.", +] as const; + function dedupeDiagnostics(entries: readonly string[]): string[] { const seen = new Set(); const ordered: string[] = []; @@ -58,6 +68,30 @@ function dedupeDiagnostics(entries: readonly string[]): string[] { return ordered; } +function targetsRuntimeWebPath(path: string): boolean { + return WEB_RUNTIME_SECRET_PATH_PREFIXES.some((prefix) => path.startsWith(prefix)); +} + +function targetsRuntimeWebResolution(params: { + targetIds: ReadonlySet; + allowedPaths?: ReadonlySet; +}): boolean { + if (params.allowedPaths) { + for (const path of params.allowedPaths) { + if (targetsRuntimeWebPath(path)) { + return true; + } + } + return false; + } + for (const targetId of params.targetIds) { + if (WEB_RUNTIME_SECRET_TARGET_ID_PREFIXES.some((prefix) => targetId.startsWith(prefix))) { + return true; + } + } + return false; +} + function collectConfiguredTargetRefPaths(params: { config: OpenClawConfig; targetIds: Set; @@ -193,17 +227,40 @@ async function resolveCommandSecretRefsLocally(params: { sourceConfig, env: process.env, }); + const localResolutionDiagnostics: string[] = []; collectConfigAssignments({ config: structuredClone(params.config), context, }); + if ( + 
targetsRuntimeWebResolution({ targetIds: params.targetIds, allowedPaths: params.allowedPaths }) + ) { + try { + await resolveRuntimeWebTools({ + sourceConfig, + resolvedConfig, + context, + }); + } catch (error) { + if (params.mode === "strict") { + throw error; + } + localResolutionDiagnostics.push( + `${params.commandName}: failed to resolve web tool secrets locally (${describeUnknownError(error)}).`, + ); + } + } const inactiveRefPaths = new Set( context.warnings .filter((warning) => warning.code === "SECRETS_REF_IGNORED_INACTIVE_SURFACE") + .filter((warning) => !params.allowedPaths || params.allowedPaths.has(warning.path)) .map((warning) => warning.path), ); + const inactiveWarningDiagnostics = context.warnings + .filter((warning) => warning.code === "SECRETS_REF_IGNORED_INACTIVE_SURFACE") + .filter((warning) => !params.allowedPaths || params.allowedPaths.has(warning.path)) + .map((warning) => warning.message); const activePaths = new Set(context.assignments.map((assignment) => assignment.path)); - const localResolutionDiagnostics: string[] = []; for (const target of discoverConfigSecretTargetsByIds(sourceConfig, params.targetIds)) { if (params.allowedPaths && !params.allowedPaths.has(target.path)) { continue; @@ -244,6 +301,7 @@ async function resolveCommandSecretRefsLocally(params: { resolvedConfig, diagnostics: dedupeDiagnostics([ ...params.preflightDiagnostics, + ...inactiveWarningDiagnostics, ...filterInactiveSurfaceDiagnostics({ diagnostics: analyzed.diagnostics, inactiveRefPaths, diff --git a/src/cli/command-secret-targets.test.ts b/src/cli/command-secret-targets.test.ts index 3a7de543a02..a71ac5e00c4 100644 --- a/src/cli/command-secret-targets.test.ts +++ b/src/cli/command-secret-targets.test.ts @@ -9,6 +9,7 @@ describe("command secret target ids", () => { const ids = getAgentRuntimeCommandSecretTargetIds(); expect(ids.has("agents.defaults.memorySearch.remote.apiKey")).toBe(true); expect(ids.has("agents.list[].memorySearch.remote.apiKey")).toBe(true); + 
expect(ids.has("tools.web.fetch.firecrawl.apiKey")).toBe(true); }); it("keeps memory command target set focused on memorySearch remote credentials", () => { diff --git a/src/cli/command-secret-targets.ts b/src/cli/command-secret-targets.ts index c4a4fb5ea4a..e1c2c49e0ae 100644 --- a/src/cli/command-secret-targets.ts +++ b/src/cli/command-secret-targets.ts @@ -23,6 +23,7 @@ const COMMAND_SECRET_TARGETS = { "skills.entries.", "messages.tts.", "tools.web.search", + "tools.web.fetch.firecrawl.", ]), status: idsByPrefix([ "channels.", diff --git a/src/cli/cron-cli/register.cron-add.ts b/src/cli/cron-cli/register.cron-add.ts index 05025dc05e6..e916c459863 100644 --- a/src/cli/cron-cli/register.cron-add.ts +++ b/src/cli/cron-cli/register.cron-add.ts @@ -81,7 +81,10 @@ export function registerCronAddCommand(cron: Command) { .option("--exact", "Disable cron staggering (set stagger to 0)", false) .option("--system-event ", "System event payload (main session)") .option("--message ", "Agent message payload") - .option("--thinking ", "Thinking level for agent jobs (off|minimal|low|medium|high)") + .option( + "--thinking ", + "Thinking level for agent jobs (off|minimal|low|medium|high|xhigh)", + ) .option("--model ", "Model override for agent jobs (provider/model or alias)") .option("--timeout-seconds ", "Timeout seconds for agent jobs") .option("--light-context", "Use lightweight bootstrap context for agent jobs", false) @@ -191,8 +194,13 @@ export function registerCronAddCommand(cron: Command) { const inferredSessionTarget = payload.kind === "agentTurn" ? "isolated" : "main"; const sessionTarget = sessionSource === "cli" ? 
sessionTargetRaw || "" : inferredSessionTarget; - if (sessionTarget !== "main" && sessionTarget !== "isolated") { - throw new Error("--session must be main or isolated"); + const isCustomSessionTarget = + sessionTarget.toLowerCase().startsWith("session:") && + sessionTarget.slice(8).trim().length > 0; + const isIsolatedLikeSessionTarget = + sessionTarget === "isolated" || sessionTarget === "current" || isCustomSessionTarget; + if (sessionTarget !== "main" && !isIsolatedLikeSessionTarget) { + throw new Error("--session must be main, isolated, current, or session:"); } if (opts.deleteAfterRun && opts.keepAfterRun) { @@ -202,14 +210,14 @@ export function registerCronAddCommand(cron: Command) { if (sessionTarget === "main" && payload.kind !== "systemEvent") { throw new Error("Main jobs require --system-event (systemEvent)."); } - if (sessionTarget === "isolated" && payload.kind !== "agentTurn") { - throw new Error("Isolated jobs require --message (agentTurn)."); + if (isIsolatedLikeSessionTarget && payload.kind !== "agentTurn") { + throw new Error("Isolated/current/custom-session jobs require --message (agentTurn)."); } if ( (opts.announce || typeof opts.deliver === "boolean") && - (sessionTarget !== "isolated" || payload.kind !== "agentTurn") + (!isIsolatedLikeSessionTarget || payload.kind !== "agentTurn") ) { - throw new Error("--announce/--no-deliver require --session isolated."); + throw new Error("--announce/--no-deliver require a non-main agentTurn session target."); } const accountId = @@ -217,12 +225,12 @@ export function registerCronAddCommand(cron: Command) { ? 
opts.account.trim() : undefined; - if (accountId && (sessionTarget !== "isolated" || payload.kind !== "agentTurn")) { - throw new Error("--account requires an isolated agentTurn job with delivery."); + if (accountId && (!isIsolatedLikeSessionTarget || payload.kind !== "agentTurn")) { + throw new Error("--account requires a non-main agentTurn job with delivery."); } const deliveryMode = - sessionTarget === "isolated" && payload.kind === "agentTurn" + isIsolatedLikeSessionTarget && payload.kind === "agentTurn" ? hasAnnounce ? "announce" : hasNoDeliver diff --git a/src/cli/cron-cli/register.cron-edit.ts b/src/cli/cron-cli/register.cron-edit.ts index 35bf45907f9..b2007fc3f1a 100644 --- a/src/cli/cron-cli/register.cron-edit.ts +++ b/src/cli/cron-cli/register.cron-edit.ts @@ -49,7 +49,10 @@ export function registerCronEditCommand(cron: Command) { .option("--exact", "Disable cron staggering (set stagger to 0)") .option("--system-event ", "Set systemEvent payload") .option("--message ", "Set agentTurn payload message") - .option("--thinking ", "Thinking level for agent jobs") + .option( + "--thinking ", + "Thinking level for agent jobs (off|minimal|low|medium|high|xhigh)", + ) .option("--model ", "Model override for agent jobs") .option("--timeout-seconds ", "Timeout seconds for agent jobs") .option("--light-context", "Enable lightweight bootstrap context for agent jobs") diff --git a/src/cli/cron-cli/register.ts b/src/cli/cron-cli/register.ts index a796583fa21..35f80dbda06 100644 --- a/src/cli/cron-cli/register.ts +++ b/src/cli/cron-cli/register.ts @@ -16,7 +16,7 @@ export function registerCronCli(program: Command) { .addHelpText( "after", () => - `\n${theme.muted("Docs:")} ${formatDocsLink("/cli/cron", "docs.openclaw.ai/cli/cron")}\n`, + `\n${theme.muted("Docs:")} ${formatDocsLink("/cli/cron", "docs.openclaw.ai/cli/cron")}\n${theme.muted("Upgrade tip:")} run \`openclaw doctor --fix\` to normalize legacy cron job storage.\n`, ); registerCronStatusCommand(cron); diff --git 
a/src/cli/cron-cli/shared.ts b/src/cli/cron-cli/shared.ts index d3601b6ce40..3574a63ab27 100644 --- a/src/cli/cron-cli/shared.ts +++ b/src/cli/cron-cli/shared.ts @@ -247,9 +247,9 @@ export function printCronList(jobs: CronJob[], runtime = defaultRuntime) { })(); const coloredTarget = - job.sessionTarget === "isolated" - ? colorize(rich, theme.accentBright, targetLabel) - : colorize(rich, theme.accent, targetLabel); + job.sessionTarget === "main" + ? colorize(rich, theme.accent, targetLabel) + : colorize(rich, theme.accentBright, targetLabel); const coloredAgent = job.agentId ? colorize(rich, theme.info, agentLabel) : colorize(rich, theme.muted, agentLabel); diff --git a/src/cli/daemon-cli.coverage.test.ts b/src/cli/daemon-cli.coverage.test.ts index d897eee11cc..8faf44cdde3 100644 --- a/src/cli/daemon-cli.coverage.test.ts +++ b/src/cli/daemon-cli.coverage.test.ts @@ -10,7 +10,7 @@ const resolveGatewayProgramArguments = vi.fn(async (_opts?: unknown) => ({ const serviceInstall = vi.fn().mockResolvedValue(undefined); const serviceUninstall = vi.fn().mockResolvedValue(undefined); const serviceStop = vi.fn().mockResolvedValue(undefined); -const serviceRestart = vi.fn().mockResolvedValue(undefined); +const serviceRestart = vi.fn().mockResolvedValue({ outcome: "completed" }); const serviceIsLoaded = vi.fn().mockResolvedValue(false); const serviceReadCommand = vi.fn().mockResolvedValue(null); const serviceReadRuntime = vi.fn().mockResolvedValue({ status: "running" }); @@ -48,20 +48,24 @@ vi.mock("../daemon/program-args.js", () => ({ resolveGatewayProgramArguments: (opts: unknown) => resolveGatewayProgramArguments(opts), })); -vi.mock("../daemon/service.js", () => ({ - resolveGatewayService: () => ({ - label: "LaunchAgent", - loadedText: "loaded", - notLoadedText: "not loaded", - install: serviceInstall, - uninstall: serviceUninstall, - stop: serviceStop, - restart: serviceRestart, - isLoaded: serviceIsLoaded, - readCommand: serviceReadCommand, - readRuntime: 
serviceReadRuntime, - }), -})); +vi.mock("../daemon/service.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveGatewayService: () => ({ + label: "LaunchAgent", + loadedText: "loaded", + notLoadedText: "not loaded", + install: serviceInstall, + uninstall: serviceUninstall, + stop: serviceStop, + restart: serviceRestart, + isLoaded: serviceIsLoaded, + readCommand: serviceReadCommand, + readRuntime: serviceReadRuntime, + }), + }; +}); vi.mock("../daemon/legacy.js", () => ({ findLegacyGatewayServices: async () => [], diff --git a/src/cli/daemon-cli/gateway-token-drift.test.ts b/src/cli/daemon-cli/gateway-token-drift.test.ts index ff221b24e44..0b9d0cfb308 100644 --- a/src/cli/daemon-cli/gateway-token-drift.test.ts +++ b/src/cli/daemon-cli/gateway-token-drift.test.ts @@ -43,4 +43,29 @@ describe("resolveGatewayTokenForDriftCheck", () => { }), ).toThrow(/gateway\.auth\.token/i); }); + + it("does not fall back to gateway.remote token for unresolved local token refs", () => { + expect(() => + resolveGatewayTokenForDriftCheck({ + cfg: { + secrets: { + providers: { + default: { source: "env" }, + }, + }, + gateway: { + mode: "local", + auth: { + mode: "token", + token: { source: "env", provider: "default", id: "MISSING_LOCAL_TOKEN" }, + }, + remote: { + token: "remote-token", + }, + }, + } as OpenClawConfig, + env: {} as NodeJS.ProcessEnv, + }), + ).toThrow(/gateway\.auth\.token/i); + }); }); diff --git a/src/cli/daemon-cli/gateway-token-drift.ts b/src/cli/daemon-cli/gateway-token-drift.ts index e382a7a91c3..a05ea975ca2 100644 --- a/src/cli/daemon-cli/gateway-token-drift.ts +++ b/src/cli/daemon-cli/gateway-token-drift.ts @@ -1,16 +1,10 @@ import type { OpenClawConfig } from "../../config/config.js"; -import { resolveGatewayCredentialsFromConfig } from "../../gateway/credentials.js"; +import { resolveGatewayDriftCheckCredentialsFromConfig } from "../../gateway/credentials.js"; export function 
resolveGatewayTokenForDriftCheck(params: { cfg: OpenClawConfig; env?: NodeJS.ProcessEnv; }) { - return resolveGatewayCredentialsFromConfig({ - cfg: params.cfg, - env: {} as NodeJS.ProcessEnv, - modeOverride: "local", - // Drift checks should compare the configured local token source against the - // persisted service token, not let exported shell env hide stale service state. - localTokenPrecedence: "config-first", - }).token; + void params.env; + return resolveGatewayDriftCheckCredentialsFromConfig({ cfg: params.cfg }).token; } diff --git a/src/cli/daemon-cli/install.test.ts b/src/cli/daemon-cli/install.test.ts index 7401dc3b1a2..6d7b618a17a 100644 --- a/src/cli/daemon-cli/install.test.ts +++ b/src/cli/daemon-cli/install.test.ts @@ -84,8 +84,28 @@ vi.mock("../../commands/daemon-install-helpers.js", () => ({ vi.mock("./shared.js", () => ({ parsePort: parsePortMock, + createDaemonInstallActionContext: (jsonFlag: unknown) => { + const json = Boolean(jsonFlag); + return { + json, + stdout: process.stdout, + warnings: actionState.warnings, + emit: (payload: DaemonActionResponse) => { + actionState.emitted.push(payload); + }, + fail: (message: string, hints?: string[]) => { + actionState.failed.push({ message, hints }); + }, + }; + }, + failIfNixDaemonInstallMode: (fail: (message: string, hints?: string[]) => void) => { + if (!resolveIsNixModeMock()) { + return false; + } + fail("Nix mode detected; service install is disabled."); + return true; + }, })); - vi.mock("../../commands/daemon-runtime.js", () => ({ DEFAULT_GATEWAY_DAEMON_RUNTIME: "node", isGatewayDaemonRuntime: isGatewayDaemonRuntimeMock, @@ -97,16 +117,6 @@ vi.mock("../../daemon/service.js", () => ({ vi.mock("./response.js", () => ({ buildDaemonServiceSnapshot: vi.fn(), - createDaemonActionContext: vi.fn(() => ({ - stdout: process.stdout, - warnings: actionState.warnings, - emit: (payload: DaemonActionResponse) => { - actionState.emitted.push(payload); - }, - fail: (message: string, hints?: string[]) => { - 
actionState.failed.push({ message, hints }); - }, - })), installDaemonServiceAndEmit: installDaemonServiceAndEmitMock, })); @@ -126,6 +136,15 @@ function expectFirstInstallPlanCallOmitsToken() { expect(firstArg && "token" in firstArg).toBe(false); } +function mockResolvedGatewayTokenSecretRef() { + resolveSecretInputRefMock.mockReturnValue({ + ref: { source: "env", provider: "default", id: "OPENCLAW_GATEWAY_TOKEN" }, + }); + resolveSecretRefValuesMock.mockResolvedValue( + new Map([["env:default:OPENCLAW_GATEWAY_TOKEN", "resolved-from-secretref"]]), + ); +} + const { runDaemonInstall } = await import("./install.js"); const envSnapshot = captureFullEnv(); @@ -195,12 +214,7 @@ describe("runDaemonInstall", () => { }); it("validates token SecretRef but does not serialize resolved token into service env", async () => { - resolveSecretInputRefMock.mockReturnValue({ - ref: { source: "env", provider: "default", id: "OPENCLAW_GATEWAY_TOKEN" }, - }); - resolveSecretRefValuesMock.mockResolvedValue( - new Map([["env:default:OPENCLAW_GATEWAY_TOKEN", "resolved-from-secretref"]]), - ); + mockResolvedGatewayTokenSecretRef(); await runDaemonInstall({ json: true }); @@ -219,12 +233,7 @@ describe("runDaemonInstall", () => { loadConfigMock.mockReturnValue({ gateway: { auth: { mode: "token", token: "${OPENCLAW_GATEWAY_TOKEN}" } }, }); - resolveSecretInputRefMock.mockReturnValue({ - ref: { source: "env", provider: "default", id: "OPENCLAW_GATEWAY_TOKEN" }, - }); - resolveSecretRefValuesMock.mockResolvedValue( - new Map([["env:default:OPENCLAW_GATEWAY_TOKEN", "resolved-from-secretref"]]), - ); + mockResolvedGatewayTokenSecretRef(); await runDaemonInstall({ json: true }); diff --git a/src/cli/daemon-cli/install.ts b/src/cli/daemon-cli/install.ts index 96a74bdc748..023ea5e520e 100644 --- a/src/cli/daemon-cli/install.ts +++ b/src/cli/daemon-cli/install.ts @@ -5,25 +5,21 @@ import { } from "../../commands/daemon-runtime.js"; import { resolveGatewayInstallToken } from 
"../../commands/gateway-install-token.js"; import { readBestEffortConfig, resolveGatewayPort } from "../../config/config.js"; -import { resolveIsNixMode } from "../../config/paths.js"; import { resolveGatewayService } from "../../daemon/service.js"; import { isNonFatalSystemdInstallProbeError } from "../../daemon/systemd.js"; import { defaultRuntime } from "../../runtime.js"; import { formatCliCommand } from "../command-format.js"; +import { buildDaemonServiceSnapshot, installDaemonServiceAndEmit } from "./response.js"; import { - buildDaemonServiceSnapshot, - createDaemonActionContext, - installDaemonServiceAndEmit, -} from "./response.js"; -import { parsePort } from "./shared.js"; + createDaemonInstallActionContext, + failIfNixDaemonInstallMode, + parsePort, +} from "./shared.js"; import type { DaemonInstallOptions } from "./types.js"; export async function runDaemonInstall(opts: DaemonInstallOptions) { - const json = Boolean(opts.json); - const { stdout, warnings, emit, fail } = createDaemonActionContext({ action: "install", json }); - - if (resolveIsNixMode(process.env)) { - fail("Nix mode detected; service install is disabled."); + const { json, stdout, warnings, emit, fail } = createDaemonInstallActionContext(opts.json); + if (failIfNixDaemonInstallMode(fail)) { return; } diff --git a/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts b/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts index a785cde4d9b..59a2926e993 100644 --- a/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts +++ b/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts @@ -1,30 +1,15 @@ import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import { + defaultRuntime, + resetLifecycleRuntimeLogs, + resetLifecycleServiceMocks, + service, + stubEmptyGatewayEnv, +} from "./test-helpers/lifecycle-core-harness.js"; const readConfigFileSnapshotMock = vi.fn(); const loadConfig = vi.fn(() => ({})); -const runtimeLogs: string[] = []; -const defaultRuntime = { - log: 
(message: string) => runtimeLogs.push(message), - error: vi.fn(), - exit: (code: number) => { - throw new Error(`__exit__:${code}`); - }, -}; - -const service = { - label: "TestService", - loadedText: "loaded", - notLoadedText: "not loaded", - install: vi.fn(), - uninstall: vi.fn(), - stop: vi.fn(), - isLoaded: vi.fn(), - readCommand: vi.fn(), - readRuntime: vi.fn(), - restart: vi.fn(), -}; - vi.mock("../../config/config.js", () => ({ loadConfig: () => loadConfig(), readConfigFileSnapshot: () => readConfigFileSnapshotMock(), @@ -42,6 +27,28 @@ vi.mock("../../runtime.js", () => ({ defaultRuntime, })); +function setConfigSnapshot(params: { + exists: boolean; + valid: boolean; + issues?: Array<{ path: string; message: string }>; +}) { + readConfigFileSnapshotMock.mockResolvedValue({ + exists: params.exists, + valid: params.valid, + config: {}, + issues: params.issues ?? [], + }); +} + +function createServiceRunArgs() { + return { + serviceNoun: "Gateway", + service, + renderStartHints: () => [], + opts: { json: true }, + }; +} + describe("runServiceRestart config pre-flight (#35862)", () => { let runServiceRestart: typeof import("./lifecycle-core.js").runServiceRestart; @@ -50,80 +57,40 @@ describe("runServiceRestart config pre-flight (#35862)", () => { }); beforeEach(() => { - runtimeLogs.length = 0; + resetLifecycleRuntimeLogs(); readConfigFileSnapshotMock.mockReset(); - readConfigFileSnapshotMock.mockResolvedValue({ - exists: true, - valid: true, - config: {}, - issues: [], - }); + setConfigSnapshot({ exists: true, valid: true }); loadConfig.mockReset(); loadConfig.mockReturnValue({}); - service.isLoaded.mockClear(); - service.readCommand.mockClear(); - service.restart.mockClear(); - service.isLoaded.mockResolvedValue(true); - service.readCommand.mockResolvedValue({ environment: {} }); - service.restart.mockResolvedValue(undefined); - vi.unstubAllEnvs(); - vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", ""); - vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", ""); + 
resetLifecycleServiceMocks(); + stubEmptyGatewayEnv(); }); it("aborts restart when config is invalid", async () => { - readConfigFileSnapshotMock.mockResolvedValue({ + setConfigSnapshot({ exists: true, valid: false, - config: {}, issues: [{ path: "agents.defaults.pdfModel", message: "Unrecognized key" }], }); - await expect( - runServiceRestart({ - serviceNoun: "Gateway", - service, - renderStartHints: () => [], - opts: { json: true }, - }), - ).rejects.toThrow("__exit__:1"); + await expect(runServiceRestart(createServiceRunArgs())).rejects.toThrow("__exit__:1"); expect(service.restart).not.toHaveBeenCalled(); }); it("proceeds with restart when config is valid", async () => { - readConfigFileSnapshotMock.mockResolvedValue({ - exists: true, - valid: true, - config: {}, - issues: [], - }); + setConfigSnapshot({ exists: true, valid: true }); - const result = await runServiceRestart({ - serviceNoun: "Gateway", - service, - renderStartHints: () => [], - opts: { json: true }, - }); + const result = await runServiceRestart(createServiceRunArgs()); expect(result).toBe(true); expect(service.restart).toHaveBeenCalledTimes(1); }); it("proceeds with restart when config file does not exist", async () => { - readConfigFileSnapshotMock.mockResolvedValue({ - exists: false, - valid: true, - config: {}, - issues: [], - }); + setConfigSnapshot({ exists: false, valid: true }); - const result = await runServiceRestart({ - serviceNoun: "Gateway", - service, - renderStartHints: () => [], - opts: { json: true }, - }); + const result = await runServiceRestart(createServiceRunArgs()); expect(result).toBe(true); expect(service.restart).toHaveBeenCalledTimes(1); @@ -132,12 +99,7 @@ describe("runServiceRestart config pre-flight (#35862)", () => { it("proceeds with restart when snapshot read throws", async () => { readConfigFileSnapshotMock.mockRejectedValue(new Error("read failed")); - const result = await runServiceRestart({ - serviceNoun: "Gateway", - service, - renderStartHints: () => [], - 
opts: { json: true }, - }); + const result = await runServiceRestart(createServiceRunArgs()); expect(result).toBe(true); expect(service.restart).toHaveBeenCalledTimes(1); @@ -152,54 +114,28 @@ describe("runServiceStart config pre-flight (#35862)", () => { }); beforeEach(() => { - runtimeLogs.length = 0; + resetLifecycleRuntimeLogs(); readConfigFileSnapshotMock.mockReset(); - readConfigFileSnapshotMock.mockResolvedValue({ - exists: true, - valid: true, - config: {}, - issues: [], - }); - service.isLoaded.mockClear(); - service.restart.mockClear(); - service.isLoaded.mockResolvedValue(true); - service.restart.mockResolvedValue(undefined); + setConfigSnapshot({ exists: true, valid: true }); + resetLifecycleServiceMocks(); }); it("aborts start when config is invalid", async () => { - readConfigFileSnapshotMock.mockResolvedValue({ + setConfigSnapshot({ exists: true, valid: false, - config: {}, issues: [{ path: "agents.defaults.pdfModel", message: "Unrecognized key" }], }); - await expect( - runServiceStart({ - serviceNoun: "Gateway", - service, - renderStartHints: () => [], - opts: { json: true }, - }), - ).rejects.toThrow("__exit__:1"); + await expect(runServiceStart(createServiceRunArgs())).rejects.toThrow("__exit__:1"); expect(service.restart).not.toHaveBeenCalled(); }); it("proceeds with start when config is valid", async () => { - readConfigFileSnapshotMock.mockResolvedValue({ - exists: true, - valid: true, - config: {}, - issues: [], - }); + setConfigSnapshot({ exists: true, valid: true }); - await runServiceStart({ - serviceNoun: "Gateway", - service, - renderStartHints: () => [], - opts: { json: true }, - }); + await runServiceStart(createServiceRunArgs()); expect(service.restart).toHaveBeenCalledTimes(1); }); diff --git a/src/cli/daemon-cli/lifecycle-core.test.ts b/src/cli/daemon-cli/lifecycle-core.test.ts index 8fa7ded1bde..2f17269eb6c 100644 --- a/src/cli/daemon-cli/lifecycle-core.test.ts +++ b/src/cli/daemon-cli/lifecycle-core.test.ts @@ -1,4 +1,12 @@ import 
{ beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import { + defaultRuntime, + resetLifecycleRuntimeLogs, + resetLifecycleServiceMocks, + runtimeLogs, + service, + stubEmptyGatewayEnv, +} from "./test-helpers/lifecycle-core-harness.js"; const loadConfig = vi.fn(() => ({ gateway: { @@ -8,28 +16,6 @@ const loadConfig = vi.fn(() => ({ }, })); -const runtimeLogs: string[] = []; -const defaultRuntime = { - log: (message: string) => runtimeLogs.push(message), - error: vi.fn(), - exit: (code: number) => { - throw new Error(`__exit__:${code}`); - }, -}; - -const service = { - label: "TestService", - loadedText: "loaded", - notLoadedText: "not loaded", - install: vi.fn(), - uninstall: vi.fn(), - stop: vi.fn(), - isLoaded: vi.fn(), - readCommand: vi.fn(), - readRuntime: vi.fn(), - restart: vi.fn(), -}; - vi.mock("../../config/config.js", () => ({ loadConfig: () => loadConfig(), readBestEffortConfig: async () => loadConfig(), @@ -40,15 +26,31 @@ vi.mock("../../runtime.js", () => ({ })); let runServiceRestart: typeof import("./lifecycle-core.js").runServiceRestart; +let runServiceStart: typeof import("./lifecycle-core.js").runServiceStart; let runServiceStop: typeof import("./lifecycle-core.js").runServiceStop; +function readJsonLog() { + const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); + return JSON.parse(jsonLine ?? "{}") as T; +} + +function createServiceRunArgs(checkTokenDrift?: boolean) { + return { + serviceNoun: "Gateway", + service, + renderStartHints: () => [], + opts: { json: true as const }, + ...(checkTokenDrift ? 
{ checkTokenDrift } : {}), + }; +} + describe("runServiceRestart token drift", () => { beforeAll(async () => { - ({ runServiceRestart, runServiceStop } = await import("./lifecycle-core.js")); + ({ runServiceRestart, runServiceStart, runServiceStop } = await import("./lifecycle-core.js")); }); beforeEach(() => { - runtimeLogs.length = 0; + resetLifecycleRuntimeLogs(); loadConfig.mockReset(); loadConfig.mockReturnValue({ gateway: { @@ -57,33 +59,19 @@ describe("runServiceRestart token drift", () => { }, }, }); - service.isLoaded.mockClear(); - service.readCommand.mockClear(); - service.restart.mockClear(); - service.isLoaded.mockResolvedValue(true); + resetLifecycleServiceMocks(); service.readCommand.mockResolvedValue({ + programArguments: [], environment: { OPENCLAW_GATEWAY_TOKEN: "service-token" }, }); - service.restart.mockResolvedValue(undefined); - vi.unstubAllEnvs(); - vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", ""); - vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", ""); - vi.stubEnv("OPENCLAW_GATEWAY_URL", ""); - vi.stubEnv("CLAWDBOT_GATEWAY_URL", ""); + stubEmptyGatewayEnv(); }); it("emits drift warning when enabled", async () => { - await runServiceRestart({ - serviceNoun: "Gateway", - service, - renderStartHints: () => [], - opts: { json: true }, - checkTokenDrift: true, - }); + await runServiceRestart(createServiceRunArgs(true)); expect(loadConfig).toHaveBeenCalledTimes(1); - const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); - const payload = JSON.parse(jsonLine ?? 
"{}") as { warnings?: string[] }; + const payload = readJsonLog<{ warnings?: string[] }>(); expect(payload.warnings).toEqual( expect.arrayContaining([expect.stringContaining("gateway install --force")]), ); @@ -98,20 +86,14 @@ describe("runServiceRestart token drift", () => { }, }); service.readCommand.mockResolvedValue({ + programArguments: [], environment: { OPENCLAW_GATEWAY_TOKEN: "env-token" }, }); vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", "env-token"); - await runServiceRestart({ - serviceNoun: "Gateway", - service, - renderStartHints: () => [], - opts: { json: true }, - checkTokenDrift: true, - }); + await runServiceRestart(createServiceRunArgs(true)); - const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); - const payload = JSON.parse(jsonLine ?? "{}") as { warnings?: string[] }; + const payload = readJsonLog<{ warnings?: string[] }>(); expect(payload.warnings).toEqual( expect.arrayContaining([expect.stringContaining("gateway install --force")]), ); @@ -127,8 +109,7 @@ describe("runServiceRestart token drift", () => { expect(loadConfig).not.toHaveBeenCalled(); expect(service.readCommand).not.toHaveBeenCalled(); - const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); - const payload = JSON.parse(jsonLine ?? "{}") as { warnings?: string[] }; + const payload = readJsonLog<{ warnings?: string[] }>(); expect(payload.warnings).toBeUndefined(); }); @@ -145,8 +126,7 @@ describe("runServiceRestart token drift", () => { }), }); - const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); - const payload = JSON.parse(jsonLine ?? 
"{}") as { result?: string; message?: string }; + const payload = readJsonLog<{ result?: string; message?: string }>(); expect(payload.result).toBe("stopped"); expect(payload.message).toContain("unmanaged process"); expect(service.stop).not.toHaveBeenCalled(); @@ -171,9 +151,43 @@ describe("runServiceRestart token drift", () => { expect(postRestartCheck).toHaveBeenCalledTimes(1); expect(service.restart).not.toHaveBeenCalled(); expect(service.readCommand).not.toHaveBeenCalled(); - const jsonLine = runtimeLogs.find((line) => line.trim().startsWith("{")); - const payload = JSON.parse(jsonLine ?? "{}") as { result?: string; message?: string }; + const payload = readJsonLog<{ result?: string; message?: string }>(); expect(payload.result).toBe("restarted"); expect(payload.message).toContain("unmanaged process"); }); + + it("skips restart health checks when restart is only scheduled", async () => { + const postRestartCheck = vi.fn(async () => {}); + service.restart.mockResolvedValue({ outcome: "scheduled" }); + + const result = await runServiceRestart({ + serviceNoun: "Gateway", + service, + renderStartHints: () => [], + opts: { json: true }, + postRestartCheck, + }); + + expect(result).toBe(true); + expect(postRestartCheck).not.toHaveBeenCalled(); + const payload = readJsonLog<{ result?: string; message?: string }>(); + expect(payload.result).toBe("scheduled"); + expect(payload.message).toBe("restart scheduled, gateway will restart momentarily"); + }); + + it("emits scheduled when service start routes through a scheduled restart", async () => { + service.restart.mockResolvedValue({ outcome: "scheduled" }); + + await runServiceStart({ + serviceNoun: "Gateway", + service, + renderStartHints: () => [], + opts: { json: true }, + }); + + expect(service.isLoaded).toHaveBeenCalledTimes(1); + const payload = readJsonLog<{ result?: string; message?: string }>(); + expect(payload.result).toBe("scheduled"); + expect(payload.message).toBe("restart scheduled, gateway will restart 
momentarily"); + }); }); diff --git a/src/cli/daemon-cli/lifecycle-core.ts b/src/cli/daemon-cli/lifecycle-core.ts index 75bba03b418..8def6aeefe6 100644 --- a/src/cli/daemon-cli/lifecycle-core.ts +++ b/src/cli/daemon-cli/lifecycle-core.ts @@ -3,6 +3,8 @@ import { readBestEffortConfig, readConfigFileSnapshot } from "../../config/confi import { formatConfigIssueLines } from "../../config/issue-format.js"; import { resolveIsNixMode } from "../../config/paths.js"; import { checkTokenDrift } from "../../daemon/service-audit.js"; +import type { GatewayServiceRestartResult } from "../../daemon/service-types.js"; +import { describeGatewayServiceRestart } from "../../daemon/service.js"; import type { GatewayService } from "../../daemon/service.js"; import { renderSystemdUnavailableHints } from "../../daemon/systemd-hints.js"; import { isSystemdUserServiceAvailable } from "../../daemon/systemd.js"; @@ -223,7 +225,20 @@ export async function runServiceStart(params: { } try { - await params.service.restart({ env: process.env, stdout }); + const restartResult = await params.service.restart({ env: process.env, stdout }); + const restartStatus = describeGatewayServiceRestart(params.serviceNoun, restartResult); + if (restartStatus.scheduled) { + emit({ + ok: true, + result: restartStatus.daemonActionResult, + message: restartStatus.message, + service: buildDaemonServiceSnapshot(params.service, loaded), + }); + if (!json) { + defaultRuntime.log(restartStatus.message); + } + return; + } } catch (err) { const hints = params.renderStartHints(); fail(`${params.serviceNoun} start failed: ${String(err)}`, hints); @@ -317,13 +332,29 @@ export async function runServiceRestart(params: { renderStartHints: () => string[]; opts?: DaemonLifecycleOptions; checkTokenDrift?: boolean; - postRestartCheck?: (ctx: RestartPostCheckContext) => Promise; + postRestartCheck?: (ctx: RestartPostCheckContext) => Promise; onNotLoaded?: (ctx: NotLoadedActionContext) => Promise; }): Promise { const json = 
Boolean(params.opts?.json); const { stdout, emit, fail } = createActionIO({ action: "restart", json }); const warnings: string[] = []; let handledNotLoaded: NotLoadedActionResult | null = null; + const emitScheduledRestart = ( + restartStatus: ReturnType, + serviceLoaded: boolean, + ) => { + emit({ + ok: true, + result: restartStatus.daemonActionResult, + message: restartStatus.message, + service: buildDaemonServiceSnapshot(params.service, serviceLoaded), + warnings: warnings.length ? warnings : undefined, + }); + if (!json) { + defaultRuntime.log(restartStatus.message); + } + return true; + }; const loaded = await resolveServiceLoadedOrFail({ serviceNoun: params.serviceNoun, @@ -402,11 +433,22 @@ export async function runServiceRestart(params: { } try { + let restartResult: GatewayServiceRestartResult = { outcome: "completed" }; if (loaded) { - await params.service.restart({ env: process.env, stdout }); + restartResult = await params.service.restart({ env: process.env, stdout }); + } + let restartStatus = describeGatewayServiceRestart(params.serviceNoun, restartResult); + if (restartStatus.scheduled) { + return emitScheduledRestart(restartStatus, loaded); } if (params.postRestartCheck) { - await params.postRestartCheck({ json, stdout, warnings, fail }); + const postRestartResult = await params.postRestartCheck({ json, stdout, warnings, fail }); + if (postRestartResult) { + restartStatus = describeGatewayServiceRestart(params.serviceNoun, postRestartResult); + if (restartStatus.scheduled) { + return emitScheduledRestart(restartStatus, loaded); + } + } } let restarted = loaded; if (loaded) { diff --git a/src/cli/daemon-cli/lifecycle.test.ts b/src/cli/daemon-cli/lifecycle.test.ts index f1e87fc4938..f026f81399f 100644 --- a/src/cli/daemon-cli/lifecycle.test.ts +++ b/src/cli/daemon-cli/lifecycle.test.ts @@ -1,8 +1,5 @@ import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; -const mockReadFileSync = vi.hoisted(() => vi.fn()); -const 
mockSpawnSync = vi.hoisted(() => vi.fn()); - type RestartHealthSnapshot = { healthy: boolean; staleGatewayPids: number[]; @@ -35,7 +32,9 @@ const terminateStaleGatewayPids = vi.fn(); const renderGatewayPortHealthDiagnostics = vi.fn(() => ["diag: unhealthy port"]); const renderRestartDiagnostics = vi.fn(() => ["diag: unhealthy runtime"]); const resolveGatewayPort = vi.fn(() => 18789); -const findGatewayPidsOnPortSync = vi.fn<(port: number) => number[]>(() => []); +const findVerifiedGatewayListenerPidsOnPortSync = vi.fn<(port: number) => number[]>(() => []); +const signalVerifiedGatewayPidSync = vi.fn<(pid: number, signal: "SIGTERM" | "SIGUSR1") => void>(); +const formatGatewayPidList = vi.fn<(pids: number[]) => string>((pids) => pids.join(", ")); const probeGateway = vi.fn< (opts: { url: string; @@ -49,24 +48,18 @@ const probeGateway = vi.fn< const isRestartEnabled = vi.fn<(config?: { commands?: unknown }) => boolean>(() => true); const loadConfig = vi.fn(() => ({})); -vi.mock("node:fs", () => ({ - default: { - readFileSync: (...args: unknown[]) => mockReadFileSync(...args), - }, -})); - -vi.mock("node:child_process", () => ({ - spawnSync: (...args: unknown[]) => mockSpawnSync(...args), -})); - vi.mock("../../config/config.js", () => ({ loadConfig: () => loadConfig(), readBestEffortConfig: async () => loadConfig(), resolveGatewayPort, })); -vi.mock("../../infra/restart.js", () => ({ - findGatewayPidsOnPortSync: (port: number) => findGatewayPidsOnPortSync(port), +vi.mock("../../infra/gateway-processes.js", () => ({ + findVerifiedGatewayListenerPidsOnPortSync: (port: number) => + findVerifiedGatewayListenerPidsOnPortSync(port), + signalVerifiedGatewayPidSync: (pid: number, signal: "SIGTERM" | "SIGUSR1") => + signalVerifiedGatewayPidSync(pid, signal), + formatGatewayPidList: (pids: number[]) => formatGatewayPidList(pids), })); vi.mock("../../gateway/probe.js", () => ({ @@ -106,6 +99,29 @@ describe("runDaemonRestart health checks", () => { let runDaemonRestart: (opts?: 
{ json?: boolean }) => Promise; let runDaemonStop: (opts?: { json?: boolean }) => Promise; + function mockUnmanagedRestart({ + runPostRestartCheck = false, + }: { + runPostRestartCheck?: boolean; + } = {}) { + runServiceRestart.mockImplementation( + async (params: RestartParams & { onNotLoaded?: () => Promise }) => { + await params.onNotLoaded?.(); + if (runPostRestartCheck) { + await params.postRestartCheck?.({ + json: Boolean(params.opts?.json), + stdout: process.stdout, + warnings: [], + fail: (message: string) => { + throw new Error(message); + }, + }); + } + return true; + }, + ); + } + beforeAll(async () => { ({ runDaemonRestart, runDaemonStop } = await import("./lifecycle.js")); }); @@ -121,17 +137,18 @@ describe("runDaemonRestart health checks", () => { renderGatewayPortHealthDiagnostics.mockReset(); renderRestartDiagnostics.mockReset(); resolveGatewayPort.mockReset(); - findGatewayPidsOnPortSync.mockReset(); + findVerifiedGatewayListenerPidsOnPortSync.mockReset(); + signalVerifiedGatewayPidSync.mockReset(); + formatGatewayPidList.mockReset(); probeGateway.mockReset(); isRestartEnabled.mockReset(); loadConfig.mockReset(); - mockReadFileSync.mockReset(); - mockSpawnSync.mockReset(); service.readCommand.mockResolvedValue({ programArguments: ["openclaw", "gateway", "--port", "18789"], environment: {}, }); + service.restart.mockResolvedValue({ outcome: "completed" }); runServiceRestart.mockImplementation(async (params: RestartParams) => { const fail = (message: string, hints?: string[]) => { @@ -157,23 +174,8 @@ describe("runDaemonRestart health checks", () => { configSnapshot: { commands: { restart: true } }, }); isRestartEnabled.mockReturnValue(true); - mockReadFileSync.mockImplementation((path: string) => { - const match = path.match(/\/proc\/(\d+)\/cmdline$/); - if (!match) { - throw new Error(`unexpected path ${path}`); - } - const pid = Number.parseInt(match[1] ?? 
"", 10); - if ([4200, 4300].includes(pid)) { - return ["openclaw", "gateway", "--port", "18789", ""].join("\0"); - } - throw new Error(`unknown pid ${pid}`); - }); - mockSpawnSync.mockReturnValue({ - error: null, - status: 0, - stdout: "openclaw gateway --port 18789", - stderr: "", - }); + signalVerifiedGatewayPidSync.mockImplementation(() => {}); + formatGatewayPidList.mockImplementation((pids) => pids.join(", ")); }); afterEach(() => { @@ -204,6 +206,25 @@ describe("runDaemonRestart health checks", () => { expect(waitForGatewayHealthyRestart).toHaveBeenCalledTimes(2); }); + it("skips stale-pid retry health checks when the retry restart is only scheduled", async () => { + const unhealthy: RestartHealthSnapshot = { + healthy: false, + staleGatewayPids: [1993], + runtime: { status: "stopped" }, + portUsage: { port: 18789, status: "busy", listeners: [], hints: [] }, + }; + waitForGatewayHealthyRestart.mockResolvedValueOnce(unhealthy); + terminateStaleGatewayPids.mockResolvedValue([1993]); + service.restart.mockResolvedValueOnce({ outcome: "scheduled" }); + + const result = await runDaemonRestart({ json: true }); + + expect(result).toBe(true); + expect(terminateStaleGatewayPids).toHaveBeenCalledWith([1993]); + expect(service.restart).toHaveBeenCalledTimes(1); + expect(waitForGatewayHealthyRestart).toHaveBeenCalledTimes(1); + }); + it("fails restart when gateway remains unhealthy", async () => { const unhealthy: RestartHealthSnapshot = { healthy: false, @@ -222,57 +243,26 @@ describe("runDaemonRestart health checks", () => { }); it("signals an unmanaged gateway process on stop", async () => { - vi.spyOn(process, "platform", "get").mockReturnValue("win32"); - const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true); - findGatewayPidsOnPortSync.mockReturnValue([4200, 4200, 4300]); - mockSpawnSync.mockReturnValue({ - error: null, - status: 0, - stdout: - 'CommandLine="C:\\\\Program Files\\\\OpenClaw\\\\openclaw.exe" gateway --port 18789\r\n', - stderr: "", 
- }); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4200, 4200, 4300]); runServiceStop.mockImplementation(async (params: { onNotLoaded?: () => Promise }) => { await params.onNotLoaded?.(); }); await runDaemonStop({ json: true }); - expect(findGatewayPidsOnPortSync).toHaveBeenCalledWith(18789); - expect(killSpy).toHaveBeenCalledWith(4200, "SIGTERM"); - expect(killSpy).toHaveBeenCalledWith(4300, "SIGTERM"); + expect(findVerifiedGatewayListenerPidsOnPortSync).toHaveBeenCalledWith(18789); + expect(signalVerifiedGatewayPidSync).toHaveBeenCalledWith(4200, "SIGTERM"); + expect(signalVerifiedGatewayPidSync).toHaveBeenCalledWith(4300, "SIGTERM"); }); it("signals a single unmanaged gateway process on restart", async () => { - vi.spyOn(process, "platform", "get").mockReturnValue("win32"); - const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true); - findGatewayPidsOnPortSync.mockReturnValue([4200]); - mockSpawnSync.mockReturnValue({ - error: null, - status: 0, - stdout: - 'CommandLine="C:\\\\Program Files\\\\OpenClaw\\\\openclaw.exe" gateway --port 18789\r\n', - stderr: "", - }); - runServiceRestart.mockImplementation( - async (params: RestartParams & { onNotLoaded?: () => Promise }) => { - await params.onNotLoaded?.(); - await params.postRestartCheck?.({ - json: Boolean(params.opts?.json), - stdout: process.stdout, - warnings: [], - fail: (message: string) => { - throw new Error(message); - }, - }); - return true; - }, - ); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4200]); + mockUnmanagedRestart({ runPostRestartCheck: true }); await runDaemonRestart({ json: true }); - expect(findGatewayPidsOnPortSync).toHaveBeenCalledWith(18789); - expect(killSpy).toHaveBeenCalledWith(4200, "SIGUSR1"); + expect(findVerifiedGatewayListenerPidsOnPortSync).toHaveBeenCalledWith(18789); + expect(signalVerifiedGatewayPidSync).toHaveBeenCalledWith(4200, "SIGUSR1"); expect(probeGateway).toHaveBeenCalledTimes(1); 
expect(waitForGatewayHealthyListener).toHaveBeenCalledTimes(1); expect(waitForGatewayHealthyRestart).not.toHaveBeenCalled(); @@ -281,21 +271,8 @@ describe("runDaemonRestart health checks", () => { }); it("fails unmanaged restart when multiple gateway listeners are present", async () => { - vi.spyOn(process, "platform", "get").mockReturnValue("win32"); - findGatewayPidsOnPortSync.mockReturnValue([4200, 4300]); - mockSpawnSync.mockReturnValue({ - error: null, - status: 0, - stdout: - 'CommandLine="C:\\\\Program Files\\\\OpenClaw\\\\openclaw.exe" gateway --port 18789\r\n', - stderr: "", - }); - runServiceRestart.mockImplementation( - async (params: RestartParams & { onNotLoaded?: () => Promise }) => { - await params.onNotLoaded?.(); - return true; - }, - ); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4200, 4300]); + mockUnmanagedRestart(); await expect(runDaemonRestart({ json: true })).rejects.toThrow( "multiple gateway processes are listening on port 18789", @@ -303,18 +280,13 @@ describe("runDaemonRestart health checks", () => { }); it("fails unmanaged restart when the running gateway has commands.restart disabled", async () => { - findGatewayPidsOnPortSync.mockReturnValue([4200]); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4200]); probeGateway.mockResolvedValue({ ok: true, configSnapshot: { commands: { restart: false } }, }); isRestartEnabled.mockReturnValue(false); - runServiceRestart.mockImplementation( - async (params: RestartParams & { onNotLoaded?: () => Promise }) => { - await params.onNotLoaded?.(); - return true; - }, - ); + mockUnmanagedRestart(); await expect(runDaemonRestart({ json: true })).rejects.toThrow( "Gateway restart is disabled in the running gateway config", @@ -322,21 +294,13 @@ describe("runDaemonRestart health checks", () => { }); it("skips unmanaged signaling for pids that are not live gateway processes", async () => { - const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true); - 
findGatewayPidsOnPortSync.mockReturnValue([4200]); - mockReadFileSync.mockReturnValue(["python", "-m", "http.server", ""].join("\0")); - mockSpawnSync.mockReturnValue({ - error: null, - status: 0, - stdout: "python -m http.server", - stderr: "", - }); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([]); runServiceStop.mockImplementation(async (params: { onNotLoaded?: () => Promise }) => { await params.onNotLoaded?.(); }); await runDaemonStop({ json: true }); - expect(killSpy).not.toHaveBeenCalled(); + expect(signalVerifiedGatewayPidSync).not.toHaveBeenCalled(); }); }); diff --git a/src/cli/daemon-cli/lifecycle.ts b/src/cli/daemon-cli/lifecycle.ts index 7fa7396d0b0..53efaff9495 100644 --- a/src/cli/daemon-cli/lifecycle.ts +++ b/src/cli/daemon-cli/lifecycle.ts @@ -1,12 +1,12 @@ -import { spawnSync } from "node:child_process"; -import fsSync from "node:fs"; import { isRestartEnabled } from "../../config/commands.js"; import { readBestEffortConfig, resolveGatewayPort } from "../../config/config.js"; -import { parseCmdScriptCommandLine } from "../../daemon/cmd-argv.js"; import { resolveGatewayService } from "../../daemon/service.js"; import { probeGateway } from "../../gateway/probe.js"; -import { isGatewayArgv, parseProcCmdline } from "../../infra/gateway-process-argv.js"; -import { findGatewayPidsOnPortSync } from "../../infra/restart.js"; +import { + findVerifiedGatewayListenerPidsOnPortSync, + formatGatewayPidList, + signalVerifiedGatewayPidSync, +} from "../../infra/gateway-processes.js"; import { defaultRuntime } from "../../runtime.js"; import { theme } from "../../terminal/theme.js"; import { formatCliCommand } from "../command-format.js"; @@ -43,85 +43,12 @@ async function resolveGatewayLifecyclePort(service = resolveGatewayService()) { return portFromArgs ?? 
resolveGatewayPort(await readBestEffortConfig(), mergedEnv); } -function extractWindowsCommandLine(raw: string): string | null { - const lines = raw - .split(/\r?\n/) - .map((line) => line.trim()) - .filter(Boolean); - for (const line of lines) { - if (!line.toLowerCase().startsWith("commandline=")) { - continue; - } - const value = line.slice("commandline=".length).trim(); - return value || null; - } - return lines.find((line) => line.toLowerCase() !== "commandline") ?? null; -} - -function readGatewayProcessArgsSync(pid: number): string[] | null { - if (process.platform === "linux") { - try { - return parseProcCmdline(fsSync.readFileSync(`/proc/${pid}/cmdline`, "utf8")); - } catch { - return null; - } - } - if (process.platform === "darwin") { - const ps = spawnSync("ps", ["-o", "command=", "-p", String(pid)], { - encoding: "utf8", - timeout: 1000, - }); - if (ps.error || ps.status !== 0) { - return null; - } - const command = ps.stdout.trim(); - return command ? command.split(/\s+/) : null; - } - if (process.platform === "win32") { - const wmic = spawnSync( - "wmic", - ["process", "where", `ProcessId=${pid}`, "get", "CommandLine", "/value"], - { - encoding: "utf8", - timeout: 1000, - }, - ); - if (wmic.error || wmic.status !== 0) { - return null; - } - const command = extractWindowsCommandLine(wmic.stdout); - return command ? 
parseCmdScriptCommandLine(command) : null; - } - return null; -} - -function resolveGatewayListenerPids(port: number): number[] { - return Array.from(new Set(findGatewayPidsOnPortSync(port))) - .filter((pid): pid is number => Number.isFinite(pid) && pid > 0) - .filter((pid) => { - const args = readGatewayProcessArgsSync(pid); - return args != null && isGatewayArgv(args, { allowGatewayBinary: true }); - }); -} - function resolveGatewayPortFallback(): Promise { return readBestEffortConfig() .then((cfg) => resolveGatewayPort(cfg, process.env)) .catch(() => resolveGatewayPort(undefined, process.env)); } -function signalGatewayPid(pid: number, signal: "SIGTERM" | "SIGUSR1") { - const args = readGatewayProcessArgsSync(pid); - if (!args || !isGatewayArgv(args, { allowGatewayBinary: true })) { - throw new Error(`refusing to signal non-gateway process pid ${pid}`); - } - process.kill(pid, signal); -} - -function formatGatewayPidList(pids: number[]): string { - return pids.join(", "); -} - async function assertUnmanagedGatewayRestartEnabled(port: number): Promise { const probe = await probeGateway({ url: `ws://127.0.0.1:${port}`, @@ -143,7 +70,7 @@ async function assertUnmanagedGatewayRestartEnabled(port: number): Promise } function resolveVerifiedGatewayListenerPids(port: number): number[] { - return resolveGatewayListenerPids(port).filter( + return findVerifiedGatewayListenerPidsOnPortSync(port).filter( (pid): pid is number => Number.isFinite(pid) && pid > 0, ); } @@ -154,7 +81,7 @@ async function stopGatewayWithoutServiceManager(port: number) { return null; } for (const pid of pids) { - signalGatewayPid(pid, "SIGTERM"); + signalVerifiedGatewayPidSync(pid, "SIGTERM"); } return { result: "stopped" as const, @@ -173,7 +100,7 @@ async function restartGatewayWithoutServiceManager(port: number) { `multiple gateway processes are listening on port ${port}: ${formatGatewayPidList(pids)}; use "openclaw gateway status --deep" before retrying restart`, ); } - 
signalGatewayPid(pids[0], "SIGUSR1"); + signalVerifiedGatewayPidSync(pids[0], "SIGUSR1"); return { result: "restarted" as const, message: `Gateway restart signal sent to unmanaged process on port ${port}: ${pids[0]}.`, @@ -286,7 +213,10 @@ export async function runDaemonRestart(opts: DaemonLifecycleOptions = {}): Promi } await terminateStaleGatewayPids(health.staleGatewayPids); - await service.restart({ env: process.env, stdout }); + const retryRestart = await service.restart({ env: process.env, stdout }); + if (retryRestart.outcome === "scheduled") { + return retryRestart; + } health = await waitForGatewayHealthyRestart({ service, port: restartPort, diff --git a/src/cli/daemon-cli/register-service-commands.test.ts b/src/cli/daemon-cli/register-service-commands.test.ts index cec45d62769..64a1e24589b 100644 --- a/src/cli/daemon-cli/register-service-commands.test.ts +++ b/src/cli/daemon-cli/register-service-commands.test.ts @@ -39,34 +39,48 @@ describe("addGatewayServiceCommands", () => { runDaemonUninstall.mockClear(); }); - it("forwards install option collisions from parent gateway command", async () => { + it.each([ + { + name: "forwards install option collisions from parent gateway command", + argv: ["install", "--force", "--port", "19000", "--token", "tok_test"], + assert: () => { + expect(runDaemonInstall).toHaveBeenCalledWith( + expect.objectContaining({ + force: true, + port: "19000", + token: "tok_test", + }), + ); + }, + }, + { + name: "forwards status auth collisions from parent gateway command", + argv: ["status", "--token", "tok_status", "--password", "pw_status"], + assert: () => { + expect(runDaemonStatus).toHaveBeenCalledWith( + expect.objectContaining({ + rpc: expect.objectContaining({ + token: "tok_status", + password: "pw_status", // pragma: allowlist secret + }), + }), + ); + }, + }, + { + name: "forwards require-rpc for status", + argv: ["status", "--require-rpc"], + assert: () => { + expect(runDaemonStatus).toHaveBeenCalledWith( + 
expect.objectContaining({ + requireRpc: true, + }), + ); + }, + }, + ])("$name", async ({ argv, assert }) => { const gateway = createGatewayParentLikeCommand(); - await gateway.parseAsync(["install", "--force", "--port", "19000", "--token", "tok_test"], { - from: "user", - }); - - expect(runDaemonInstall).toHaveBeenCalledWith( - expect.objectContaining({ - force: true, - port: "19000", - token: "tok_test", - }), - ); - }); - - it("forwards status auth collisions from parent gateway command", async () => { - const gateway = createGatewayParentLikeCommand(); - await gateway.parseAsync(["status", "--token", "tok_status", "--password", "pw_status"], { - from: "user", - }); - - expect(runDaemonStatus).toHaveBeenCalledWith( - expect.objectContaining({ - rpc: expect.objectContaining({ - token: "tok_status", - password: "pw_status", // pragma: allowlist secret - }), - }), - ); + await gateway.parseAsync(argv, { from: "user" }); + assert(); }); }); diff --git a/src/cli/daemon-cli/register-service-commands.ts b/src/cli/daemon-cli/register-service-commands.ts index 5d4ce0a9c28..2690eb91d7f 100644 --- a/src/cli/daemon-cli/register-service-commands.ts +++ b/src/cli/daemon-cli/register-service-commands.ts @@ -44,12 +44,14 @@ export function addGatewayServiceCommands(parent: Command, opts?: { statusDescri .option("--password ", "Gateway password (password auth)") .option("--timeout ", "Timeout in ms", "10000") .option("--no-probe", "Skip RPC probe") + .option("--require-rpc", "Exit non-zero when the RPC probe fails", false) .option("--deep", "Scan system-level services", false) .option("--json", "Output JSON", false) .action(async (cmdOpts, command) => { await runDaemonStatus({ rpc: resolveRpcOptions(cmdOpts, command), probe: Boolean(cmdOpts.probe), + requireRpc: Boolean(cmdOpts.requireRpc), deep: Boolean(cmdOpts.deep), json: Boolean(cmdOpts.json), }); diff --git a/src/cli/daemon-cli/restart-health.test.ts b/src/cli/daemon-cli/restart-health.test.ts index 0202f591cc2..c4b8eb3b07c 
100644 --- a/src/cli/daemon-cli/restart-health.test.ts +++ b/src/cli/daemon-cli/restart-health.test.ts @@ -20,28 +20,45 @@ vi.mock("../../gateway/probe.js", () => ({ const originalPlatform = process.platform; +function makeGatewayService( + runtime: { status: "running"; pid: number } | { status: "stopped" }, +): GatewayService { + return { + readRuntime: vi.fn(async () => runtime), + } as unknown as GatewayService; +} + +async function inspectGatewayRestartWithSnapshot(params: { + runtime: { status: "running"; pid: number } | { status: "stopped" }; + portUsage: PortUsage; + includeUnknownListenersAsStale?: boolean; +}) { + const service = makeGatewayService(params.runtime); + inspectPortUsage.mockResolvedValue(params.portUsage); + const { inspectGatewayRestart } = await import("./restart-health.js"); + return inspectGatewayRestart({ + service, + port: 18789, + ...(params.includeUnknownListenersAsStale === undefined + ? {} + : { includeUnknownListenersAsStale: params.includeUnknownListenersAsStale }), + }); +} + async function inspectUnknownListenerFallback(params: { runtime: { status: "running"; pid: number } | { status: "stopped" }; includeUnknownListenersAsStale: boolean; }) { Object.defineProperty(process, "platform", { value: "win32", configurable: true }); classifyPortListener.mockReturnValue("unknown"); - - const service = { - readRuntime: vi.fn(async () => params.runtime), - } as unknown as GatewayService; - - inspectPortUsage.mockResolvedValue({ - port: 18789, - status: "busy", - listeners: [{ pid: 10920, command: "unknown" }], - hints: [], - }); - - const { inspectGatewayRestart } = await import("./restart-health.js"); - return inspectGatewayRestart({ - service, - port: 18789, + return inspectGatewayRestartWithSnapshot({ + runtime: params.runtime, + portUsage: { + port: 18789, + status: "busy", + listeners: [{ pid: 10920, command: "unknown" }], + hints: [], + }, includeUnknownListenersAsStale: params.includeUnknownListenersAsStale, }); } @@ -49,21 +66,17 
@@ async function inspectUnknownListenerFallback(params: { async function inspectAmbiguousOwnershipWithProbe( probeResult: Awaited>, ) { - const service = { - readRuntime: vi.fn(async () => ({ status: "running", pid: 8000 })), - } as unknown as GatewayService; - - inspectPortUsage.mockResolvedValue({ - port: 18789, - status: "busy", - listeners: [{ commandLine: "" }], - hints: [], - }); classifyPortListener.mockReturnValue("unknown"); probeGateway.mockResolvedValue(probeResult); - - const { inspectGatewayRestart } = await import("./restart-health.js"); - return inspectGatewayRestart({ service, port: 18789 }); + return inspectGatewayRestartWithSnapshot({ + runtime: { status: "running", pid: 8000 }, + portUsage: { + port: 18789, + status: "busy", + listeners: [{ commandLine: "" }], + hints: [], + }, + }); } describe("inspectGatewayRestart", () => { @@ -89,39 +102,31 @@ describe("inspectGatewayRestart", () => { }); it("treats a gateway listener child pid as healthy ownership", async () => { - const service = { - readRuntime: vi.fn(async () => ({ status: "running", pid: 7000 })), - } as unknown as GatewayService; - - inspectPortUsage.mockResolvedValue({ - port: 18789, - status: "busy", - listeners: [{ pid: 7001, ppid: 7000, commandLine: "openclaw-gateway" }], - hints: [], + const snapshot = await inspectGatewayRestartWithSnapshot({ + runtime: { status: "running", pid: 7000 }, + portUsage: { + port: 18789, + status: "busy", + listeners: [{ pid: 7001, ppid: 7000, commandLine: "openclaw-gateway" }], + hints: [], + }, }); - const { inspectGatewayRestart } = await import("./restart-health.js"); - const snapshot = await inspectGatewayRestart({ service, port: 18789 }); - expect(snapshot.healthy).toBe(true); expect(snapshot.staleGatewayPids).toEqual([]); }); it("marks non-owned gateway listener pids as stale while runtime is running", async () => { - const service = { - readRuntime: vi.fn(async () => ({ status: "running", pid: 8000 })), - } as unknown as GatewayService; - - 
inspectPortUsage.mockResolvedValue({ - port: 18789, - status: "busy", - listeners: [{ pid: 9000, ppid: 8999, commandLine: "openclaw-gateway" }], - hints: [], + const snapshot = await inspectGatewayRestartWithSnapshot({ + runtime: { status: "running", pid: 8000 }, + portUsage: { + port: 18789, + status: "busy", + listeners: [{ pid: 9000, ppid: 8999, commandLine: "openclaw-gateway" }], + hints: [], + }, }); - const { inspectGatewayRestart } = await import("./restart-health.js"); - const snapshot = await inspectGatewayRestart({ service, port: 18789 }); - expect(snapshot.healthy).toBe(false); expect(snapshot.staleGatewayPids).toEqual([9000]); }); @@ -157,21 +162,14 @@ describe("inspectGatewayRestart", () => { Object.defineProperty(process, "platform", { value: "win32", configurable: true }); classifyPortListener.mockReturnValue("ssh"); - const service = { - readRuntime: vi.fn(async () => ({ status: "stopped" })), - } as unknown as GatewayService; - - inspectPortUsage.mockResolvedValue({ - port: 18789, - status: "busy", - listeners: [{ pid: 22001, command: "nginx.exe" }], - hints: [], - }); - - const { inspectGatewayRestart } = await import("./restart-health.js"); - const snapshot = await inspectGatewayRestart({ - service, - port: 18789, + const snapshot = await inspectGatewayRestartWithSnapshot({ + runtime: { status: "stopped" }, + portUsage: { + port: 18789, + status: "busy", + listeners: [{ pid: 22001, command: "nginx.exe" }], + hints: [], + }, includeUnknownListenersAsStale: true, }); @@ -190,6 +188,28 @@ describe("inspectGatewayRestart", () => { ); }); + it("treats a busy port as healthy when runtime status lags but the probe succeeds", async () => { + Object.defineProperty(process, "platform", { value: "win32", configurable: true }); + classifyPortListener.mockReturnValue("gateway"); + probeGateway.mockResolvedValue({ + ok: true, + close: null, + }); + + const snapshot = await inspectGatewayRestartWithSnapshot({ + runtime: { status: "stopped" }, + portUsage: { + 
port: 18789, + status: "busy", + listeners: [{ pid: 9100, commandLine: "openclaw-gateway" }], + hints: [], + }, + }); + + expect(snapshot.healthy).toBe(true); + expect(snapshot.staleGatewayPids).toEqual([]); + }); + it("treats auth-closed probe as healthy gateway reachability", async () => { const snapshot = await inspectAmbiguousOwnershipWithProbe({ ok: false, diff --git a/src/cli/daemon-cli/restart-health.ts b/src/cli/daemon-cli/restart-health.ts index 13741d2e9c4..9bfe3476ee6 100644 --- a/src/cli/daemon-cli/restart-health.ts +++ b/src/cli/daemon-cli/restart-health.ts @@ -65,7 +65,8 @@ async function confirmGatewayReachable(port: number): Promise { const probe = await probeGateway({ url: `ws://127.0.0.1:${port}`, auth: token || password ? { token, password } : undefined, - timeoutMs: 1_000, + timeoutMs: 3_000, + includeDetails: false, }); return probe.ok || looksLikeAuthClose(probe.close?.code, probe.close?.reason); } @@ -123,6 +124,22 @@ export async function inspectGatewayRestart(params: { }; } + if (portUsage.status === "busy" && runtime.status !== "running") { + try { + const reachable = await confirmGatewayReachable(params.port); + if (reachable) { + return { + runtime, + portUsage, + healthy: true, + staleGatewayPids: [], + }; + } + } catch { + // Probe is best-effort; keep the ownership-based diagnostics. + } + } + const gatewayListeners = portUsage.status === "busy" ? 
portUsage.listeners.filter( diff --git a/src/cli/daemon-cli/shared.ts b/src/cli/daemon-cli/shared.ts index 525b04682b0..eb2760c2630 100644 --- a/src/cli/daemon-cli/shared.ts +++ b/src/cli/daemon-cli/shared.ts @@ -1,3 +1,4 @@ +import { resolveIsNixMode } from "../../config/paths.js"; import { resolveGatewayLaunchAgentLabel, resolveGatewaySystemdServiceName, @@ -12,10 +13,30 @@ import { getResolvedLoggerSettings } from "../../logging.js"; import { colorize, isRich, theme } from "../../terminal/theme.js"; import { formatCliCommand } from "../command-format.js"; import { parsePort } from "../shared/parse-port.js"; +import { createDaemonActionContext } from "./response.js"; export { formatRuntimeStatus }; export { parsePort }; +export function createDaemonInstallActionContext(jsonFlag: unknown) { + const json = Boolean(jsonFlag); + return { + json, + ...createDaemonActionContext({ action: "install", json }), + }; +} + +export function failIfNixDaemonInstallMode( + fail: (message: string, hints?: string[]) => void, + env: NodeJS.ProcessEnv = process.env, +): boolean { + if (!resolveIsNixMode(env)) { + return false; + } + fail("Nix mode detected; service install is disabled."); + return true; +} + export function createCliStatusTextStyles() { const rich = isRich(); return { diff --git a/src/cli/daemon-cli/status.gather.test.ts b/src/cli/daemon-cli/status.gather.test.ts index 9b4d6428d1e..b0c08715abe 100644 --- a/src/cli/daemon-cli/status.gather.test.ts +++ b/src/cli/daemon-cli/status.gather.test.ts @@ -18,7 +18,12 @@ const readLastGatewayErrorLine = vi.fn(async (_env?: NodeJS.ProcessEnv) => null) const auditGatewayServiceConfig = vi.fn(async (_opts?: unknown) => undefined); const serviceIsLoaded = vi.fn(async (_opts?: unknown) => true); const serviceReadRuntime = vi.fn(async (_env?: NodeJS.ProcessEnv) => ({ status: "running" })); -const serviceReadCommand = vi.fn(async (_env?: NodeJS.ProcessEnv) => ({ +const serviceReadCommand = vi.fn< + (env?: NodeJS.ProcessEnv) => 
Promise<{ + programArguments: string[]; + environment?: Record; + }> +>(async (_env?: NodeJS.ProcessEnv) => ({ programArguments: ["/bin/node", "cli", "gateway", "--port", "19001"], environment: { OPENCLAW_STATE_DIR: "/tmp/openclaw-daemon", @@ -190,6 +195,37 @@ describe("gatherDaemonStatus", () => { expect(status.rpc?.url).toBe("wss://override.example:18790"); }); + it("reuses command environment when reading runtime status", async () => { + serviceReadCommand.mockResolvedValueOnce({ + programArguments: ["/bin/node", "cli", "gateway", "--port", "19001"], + environment: { + OPENCLAW_GATEWAY_PORT: "19001", + OPENCLAW_CONFIG_PATH: "/tmp/openclaw-daemon/openclaw.json", + OPENCLAW_STATE_DIR: "/tmp/openclaw-daemon", + } as Record, + }); + serviceReadRuntime.mockImplementationOnce(async (env?: NodeJS.ProcessEnv) => ({ + status: env?.OPENCLAW_GATEWAY_PORT === "19001" ? "running" : "unknown", + detail: env?.OPENCLAW_GATEWAY_PORT ?? "missing-port", + })); + + const status = await gatherDaemonStatus({ + rpc: {}, + probe: false, + deep: false, + }); + + expect(serviceReadRuntime).toHaveBeenCalledWith( + expect.objectContaining({ + OPENCLAW_GATEWAY_PORT: "19001", + }), + ); + expect(status.service.runtime).toMatchObject({ + status: "running", + detail: "19001", + }); + }); + it("resolves daemon gateway auth password SecretRef values before probing", async () => { daemonLoadedConfig = { gateway: { diff --git a/src/cli/daemon-cli/status.gather.ts b/src/cli/daemon-cli/status.gather.ts index a44ef93c656..ef15a377438 100644 --- a/src/cli/daemon-cli/status.gather.ts +++ b/src/cli/daemon-cli/status.gather.ts @@ -258,17 +258,21 @@ export async function gatherDaemonStatus( } & FindExtraGatewayServicesOptions, ): Promise { const service = resolveGatewayService(); - const [loaded, command, runtime] = await Promise.all([ - service.isLoaded({ env: process.env }).catch(() => false), - service.readCommand(process.env).catch(() => null), - service.readRuntime(process.env).catch((err) => ({ 
status: "unknown", detail: String(err) })), + const command = await service.readCommand(process.env).catch(() => null); + const serviceEnv = command?.environment + ? ({ + ...process.env, + ...command.environment, + } satisfies NodeJS.ProcessEnv) + : process.env; + const [loaded, runtime] = await Promise.all([ + service.isLoaded({ env: serviceEnv }).catch(() => false), + service.readRuntime(serviceEnv).catch((err) => ({ status: "unknown", detail: String(err) })), ]); const configAudit = await auditGatewayServiceConfig({ env: process.env, command, }); - - const serviceEnv = command?.environment ?? undefined; const { mergedDaemonEnv, cliCfg, @@ -276,7 +280,7 @@ export async function gatherDaemonStatus( cliConfigSummary, daemonConfigSummary, configMismatch, - } = await loadDaemonConfigContext(serviceEnv); + } = await loadDaemonConfigContext(command?.environment); const { gateway, daemonPort, cliPort, probeUrlOverride } = await resolveGatewayStatusSummary({ cliCfg, daemonCfg, diff --git a/src/cli/daemon-cli/status.test.ts b/src/cli/daemon-cli/status.test.ts new file mode 100644 index 00000000000..5cf0484120e --- /dev/null +++ b/src/cli/daemon-cli/status.test.ts @@ -0,0 +1,92 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { createCliRuntimeCapture } from "../test-runtime-capture.js"; +import type { DaemonStatus } from "./status.gather.js"; + +const gatherDaemonStatus = vi.fn( + async (_opts?: unknown): Promise => ({ + service: { + label: "LaunchAgent", + loaded: true, + loadedText: "loaded", + notLoadedText: "not loaded", + }, + rpc: { + ok: true, + url: "ws://127.0.0.1:18789", + }, + extraServices: [], + }), +); +const printDaemonStatus = vi.fn(); + +const { runtimeErrors, defaultRuntime, resetRuntimeCapture } = createCliRuntimeCapture(); + +vi.mock("../../runtime.js", () => ({ + defaultRuntime, +})); + +vi.mock("../../terminal/theme.js", () => ({ + colorize: (_rich: boolean, _color: unknown, text: string) => text, + isRich: () => false, + 
theme: { error: "error" }, +})); + +vi.mock("./status.gather.js", () => ({ + gatherDaemonStatus: (opts: unknown) => gatherDaemonStatus(opts), +})); + +vi.mock("./status.print.js", () => ({ + printDaemonStatus: (...args: unknown[]) => printDaemonStatus(...args), +})); + +const { runDaemonStatus } = await import("./status.js"); + +describe("runDaemonStatus", () => { + beforeEach(() => { + gatherDaemonStatus.mockClear(); + printDaemonStatus.mockClear(); + resetRuntimeCapture(); + }); + + it("exits when require-rpc is set and the probe fails", async () => { + gatherDaemonStatus.mockResolvedValueOnce({ + service: { + label: "LaunchAgent", + loaded: true, + loadedText: "loaded", + notLoadedText: "not loaded", + }, + rpc: { + ok: false, + url: "ws://127.0.0.1:18789", + error: "gateway closed", + }, + extraServices: [], + }); + + await expect( + runDaemonStatus({ + rpc: {}, + probe: true, + requireRpc: true, + json: false, + }), + ).rejects.toThrow("__exit__:1"); + + expect(printDaemonStatus).toHaveBeenCalledTimes(1); + }); + + it("rejects require-rpc when probing is disabled", async () => { + await expect( + runDaemonStatus({ + rpc: {}, + probe: false, + requireRpc: true, + json: false, + }), + ).rejects.toThrow("__exit__:1"); + + expect(gatherDaemonStatus).not.toHaveBeenCalled(); + expect(runtimeErrors.join("\n")).toContain("--require-rpc cannot be used with --no-probe"); + }); +}); diff --git a/src/cli/daemon-cli/status.ts b/src/cli/daemon-cli/status.ts index 2af5a1977ec..44ae4b0a686 100644 --- a/src/cli/daemon-cli/status.ts +++ b/src/cli/daemon-cli/status.ts @@ -6,12 +6,20 @@ import type { DaemonStatusOptions } from "./types.js"; export async function runDaemonStatus(opts: DaemonStatusOptions) { try { + if (opts.requireRpc && !opts.probe) { + defaultRuntime.error("Gateway status failed: --require-rpc cannot be used with --no-probe."); + defaultRuntime.exit(1); + return; + } const status = await gatherDaemonStatus({ rpc: opts.rpc, probe: Boolean(opts.probe), deep: 
Boolean(opts.deep), }); printDaemonStatus(status, { json: Boolean(opts.json) }); + if (opts.requireRpc && !status.rpc?.ok) { + defaultRuntime.exit(1); + } } catch (err) { const rich = isRich(); defaultRuntime.error(colorize(rich, theme.error, `Gateway status failed: ${String(err)}`)); diff --git a/src/cli/daemon-cli/test-helpers/lifecycle-core-harness.ts b/src/cli/daemon-cli/test-helpers/lifecycle-core-harness.ts new file mode 100644 index 00000000000..6e2a93d5633 --- /dev/null +++ b/src/cli/daemon-cli/test-helpers/lifecycle-core-harness.ts @@ -0,0 +1,65 @@ +import { vi } from "vitest"; +import type { GatewayService } from "../../../daemon/service.js"; +import type { RuntimeEnv } from "../../../runtime.js"; +import type { MockFn } from "../../../test-utils/vitest-mock-fn.js"; + +export const runtimeLogs: string[] = []; + +type LifecycleRuntimeHarness = RuntimeEnv & { + error: MockFn; + exit: MockFn; +}; + +type LifecycleServiceHarness = GatewayService & { + install: MockFn; + uninstall: MockFn; + stop: MockFn; + isLoaded: MockFn; + readCommand: MockFn; + readRuntime: MockFn; + restart: MockFn; +}; + +export const defaultRuntime: LifecycleRuntimeHarness = { + log: (...args: unknown[]) => { + runtimeLogs.push(args.map((arg) => String(arg)).join(" ")); + }, + error: vi.fn(), + exit: vi.fn((code: number) => { + throw new Error(`__exit__:${code}`); + }), +}; + +export const service: LifecycleServiceHarness = { + label: "TestService", + loadedText: "loaded", + notLoadedText: "not loaded", + install: vi.fn(), + uninstall: vi.fn(), + stop: vi.fn(), + isLoaded: vi.fn(), + readCommand: vi.fn(), + readRuntime: vi.fn(), + restart: vi.fn(), +}; + +export function resetLifecycleRuntimeLogs() { + runtimeLogs.length = 0; +} + +export function resetLifecycleServiceMocks() { + service.isLoaded.mockClear(); + service.readCommand.mockClear(); + service.restart.mockClear(); + service.isLoaded.mockResolvedValue(true); + service.readCommand.mockResolvedValue({ programArguments: [], 
environment: {} }); + service.restart.mockResolvedValue({ outcome: "completed" }); +} + +export function stubEmptyGatewayEnv() { + vi.unstubAllEnvs(); + vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", ""); + vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", ""); + vi.stubEnv("OPENCLAW_GATEWAY_URL", ""); + vi.stubEnv("CLAWDBOT_GATEWAY_URL", ""); +} diff --git a/src/cli/daemon-cli/types.ts b/src/cli/daemon-cli/types.ts index 602d47e9fd1..08a6d407329 100644 --- a/src/cli/daemon-cli/types.ts +++ b/src/cli/daemon-cli/types.ts @@ -11,6 +11,7 @@ export type GatewayRpcOpts = { export type DaemonStatusOptions = { rpc: GatewayRpcOpts; probe: boolean; + requireRpc: boolean; json: boolean; } & FindExtraGatewayServicesOptions; diff --git a/src/cli/devices-cli.ts b/src/cli/devices-cli.ts index 0344bf7967a..143d27b20ff 100644 --- a/src/cli/devices-cli.ts +++ b/src/cli/devices-cli.ts @@ -9,7 +9,7 @@ import { } from "../infra/device-pairing.js"; import { formatTimeAgo } from "../infra/format-time/format-relative.ts"; import { defaultRuntime } from "../runtime.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; import { GATEWAY_CLIENT_MODES, GATEWAY_CLIENT_NAMES } from "../utils/message-channel.js"; import { withProgress } from "./progress.js"; @@ -224,7 +224,7 @@ export function registerDevicesCli(program: Command) { return; } if (list.pending?.length) { - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); defaultRuntime.log( `${theme.heading("Pending")} ${theme.muted(`(${list.pending.length})`)}`, ); @@ -251,7 +251,7 @@ export function registerDevicesCli(program: Command) { ); } if (list.paired?.length) { - const tableWidth = Math.max(60, (process.stdout.columns ?? 
120) - 1); + const tableWidth = getTerminalTableWidth(); defaultRuntime.log( `${theme.heading("Paired")} ${theme.muted(`(${list.paired.length})`)}`, ); diff --git a/src/cli/directory-cli.ts b/src/cli/directory-cli.ts index d11867fbb40..1a9949f224a 100644 --- a/src/cli/directory-cli.ts +++ b/src/cli/directory-cli.ts @@ -6,7 +6,7 @@ import { danger } from "../globals.js"; import { resolveMessageChannelSelection } from "../infra/outbound/channel-selection.js"; import { defaultRuntime } from "../runtime.js"; import { formatDocsLink } from "../terminal/links.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; import { formatHelpExamples } from "./help-format.js"; @@ -48,7 +48,7 @@ function printDirectoryList(params: { return; } - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); defaultRuntime.log(`${theme.heading(params.title)} ${theme.muted(`(${params.entries.length})`)}`); defaultRuntime.log( renderTable({ @@ -166,7 +166,7 @@ export function registerDirectoryCli(program: Command) { defaultRuntime.log(theme.muted("Not available.")); return; } - const tableWidth = Math.max(60, (process.stdout.columns ?? 
120) - 1); + const tableWidth = getTerminalTableWidth(); defaultRuntime.log(theme.heading("Self")); defaultRuntime.log( renderTable({ diff --git a/src/cli/dns-cli.ts b/src/cli/dns-cli.ts index de6e6c0dec0..f9781d2f38e 100644 --- a/src/cli/dns-cli.ts +++ b/src/cli/dns-cli.ts @@ -7,7 +7,7 @@ import { pickPrimaryTailnetIPv4, pickPrimaryTailnetIPv6 } from "../infra/tailnet import { getWideAreaZonePath, resolveWideAreaDiscoveryDomain } from "../infra/widearea-dns.js"; import { defaultRuntime } from "../runtime.js"; import { formatDocsLink } from "../terminal/links.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; type RunOpts = { allowFailure?: boolean; inherit?: boolean }; @@ -133,7 +133,7 @@ export function registerDnsCli(program: Command) { } const zonePath = getWideAreaZonePath(wideAreaDomain); - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); defaultRuntime.log(theme.heading("DNS setup")); defaultRuntime.log( renderTable({ diff --git a/src/cli/exec-approvals-cli.ts b/src/cli/exec-approvals-cli.ts index 07fe5a462a6..c243fb7a0aa 100644 --- a/src/cli/exec-approvals-cli.ts +++ b/src/cli/exec-approvals-cli.ts @@ -10,7 +10,7 @@ import { import { formatTimeAgo } from "../infra/format-time/format-relative.ts"; import { defaultRuntime } from "../runtime.js"; import { formatDocsLink } from "../terminal/links.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { isRich, theme } from "../terminal/theme.js"; import { describeUnknownError } from "./gateway-cli/shared.js"; import { callGatewayFromCli } from "./gateway-rpc.js"; @@ -151,7 +151,7 @@ function renderApprovalsSnapshot(snapshot: ExecApprovalsSnapshot, targetLabel: s const rich = isRich(); const heading = (text: string) => (rich ? 
theme.heading(text) : text); const muted = (text: string) => (rich ? theme.muted(text) : text); - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); const file = snapshot.file ?? { version: 1 }; const defaults = file.defaults ?? {}; diff --git a/src/cli/gateway-cli/discover.ts b/src/cli/gateway-cli/discover.ts index 8465cf449ca..51eac4feb76 100644 --- a/src/cli/gateway-cli/discover.ts +++ b/src/cli/gateway-cli/discover.ts @@ -1,5 +1,6 @@ import type { GatewayBonjourBeacon } from "../../infra/bonjour-discovery.js"; import { colorize, theme } from "../../terminal/theme.js"; +import { parseTimeoutMsWithFallback } from "../parse-timeout.js"; export type GatewayDiscoverOpts = { timeout?: string; @@ -7,26 +8,7 @@ export type GatewayDiscoverOpts = { }; export function parseDiscoverTimeoutMs(raw: unknown, fallbackMs: number): number { - if (raw === undefined || raw === null) { - return fallbackMs; - } - const value = - typeof raw === "string" - ? raw.trim() - : typeof raw === "number" || typeof raw === "bigint" - ? 
String(raw) - : null; - if (value === null) { - throw new Error("invalid --timeout"); - } - if (!value) { - return fallbackMs; - } - const parsed = Number.parseInt(value, 10); - if (!Number.isFinite(parsed) || parsed <= 0) { - throw new Error(`invalid --timeout: ${value}`); - } - return parsed; + return parseTimeoutMsWithFallback(raw, fallbackMs, { invalidType: "error" }); } export function pickBeaconHost(beacon: GatewayBonjourBeacon): string | null { diff --git a/src/cli/gateway-cli/register.option-collisions.test.ts b/src/cli/gateway-cli/register.option-collisions.test.ts index 1ef5ba2c238..665886c76eb 100644 --- a/src/cli/gateway-cli/register.option-collisions.test.ts +++ b/src/cli/gateway-cli/register.option-collisions.test.ts @@ -128,30 +128,34 @@ describe("gateway register option collisions", () => { gatewayStatusCommand.mockClear(); }); - it("forwards --token to gateway call when parent and child option names collide", async () => { - await sharedProgram.parseAsync(["gateway", "call", "health", "--token", "tok_call", "--json"], { - from: "user", - }); - - expect(callGatewayCli).toHaveBeenCalledWith( - "health", - expect.objectContaining({ - token: "tok_call", - }), - {}, - ); - }); - - it("forwards --token to gateway probe when parent and child option names collide", async () => { - await sharedProgram.parseAsync(["gateway", "probe", "--token", "tok_probe", "--json"], { - from: "user", - }); - - expect(gatewayStatusCommand).toHaveBeenCalledWith( - expect.objectContaining({ - token: "tok_probe", - }), - defaultRuntime, - ); + it.each([ + { + name: "forwards --token to gateway call when parent and child option names collide", + argv: ["gateway", "call", "health", "--token", "tok_call", "--json"], + assert: () => { + expect(callGatewayCli).toHaveBeenCalledWith( + "health", + expect.objectContaining({ + token: "tok_call", + }), + {}, + ); + }, + }, + { + name: "forwards --token to gateway probe when parent and child option names collide", + argv: ["gateway", 
"probe", "--token", "tok_probe", "--json"], + assert: () => { + expect(gatewayStatusCommand).toHaveBeenCalledWith( + expect.objectContaining({ + token: "tok_probe", + }), + defaultRuntime, + ); + }, + }, + ])("$name", async ({ argv, assert }) => { + await sharedProgram.parseAsync(argv, { from: "user" }); + assert(); }); }); diff --git a/src/cli/gateway-cli/run.option-collisions.test.ts b/src/cli/gateway-cli/run.option-collisions.test.ts index 3a1f8bf57c7..a896a7a3f76 100644 --- a/src/cli/gateway-cli/run.option-collisions.test.ts +++ b/src/cli/gateway-cli/run.option-collisions.test.ts @@ -1,8 +1,6 @@ -import fs from "node:fs/promises"; -import os from "node:os"; -import path from "node:path"; import { Command } from "commander"; import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import { withTempSecretFiles } from "../../test-utils/secret-file-fixture.js"; import { createCliRuntimeCapture } from "../test-runtime-capture.js"; const startGatewayServer = vi.fn(async (_port: number, _opts?: unknown) => ({ @@ -195,16 +193,10 @@ describe("gateway run option collisions", () => { ); }); - it("accepts --auth none override", async () => { - await runGatewayCli(["gateway", "run", "--auth", "none", "--allow-unconfigured"]); + it.each(["none", "trusted-proxy"] as const)("accepts --auth %s override", async (mode) => { + await runGatewayCli(["gateway", "run", "--auth", mode, "--allow-unconfigured"]); - expectAuthOverrideMode("none"); - }); - - it("accepts --auth trusted-proxy override", async () => { - await runGatewayCli(["gateway", "run", "--auth", "trusted-proxy", "--allow-unconfigured"]); - - expectAuthOverrideMode("trusted-proxy"); + expectAuthOverrideMode(mode); }); it("prints all supported modes on invalid --auth value", async () => { @@ -244,36 +236,34 @@ describe("gateway run option collisions", () => { }); it("reads gateway password from --password-file", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 
"openclaw-gateway-run-")); - try { - const passwordFile = path.join(tempDir, "gateway-password.txt"); - await fs.writeFile(passwordFile, "pw_from_file\n", "utf8"); + await withTempSecretFiles( + "openclaw-gateway-run-", + { password: "pw_from_file\n" }, + async ({ passwordFile }) => { + await runGatewayCli([ + "gateway", + "run", + "--auth", + "password", + "--password-file", + passwordFile ?? "", + "--allow-unconfigured", + ]); + }, + ); - await runGatewayCli([ - "gateway", - "run", - "--auth", - "password", - "--password-file", - passwordFile, - "--allow-unconfigured", - ]); - - expect(startGatewayServer).toHaveBeenCalledWith( - 18789, - expect.objectContaining({ - auth: expect.objectContaining({ - mode: "password", - password: "pw_from_file", // pragma: allowlist secret - }), + expect(startGatewayServer).toHaveBeenCalledWith( + 18789, + expect.objectContaining({ + auth: expect.objectContaining({ + mode: "password", + password: "pw_from_file", // pragma: allowlist secret }), - ); - expect(runtimeErrors).not.toContain( - "Warning: --password can be exposed via process listings. Prefer --password-file or OPENCLAW_GATEWAY_PASSWORD.", - ); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + }), + ); + expect(runtimeErrors).not.toContain( + "Warning: --password can be exposed via process listings. 
Prefer --password-file or OPENCLAW_GATEWAY_PASSWORD.", + ); }); it("warns when gateway password is passed inline", async () => { @@ -293,26 +283,24 @@ describe("gateway run option collisions", () => { }); it("rejects using both --password and --password-file", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-gateway-run-")); - try { - const passwordFile = path.join(tempDir, "gateway-password.txt"); - await fs.writeFile(passwordFile, "pw_from_file\n", "utf8"); + await withTempSecretFiles( + "openclaw-gateway-run-", + { password: "pw_from_file\n" }, + async ({ passwordFile }) => { + await expect( + runGatewayCli([ + "gateway", + "run", + "--password", + "pw_inline", + "--password-file", + passwordFile ?? "", + "--allow-unconfigured", + ]), + ).rejects.toThrow("__exit__:1"); + }, + ); - await expect( - runGatewayCli([ - "gateway", - "run", - "--password", - "pw_inline", - "--password-file", - passwordFile, - "--allow-unconfigured", - ]), - ).rejects.toThrow("__exit__:1"); - - expect(runtimeErrors).toContain("Use either --password or --password-file."); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + expect(runtimeErrors).toContain("Use either --password or --password-file."); }); }); diff --git a/src/cli/hooks-cli.ts b/src/cli/hooks-cli.ts index 7ea0de030da..85aa0d0e4b9 100644 --- a/src/cli/hooks-cli.ts +++ b/src/cli/hooks-cli.ts @@ -22,7 +22,7 @@ import { resolveArchiveKind } from "../infra/archive.js"; import { buildPluginStatusReport } from "../plugins/status.js"; import { defaultRuntime } from "../runtime.js"; import { formatDocsLink } from "../terminal/links.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; import { resolveUserPath, shortenHomePath } from "../utils.js"; import { formatCliCommand } from "./command-format.js"; @@ -273,7 +273,7 @@ export function 
formatHooksList(report: HookStatusReport, opts: HooksListOptions } const eligible = hooks.filter((h) => h.eligible); - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); const rows = hooks.map((hook) => { const missing = formatHookMissingSummary(hook); return { diff --git a/src/cli/node-cli/daemon.ts b/src/cli/node-cli/daemon.ts index b293c88c15c..f56b8af3fff 100644 --- a/src/cli/node-cli/daemon.ts +++ b/src/cli/node-cli/daemon.ts @@ -3,7 +3,6 @@ import { DEFAULT_NODE_DAEMON_RUNTIME, isNodeDaemonRuntime, } from "../../commands/node-daemon-runtime.js"; -import { resolveIsNixMode } from "../../config/paths.js"; import { resolveNodeLaunchAgentLabel, resolveNodeSystemdServiceName, @@ -25,13 +24,11 @@ import { runServiceStop, runServiceUninstall, } from "../daemon-cli/lifecycle-core.js"; -import { - buildDaemonServiceSnapshot, - createDaemonActionContext, - installDaemonServiceAndEmit, -} from "../daemon-cli/response.js"; +import { buildDaemonServiceSnapshot, installDaemonServiceAndEmit } from "../daemon-cli/response.js"; import { createCliStatusTextStyles, + createDaemonInstallActionContext, + failIfNixDaemonInstallMode, formatRuntimeStatus, parsePort, resolveRuntimeStatusColor, @@ -89,11 +86,8 @@ function resolveNodeDefaults( } export async function runNodeDaemonInstall(opts: NodeDaemonInstallOptions) { - const json = Boolean(opts.json); - const { stdout, warnings, emit, fail } = createDaemonActionContext({ action: "install", json }); - - if (resolveIsNixMode(process.env)) { - fail("Nix mode detected; service install is disabled."); + const { json, stdout, warnings, emit, fail } = createDaemonInstallActionContext(opts.json); + if (failIfNixDaemonInstallMode(fail)) { return; } diff --git a/src/cli/nodes-cli.coverage.test.ts b/src/cli/nodes-cli.coverage.test.ts index 04bdfb39bf8..81d0f17c07c 100644 --- a/src/cli/nodes-cli.coverage.test.ts +++ b/src/cli/nodes-cli.coverage.test.ts @@ -174,7 +174,7 @@ 
describe("nodes-cli coverage", () => { expect(invoke?.params?.command).toBe("system.run"); expect(invoke?.params?.params).toEqual({ command: ["echo", "hi"], - rawCommand: null, + rawCommand: "echo hi", cwd: "/tmp", env: { FOO: "bar" }, timeoutMs: 1200, @@ -186,11 +186,11 @@ describe("nodes-cli coverage", () => { }); expect(invoke?.params?.timeoutMs).toBe(5000); const approval = getApprovalRequestCall(); - expect(approval?.params?.["commandArgv"]).toEqual(["echo", "hi"]); expect(approval?.params?.["systemRunPlan"]).toEqual({ argv: ["echo", "hi"], cwd: "/tmp", - rawCommand: null, + commandText: "echo hi", + commandPreview: null, agentId: "main", sessionKey: null, }); @@ -213,18 +213,18 @@ describe("nodes-cli coverage", () => { expect(invoke?.params?.command).toBe("system.run"); expect(invoke?.params?.params).toMatchObject({ command: ["/bin/sh", "-lc", "echo hi"], - rawCommand: "echo hi", + rawCommand: '/bin/sh -lc "echo hi"', agentId: "main", approved: true, approvalDecision: "allow-once", runId: expect.any(String), }); const approval = getApprovalRequestCall(); - expect(approval?.params?.["commandArgv"]).toEqual(["/bin/sh", "-lc", "echo hi"]); expect(approval?.params?.["systemRunPlan"]).toEqual({ argv: ["/bin/sh", "-lc", "echo hi"], cwd: null, - rawCommand: "echo hi", + commandText: '/bin/sh -lc "echo hi"', + commandPreview: "echo hi", agentId: "main", sessionKey: null, }); diff --git a/src/cli/nodes-cli/register.camera.ts b/src/cli/nodes-cli/register.camera.ts index 3bd7d1203dc..9c813cecc5f 100644 --- a/src/cli/nodes-cli/register.camera.ts +++ b/src/cli/nodes-cli/register.camera.ts @@ -1,6 +1,6 @@ import type { Command } from "commander"; import { defaultRuntime } from "../../runtime.js"; -import { renderTable } from "../../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../../terminal/table.js"; import { shortenHomePath } from "../../utils.js"; import { type CameraFacing, @@ -31,6 +31,12 @@ const parseFacing = (value: string): CameraFacing 
=> { throw new Error(`invalid facing: ${value} (expected front|back)`); }; +function getGatewayInvokePayload(raw: unknown): unknown { + return typeof raw === "object" && raw !== null + ? (raw as { payload?: unknown }).payload + : undefined; +} + export function registerNodesCameraCommands(nodes: Command) { const camera = nodes.command("camera").description("Capture camera media from a paired node"); @@ -71,7 +77,7 @@ export function registerNodesCameraCommands(nodes: Command) { } const { heading, muted } = getNodesTheme(); - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); const rows = devices.map((device) => ({ Name: typeof device.name === "string" ? device.name : "Unknown Camera", Position: typeof device.position === "string" ? device.position : muted("unspecified"), @@ -157,9 +163,7 @@ export function registerNodesCameraCommands(nodes: Command) { }); const raw = await callGatewayCli("node.invoke", opts, invokeParams); - const res = - typeof raw === "object" && raw !== null ? (raw as { payload?: unknown }) : {}; - const payload = parseCameraSnapPayload(res.payload); + const payload = parseCameraSnapPayload(getGatewayInvokePayload(raw)); const filePath = cameraTempPath({ kind: "snap", facing, @@ -229,8 +233,7 @@ export function registerNodesCameraCommands(nodes: Command) { }); const raw = await callGatewayCli("node.invoke", opts, invokeParams); - const res = typeof raw === "object" && raw !== null ? 
(raw as { payload?: unknown }) : {}; - const payload = parseCameraClipPayload(res.payload); + const payload = parseCameraClipPayload(getGatewayInvokePayload(raw)); const filePath = await writeCameraClipPayloadToFile({ payload, facing, diff --git a/src/cli/nodes-cli/register.invoke.ts b/src/cli/nodes-cli/register.invoke.ts index 71a3e2361e4..0bd1fdad895 100644 --- a/src/cli/nodes-cli/register.invoke.ts +++ b/src/cli/nodes-cli/register.invoke.ts @@ -189,7 +189,6 @@ async function maybeRequestNodesRunApproval(params: { opts: NodesRunOpts; nodeId: string; agentId: string | undefined; - preparedCmdText: string; approvalPlan: ReturnType["plan"]; hostSecurity: ExecSecurity; hostAsk: ExecAsk; @@ -215,8 +214,6 @@ async function maybeRequestNodesRunApproval(params: { params.opts, { id: approvalId, - command: params.preparedCmdText, - commandArgv: params.approvalPlan.argv, systemRunPlan: params.approvalPlan, cwd: params.approvalPlan.cwd, nodeId: params.nodeId, @@ -272,7 +269,7 @@ function buildSystemRunInvokeParams(params: { command: "system.run", params: { command: params.approvalPlan.argv, - rawCommand: params.approvalPlan.rawCommand, + rawCommand: params.approvalPlan.commandText, cwd: params.approvalPlan.cwd, env: params.nodeEnv, timeoutMs: params.timeoutMs, @@ -403,7 +400,6 @@ export function registerNodesInvokeCommands(nodes: Command) { opts, nodeId, agentId, - preparedCmdText: preparedContext.prepared.cmdText, approvalPlan, hostSecurity: approvals.hostSecurity, hostAsk: approvals.hostAsk, diff --git a/src/cli/nodes-cli/register.pairing.ts b/src/cli/nodes-cli/register.pairing.ts index b20c989c1c7..fd649fae754 100644 --- a/src/cli/nodes-cli/register.pairing.ts +++ b/src/cli/nodes-cli/register.pairing.ts @@ -1,5 +1,6 @@ import type { Command } from "commander"; import { defaultRuntime } from "../../runtime.js"; +import { getTerminalTableWidth } from "../../terminal/table.js"; import { getNodesTheme, runNodesCommand } from "./cli-utils.js"; import { parsePairingList } from 
"./format.js"; import { renderPendingPairingRequestsTable } from "./pairing-render.js"; @@ -25,7 +26,7 @@ export function registerNodesPairingCommands(nodes: Command) { return; } const { heading, warn, muted } = getNodesTheme(); - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); const now = Date.now(); const rendered = renderPendingPairingRequestsTable({ pending, diff --git a/src/cli/nodes-cli/register.status.ts b/src/cli/nodes-cli/register.status.ts index 4dcb3be8e38..03e00cbbec4 100644 --- a/src/cli/nodes-cli/register.status.ts +++ b/src/cli/nodes-cli/register.status.ts @@ -1,7 +1,7 @@ import type { Command } from "commander"; import { formatTimeAgo } from "../../infra/format-time/format-relative.ts"; import { defaultRuntime } from "../../runtime.js"; -import { renderTable } from "../../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../../terminal/table.js"; import { shortenHomeInString } from "../../utils.js"; import { parseDurationMs } from "../parse-duration.js"; import { getNodesTheme, runNodesCommand } from "./cli-utils.js"; @@ -112,7 +112,7 @@ export function registerNodesStatusCommands(nodes: Command) { const obj: Record = typeof result === "object" && result !== null ? result : {}; const { ok, warn, muted } = getNodesTheme(); - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); const now = Date.now(); const nodes = parseNodeList(result); const lastConnectedById = @@ -256,7 +256,7 @@ export function registerNodesStatusCommands(nodes: Command) { const status = `${paired ? ok("paired") : warn("unpaired")} · ${ connected ? ok("connected") : muted("disconnected") }`; - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); const rows = [ { Field: "ID", Value: nodeId }, displayName ? 
{ Field: "Name", Value: displayName } : null, @@ -307,7 +307,7 @@ export function registerNodesStatusCommands(nodes: Command) { const result = await callGatewayCli("node.pair.list", opts, {}); const { pending, paired } = parsePairingList(result); const { heading, muted, warn } = getNodesTheme(); - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); const now = Date.now(); const hasFilters = connectedOnly || sinceMs !== undefined; const pendingRows = hasFilters ? [] : pending; diff --git a/src/cli/pairing-cli.ts b/src/cli/pairing-cli.ts index 6974663bd49..7c8cbc750ea 100644 --- a/src/cli/pairing-cli.ts +++ b/src/cli/pairing-cli.ts @@ -10,7 +10,7 @@ import { } from "../pairing/pairing-store.js"; import { defaultRuntime } from "../runtime.js"; import { formatDocsLink } from "../terminal/links.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; import { formatCliCommand } from "./command-format.js"; @@ -88,7 +88,7 @@ export function registerPairingCli(program: Command) { return; } const idLabel = resolvePairingIdLabel(channel); - const tableWidth = Math.max(60, (process.stdout.columns ?? 
120) - 1); + const tableWidth = getTerminalTableWidth(); defaultRuntime.log( `${theme.heading("Pairing requests")} ${theme.muted(`(${requests.length})`)}`, ); diff --git a/src/cli/parse-timeout.test.ts b/src/cli/parse-timeout.test.ts new file mode 100644 index 00000000000..9d05cf2d244 --- /dev/null +++ b/src/cli/parse-timeout.test.ts @@ -0,0 +1,43 @@ +import { describe, expect, it } from "vitest"; +import { parseTimeoutMs, parseTimeoutMsWithFallback } from "./parse-timeout.js"; + +describe("parseTimeoutMs", () => { + it("parses positive string values", () => { + expect(parseTimeoutMs("1500")).toBe(1500); + }); + + it("returns undefined for empty or invalid values", () => { + expect(parseTimeoutMs(undefined)).toBeUndefined(); + expect(parseTimeoutMs("")).toBeUndefined(); + expect(parseTimeoutMs("nope")).toBeUndefined(); + }); +}); + +describe("parseTimeoutMsWithFallback", () => { + it("returns the fallback for missing or empty values", () => { + expect(parseTimeoutMsWithFallback(undefined, 3000)).toBe(3000); + expect(parseTimeoutMsWithFallback(null, 3000)).toBe(3000); + expect(parseTimeoutMsWithFallback(" ", 3000)).toBe(3000); + }); + + it("parses positive numbers and strings", () => { + expect(parseTimeoutMsWithFallback(2500, 3000)).toBe(2500); + expect(parseTimeoutMsWithFallback(2500n, 3000)).toBe(2500); + expect(parseTimeoutMsWithFallback("2500", 3000)).toBe(2500); + }); + + it("falls back on unsupported types by default", () => { + expect(parseTimeoutMsWithFallback({}, 3000)).toBe(3000); + }); + + it("throws on unsupported types when requested", () => { + expect(() => parseTimeoutMsWithFallback({}, 3000, { invalidType: "error" })).toThrow( + "invalid --timeout", + ); + }); + + it("throws on non-positive parsed values", () => { + expect(() => parseTimeoutMsWithFallback("0", 3000)).toThrow("invalid --timeout: 0"); + expect(() => parseTimeoutMsWithFallback("-1", 3000)).toThrow("invalid --timeout: -1"); + }); +}); diff --git a/src/cli/parse-timeout.ts 
b/src/cli/parse-timeout.ts index 090559add6e..139393c0176 100644 --- a/src/cli/parse-timeout.ts +++ b/src/cli/parse-timeout.ts @@ -16,3 +16,39 @@ export function parseTimeoutMs(raw: unknown): number | undefined { } return Number.isFinite(value) ? value : undefined; } + +export function parseTimeoutMsWithFallback( + raw: unknown, + fallbackMs: number, + options: { + invalidType?: "fallback" | "error"; + } = {}, +): number { + if (raw === undefined || raw === null) { + return fallbackMs; + } + + const value = + typeof raw === "string" + ? raw.trim() + : typeof raw === "number" || typeof raw === "bigint" + ? String(raw) + : null; + + if (value === null) { + if (options.invalidType === "error") { + throw new Error("invalid --timeout"); + } + return fallbackMs; + } + + if (!value) { + return fallbackMs; + } + + const parsed = Number.parseInt(value, 10); + if (!Number.isFinite(parsed) || parsed <= 0) { + throw new Error(`invalid --timeout: ${value}`); + } + return parsed; +} diff --git a/src/cli/plugins-cli.ts b/src/cli/plugins-cli.ts index 36e198c71a2..e77d7026875 100644 --- a/src/cli/plugins-cli.ts +++ b/src/cli/plugins-cli.ts @@ -19,7 +19,7 @@ import { resolveUninstallDirectoryTarget, uninstallPlugin } from "../plugins/uni import { updateNpmInstalledPlugins } from "../plugins/update.js"; import { defaultRuntime } from "../runtime.js"; import { formatDocsLink } from "../terminal/links.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; import { resolveUserPath, shortenHomeInString, shortenHomePath } from "../utils.js"; import { looksLikeLocalInstallSpec } from "./install-spec.js"; @@ -404,7 +404,7 @@ export function registerPluginsCli(program: Command) { ); if (!opts.verbose) { - const tableWidth = Math.max(60, (process.stdout.columns ?? 
120) - 1); + const tableWidth = getTerminalTableWidth(); const sourceRoots = resolvePluginSourceRoots({ workspaceDir: report.workspaceDir, }); diff --git a/src/cli/program/help.test.ts b/src/cli/program/help.test.ts index 6acceb5cc41..07b6a8d8f90 100644 --- a/src/cli/program/help.test.ts +++ b/src/cli/program/help.test.ts @@ -90,6 +90,23 @@ describe("configureProgramHelp", () => { } } + function expectVersionExit(params: { expectedVersion: string }) { + const logSpy = vi.spyOn(console, "log").mockImplementation(() => {}); + const exitSpy = vi.spyOn(process, "exit").mockImplementation(((code?: number) => { + throw new Error(`exit:${code ?? ""}`); + }) as typeof process.exit); + + try { + const program = makeProgramWithCommands(); + expect(() => configureProgramHelp(program, testProgramContext)).toThrow("exit:0"); + expect(logSpy).toHaveBeenCalledWith(params.expectedVersion); + expect(exitSpy).toHaveBeenCalledWith(0); + } finally { + logSpy.mockRestore(); + exitSpy.mockRestore(); + } + } + it("adds root help hint and marks commands with subcommands", () => { process.argv = ["node", "openclaw", "--help"]; const program = makeProgramWithCommands(); @@ -115,35 +132,12 @@ describe("configureProgramHelp", () => { it("prints version and exits immediately when version flags are present", () => { process.argv = ["node", "openclaw", "--version"]; - const logSpy = vi.spyOn(console, "log").mockImplementation(() => {}); - const exitSpy = vi.spyOn(process, "exit").mockImplementation(((code?: number) => { - throw new Error(`exit:${code ?? 
""}`); - }) as typeof process.exit); - - const program = makeProgramWithCommands(); - expect(() => configureProgramHelp(program, testProgramContext)).toThrow("exit:0"); - expect(logSpy).toHaveBeenCalledWith("OpenClaw 9.9.9-test (abc1234)"); - expect(exitSpy).toHaveBeenCalledWith(0); - - logSpy.mockRestore(); - exitSpy.mockRestore(); + expectVersionExit({ expectedVersion: "OpenClaw 9.9.9-test (abc1234)" }); }); it("prints version and exits immediately without commit metadata", () => { process.argv = ["node", "openclaw", "--version"]; resolveCommitHashMock.mockReturnValue(null); - - const logSpy = vi.spyOn(console, "log").mockImplementation(() => {}); - const exitSpy = vi.spyOn(process, "exit").mockImplementation(((code?: number) => { - throw new Error(`exit:${code ?? ""}`); - }) as typeof process.exit); - - const program = makeProgramWithCommands(); - expect(() => configureProgramHelp(program, testProgramContext)).toThrow("exit:0"); - expect(logSpy).toHaveBeenCalledWith("OpenClaw 9.9.9-test"); - expect(exitSpy).toHaveBeenCalledWith(0); - - logSpy.mockRestore(); - exitSpy.mockRestore(); + expectVersionExit({ expectedVersion: "OpenClaw 9.9.9-test" }); }); }); diff --git a/src/cli/program/register.agent.ts b/src/cli/program/register.agent.ts index fdb45a0960a..e5847f3c164 100644 --- a/src/cli/program/register.agent.ts +++ b/src/cli/program/register.agent.ts @@ -27,7 +27,7 @@ export function registerAgentCommands(program: Command, args: { agentChannelOpti .option("-t, --to ", "Recipient number in E.164 used to derive the session key") .option("--session-id ", "Use an explicit session id") .option("--agent ", "Agent id (overrides routing bindings)") - .option("--thinking ", "Thinking level: off | minimal | low | medium | high") + .option("--thinking ", "Thinking level: off | minimal | low | medium | high | xhigh") .option("--verbose ", "Persist agent verbose level for the session") .option( "--channel ", diff --git a/src/cli/program/register.onboard.ts 
b/src/cli/program/register.onboard.ts index 03fb832a041..4dd285e63c1 100644 --- a/src/cli/program/register.onboard.ts +++ b/src/cli/program/register.onboard.ts @@ -160,12 +160,15 @@ export function registerOnboardCommand(program: Command) { zaiApiKey: opts.zaiApiKey as string | undefined, xiaomiApiKey: opts.xiaomiApiKey as string | undefined, qianfanApiKey: opts.qianfanApiKey as string | undefined, + modelstudioApiKeyCn: opts.modelstudioApiKeyCn as string | undefined, + modelstudioApiKey: opts.modelstudioApiKey as string | undefined, minimaxApiKey: opts.minimaxApiKey as string | undefined, syntheticApiKey: opts.syntheticApiKey as string | undefined, veniceApiKey: opts.veniceApiKey as string | undefined, togetherApiKey: opts.togetherApiKey as string | undefined, huggingfaceApiKey: opts.huggingfaceApiKey as string | undefined, opencodeZenApiKey: opts.opencodeZenApiKey as string | undefined, + opencodeGoApiKey: opts.opencodeGoApiKey as string | undefined, xaiApiKey: opts.xaiApiKey as string | undefined, litellmApiKey: opts.litellmApiKey as string | undefined, volcengineApiKey: opts.volcengineApiKey as string | undefined, diff --git a/src/cli/qr-cli.test.ts b/src/cli/qr-cli.test.ts index 551c17355ef..1bc8a645719 100644 --- a/src/cli/qr-cli.test.ts +++ b/src/cli/qr-cli.test.ts @@ -27,6 +27,12 @@ vi.mock("../process/exec.js", () => ({ runCommandWithTimeout: mocks.runCommandWi vi.mock("./command-secret-gateway.js", () => ({ resolveCommandSecretRefsViaGateway: mocks.resolveCommandSecretRefsViaGateway, })); +vi.mock("../infra/device-bootstrap.js", () => ({ + issueDeviceBootstrapToken: vi.fn(async () => ({ + token: "bootstrap-123", + expiresAtMs: 123, + })), +})); vi.mock("qrcode-terminal", () => ({ default: { generate: mocks.qrGenerate, @@ -98,6 +104,12 @@ function createLocalGatewayPasswordRefAuth(secretId: string) { }; } +function createLocalGatewayEnvPasswordRefAuth(secretId: string) { + return { + password: { source: "env", provider: "default", id: secretId }, + }; +} + 
describe("registerQrCli", () => { function createProgram() { const program = new Command(); @@ -123,6 +135,18 @@ describe("registerQrCli", () => { }; } + function expectLoggedSetupCode(url: string) { + const expected = encodePairingSetupCode({ + url, + bootstrapToken: "bootstrap-123", + }); + expect(runtime.log).toHaveBeenCalledWith(expected); + } + + function expectLoggedLocalSetupCode() { + expectLoggedSetupCode("ws://gateway.local:18789"); + } + function mockTailscaleStatusLookup() { runCommandWithTimeout.mockResolvedValue({ code: 0, @@ -156,7 +180,7 @@ describe("registerQrCli", () => { const expected = encodePairingSetupCode({ url: "ws://gateway.local:18789", - token: "tok", + bootstrapToken: "bootstrap-123", }); expect(runtime.log).toHaveBeenCalledWith(expected); expect(qrGenerate).not.toHaveBeenCalled(); @@ -192,11 +216,7 @@ describe("registerQrCli", () => { await runQr(["--setup-code-only", "--token", "override-token"]); - const expected = encodePairingSetupCode({ - url: "ws://gateway.local:18789", - token: "override-token", - }); - expect(runtime.log).toHaveBeenCalledWith(expected); + expectLoggedLocalSetupCode(); }); it("skips local password SecretRef resolution when --token override is provided", async () => { @@ -208,11 +228,7 @@ describe("registerQrCli", () => { await runQr(["--setup-code-only", "--token", "override-token"]); - const expected = encodePairingSetupCode({ - url: "ws://gateway.local:18789", - token: "override-token", - }); - expect(runtime.log).toHaveBeenCalledWith(expected); + expectLoggedLocalSetupCode(); }); it("resolves local gateway auth password SecretRefs before setup code generation", async () => { @@ -225,11 +241,7 @@ describe("registerQrCli", () => { await runQr(["--setup-code-only"]); - const expected = encodePairingSetupCode({ - url: "ws://gateway.local:18789", - password: "local-password-secret", // pragma: allowlist secret - }); - expect(runtime.log).toHaveBeenCalledWith(expected); + expectLoggedLocalSetupCode(); 
expect(resolveCommandSecretRefsViaGateway).not.toHaveBeenCalled(); }); @@ -243,11 +255,7 @@ describe("registerQrCli", () => { await runQr(["--setup-code-only"]); - const expected = encodePairingSetupCode({ - url: "ws://gateway.local:18789", - password: "password-from-env", // pragma: allowlist secret - }); - expect(runtime.log).toHaveBeenCalledWith(expected); + expectLoggedLocalSetupCode(); expect(resolveCommandSecretRefsViaGateway).not.toHaveBeenCalled(); }); @@ -256,17 +264,13 @@ describe("registerQrCli", () => { createLocalGatewayConfigWithAuth({ mode: "token", token: "token-123", - password: { source: "env", provider: "default", id: "MISSING_LOCAL_GATEWAY_PASSWORD" }, + ...createLocalGatewayEnvPasswordRefAuth("MISSING_LOCAL_GATEWAY_PASSWORD"), }), ); await runQr(["--setup-code-only"]); - const expected = encodePairingSetupCode({ - url: "ws://gateway.local:18789", - token: "token-123", - }); - expect(runtime.log).toHaveBeenCalledWith(expected); + expectLoggedLocalSetupCode(); expect(resolveCommandSecretRefsViaGateway).not.toHaveBeenCalled(); }); @@ -274,17 +278,13 @@ describe("registerQrCli", () => { vi.stubEnv("QR_INFERRED_GATEWAY_PASSWORD", "inferred-password"); loadConfig.mockReturnValue( createLocalGatewayConfigWithAuth({ - password: { source: "env", provider: "default", id: "QR_INFERRED_GATEWAY_PASSWORD" }, + ...createLocalGatewayEnvPasswordRefAuth("QR_INFERRED_GATEWAY_PASSWORD"), }), ); await runQr(["--setup-code-only"]); - const expected = encodePairingSetupCode({ - url: "ws://gateway.local:18789", - password: "inferred-password", // pragma: allowlist secret - }); - expect(runtime.log).toHaveBeenCalledWith(expected); + expectLoggedLocalSetupCode(); expect(resolveCommandSecretRefsViaGateway).not.toHaveBeenCalled(); }); @@ -332,7 +332,7 @@ describe("registerQrCli", () => { const expected = encodePairingSetupCode({ url: "wss://remote.example.com:444", - token: "remote-tok", + bootstrapToken: "bootstrap-123", }); 
expect(runtime.log).toHaveBeenCalledWith(expected); expect(resolveCommandSecretRefsViaGateway).toHaveBeenCalledWith( @@ -375,7 +375,7 @@ describe("registerQrCli", () => { ).toBe(true); const expected = encodePairingSetupCode({ url: "wss://remote.example.com:444", - token: "remote-tok", + bootstrapToken: "bootstrap-123", }); expect(runtime.log).toHaveBeenCalledWith(expected); }); diff --git a/src/cli/qr-dashboard.integration.test.ts b/src/cli/qr-dashboard.integration.test.ts index 5db9bb43d7a..7a6dedef091 100644 --- a/src/cli/qr-dashboard.integration.test.ts +++ b/src/cli/qr-dashboard.integration.test.ts @@ -66,12 +66,22 @@ function createGatewayTokenRefFixture() { }; } -function decodeSetupCode(setupCode: string): { url?: string; token?: string; password?: string } { +function decodeSetupCode(setupCode: string): { + url?: string; + bootstrapToken?: string; + token?: string; + password?: string; +} { const padded = setupCode.replace(/-/g, "+").replace(/_/g, "/"); const padLength = (4 - (padded.length % 4)) % 4; const normalized = padded + "=".repeat(padLength); const json = Buffer.from(normalized, "base64").toString("utf8"); - return JSON.parse(json) as { url?: string; token?: string; password?: string }; + return JSON.parse(json) as { + url?: string; + bootstrapToken?: string; + token?: string; + password?: string; + }; } async function runCli(args: string[]): Promise { @@ -126,7 +136,8 @@ describe("cli integration: qr + dashboard token SecretRef", () => { expect(setupCode).toBeTruthy(); const payload = decodeSetupCode(setupCode ?? 
""); expect(payload.url).toBe("ws://gateway.local:18789"); - expect(payload.token).toBe("shared-token-123"); + expect(payload.bootstrapToken).toBeTruthy(); + expect(payload.token).toBeUndefined(); expect(runtimeErrors).toEqual([]); runtimeLogs.length = 0; diff --git a/src/cli/run-main.exit.test.ts b/src/cli/run-main.exit.test.ts index 86d74f09640..3e56c1ce794 100644 --- a/src/cli/run-main.exit.test.ts +++ b/src/cli/run-main.exit.test.ts @@ -6,6 +6,7 @@ const loadDotEnvMock = vi.hoisted(() => vi.fn()); const normalizeEnvMock = vi.hoisted(() => vi.fn()); const ensurePathMock = vi.hoisted(() => vi.fn()); const assertRuntimeMock = vi.hoisted(() => vi.fn()); +const closeAllMemorySearchManagersMock = vi.hoisted(() => vi.fn(async () => {})); vi.mock("./route.js", () => ({ tryRouteCli: tryRouteCliMock, @@ -27,6 +28,10 @@ vi.mock("../infra/runtime-guard.js", () => ({ assertSupportedRuntime: assertRuntimeMock, })); +vi.mock("../memory/search-manager.js", () => ({ + closeAllMemorySearchManagers: closeAllMemorySearchManagersMock, +})); + const { runCli } = await import("./run-main.js"); describe("runCli exit behavior", () => { @@ -43,6 +48,7 @@ describe("runCli exit behavior", () => { await runCli(["node", "openclaw", "status"]); expect(tryRouteCliMock).toHaveBeenCalledWith(["node", "openclaw", "status"]); + expect(closeAllMemorySearchManagersMock).toHaveBeenCalledTimes(1); expect(exitSpy).not.toHaveBeenCalled(); exitSpy.mockRestore(); }); diff --git a/src/cli/run-main.ts b/src/cli/run-main.ts index e80ce97b845..c0673ddf2af 100644 --- a/src/cli/run-main.ts +++ b/src/cli/run-main.ts @@ -13,6 +13,15 @@ import { applyCliProfileEnv, parseCliProfileArgs } from "./profile.js"; import { tryRouteCli } from "./route.js"; import { normalizeWindowsArgv } from "./windows-argv.js"; +async function closeCliMemoryManagers(): Promise { + try { + const { closeAllMemorySearchManagers } = await import("../memory/search-manager.js"); + await closeAllMemorySearchManagers(); + } catch { + // 
Best-effort teardown for short-lived CLI processes. + } +} + export function rewriteUpdateFlagArgv(argv: string[]): string[] { const index = argv.indexOf("--update"); if (index === -1) { @@ -82,59 +91,63 @@ export async function runCli(argv: string[] = process.argv) { // Enforce the minimum supported runtime before doing any work. assertSupportedRuntime(); - if (await tryRouteCli(normalizedArgv)) { - return; - } - - // Capture all console output into structured logs while keeping stdout/stderr behavior. - enableConsoleCapture(); - - const { buildProgram } = await import("./program.js"); - const program = buildProgram(); - - // Global error handlers to prevent silent crashes from unhandled rejections/exceptions. - // These log the error and exit gracefully instead of crashing without trace. - installUnhandledRejectionHandler(); - - process.on("uncaughtException", (error) => { - console.error("[openclaw] Uncaught exception:", formatUncaughtError(error)); - process.exit(1); - }); - - const parseArgv = rewriteUpdateFlagArgv(normalizedArgv); - // Register the primary command (builtin or subcli) so help and command parsing - // are correct even with lazy command registration. 
- const primary = getPrimaryCommand(parseArgv); - if (primary) { - const { getProgramContext } = await import("./program/program-context.js"); - const ctx = getProgramContext(program); - if (ctx) { - const { registerCoreCliByName } = await import("./program/command-registry.js"); - await registerCoreCliByName(program, ctx, primary, parseArgv); + try { + if (await tryRouteCli(normalizedArgv)) { + return; } - const { registerSubCliByName } = await import("./program/register.subclis.js"); - await registerSubCliByName(program, primary); - } - const hasBuiltinPrimary = - primary !== null && program.commands.some((command) => command.name() === primary); - const shouldSkipPluginRegistration = shouldSkipPluginCommandRegistration({ - argv: parseArgv, - primary, - hasBuiltinPrimary, - }); - if (!shouldSkipPluginRegistration) { - // Register plugin CLI commands before parsing - const { registerPluginCliCommands } = await import("../plugins/cli.js"); - const { loadValidatedConfigForPluginRegistration } = - await import("./program/register.subclis.js"); - const config = await loadValidatedConfigForPluginRegistration(); - if (config) { - registerPluginCliCommands(program, config); + // Capture all console output into structured logs while keeping stdout/stderr behavior. + enableConsoleCapture(); + + const { buildProgram } = await import("./program.js"); + const program = buildProgram(); + + // Global error handlers to prevent silent crashes from unhandled rejections/exceptions. + // These log the error and exit gracefully instead of crashing without trace. + installUnhandledRejectionHandler(); + + process.on("uncaughtException", (error) => { + console.error("[openclaw] Uncaught exception:", formatUncaughtError(error)); + process.exit(1); + }); + + const parseArgv = rewriteUpdateFlagArgv(normalizedArgv); + // Register the primary command (builtin or subcli) so help and command parsing + // are correct even with lazy command registration. 
+ const primary = getPrimaryCommand(parseArgv); + if (primary) { + const { getProgramContext } = await import("./program/program-context.js"); + const ctx = getProgramContext(program); + if (ctx) { + const { registerCoreCliByName } = await import("./program/command-registry.js"); + await registerCoreCliByName(program, ctx, primary, parseArgv); + } + const { registerSubCliByName } = await import("./program/register.subclis.js"); + await registerSubCliByName(program, primary); } - } - await program.parseAsync(parseArgv); + const hasBuiltinPrimary = + primary !== null && program.commands.some((command) => command.name() === primary); + const shouldSkipPluginRegistration = shouldSkipPluginCommandRegistration({ + argv: parseArgv, + primary, + hasBuiltinPrimary, + }); + if (!shouldSkipPluginRegistration) { + // Register plugin CLI commands before parsing + const { registerPluginCliCommands } = await import("../plugins/cli.js"); + const { loadValidatedConfigForPluginRegistration } = + await import("./program/register.subclis.js"); + const config = await loadValidatedConfigForPluginRegistration(); + if (config) { + registerPluginCliCommands(program, config); + } + } + + await program.parseAsync(parseArgv); + } finally { + await closeCliMemoryManagers(); + } } export function isCliMainModule(): boolean { diff --git a/src/cli/skills-cli.format.ts b/src/cli/skills-cli.format.ts index 5f6dcfdcd2a..045281bc7d1 100644 --- a/src/cli/skills-cli.format.ts +++ b/src/cli/skills-cli.format.ts @@ -1,5 +1,6 @@ import type { SkillStatusEntry, SkillStatusReport } from "../agents/skills-status.js"; -import { renderTable } from "../terminal/table.js"; +import { stripAnsi } from "../terminal/ansi.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; import { shortenHomePath } from "../utils.js"; import { formatCliCommand } from "./command-format.js"; @@ -38,8 +39,38 @@ function formatSkillStatus(skill: 
SkillStatusEntry): string { return theme.error("✗ missing"); } +function normalizeSkillEmoji(emoji?: string): string { + return (emoji ?? "📦").replaceAll("\uFE0E", "\uFE0F"); +} + +const REMAINING_ESC_SEQUENCE_REGEX = new RegExp( + String.raw`\u001b(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])`, + "g", +); +const JSON_CONTROL_CHAR_REGEX = new RegExp(String.raw`[\u0000-\u001f\u007f-\u009f]`, "g"); + +function sanitizeJsonString(value: string): string { + return stripAnsi(value) + .replace(REMAINING_ESC_SEQUENCE_REGEX, "") + .replace(JSON_CONTROL_CHAR_REGEX, ""); +} + +function sanitizeJsonValue(value: unknown): unknown { + if (typeof value === "string") { + return sanitizeJsonString(value); + } + if (Array.isArray(value)) { + return value.map((item) => sanitizeJsonValue(item)); + } + if (value && typeof value === "object") { + return Object.fromEntries( + Object.entries(value).map(([key, entryValue]) => [key, sanitizeJsonValue(entryValue)]), + ); + } + return value; +} function formatSkillName(skill: SkillStatusEntry): string { - const emoji = skill.emoji ?? "📦"; + const emoji = normalizeSkillEmoji(skill.emoji); return `${emoji} ${theme.command(skill.name)}`; } @@ -67,7 +98,7 @@ export function formatSkillsList(report: SkillStatusReport, opts: SkillsListOpti const skills = opts.eligible ? report.skills.filter((s) => s.eligible) : report.skills; if (opts.json) { - const jsonReport = { + const jsonReport = sanitizeJsonValue({ workspaceDir: report.workspaceDir, managedSkillsDir: report.managedSkillsDir, skills: skills.map((s) => ({ @@ -83,7 +114,7 @@ export function formatSkillsList(report: SkillStatusReport, opts: SkillsListOpti homepage: s.homepage, missing: s.missing, })), - }; + }); return JSON.stringify(jsonReport, null, 2); } @@ -95,7 +126,7 @@ export function formatSkillsList(report: SkillStatusReport, opts: SkillsListOpti } const eligible = skills.filter((s) => s.eligible); - const tableWidth = Math.max(60, (process.stdout.columns ?? 
120) - 1); + const tableWidth = getTerminalTableWidth(); const rows = skills.map((skill) => { const missing = formatSkillMissingSummary(skill); return { @@ -109,7 +140,7 @@ export function formatSkillsList(report: SkillStatusReport, opts: SkillsListOpti const columns = [ { key: "Status", header: "Status", minWidth: 10 }, - { key: "Skill", header: "Skill", minWidth: 18, flex: true }, + { key: "Skill", header: "Skill", minWidth: 22 }, { key: "Description", header: "Description", minWidth: 24, flex: true }, { key: "Source", header: "Source", minWidth: 10 }, ]; @@ -150,11 +181,11 @@ export function formatSkillInfo( } if (opts.json) { - return JSON.stringify(skill, null, 2); + return JSON.stringify(sanitizeJsonValue(skill), null, 2); } const lines: string[] = []; - const emoji = skill.emoji ?? "📦"; + const emoji = normalizeSkillEmoji(skill.emoji); const status = skill.eligible ? theme.success("✓ Ready") : skill.disabled @@ -247,7 +278,7 @@ export function formatSkillsCheck(report: SkillStatusReport, opts: SkillsCheckOp if (opts.json) { return JSON.stringify( - { + sanitizeJsonValue({ summary: { total: report.skills.length, eligible: eligible.length, @@ -263,7 +294,7 @@ export function formatSkillsCheck(report: SkillStatusReport, opts: SkillsCheckOp missing: s.missing, install: s.install, })), - }, + }), null, 2, ); @@ -282,7 +313,7 @@ export function formatSkillsCheck(report: SkillStatusReport, opts: SkillsCheckOp lines.push(""); lines.push(theme.heading("Ready to use:")); for (const skill of eligible) { - const emoji = skill.emoji ?? "📦"; + const emoji = normalizeSkillEmoji(skill.emoji); lines.push(` ${emoji} ${skill.name}`); } } @@ -291,7 +322,7 @@ export function formatSkillsCheck(report: SkillStatusReport, opts: SkillsCheckOp lines.push(""); lines.push(theme.heading("Missing requirements:")); for (const skill of missingReqs) { - const emoji = skill.emoji ?? 
"📦"; + const emoji = normalizeSkillEmoji(skill.emoji); const missing = formatSkillMissingSummary(skill); lines.push(` ${emoji} ${skill.name} ${theme.muted(`(${missing})`)}`); } diff --git a/src/cli/skills-cli.test.ts b/src/cli/skills-cli.test.ts index 37323e7f21d..27031fc0fdf 100644 --- a/src/cli/skills-cli.test.ts +++ b/src/cli/skills-cli.test.ts @@ -148,6 +148,18 @@ describe("skills-cli", () => { expect(output).toContain("Any binaries"); expect(output).toContain("API_KEY"); }); + + it("normalizes text-presentation emoji selectors in info output", () => { + const report = createMockReport([ + createMockSkill({ + name: "info-emoji", + emoji: "🎛\uFE0E", + }), + ]); + + const output = formatSkillInfo(report, "info-emoji", {}); + expect(output).toContain("🎛️"); + }); }); describe("formatSkillsCheck", () => { @@ -170,6 +182,22 @@ describe("skills-cli", () => { expect(output).toContain("go"); // missing binary expect(output).toContain("npx clawhub"); }); + + it("normalizes text-presentation emoji selectors in check output", () => { + const report = createMockReport([ + createMockSkill({ name: "ready-emoji", emoji: "🎛\uFE0E", eligible: true }), + createMockSkill({ + name: "missing-emoji", + emoji: "🎙\uFE0E", + eligible: false, + missing: { bins: ["ffmpeg"], anyBins: [], env: [], config: [], os: [] }, + }), + ]); + + const output = formatSkillsCheck(report, {}); + expect(output).toContain("🎛️ ready-emoji"); + expect(output).toContain("🎙️ missing-emoji"); + }); }); describe("JSON output", () => { @@ -215,5 +243,46 @@ describe("skills-cli", () => { const parsed = JSON.parse(output) as Record; assert(parsed); }); + + it("sanitizes ANSI and C1 controls in skills list JSON output", () => { + const report = createMockReport([ + createMockSkill({ + name: "json-skill", + emoji: "\u001b[31m📧\u001b[0m\u009f", + description: "desc\u0093\u001b[2J\u001b[33m colored\u001b[0m", + }), + ]); + + const output = formatSkillsList(report, { json: true }); + const parsed = JSON.parse(output) 
as { + skills: Array<{ emoji: string; description: string }>; + }; + + expect(parsed.skills[0]?.emoji).toBe("📧"); + expect(parsed.skills[0]?.description).toBe("desc colored"); + expect(output).not.toContain("\\u001b"); + }); + + it("sanitizes skills info JSON output", () => { + const report = createMockReport([ + createMockSkill({ + name: "info-json", + emoji: "\u001b[31m🎙\u001b[0m\u009f", + description: "hi\u0091", + homepage: "https://example.com/\u0092docs", + }), + ]); + + const output = formatSkillInfo(report, "info-json", { json: true }); + const parsed = JSON.parse(output) as { + emoji: string; + description: string; + homepage: string; + }; + + expect(parsed.emoji).toBe("🎙"); + expect(parsed.description).toBe("hi"); + expect(parsed.homepage).toBe("https://example.com/docs"); + }); }); }); diff --git a/src/cli/update-cli.option-collisions.test.ts b/src/cli/update-cli.option-collisions.test.ts index c0dd2d88404..6db4cfdd260 100644 --- a/src/cli/update-cli.option-collisions.test.ts +++ b/src/cli/update-cli.option-collisions.test.ts @@ -44,30 +44,36 @@ describe("update cli option collisions", () => { defaultRuntime.exit.mockClear(); }); - it("forwards parent-captured --json/--timeout to `update status`", async () => { - await runRegisteredCli({ - register: registerUpdateCli as (program: Command) => void, + it.each([ + { + name: "forwards parent-captured --json/--timeout to `update status`", argv: ["update", "status", "--json", "--timeout", "9"], - }); - - expect(updateStatusCommand).toHaveBeenCalledWith( - expect.objectContaining({ - json: true, - timeout: "9", - }), - ); - }); - - it("forwards parent-captured --timeout to `update wizard`", async () => { + assert: () => { + expect(updateStatusCommand).toHaveBeenCalledWith( + expect.objectContaining({ + json: true, + timeout: "9", + }), + ); + }, + }, + { + name: "forwards parent-captured --timeout to `update wizard`", + argv: ["update", "wizard", "--timeout", "13"], + assert: () => { + 
expect(updateWizardCommand).toHaveBeenCalledWith( + expect.objectContaining({ + timeout: "13", + }), + ); + }, + }, + ])("$name", async ({ argv, assert }) => { await runRegisteredCli({ register: registerUpdateCli as (program: Command) => void, - argv: ["update", "wizard", "--timeout", "13"], + argv, }); - expect(updateWizardCommand).toHaveBeenCalledWith( - expect.objectContaining({ - timeout: "13", - }), - ); + assert(); }); }); diff --git a/src/cli/update-cli.test.ts b/src/cli/update-cli.test.ts index 2fe5e8f9b23..f2138215327 100644 --- a/src/cli/update-cli.test.ts +++ b/src/cli/update-cli.test.ts @@ -1,3 +1,4 @@ +import fs from "node:fs/promises"; import path from "node:path"; import { beforeEach, describe, expect, it, vi } from "vitest"; import type { OpenClawConfig, ConfigFileSnapshot } from "../config/types.openclaw.js"; @@ -390,14 +391,13 @@ describe("update-cli", () => { }, { name: "defaults to stable channel for package installs when unset", - mode: "npm" as const, options: { yes: true }, prepare: async () => { const tempDir = createCaseDir("openclaw-update"); mockPackageInstallStatus(tempDir); }, - expectedChannel: "stable" as const, - expectedTag: "latest", + expectedChannel: undefined as "stable" | undefined, + expectedTag: undefined as string | undefined, }, { name: "uses stored beta channel when configured", @@ -414,14 +414,25 @@ describe("update-cli", () => { }, ])("$name", async ({ mode, options, prepare, expectedChannel, expectedTag }) => { await prepare(); - vi.mocked(runGatewayUpdate).mockResolvedValue(makeOkUpdateResult({ mode })); + if (mode) { + vi.mocked(runGatewayUpdate).mockResolvedValue(makeOkUpdateResult({ mode })); + } await updateCommand(options); - const call = expectUpdateCallChannel(expectedChannel); - if (expectedTag !== undefined) { - expect(call?.tag).toBe(expectedTag); + if (expectedChannel !== undefined) { + const call = expectUpdateCallChannel(expectedChannel); + if (expectedTag !== undefined) { + 
expect(call?.tag).toBe(expectedTag); + } + return; } + + expect(runGatewayUpdate).not.toHaveBeenCalled(); + expect(runCommandWithTimeout).toHaveBeenCalledWith( + ["npm", "i", "-g", "openclaw@latest", "--no-fund", "--no-audit", "--loglevel=error"], + expect.any(Object), + ); }); it("falls back to latest when beta tag is older than release", async () => { @@ -436,32 +447,106 @@ describe("update-cli", () => { tag: "latest", version: "1.2.3-1", }); - vi.mocked(runGatewayUpdate).mockResolvedValue( - makeOkUpdateResult({ - mode: "npm", - }), - ); - await updateCommand({}); - const call = expectUpdateCallChannel("beta"); - expect(call?.tag).toBe("latest"); + expect(runGatewayUpdate).not.toHaveBeenCalled(); + expect(runCommandWithTimeout).toHaveBeenCalledWith( + ["npm", "i", "-g", "openclaw@latest", "--no-fund", "--no-audit", "--loglevel=error"], + expect.any(Object), + ); }); it("honors --tag override", async () => { const tempDir = createCaseDir("openclaw-update"); - vi.mocked(resolveOpenClawPackageRoot).mockResolvedValue(tempDir); - vi.mocked(runGatewayUpdate).mockResolvedValue( - makeOkUpdateResult({ - mode: "npm", - }), - ); + mockPackageInstallStatus(tempDir); await updateCommand({ tag: "next" }); - const call = vi.mocked(runGatewayUpdate).mock.calls[0]?.[0]; - expect(call?.tag).toBe("next"); + expect(runGatewayUpdate).not.toHaveBeenCalled(); + expect(runCommandWithTimeout).toHaveBeenCalledWith( + ["npm", "i", "-g", "openclaw@next", "--no-fund", "--no-audit", "--loglevel=error"], + expect.any(Object), + ); + }); + + it("prepends portable Git PATH for package updates on Windows", async () => { + const platformSpy = vi.spyOn(process, "platform", "get").mockReturnValue("win32"); + const tempDir = createCaseDir("openclaw-update"); + const localAppData = createCaseDir("openclaw-localappdata"); + const portableGitMingw = path.join( + localAppData, + "OpenClaw", + "deps", + "portable-git", + "mingw64", + "bin", + ); + const portableGitUsr = path.join( + localAppData, + 
"OpenClaw", + "deps", + "portable-git", + "usr", + "bin", + ); + await fs.mkdir(portableGitMingw, { recursive: true }); + await fs.mkdir(portableGitUsr, { recursive: true }); + mockPackageInstallStatus(tempDir); + pathExists.mockImplementation( + async (candidate: string) => candidate === portableGitMingw || candidate === portableGitUsr, + ); + + await withEnvAsync({ LOCALAPPDATA: localAppData }, async () => { + await updateCommand({ yes: true }); + }); + + platformSpy.mockRestore(); + + const updateCall = vi + .mocked(runCommandWithTimeout) + .mock.calls.find( + (call) => + Array.isArray(call[0]) && + call[0][0] === "npm" && + call[0][1] === "i" && + call[0][2] === "-g", + ); + const updateOptions = + typeof updateCall?.[1] === "object" && updateCall[1] !== null ? updateCall[1] : undefined; + const mergedPath = updateOptions?.env?.Path ?? updateOptions?.env?.PATH ?? ""; + expect(mergedPath.split(path.delimiter).slice(0, 2)).toEqual([ + portableGitMingw, + portableGitUsr, + ]); + expect(updateOptions?.env?.NPM_CONFIG_SCRIPT_SHELL).toBe("cmd.exe"); + expect(updateOptions?.env?.NODE_LLAMA_CPP_SKIP_DOWNLOAD).toBe("1"); + }); + + it("uses OPENCLAW_UPDATE_PACKAGE_SPEC for package updates", async () => { + const tempDir = createCaseDir("openclaw-update"); + mockPackageInstallStatus(tempDir); + + await withEnvAsync( + { OPENCLAW_UPDATE_PACKAGE_SPEC: "http://10.211.55.2:8138/openclaw-next.tgz" }, + async () => { + await updateCommand({ yes: true, tag: "latest" }); + }, + ); + + expect(runGatewayUpdate).not.toHaveBeenCalled(); + expect(runCommandWithTimeout).toHaveBeenCalledWith( + [ + "npm", + "i", + "-g", + "http://10.211.55.2:8138/openclaw-next.tgz", + "--no-fund", + "--no-audit", + "--loglevel=error", + ], + expect.any(Object), + ); }); it("updateCommand outputs JSON when --json is set", async () => { @@ -539,12 +624,98 @@ describe("update-cli", () => { expect(runCommandWithTimeout).toHaveBeenCalledWith( [expect.stringMatching(/node/), entryPath, "gateway", "install", 
"--force"], - expect.objectContaining({ timeoutMs: 60_000 }), + expect.objectContaining({ cwd: root, timeoutMs: 60_000 }), ); expect(runDaemonInstall).not.toHaveBeenCalled(); expect(runRestartScript).toHaveBeenCalled(); }); + it("updateCommand preserves invocation-relative service env overrides during refresh", async () => { + const root = createCaseDir("openclaw-updated-root"); + const entryPath = path.join(root, "dist", "entry.js"); + pathExists.mockImplementation(async (candidate: string) => candidate === entryPath); + + vi.mocked(runGatewayUpdate).mockResolvedValue({ + status: "ok", + mode: "npm", + root, + steps: [], + durationMs: 100, + }); + serviceLoaded.mockResolvedValue(true); + + await withEnvAsync( + { + OPENCLAW_STATE_DIR: "./state", + OPENCLAW_CONFIG_PATH: "./config/openclaw.json", + }, + async () => { + await updateCommand({}); + }, + ); + + expect(runCommandWithTimeout).toHaveBeenCalledWith( + [expect.stringMatching(/node/), entryPath, "gateway", "install", "--force"], + expect.objectContaining({ + cwd: root, + env: expect.objectContaining({ + OPENCLAW_STATE_DIR: path.resolve("./state"), + OPENCLAW_CONFIG_PATH: path.resolve("./config/openclaw.json"), + }), + timeoutMs: 60_000, + }), + ); + expect(runDaemonInstall).not.toHaveBeenCalled(); + }); + + it("updateCommand reuses the captured invocation cwd when process.cwd later fails", async () => { + const root = createCaseDir("openclaw-updated-root"); + const entryPath = path.join(root, "dist", "entry.js"); + pathExists.mockImplementation(async (candidate: string) => candidate === entryPath); + + const originalCwd = process.cwd(); + let restoreCwd: (() => void) | undefined; + vi.mocked(runGatewayUpdate).mockImplementation(async () => { + const cwdSpy = vi.spyOn(process, "cwd").mockImplementation(() => { + throw new Error("ENOENT: current working directory is gone"); + }); + restoreCwd = () => cwdSpy.mockRestore(); + return { + status: "ok", + mode: "npm", + root, + steps: [], + durationMs: 100, + }; + 
}); + serviceLoaded.mockResolvedValue(true); + + try { + await withEnvAsync( + { + OPENCLAW_STATE_DIR: "./state", + }, + async () => { + await updateCommand({}); + }, + ); + } finally { + restoreCwd?.(); + } + + expect(runCommandWithTimeout).toHaveBeenCalledWith( + [expect.stringMatching(/node/), entryPath, "gateway", "install", "--force"], + expect.objectContaining({ + cwd: root, + env: expect.objectContaining({ + OPENCLAW_STATE_DIR: path.resolve(originalCwd, "./state"), + }), + timeoutMs: 60_000, + }), + ); + expect(runDaemonInstall).not.toHaveBeenCalled(); + }); + it("updateCommand falls back to restart when env refresh install fails", async () => { await runRestartFallbackScenario({ daemonInstall: "fail" }); }); @@ -648,15 +819,15 @@ describe("update-cli", () => { name: "requires confirmation without --yes", options: {}, shouldExit: true, - shouldRunUpdate: false, + shouldRunPackageUpdate: false, }, { name: "allows downgrade with --yes", options: { yes: true }, shouldExit: false, - shouldRunUpdate: true, + shouldRunPackageUpdate: true, }, - ])("$name in non-interactive mode", async ({ options, shouldExit, shouldRunUpdate }) => { + ])("$name in non-interactive mode", async ({ options, shouldExit, shouldRunPackageUpdate }) => { await setupNonInteractiveDowngrade(); await updateCommand(options); @@ -667,7 +838,12 @@ describe("update-cli", () => { expect(vi.mocked(defaultRuntime.exit).mock.calls.some((call) => call[0] === 1)).toBe( shouldExit, ); - expect(vi.mocked(runGatewayUpdate).mock.calls.length > 0).toBe(shouldRunUpdate); + expect(vi.mocked(runGatewayUpdate).mock.calls.length > 0).toBe(false); + expect( + vi + .mocked(runCommandWithTimeout) + .mock.calls.some((call) => Array.isArray(call[0]) && call[0][0] === "npm"), + ).toBe(shouldRunPackageUpdate); }); it("dry-run bypasses downgrade confirmation checks in non-interactive mode", async () => { diff --git a/src/cli/update-cli/restart-helper.test.ts b/src/cli/update-cli/restart-helper.test.ts index 
c8b59d69afa..847893e9f23 100644 --- a/src/cli/update-cli/restart-helper.test.ts +++ b/src/cli/update-cli/restart-helper.test.ts @@ -287,6 +287,7 @@ describe("restart-helper", () => { expect(spawn).toHaveBeenCalledWith("/bin/sh", [scriptPath], { detached: true, stdio: "ignore", + windowsHide: true, }); expect(mockChild.unref).toHaveBeenCalled(); }); @@ -302,6 +303,7 @@ describe("restart-helper", () => { expect(spawn).toHaveBeenCalledWith("cmd.exe", ["/d", "/s", "/c", scriptPath], { detached: true, stdio: "ignore", + windowsHide: true, }); expect(mockChild.unref).toHaveBeenCalled(); }); @@ -317,6 +319,7 @@ describe("restart-helper", () => { expect(spawn).toHaveBeenCalledWith("cmd.exe", ["/d", "/s", "/c", `"${scriptPath}"`], { detached: true, stdio: "ignore", + windowsHide: true, }); }); }); diff --git a/src/cli/update-cli/restart-helper.ts b/src/cli/update-cli/restart-helper.ts index c27f25cdc49..a68fab161fa 100644 --- a/src/cli/update-cli/restart-helper.ts +++ b/src/cli/update-cli/restart-helper.ts @@ -169,6 +169,7 @@ export async function runRestartScript(scriptPath: string): Promise { const child = spawn(file, args, { detached: true, stdio: "ignore", + windowsHide: true, }); child.unref(); } diff --git a/src/cli/update-cli/shared.ts b/src/cli/update-cli/shared.ts index 8e62301e79a..d7cbc5ec86b 100644 --- a/src/cli/update-cli/shared.ts +++ b/src/cli/update-cli/shared.ts @@ -144,6 +144,7 @@ export async function runUpdateStep(params: { cwd?: string; timeoutMs: number; progress?: UpdateStepProgress; + env?: NodeJS.ProcessEnv; }): Promise { const command = params.argv.join(" "); params.progress?.onStepStart?.({ @@ -156,6 +157,7 @@ export async function runUpdateStep(params: { const started = Date.now(); const res = await runCommandWithTimeout(params.argv, { cwd: params.cwd, + env: params.env, timeoutMs: params.timeoutMs, }); const durationMs = Date.now() - started; diff --git a/src/cli/update-cli/status.ts b/src/cli/update-cli/status.ts index 5cf2bf8af49..8266a1e5f21 
100644 --- a/src/cli/update-cli/status.ts +++ b/src/cli/update-cli/status.ts @@ -10,7 +10,7 @@ import { } from "../../infra/update-channels.js"; import { checkUpdateStatus } from "../../infra/update-check.js"; import { defaultRuntime } from "../../runtime.js"; -import { renderTable } from "../../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../../terminal/table.js"; import { theme } from "../../terminal/theme.js"; import { parseTimeoutMsOrExit, resolveUpdateRoot, type UpdateStatusOptions } from "./shared.js"; @@ -89,7 +89,7 @@ export async function updateStatusCommand(opts: UpdateStatusOptions): Promise { const args = ["gateway", "install", "--force"]; if (params.jsonMode) { @@ -188,6 +229,8 @@ async function refreshGatewayServiceEnv(params: { continue; } const res = await runCommandWithTimeout([resolveNodeRunner(), candidate, ...args], { + cwd: params.result.root, + env: resolveServiceRefreshEnv(process.env, params.invocationCwd), timeoutMs: SERVICE_REFRESH_TIMEOUT_MS, }); if (res.code === 0) { @@ -269,12 +312,18 @@ async function runPackageInstallUpdate(params: { installKind: params.installKind, timeoutMs: params.timeoutMs, }); + const installEnv = await createGlobalInstallEnv(); const runCommand = createGlobalCommandRunner(); const pkgRoot = await resolveGlobalPackageRoot(manager, runCommand, params.timeoutMs); const packageName = (pkgRoot ? await readPackageName(pkgRoot) : await readPackageName(params.root)) ?? DEFAULT_PACKAGE_NAME; + const installSpec = resolveGlobalInstallSpec({ + packageName, + tag: params.tag, + env: installEnv, + }); const beforeVersion = pkgRoot ? 
await readPackageVersion(pkgRoot) : null; if (pkgRoot) { @@ -286,7 +335,8 @@ async function runPackageInstallUpdate(params: { const updateStep = await runUpdateStep({ name: "global update", - argv: globalInstallArgs(manager, `${packageName}@${params.tag}`), + argv: globalInstallArgs(manager, installSpec), + env: installEnv, timeoutMs: params.timeoutMs, progress: params.progress, }); @@ -380,6 +430,7 @@ async function runGitUpdate(params: { name: "global install", argv: globalInstallArgs(manager, updateRoot), cwd: updateRoot, + env: await createGlobalInstallEnv(), timeoutMs: effectiveTimeout, progress: params.progress, }); @@ -509,6 +560,7 @@ async function maybeRestartService(params: { refreshServiceEnv: boolean; gatewayPort: number; restartScriptPath?: string | null; + invocationCwd?: string; }): Promise { if (params.shouldRestart) { if (!params.opts.json) { @@ -524,6 +576,7 @@ async function maybeRestartService(params: { await refreshGatewayServiceEnv({ result: params.result, jsonMode: Boolean(params.opts.json), + invocationCwd: params.invocationCwd, }); } catch (err) { if (!params.opts.json) { @@ -629,6 +682,7 @@ async function maybeRestartService(params: { export async function updateCommand(opts: UpdateCommandOptions): Promise { suppressDeprecations(); + const invocationCwd = tryResolveInvocationCwd(); const timeoutMs = parseTimeoutMsOrExit(opts.timeout); const shouldRestart = opts.restart !== false; @@ -835,28 +889,29 @@ export async function updateCommand(opts: UpdateCommandOptions): Promise { } } - const result = switchToPackage - ? await runPackageInstallUpdate({ - root, - installKind, - tag, - timeoutMs: timeoutMs ?? 20 * 60_000, - startedAt, - progress, - }) - : await runGitUpdate({ - root, - switchToGit, - installKind, - timeoutMs, - startedAt, - progress, - channel, - tag, - showProgress, - opts, - stop, - }); + const result = + updateInstallKind === "package" + ? await runPackageInstallUpdate({ + root, + installKind, + tag, + timeoutMs: timeoutMs ?? 
20 * 60_000, + startedAt, + progress, + }) + : await runGitUpdate({ + root, + switchToGit, + installKind, + timeoutMs, + startedAt, + progress, + channel, + tag, + showProgress, + opts, + stop, + }); stop(); printResult(result, { ...opts, hideSteps: showProgress }); @@ -910,6 +965,7 @@ export async function updateCommand(opts: UpdateCommandOptions): Promise { refreshServiceEnv: refreshGatewayServiceEnv, gatewayPort, restartScriptPath, + invocationCwd, }); if (!opts.json) { diff --git a/src/commands/agent.acp.test.ts b/src/commands/agent.acp.test.ts index ab8c9da8a6e..4ad423dcf18 100644 --- a/src/commands/agent.acp.test.ts +++ b/src/commands/agent.acp.test.ts @@ -171,6 +171,61 @@ function subscribeAssistantEvents() { return { assistantEvents, stop }; } +async function runAcpTurnWithAssistantEvents(chunks: string[]) { + const { assistantEvents, stop } = subscribeAssistantEvents(); + const runTurn = createRunTurnFromTextDeltas(chunks); + + mockAcpManager({ + runTurn: (params: unknown) => runTurn(params), + }); + + try { + await agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime); + } finally { + stop(); + } + + const logLines = vi.mocked(runtime.log).mock.calls.map(([first]) => String(first)); + return { assistantEvents, logLines }; +} + +async function runAcpTurnWithTextDeltas(params: { message?: string; chunks: string[] }) { + const runTurn = createRunTurnFromTextDeltas(params.chunks); + mockAcpManager({ + runTurn: (input: unknown) => runTurn(input), + }); + await agentCommand( + { + message: params.message ?? 
"ping", + sessionKey: "agent:codex:acp:test", + }, + runtime, + ); + return { runTurn }; +} + +function expectPersistedAcpTranscript(params: { + storePath: string; + userContent: string; + assistantText: string; +}) { + const persistedStore = JSON.parse(fs.readFileSync(params.storePath, "utf-8")) as Record< + string, + { sessionFile?: string } + >; + const sessionFile = persistedStore["agent:codex:acp:test"]?.sessionFile; + const messages = readSessionMessages("acp-session-1", params.storePath, sessionFile); + expect(messages).toHaveLength(2); + expect(messages[0]).toMatchObject({ + role: "user", + content: params.userContent, + }); + expect(messages[1]).toMatchObject({ + role: "assistant", + content: [{ type: "text", text: params.assistantText }], + }); +} + async function runAcpSessionWithPolicyOverrides(params: { acpOverrides: Partial>; resolveSession?: Parameters[0]["resolveSession"]; @@ -209,13 +264,7 @@ describe("agentCommand ACP runtime routing", () => { it("routes ACP sessions through AcpSessionManager instead of embedded agent", async () => { await withAcpSessionEnv(async () => { - const runTurn = createRunTurnFromTextDeltas(["ACP_", "OK"]); - - mockAcpManager({ - runTurn: (params: unknown) => runTurn(params), - }); - - await agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime); + const { runTurn } = await runAcpTurnWithTextDeltas({ chunks: ["ACP_", "OK"] }); expect(runTurn).toHaveBeenCalledWith( expect.objectContaining({ @@ -234,64 +283,32 @@ describe("agentCommand ACP runtime routing", () => { it("persists ACP child session history to the transcript store", async () => { await withAcpSessionEnvInfo(async ({ storePath }) => { - const runTurn = createRunTurnFromTextDeltas(["ACP_", "OK"]); - - mockAcpManager({ - runTurn: (params: unknown) => runTurn(params), - }); - - await agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime); - - const persistedStore = JSON.parse(fs.readFileSync(storePath, "utf-8")) as 
Record< - string, - { sessionFile?: string } - >; - const sessionFile = persistedStore["agent:codex:acp:test"]?.sessionFile; - const messages = readSessionMessages("acp-session-1", storePath, sessionFile); - expect(messages).toHaveLength(2); - expect(messages[0]).toMatchObject({ - role: "user", - content: "ping", - }); - expect(messages[1]).toMatchObject({ - role: "assistant", - content: [{ type: "text", text: "ACP_OK" }], + await runAcpTurnWithTextDeltas({ chunks: ["ACP_", "OK"] }); + expectPersistedAcpTranscript({ + storePath, + userContent: "ping", + assistantText: "ACP_OK", }); }); }); it("preserves exact ACP transcript text without trimming whitespace", async () => { await withAcpSessionEnvInfo(async ({ storePath }) => { - const runTurn = createRunTurnFromTextDeltas([" ACP_OK\n"]); - - mockAcpManager({ - runTurn: (params: unknown) => runTurn(params), + await runAcpTurnWithTextDeltas({ + message: " ping\n", + chunks: [" ACP_OK\n"], }); - - await agentCommand({ message: " ping\n", sessionKey: "agent:codex:acp:test" }, runtime); - - const persistedStore = JSON.parse(fs.readFileSync(storePath, "utf-8")) as Record< - string, - { sessionFile?: string } - >; - const sessionFile = persistedStore["agent:codex:acp:test"]?.sessionFile; - const messages = readSessionMessages("acp-session-1", storePath, sessionFile); - expect(messages).toHaveLength(2); - expect(messages[0]).toMatchObject({ - role: "user", - content: " ping\n", - }); - expect(messages[1]).toMatchObject({ - role: "assistant", - content: [{ type: "text", text: " ACP_OK\n" }], + expectPersistedAcpTranscript({ + storePath, + userContent: " ping\n", + assistantText: " ACP_OK\n", }); }); }); it("suppresses ACP NO_REPLY lead fragments before emitting assistant text", async () => { await withAcpSessionEnv(async () => { - const { assistantEvents, stop } = subscribeAssistantEvents(); - const runTurn = createRunTurnFromTextDeltas([ + const { assistantEvents, logLines } = await runAcpTurnWithAssistantEvents([ "NO", 
"NO_", "NO_RE", @@ -299,19 +316,7 @@ describe("agentCommand ACP runtime routing", () => { "Actual answer", ]); - mockAcpManager({ - runTurn: (params: unknown) => runTurn(params), - }); - - try { - await agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime); - } finally { - stop(); - } - expect(assistantEvents).toEqual([{ text: "Actual answer", delta: "Actual answer" }]); - - const logLines = vi.mocked(runtime.log).mock.calls.map(([first]) => String(first)); expect(logLines.some((line) => line.includes("NO_REPLY"))).toBe(false); expect(logLines.some((line) => line.includes("Actual answer"))).toBe(true); }); @@ -319,31 +324,13 @@ describe("agentCommand ACP runtime routing", () => { it("keeps silent-only ACP turns out of assistant output", async () => { await withAcpSessionEnv(async () => { - const assistantEvents: string[] = []; - const stop = onAgentEvent((evt) => { - if (evt.stream !== "assistant") { - return; - } - if (typeof evt.data?.text === "string") { - assistantEvents.push(evt.data.text); - } - }); - - const runTurn = createRunTurnFromTextDeltas(["NO", "NO_", "NO_RE", "NO_REPLY"]); - - mockAcpManager({ - runTurn: (params: unknown) => runTurn(params), - }); - - try { - await agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime); - } finally { - stop(); - } - - expect(assistantEvents).toEqual([]); - - const logLines = vi.mocked(runtime.log).mock.calls.map(([first]) => String(first)); + const { assistantEvents, logLines } = await runAcpTurnWithAssistantEvents([ + "NO", + "NO_", + "NO_RE", + "NO_REPLY", + ]); + expect(assistantEvents.map((event) => event.text).filter(Boolean)).toEqual([]); expect(logLines.some((line) => line.includes("NO_REPLY"))).toBe(false); expect(logLines.some((line) => line.includes("No reply from agent."))).toBe(true); }); @@ -351,18 +338,12 @@ describe("agentCommand ACP runtime routing", () => { it("preserves repeated identical ACP delta chunks", async () => { await withAcpSessionEnv(async 
() => { - const { assistantEvents, stop } = subscribeAssistantEvents(); - const runTurn = createRunTurnFromTextDeltas(["b", "o", "o", "k"]); - - mockAcpManager({ - runTurn: (params: unknown) => runTurn(params), - }); - - try { - await agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime); - } finally { - stop(); - } + const { assistantEvents, logLines } = await runAcpTurnWithAssistantEvents([ + "b", + "o", + "o", + "k", + ]); expect(assistantEvents).toEqual([ { text: "b", delta: "b" }, @@ -370,30 +351,15 @@ describe("agentCommand ACP runtime routing", () => { { text: "boo", delta: "o" }, { text: "book", delta: "k" }, ]); - - const logLines = vi.mocked(runtime.log).mock.calls.map(([first]) => String(first)); expect(logLines.some((line) => line.includes("book"))).toBe(true); }); }); it("re-emits buffered NO prefix when ACP text becomes visible content", async () => { await withAcpSessionEnv(async () => { - const { assistantEvents, stop } = subscribeAssistantEvents(); - const runTurn = createRunTurnFromTextDeltas(["NO", "W"]); - - mockAcpManager({ - runTurn: (params: unknown) => runTurn(params), - }); - - try { - await agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime); - } finally { - stop(); - } + const { assistantEvents, logLines } = await runAcpTurnWithAssistantEvents(["NO", "W"]); expect(assistantEvents).toEqual([{ text: "NOW", delta: "NOW" }]); - - const logLines = vi.mocked(runtime.log).mock.calls.map(([first]) => String(first)); expect(logLines.some((line) => line.includes("NOW"))).toBe(true); }); }); diff --git a/src/commands/agent.ts b/src/commands/agent.ts index 24e62cc8998..ab690b37666 100644 --- a/src/commands/agent.ts +++ b/src/commands/agent.ts @@ -950,6 +950,7 @@ async function agentCommandInternal( catalog: modelCatalog, defaultProvider, defaultModel, + agentId: sessionAgentId, }); allowedModelKeys = allowed.allowedKeys; allowedModelCatalog = allowed.allowedCatalog; @@ -1103,6 +1104,7 @@ async 
function agentCommandInternal( cfg, provider, model, + runId, agentDir, fallbacksOverride: effectiveFallbacksOverride, run: (providerOverride, modelOverride, runOptions) => { diff --git a/src/commands/agent/types.ts b/src/commands/agent/types.ts index 18931aad4bf..66d0209bdfb 100644 --- a/src/commands/agent/types.ts +++ b/src/commands/agent/types.ts @@ -15,6 +15,8 @@ export type AgentStreamParams = { /** Provider stream params override (best-effort). */ temperature?: number; maxTokens?: number; + /** Provider fast-mode override (best-effort). */ + fastMode?: boolean; }; export type AgentRunContext = { diff --git a/src/commands/agents.commands.add.ts b/src/commands/agents.commands.add.ts index 61c45392f59..3d34ada1c5c 100644 --- a/src/commands/agents.commands.add.ts +++ b/src/commands/agents.commands.add.ts @@ -266,6 +266,7 @@ export async function agentsAddCommand( prompter, store: authStore, includeSkip: true, + config: nextConfig, }); const authResult = await applyAuthChoice({ diff --git a/src/commands/auth-choice-legacy.ts b/src/commands/auth-choice-legacy.ts index e93e920503f..d14ab4c6322 100644 --- a/src/commands/auth-choice-legacy.ts +++ b/src/commands/auth-choice-legacy.ts @@ -5,8 +5,6 @@ export const AUTH_CHOICE_LEGACY_ALIASES_FOR_CLI: ReadonlyArray = [ "oauth", "claude-cli", "codex-cli", - "minimax-cloud", - "minimax", ]; export function normalizeLegacyOnboardAuthChoice( diff --git a/src/commands/auth-choice-options.test.ts b/src/commands/auth-choice-options.test.ts index c0c719a70ee..74b729d5db8 100644 --- a/src/commands/auth-choice-options.test.ts +++ b/src/commands/auth-choice-options.test.ts @@ -1,11 +1,19 @@ -import { describe, expect, it } from "vitest"; +import { describe, expect, it, vi } from "vitest"; import type { AuthProfileStore } from "../agents/auth-profiles.js"; +import type { ProviderWizardOption } from "../plugins/provider-wizard.js"; import { buildAuthChoiceGroups, buildAuthChoiceOptions, formatAuthChoiceChoicesForCli, } from 
"./auth-choice-options.js"; +const resolveProviderWizardOptions = vi.hoisted(() => + vi.fn<() => ProviderWizardOption[]>(() => []), +); +vi.mock("../plugins/provider-wizard.js", () => ({ + resolveProviderWizardOptions, +})); + const EMPTY_STORE: AuthProfileStore = { version: 1, profiles: {} }; function getOptions(includeSkip = false) { @@ -17,6 +25,29 @@ function getOptions(includeSkip = false) { describe("buildAuthChoiceOptions", () => { it("includes core and provider-specific auth choices", () => { + resolveProviderWizardOptions.mockReturnValue([ + { + value: "ollama", + label: "Ollama", + hint: "Cloud and local open models", + groupId: "ollama", + groupLabel: "Ollama", + }, + { + value: "vllm", + label: "vLLM", + hint: "Local/self-hosted OpenAI-compatible server", + groupId: "vllm", + groupLabel: "vLLM", + }, + { + value: "sglang", + label: "SGLang", + hint: "Fast self-hosted OpenAI-compatible server", + groupId: "sglang", + groupLabel: "SGLang", + }, + ]); const options = getOptions(); for (const value of [ @@ -24,9 +55,9 @@ describe("buildAuthChoiceOptions", () => { "token", "zai-api-key", "xiaomi-api-key", - "minimax-api", - "minimax-api-key-cn", - "minimax-api-lightning", + "minimax-global-api", + "minimax-cn-api", + "minimax-global-oauth", "moonshot-api-key", "moonshot-api-key-cn", "kimi-code-api-key", @@ -41,6 +72,9 @@ describe("buildAuthChoiceOptions", () => { "volcengine-api-key", "byteplus-api-key", "vllm", + "opencode-go", + "ollama", + "sglang", ]) { expect(options.some((opt) => opt.value === value)).toBe(true); } @@ -80,4 +114,36 @@ describe("buildAuthChoiceOptions", () => { expect(chutesGroup).toBeDefined(); expect(chutesGroup?.options.some((opt) => opt.value === "chutes")).toBe(true); }); + + it("groups OpenCode Zen and Go under one OpenCode entry", () => { + const { groups } = buildAuthChoiceGroups({ + store: EMPTY_STORE, + includeSkip: false, + }); + const openCodeGroup = groups.find((group) => group.value === "opencode"); + + 
expect(openCodeGroup).toBeDefined(); + expect(openCodeGroup?.options.some((opt) => opt.value === "opencode-zen")).toBe(true); + expect(openCodeGroup?.options.some((opt) => opt.value === "opencode-go")).toBe(true); + }); + + it("shows Ollama in grouped provider selection", () => { + resolveProviderWizardOptions.mockReturnValue([ + { + value: "ollama", + label: "Ollama", + hint: "Cloud and local open models", + groupId: "ollama", + groupLabel: "Ollama", + }, + ]); + const { groups } = buildAuthChoiceGroups({ + store: EMPTY_STORE, + includeSkip: false, + }); + const ollamaGroup = groups.find((group) => group.value === "ollama"); + + expect(ollamaGroup).toBeDefined(); + expect(ollamaGroup?.options.some((opt) => opt.value === "ollama")).toBe(true); + }); }); diff --git a/src/commands/auth-choice-options.ts b/src/commands/auth-choice-options.ts index 27fee5dc01f..95bb74d1c14 100644 --- a/src/commands/auth-choice-options.ts +++ b/src/commands/auth-choice-options.ts @@ -1,4 +1,6 @@ import type { AuthProfileStore } from "../agents/auth-profiles.js"; +import type { OpenClawConfig } from "../config/config.js"; +import { resolveProviderWizardOptions } from "../plugins/provider-wizard.js"; import { AUTH_CHOICE_LEGACY_ALIASES_FOR_CLI } from "./auth-choice-legacy.js"; import { ONBOARD_PROVIDER_AUTH_FLAGS } from "./onboard-provider-auth-flags.js"; import type { AuthChoice, AuthChoiceGroupId } from "./onboard-types.js"; @@ -41,17 +43,11 @@ const AUTH_CHOICE_GROUP_DEFS: { hint: "OAuth", choices: ["chutes"], }, - { - value: "vllm", - label: "vLLM", - hint: "Local/self-hosted OpenAI-compatible", - choices: ["vllm"], - }, { value: "minimax", label: "MiniMax", hint: "M2.5 (recommended)", - choices: ["minimax-portal", "minimax-api", "minimax-api-key-cn", "minimax-api-lightning"], + choices: ["minimax-global-oauth", "minimax-global-api", "minimax-cn-oauth", "minimax-cn-api"], }, { value: "moonshot", @@ -119,6 +115,12 @@ const AUTH_CHOICE_GROUP_DEFS: { hint: "API key", choices: 
["qianfan-api-key"], }, + { + value: "modelstudio", + label: "Alibaba Cloud Model Studio", + hint: "Coding Plan API key (CN / Global)", + choices: ["modelstudio-api-key-cn", "modelstudio-api-key"], + }, { value: "copilot", label: "Copilot", @@ -132,10 +134,10 @@ const AUTH_CHOICE_GROUP_DEFS: { choices: ["ai-gateway-api-key"], }, { - value: "opencode-zen", - label: "OpenCode Zen", - hint: "API key", - choices: ["opencode-zen"], + value: "opencode", + label: "OpenCode", + hint: "Shared API key for Zen + Go catalogs", + choices: ["opencode-zen", "opencode-go"], }, { value: "xiaomi", @@ -193,6 +195,8 @@ const PROVIDER_AUTH_CHOICE_OPTION_HINTS: Partial> = { "venice-api-key": "Privacy-focused inference (uncensored models)", "together-api-key": "Access to Llama, DeepSeek, Qwen, and more open models", "huggingface-api-key": "Inference Providers — OpenAI-compatible chat", + "opencode-zen": "Shared OpenCode key; curated Zen catalog", + "opencode-go": "Shared OpenCode key; Kimi/GLM/MiniMax Go catalog", }; const PROVIDER_AUTH_CHOICE_OPTION_LABELS: Partial> = { @@ -200,6 +204,8 @@ const PROVIDER_AUTH_CHOICE_OPTION_LABELS: Partial> = "moonshot-api-key-cn": "Kimi API key (.cn)", "kimi-code-api-key": "Kimi Code API key (subscription)", "cloudflare-ai-gateway-api-key": "Cloudflare AI Gateway", + "opencode-zen": "OpenCode Zen catalog", + "opencode-go": "OpenCode Go catalog", }; function buildProviderAuthChoiceOptions(): AuthChoiceOption[] { @@ -223,11 +229,6 @@ const BASE_AUTH_CHOICE_OPTIONS: ReadonlyArray = [ label: "OpenAI Codex (ChatGPT OAuth)", }, { value: "chutes", label: "Chutes (OAuth)" }, - { - value: "vllm", - label: "vLLM (custom URL + model)", - hint: "Local/self-hosted OpenAI-compatible server", - }, ...buildProviderAuthChoiceOptions(), { value: "moonshot-api-key-cn", @@ -270,9 +271,24 @@ const BASE_AUTH_CHOICE_OPTIONS: ReadonlyArray = [ label: "Xiaomi API key", }, { - value: "minimax-portal", - label: "MiniMax OAuth", - hint: "Oauth plugin for MiniMax", + value: 
"minimax-global-oauth", + label: "MiniMax Global — OAuth (minimax.io)", + hint: "Only supports OAuth for the coding plan", + }, + { + value: "minimax-global-api", + label: "MiniMax Global — API Key (minimax.io)", + hint: "sk-api- or sk-cp- keys supported", + }, + { + value: "minimax-cn-oauth", + label: "MiniMax CN — OAuth (minimaxi.com)", + hint: "Only supports OAuth for the coding plan", + }, + { + value: "minimax-cn-api", + label: "MiniMax CN — API Key (minimaxi.com)", + hint: "sk-api- or sk-cp- keys supported", }, { value: "qwen-portal", label: "Qwen OAuth" }, { @@ -283,30 +299,44 @@ const BASE_AUTH_CHOICE_OPTIONS: ReadonlyArray = [ { value: "apiKey", label: "Anthropic API key" }, { value: "opencode-zen", - label: "OpenCode Zen (multi-model proxy)", + label: "OpenCode Zen catalog", hint: "Claude, GPT, Gemini via opencode.ai/zen", }, - { value: "minimax-api", label: "MiniMax M2.5" }, + { value: "qianfan-api-key", label: "Qianfan API key" }, { - value: "minimax-api-key-cn", - label: "MiniMax M2.5 (CN)", - hint: "China endpoint (api.minimaxi.com)", + value: "modelstudio-api-key-cn", + label: "Coding Plan API Key for China (subscription)", + hint: "Endpoint: coding.dashscope.aliyuncs.com", }, { - value: "minimax-api-lightning", - label: "MiniMax M2.5 Highspeed", - hint: "Official fast tier", + value: "modelstudio-api-key", + label: "Coding Plan API Key for Global/Intl (subscription)", + hint: "Endpoint: coding-intl.dashscope.aliyuncs.com", }, { value: "custom-api-key", label: "Custom Provider" }, ]; +function resolveDynamicProviderCliChoices(params?: { + config?: OpenClawConfig; + workspaceDir?: string; + env?: NodeJS.ProcessEnv; +}): string[] { + return [...new Set(resolveProviderWizardOptions(params ?? 
{}).map((option) => option.value))]; +} + export function formatAuthChoiceChoicesForCli(params?: { includeSkip?: boolean; includeLegacyAliases?: boolean; + config?: OpenClawConfig; + workspaceDir?: string; + env?: NodeJS.ProcessEnv; }): string { const includeSkip = params?.includeSkip ?? true; const includeLegacyAliases = params?.includeLegacyAliases ?? false; - const values = BASE_AUTH_CHOICE_OPTIONS.map((opt) => opt.value); + const values = [ + ...BASE_AUTH_CHOICE_OPTIONS.map((opt) => opt.value), + ...resolveDynamicProviderCliChoices(params), + ]; if (includeSkip) { values.push("skip"); @@ -321,9 +351,29 @@ export function formatAuthChoiceChoicesForCli(params?: { export function buildAuthChoiceOptions(params: { store: AuthProfileStore; includeSkip: boolean; + config?: OpenClawConfig; + workspaceDir?: string; + env?: NodeJS.ProcessEnv; }): AuthChoiceOption[] { void params.store; const options: AuthChoiceOption[] = [...BASE_AUTH_CHOICE_OPTIONS]; + const seen = new Set(options.map((option) => option.value)); + + for (const option of resolveProviderWizardOptions({ + config: params.config, + workspaceDir: params.workspaceDir, + env: params.env, + })) { + if (seen.has(option.value as AuthChoice)) { + continue; + } + options.push({ + value: option.value as AuthChoice, + label: option.label, + hint: option.hint, + }); + seen.add(option.value as AuthChoice); + } if (params.includeSkip) { options.push({ value: "skip", label: "Skip for now" }); @@ -332,7 +382,13 @@ export function buildAuthChoiceOptions(params: { return options; } -export function buildAuthChoiceGroups(params: { store: AuthProfileStore; includeSkip: boolean }): { +export function buildAuthChoiceGroups(params: { + store: AuthProfileStore; + includeSkip: boolean; + config?: OpenClawConfig; + workspaceDir?: string; + env?: NodeJS.ProcessEnv; +}): { groups: AuthChoiceGroup[]; skipOption?: AuthChoiceOption; } { @@ -344,12 +400,42 @@ export function buildAuthChoiceGroups(params: { store: AuthProfileStore; include 
options.map((opt) => [opt.value, opt]), ); - const groups = AUTH_CHOICE_GROUP_DEFS.map((group) => ({ + const groups: AuthChoiceGroup[] = AUTH_CHOICE_GROUP_DEFS.map((group) => ({ ...group, options: group.choices .map((choice) => optionByValue.get(choice)) .filter((opt): opt is AuthChoiceOption => Boolean(opt)), })); + const staticGroupIds = new Set(groups.map((group) => group.value)); + + for (const option of resolveProviderWizardOptions({ + config: params.config, + workspaceDir: params.workspaceDir, + env: params.env, + })) { + const existing = groups.find((group) => group.value === option.groupId); + const nextOption = optionByValue.get(option.value as AuthChoice) ?? { + value: option.value as AuthChoice, + label: option.label, + hint: option.hint, + }; + if (existing) { + if (!existing.options.some((candidate) => candidate.value === nextOption.value)) { + existing.options.push(nextOption); + } + continue; + } + if (staticGroupIds.has(option.groupId as AuthChoiceGroupId)) { + continue; + } + groups.push({ + value: option.groupId as AuthChoiceGroupId, + label: option.groupLabel, + hint: option.groupHint, + options: [nextOption], + }); + staticGroupIds.add(option.groupId as AuthChoiceGroupId); + } const skipOption = params.includeSkip ? 
({ value: "skip", label: "Skip for now" } satisfies AuthChoiceOption) diff --git a/src/commands/auth-choice-prompt.ts b/src/commands/auth-choice-prompt.ts index 35012b61a55..83c2e44eb96 100644 --- a/src/commands/auth-choice-prompt.ts +++ b/src/commands/auth-choice-prompt.ts @@ -1,4 +1,5 @@ import type { AuthProfileStore } from "../agents/auth-profiles.js"; +import type { OpenClawConfig } from "../config/config.js"; import type { WizardPrompter } from "../wizard/prompts.js"; import { buildAuthChoiceGroups } from "./auth-choice-options.js"; import type { AuthChoice } from "./onboard-types.js"; @@ -9,6 +10,9 @@ export async function promptAuthChoiceGrouped(params: { prompter: WizardPrompter; store: AuthProfileStore; includeSkip: boolean; + config?: OpenClawConfig; + workspaceDir?: string; + env?: NodeJS.ProcessEnv; }): Promise { const { groups, skipOption } = buildAuthChoiceGroups(params); const availableGroups = groups.filter((group) => group.options.length > 0); @@ -55,6 +59,6 @@ export async function promptAuthChoiceGrouped(params: { continue; } - return methodSelection as AuthChoice; + return methodSelection; } } diff --git a/src/commands/auth-choice.apply-helpers.ts b/src/commands/auth-choice.apply-helpers.ts index 122be392153..32c6ac82786 100644 --- a/src/commands/auth-choice.apply-helpers.ts +++ b/src/commands/auth-choice.apply-helpers.ts @@ -8,6 +8,8 @@ import { import { encodeJsonPointerToken } from "../secrets/json-pointer.js"; import { PROVIDER_ENV_VARS } from "../secrets/provider-env-vars.js"; import { + formatExecSecretRefIdValidationMessage, + isValidExecSecretRefId, isValidFileSecretRefId, resolveDefaultSecretProviderAlias, } from "../secrets/ref-contract.js"; @@ -238,6 +240,9 @@ export async function promptSecretRefForOnboarding(params: { ) { return 'singleValue mode expects id "value".'; } + if (providerEntry.source === "exec" && !isValidExecSecretRefId(candidate)) { + return formatExecSecretRefIdValidationMessage(); + } return undefined; }, }); diff 
--git a/src/commands/auth-choice.apply.api-key-providers.ts b/src/commands/auth-choice.apply.api-key-providers.ts new file mode 100644 index 00000000000..ac3690bf3cd --- /dev/null +++ b/src/commands/auth-choice.apply.api-key-providers.ts @@ -0,0 +1,538 @@ +import { ensureAuthProfileStore, resolveAuthProfileOrder } from "../agents/auth-profiles.js"; +import type { SecretInput } from "../config/types.secrets.js"; +import { normalizeApiKeyInput, validateApiKeyInput } from "./auth-choice.api-key.js"; +import { ensureApiKeyFromOptionEnvOrPrompt } from "./auth-choice.apply-helpers.js"; +import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.js"; +import type { ApiKeyStorageOptions } from "./onboard-auth.credentials.js"; +import { + applyAuthProfileConfig, + applyKilocodeConfig, + applyKilocodeProviderConfig, + applyKimiCodeConfig, + applyKimiCodeProviderConfig, + applyLitellmConfig, + applyLitellmProviderConfig, + applyMistralConfig, + applyMistralProviderConfig, + applyModelStudioConfig, + applyModelStudioConfigCn, + applyModelStudioProviderConfig, + applyModelStudioProviderConfigCn, + applyMoonshotConfig, + applyMoonshotConfigCn, + applyMoonshotProviderConfig, + applyMoonshotProviderConfigCn, + applyOpencodeGoConfig, + applyOpencodeGoProviderConfig, + applyOpencodeZenConfig, + applyOpencodeZenProviderConfig, + applyQianfanConfig, + applyQianfanProviderConfig, + applySyntheticConfig, + applySyntheticProviderConfig, + applyTogetherConfig, + applyTogetherProviderConfig, + applyVeniceConfig, + applyVeniceProviderConfig, + applyVercelAiGatewayConfig, + applyVercelAiGatewayProviderConfig, + applyXiaomiConfig, + applyXiaomiProviderConfig, + KILOCODE_DEFAULT_MODEL_REF, + KIMI_CODING_MODEL_REF, + LITELLM_DEFAULT_MODEL_REF, + MISTRAL_DEFAULT_MODEL_REF, + MODELSTUDIO_DEFAULT_MODEL_REF, + MOONSHOT_DEFAULT_MODEL_REF, + QIANFAN_DEFAULT_MODEL_REF, + setKilocodeApiKey, + setKimiCodingApiKey, + setLitellmApiKey, + setMistralApiKey, + 
setModelStudioApiKey, + setMoonshotApiKey, + setOpencodeGoApiKey, + setOpencodeZenApiKey, + setQianfanApiKey, + setSyntheticApiKey, + setTogetherApiKey, + setVeniceApiKey, + setVercelAiGatewayApiKey, + setXiaomiApiKey, + SYNTHETIC_DEFAULT_MODEL_REF, + TOGETHER_DEFAULT_MODEL_REF, + VENICE_DEFAULT_MODEL_REF, + VERCEL_AI_GATEWAY_DEFAULT_MODEL_REF, + XIAOMI_DEFAULT_MODEL_REF, +} from "./onboard-auth.js"; +import type { AuthChoice, SecretInputMode } from "./onboard-types.js"; +import { OPENCODE_GO_DEFAULT_MODEL_REF } from "./opencode-go-model-default.js"; +import { OPENCODE_ZEN_DEFAULT_MODEL } from "./opencode-zen-model-default.js"; + +type ApiKeyProviderConfigApplier = ( + config: ApplyAuthChoiceParams["config"], +) => ApplyAuthChoiceParams["config"]; + +type ApplyProviderDefaultModel = (args: { + defaultModel: string; + applyDefaultConfig: ApiKeyProviderConfigApplier; + applyProviderConfig: ApiKeyProviderConfigApplier; + noteDefault?: string; +}) => Promise; + +type ApplyApiKeyProviderParams = { + params: ApplyAuthChoiceParams; + authChoice: AuthChoice; + config: ApplyAuthChoiceParams["config"]; + setConfig: (config: ApplyAuthChoiceParams["config"]) => void; + getConfig: () => ApplyAuthChoiceParams["config"]; + normalizedTokenProvider?: string; + requestedSecretInputMode?: SecretInputMode; + applyProviderDefaultModel: ApplyProviderDefaultModel; + getAgentModelOverride: () => string | undefined; +}; + +type SimpleApiKeyProviderFlow = { + provider: Parameters[0]["provider"]; + profileId: string; + expectedProviders: string[]; + envLabel: string; + promptMessage: string; + setCredential: ( + apiKey: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, + ) => void | Promise; + defaultModel: string; + applyDefaultConfig: ApiKeyProviderConfigApplier; + applyProviderConfig: ApiKeyProviderConfigApplier; + tokenProvider?: string; + normalize?: (value: string) => string; + validate?: (value: string) => string | undefined; + noteDefault?: string; + noteMessage?: 
string; + noteTitle?: string; +}; + +const SIMPLE_API_KEY_PROVIDER_FLOWS: Partial> = { + "ai-gateway-api-key": { + provider: "vercel-ai-gateway", + profileId: "vercel-ai-gateway:default", + expectedProviders: ["vercel-ai-gateway"], + envLabel: "AI_GATEWAY_API_KEY", + promptMessage: "Enter Vercel AI Gateway API key", + setCredential: setVercelAiGatewayApiKey, + defaultModel: VERCEL_AI_GATEWAY_DEFAULT_MODEL_REF, + applyDefaultConfig: applyVercelAiGatewayConfig, + applyProviderConfig: applyVercelAiGatewayProviderConfig, + noteDefault: VERCEL_AI_GATEWAY_DEFAULT_MODEL_REF, + }, + "moonshot-api-key": { + provider: "moonshot", + profileId: "moonshot:default", + expectedProviders: ["moonshot"], + envLabel: "MOONSHOT_API_KEY", + promptMessage: "Enter Moonshot API key", + setCredential: setMoonshotApiKey, + defaultModel: MOONSHOT_DEFAULT_MODEL_REF, + applyDefaultConfig: applyMoonshotConfig, + applyProviderConfig: applyMoonshotProviderConfig, + }, + "moonshot-api-key-cn": { + provider: "moonshot", + profileId: "moonshot:default", + expectedProviders: ["moonshot"], + envLabel: "MOONSHOT_API_KEY", + promptMessage: "Enter Moonshot API key (.cn)", + setCredential: setMoonshotApiKey, + defaultModel: MOONSHOT_DEFAULT_MODEL_REF, + applyDefaultConfig: applyMoonshotConfigCn, + applyProviderConfig: applyMoonshotProviderConfigCn, + }, + "kimi-code-api-key": { + provider: "kimi-coding", + profileId: "kimi-coding:default", + expectedProviders: ["kimi-code", "kimi-coding"], + envLabel: "KIMI_API_KEY", + promptMessage: "Enter Kimi Coding API key", + setCredential: setKimiCodingApiKey, + defaultModel: KIMI_CODING_MODEL_REF, + applyDefaultConfig: applyKimiCodeConfig, + applyProviderConfig: applyKimiCodeProviderConfig, + noteDefault: KIMI_CODING_MODEL_REF, + noteMessage: [ + "Kimi Coding uses a dedicated endpoint and API key.", + "Get your API key at: https://www.kimi.com/code/en", + ].join("\n"), + noteTitle: "Kimi Coding", + }, + "xiaomi-api-key": { + provider: "xiaomi", + profileId: 
"xiaomi:default", + expectedProviders: ["xiaomi"], + envLabel: "XIAOMI_API_KEY", + promptMessage: "Enter Xiaomi API key", + setCredential: setXiaomiApiKey, + defaultModel: XIAOMI_DEFAULT_MODEL_REF, + applyDefaultConfig: applyXiaomiConfig, + applyProviderConfig: applyXiaomiProviderConfig, + noteDefault: XIAOMI_DEFAULT_MODEL_REF, + }, + "mistral-api-key": { + provider: "mistral", + profileId: "mistral:default", + expectedProviders: ["mistral"], + envLabel: "MISTRAL_API_KEY", + promptMessage: "Enter Mistral API key", + setCredential: setMistralApiKey, + defaultModel: MISTRAL_DEFAULT_MODEL_REF, + applyDefaultConfig: applyMistralConfig, + applyProviderConfig: applyMistralProviderConfig, + noteDefault: MISTRAL_DEFAULT_MODEL_REF, + }, + "venice-api-key": { + provider: "venice", + profileId: "venice:default", + expectedProviders: ["venice"], + envLabel: "VENICE_API_KEY", + promptMessage: "Enter Venice AI API key", + setCredential: setVeniceApiKey, + defaultModel: VENICE_DEFAULT_MODEL_REF, + applyDefaultConfig: applyVeniceConfig, + applyProviderConfig: applyVeniceProviderConfig, + noteDefault: VENICE_DEFAULT_MODEL_REF, + noteMessage: [ + "Venice AI provides privacy-focused inference with uncensored models.", + "Get your API key at: https://venice.ai/settings/api", + "Supports 'private' (fully private) and 'anonymized' (proxy) modes.", + ].join("\n"), + noteTitle: "Venice AI", + }, + "opencode-zen": { + provider: "opencode", + profileId: "opencode:default", + expectedProviders: ["opencode", "opencode-go"], + envLabel: "OPENCODE_API_KEY", + promptMessage: "Enter OpenCode API key", + setCredential: setOpencodeZenApiKey, + defaultModel: OPENCODE_ZEN_DEFAULT_MODEL, + applyDefaultConfig: applyOpencodeZenConfig, + applyProviderConfig: applyOpencodeZenProviderConfig, + noteDefault: OPENCODE_ZEN_DEFAULT_MODEL, + noteMessage: [ + "OpenCode uses one API key across the Zen and Go catalogs.", + "Zen provides access to Claude, GPT, Gemini, and more models.", + "Get your API key at: 
https://opencode.ai/auth", + "Choose the Zen catalog when you want the curated multi-model proxy.", + ].join("\n"), + noteTitle: "OpenCode", + }, + "opencode-go": { + provider: "opencode-go", + profileId: "opencode-go:default", + expectedProviders: ["opencode", "opencode-go"], + envLabel: "OPENCODE_API_KEY", + promptMessage: "Enter OpenCode API key", + setCredential: setOpencodeGoApiKey, + defaultModel: OPENCODE_GO_DEFAULT_MODEL_REF, + applyDefaultConfig: applyOpencodeGoConfig, + applyProviderConfig: applyOpencodeGoProviderConfig, + noteDefault: OPENCODE_GO_DEFAULT_MODEL_REF, + noteMessage: [ + "OpenCode uses one API key across the Zen and Go catalogs.", + "Go provides access to Kimi, GLM, and MiniMax models through the Go catalog.", + "Get your API key at: https://opencode.ai/auth", + "Choose the Go catalog when you want the OpenCode-hosted Kimi/GLM/MiniMax lineup.", + ].join("\n"), + noteTitle: "OpenCode", + }, + "together-api-key": { + provider: "together", + profileId: "together:default", + expectedProviders: ["together"], + envLabel: "TOGETHER_API_KEY", + promptMessage: "Enter Together AI API key", + setCredential: setTogetherApiKey, + defaultModel: TOGETHER_DEFAULT_MODEL_REF, + applyDefaultConfig: applyTogetherConfig, + applyProviderConfig: applyTogetherProviderConfig, + noteDefault: TOGETHER_DEFAULT_MODEL_REF, + noteMessage: [ + "Together AI provides access to leading open-source models including Llama, DeepSeek, Qwen, and more.", + "Get your API key at: https://api.together.xyz/settings/api-keys", + ].join("\n"), + noteTitle: "Together AI", + }, + "qianfan-api-key": { + provider: "qianfan", + profileId: "qianfan:default", + expectedProviders: ["qianfan"], + envLabel: "QIANFAN_API_KEY", + promptMessage: "Enter QIANFAN API key", + setCredential: setQianfanApiKey, + defaultModel: QIANFAN_DEFAULT_MODEL_REF, + applyDefaultConfig: applyQianfanConfig, + applyProviderConfig: applyQianfanProviderConfig, + noteDefault: QIANFAN_DEFAULT_MODEL_REF, + noteMessage: [ + 
"Get your API key at: https://console.bce.baidu.com/qianfan/ais/console/apiKey", + "API key format: bce-v3/ALTAK-...", + ].join("\n"), + noteTitle: "QIANFAN", + }, + "kilocode-api-key": { + provider: "kilocode", + profileId: "kilocode:default", + expectedProviders: ["kilocode"], + envLabel: "KILOCODE_API_KEY", + promptMessage: "Enter Kilo Gateway API key", + setCredential: setKilocodeApiKey, + defaultModel: KILOCODE_DEFAULT_MODEL_REF, + applyDefaultConfig: applyKilocodeConfig, + applyProviderConfig: applyKilocodeProviderConfig, + noteDefault: KILOCODE_DEFAULT_MODEL_REF, + }, + "modelstudio-api-key-cn": { + provider: "modelstudio", + profileId: "modelstudio:default", + expectedProviders: ["modelstudio"], + envLabel: "MODELSTUDIO_API_KEY", + promptMessage: "Enter Alibaba Cloud Model Studio Coding Plan API key (China)", + setCredential: setModelStudioApiKey, + defaultModel: MODELSTUDIO_DEFAULT_MODEL_REF, + applyDefaultConfig: applyModelStudioConfigCn, + applyProviderConfig: applyModelStudioProviderConfigCn, + noteDefault: MODELSTUDIO_DEFAULT_MODEL_REF, + noteMessage: [ + "Get your API key at: https://bailian.console.aliyun.com/", + "Endpoint: coding.dashscope.aliyuncs.com", + "Models: qwen3.5-plus, glm-4.7, kimi-k2.5, MiniMax-M2.5, etc.", + ].join("\n"), + noteTitle: "Alibaba Cloud Model Studio Coding Plan (China)", + normalize: (value) => String(value ?? "").trim(), + validate: (value) => (String(value ?? "").trim() ? 
undefined : "Required"), + }, + "modelstudio-api-key": { + provider: "modelstudio", + profileId: "modelstudio:default", + expectedProviders: ["modelstudio"], + envLabel: "MODELSTUDIO_API_KEY", + promptMessage: "Enter Alibaba Cloud Model Studio Coding Plan API key (Global/Intl)", + setCredential: setModelStudioApiKey, + defaultModel: MODELSTUDIO_DEFAULT_MODEL_REF, + applyDefaultConfig: applyModelStudioConfig, + applyProviderConfig: applyModelStudioProviderConfig, + noteDefault: MODELSTUDIO_DEFAULT_MODEL_REF, + noteMessage: [ + "Get your API key at: https://bailian.console.aliyun.com/", + "Endpoint: coding-intl.dashscope.aliyuncs.com", + "Models: qwen3.5-plus, glm-4.7, kimi-k2.5, MiniMax-M2.5, etc.", + ].join("\n"), + noteTitle: "Alibaba Cloud Model Studio Coding Plan (Global/Intl)", + normalize: (value) => String(value ?? "").trim(), + validate: (value) => (String(value ?? "").trim() ? undefined : "Required"), + }, + "synthetic-api-key": { + provider: "synthetic", + profileId: "synthetic:default", + expectedProviders: ["synthetic"], + envLabel: "SYNTHETIC_API_KEY", + promptMessage: "Enter Synthetic API key", + setCredential: setSyntheticApiKey, + defaultModel: SYNTHETIC_DEFAULT_MODEL_REF, + applyDefaultConfig: applySyntheticConfig, + applyProviderConfig: applySyntheticProviderConfig, + normalize: (value) => String(value ?? "").trim(), + validate: (value) => (String(value ?? "").trim() ? 
undefined : "Required"), + }, +}; + +async function applyApiKeyProviderWithDefaultModel({ + params, + config, + setConfig, + getConfig, + normalizedTokenProvider, + requestedSecretInputMode, + applyProviderDefaultModel, + getAgentModelOverride, + provider, + profileId, + expectedProviders, + envLabel, + promptMessage, + setCredential, + defaultModel, + applyDefaultConfig, + applyProviderConfig, + noteMessage, + noteTitle, + tokenProvider = normalizedTokenProvider, + normalize = normalizeApiKeyInput, + validate = validateApiKeyInput, + noteDefault = defaultModel, +}: ApplyApiKeyProviderParams & { + provider: Parameters[0]["provider"]; + profileId: string; + expectedProviders: string[]; + envLabel: string; + promptMessage: string; + setCredential: (apiKey: SecretInput, mode?: SecretInputMode) => void | Promise; + defaultModel: string; + applyDefaultConfig: ApiKeyProviderConfigApplier; + applyProviderConfig: ApiKeyProviderConfigApplier; + noteMessage?: string; + noteTitle?: string; + tokenProvider?: string; + normalize?: (value: string) => string; + validate?: (value: string) => string | undefined; + noteDefault?: string; +}): Promise { + let nextConfig = config; + + await ensureApiKeyFromOptionEnvOrPrompt({ + token: params.opts?.token, + provider, + tokenProvider, + secretInputMode: requestedSecretInputMode, + config: nextConfig, + expectedProviders, + envLabel, + promptMessage, + setCredential: async (apiKey, mode) => { + await setCredential(apiKey, mode); + }, + noteMessage, + noteTitle, + normalize, + validate, + prompter: params.prompter, + }); + + nextConfig = applyAuthProfileConfig(nextConfig, { + profileId, + provider, + mode: "api_key", + }); + setConfig(nextConfig); + await applyProviderDefaultModel({ + defaultModel, + applyDefaultConfig, + applyProviderConfig, + noteDefault, + }); + + return { config: getConfig(), agentModelOverride: getAgentModelOverride() }; +} + +export async function applyLiteLlmApiKeyProvider({ + params, + authChoice, + config, + 
setConfig, + getConfig, + normalizedTokenProvider, + requestedSecretInputMode, + applyProviderDefaultModel, + getAgentModelOverride, +}: ApplyApiKeyProviderParams): Promise { + if (authChoice !== "litellm-api-key") { + return null; + } + + let nextConfig = config; + const store = ensureAuthProfileStore(params.agentDir, { allowKeychainPrompt: false }); + const profileOrder = resolveAuthProfileOrder({ cfg: nextConfig, store, provider: "litellm" }); + const existingProfileId = profileOrder.find((profileId) => Boolean(store.profiles[profileId])); + const existingCred = existingProfileId ? store.profiles[existingProfileId] : undefined; + let profileId = "litellm:default"; + let hasCredential = Boolean(existingProfileId && existingCred?.type === "api_key"); + if (hasCredential && existingProfileId) { + profileId = existingProfileId; + } + + if (!hasCredential) { + await ensureApiKeyFromOptionEnvOrPrompt({ + token: params.opts?.token, + tokenProvider: normalizedTokenProvider, + secretInputMode: requestedSecretInputMode, + config: nextConfig, + expectedProviders: ["litellm"], + provider: "litellm", + envLabel: "LITELLM_API_KEY", + promptMessage: "Enter LiteLLM API key", + normalize: normalizeApiKeyInput, + validate: validateApiKeyInput, + prompter: params.prompter, + setCredential: async (apiKey, mode) => + setLitellmApiKey(apiKey, params.agentDir, { secretInputMode: mode }), + noteMessage: + "LiteLLM provides a unified API to 100+ LLM providers.\nGet your API key from your LiteLLM proxy or https://litellm.ai\nDefault proxy runs on http://localhost:4000", + noteTitle: "LiteLLM", + }); + hasCredential = true; + } + + if (hasCredential) { + nextConfig = applyAuthProfileConfig(nextConfig, { + profileId, + provider: "litellm", + mode: "api_key", + }); + } + setConfig(nextConfig); + await applyProviderDefaultModel({ + defaultModel: LITELLM_DEFAULT_MODEL_REF, + applyDefaultConfig: applyLitellmConfig, + applyProviderConfig: applyLitellmProviderConfig, + noteDefault: 
LITELLM_DEFAULT_MODEL_REF, + }); + return { config: getConfig(), agentModelOverride: getAgentModelOverride() }; +} + +export async function applySimpleAuthChoiceApiProvider({ + params, + authChoice, + config, + setConfig, + getConfig, + normalizedTokenProvider, + requestedSecretInputMode, + applyProviderDefaultModel, + getAgentModelOverride, +}: ApplyApiKeyProviderParams): Promise { + const simpleApiKeyProviderFlow = SIMPLE_API_KEY_PROVIDER_FLOWS[authChoice]; + if (!simpleApiKeyProviderFlow) { + return null; + } + + return await applyApiKeyProviderWithDefaultModel({ + params, + authChoice, + config, + setConfig, + getConfig, + normalizedTokenProvider, + requestedSecretInputMode, + applyProviderDefaultModel, + getAgentModelOverride, + provider: simpleApiKeyProviderFlow.provider, + profileId: simpleApiKeyProviderFlow.profileId, + expectedProviders: simpleApiKeyProviderFlow.expectedProviders, + envLabel: simpleApiKeyProviderFlow.envLabel, + promptMessage: simpleApiKeyProviderFlow.promptMessage, + setCredential: async (apiKey, mode) => + simpleApiKeyProviderFlow.setCredential(apiKey, params.agentDir, { + secretInputMode: mode ?? 
requestedSecretInputMode, + }), + defaultModel: simpleApiKeyProviderFlow.defaultModel, + applyDefaultConfig: simpleApiKeyProviderFlow.applyDefaultConfig, + applyProviderConfig: simpleApiKeyProviderFlow.applyProviderConfig, + noteDefault: simpleApiKeyProviderFlow.noteDefault, + noteMessage: simpleApiKeyProviderFlow.noteMessage, + noteTitle: simpleApiKeyProviderFlow.noteTitle, + tokenProvider: simpleApiKeyProviderFlow.tokenProvider, + normalize: simpleApiKeyProviderFlow.normalize, + validate: simpleApiKeyProviderFlow.validate, + }); +} diff --git a/src/commands/auth-choice.apply.api-providers.ts b/src/commands/auth-choice.apply.api-providers.ts index 370951e9f0d..1ecb2cde3c0 100644 --- a/src/commands/auth-choice.apply.api-providers.ts +++ b/src/commands/auth-choice.apply.api-providers.ts @@ -1,5 +1,3 @@ -import { ensureAuthProfileStore, resolveAuthProfileOrder } from "../agents/auth-profiles.js"; -import type { SecretInput } from "../config/types.secrets.js"; import { normalizeApiKeyInput, validateApiKeyInput } from "./auth-choice.api-key.js"; import { normalizeSecretInputModeInput, @@ -8,6 +6,10 @@ import { ensureApiKeyFromOptionEnvOrPrompt, normalizeTokenProviderInput, } from "./auth-choice.apply-helpers.js"; +import { + applyLiteLlmApiKeyProvider, + applySimpleAuthChoiceApiProvider, +} from "./auth-choice.apply.api-key-providers.js"; import { applyAuthChoiceHuggingface } from "./auth-choice.apply.huggingface.js"; import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.js"; import { applyAuthChoiceOpenRouter } from "./auth-choice.apply.openrouter.js"; @@ -15,70 +17,19 @@ import { applyGoogleGeminiModelDefault, GOOGLE_GEMINI_DEFAULT_MODEL, } from "./google-gemini-model-default.js"; -import type { ApiKeyStorageOptions } from "./onboard-auth.credentials.js"; import { applyAuthProfileConfig, applyCloudflareAiGatewayConfig, applyCloudflareAiGatewayProviderConfig, - applyKilocodeConfig, - applyKilocodeProviderConfig, - applyQianfanConfig, - 
applyQianfanProviderConfig, - applyKimiCodeConfig, - applyKimiCodeProviderConfig, - applyLitellmConfig, - applyLitellmProviderConfig, - applyMistralConfig, - applyMistralProviderConfig, - applyMoonshotConfig, - applyMoonshotConfigCn, - applyMoonshotProviderConfig, - applyMoonshotProviderConfigCn, - applyOpencodeZenConfig, - applyOpencodeZenProviderConfig, - applySyntheticConfig, - applySyntheticProviderConfig, - applyTogetherConfig, - applyTogetherProviderConfig, - applyVeniceConfig, - applyVeniceProviderConfig, - applyVercelAiGatewayConfig, - applyVercelAiGatewayProviderConfig, - applyXiaomiConfig, - applyXiaomiProviderConfig, applyZaiConfig, applyZaiProviderConfig, CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF, - KILOCODE_DEFAULT_MODEL_REF, - LITELLM_DEFAULT_MODEL_REF, - QIANFAN_DEFAULT_MODEL_REF, - KIMI_CODING_MODEL_REF, - MOONSHOT_DEFAULT_MODEL_REF, - MISTRAL_DEFAULT_MODEL_REF, - SYNTHETIC_DEFAULT_MODEL_REF, - TOGETHER_DEFAULT_MODEL_REF, - VENICE_DEFAULT_MODEL_REF, - VERCEL_AI_GATEWAY_DEFAULT_MODEL_REF, - XIAOMI_DEFAULT_MODEL_REF, setCloudflareAiGatewayConfig, - setQianfanApiKey, setGeminiApiKey, - setKilocodeApiKey, - setLitellmApiKey, - setKimiCodingApiKey, - setMistralApiKey, - setMoonshotApiKey, - setOpencodeZenApiKey, - setSyntheticApiKey, - setTogetherApiKey, - setVeniceApiKey, - setVercelAiGatewayApiKey, - setXiaomiApiKey, setZaiApiKey, ZAI_DEFAULT_MODEL_REF, } from "./onboard-auth.js"; -import type { AuthChoice, SecretInputMode } from "./onboard-types.js"; -import { OPENCODE_ZEN_DEFAULT_MODEL } from "./opencode-zen-model-default.js"; +import type { AuthChoice } from "./onboard-types.js"; import { detectZaiEndpoint } from "./zai-endpoint-detect.js"; const API_KEY_TOKEN_PROVIDER_AUTH_CHOICE: Record = { @@ -98,6 +49,7 @@ const API_KEY_TOKEN_PROVIDER_AUTH_CHOICE: Record = { huggingface: "huggingface-api-key", mistral: "mistral-api-key", opencode: "opencode-zen", + "opencode-go": "opencode-go", kilocode: "kilocode-api-key", qianfan: "qianfan-api-key", }; @@ 
-111,205 +63,6 @@ const ZAI_AUTH_CHOICE_ENDPOINT: Partial< "zai-cn": "cn", }; -type ApiKeyProviderConfigApplier = ( - config: ApplyAuthChoiceParams["config"], -) => ApplyAuthChoiceParams["config"]; - -type SimpleApiKeyProviderFlow = { - provider: Parameters[0]["provider"]; - profileId: string; - expectedProviders: string[]; - envLabel: string; - promptMessage: string; - setCredential: ( - apiKey: SecretInput, - agentDir?: string, - options?: ApiKeyStorageOptions, - ) => void | Promise; - defaultModel: string; - applyDefaultConfig: ApiKeyProviderConfigApplier; - applyProviderConfig: ApiKeyProviderConfigApplier; - tokenProvider?: string; - normalize?: (value: string) => string; - validate?: (value: string) => string | undefined; - noteDefault?: string; - noteMessage?: string; - noteTitle?: string; -}; - -const SIMPLE_API_KEY_PROVIDER_FLOWS: Partial> = { - "ai-gateway-api-key": { - provider: "vercel-ai-gateway", - profileId: "vercel-ai-gateway:default", - expectedProviders: ["vercel-ai-gateway"], - envLabel: "AI_GATEWAY_API_KEY", - promptMessage: "Enter Vercel AI Gateway API key", - setCredential: setVercelAiGatewayApiKey, - defaultModel: VERCEL_AI_GATEWAY_DEFAULT_MODEL_REF, - applyDefaultConfig: applyVercelAiGatewayConfig, - applyProviderConfig: applyVercelAiGatewayProviderConfig, - noteDefault: VERCEL_AI_GATEWAY_DEFAULT_MODEL_REF, - }, - "moonshot-api-key": { - provider: "moonshot", - profileId: "moonshot:default", - expectedProviders: ["moonshot"], - envLabel: "MOONSHOT_API_KEY", - promptMessage: "Enter Moonshot API key", - setCredential: setMoonshotApiKey, - defaultModel: MOONSHOT_DEFAULT_MODEL_REF, - applyDefaultConfig: applyMoonshotConfig, - applyProviderConfig: applyMoonshotProviderConfig, - }, - "moonshot-api-key-cn": { - provider: "moonshot", - profileId: "moonshot:default", - expectedProviders: ["moonshot"], - envLabel: "MOONSHOT_API_KEY", - promptMessage: "Enter Moonshot API key (.cn)", - setCredential: setMoonshotApiKey, - defaultModel: 
MOONSHOT_DEFAULT_MODEL_REF, - applyDefaultConfig: applyMoonshotConfigCn, - applyProviderConfig: applyMoonshotProviderConfigCn, - }, - "kimi-code-api-key": { - provider: "kimi-coding", - profileId: "kimi-coding:default", - expectedProviders: ["kimi-code", "kimi-coding"], - envLabel: "KIMI_API_KEY", - promptMessage: "Enter Kimi Coding API key", - setCredential: setKimiCodingApiKey, - defaultModel: KIMI_CODING_MODEL_REF, - applyDefaultConfig: applyKimiCodeConfig, - applyProviderConfig: applyKimiCodeProviderConfig, - noteDefault: KIMI_CODING_MODEL_REF, - noteMessage: [ - "Kimi Coding uses a dedicated endpoint and API key.", - "Get your API key at: https://www.kimi.com/code/en", - ].join("\n"), - noteTitle: "Kimi Coding", - }, - "xiaomi-api-key": { - provider: "xiaomi", - profileId: "xiaomi:default", - expectedProviders: ["xiaomi"], - envLabel: "XIAOMI_API_KEY", - promptMessage: "Enter Xiaomi API key", - setCredential: setXiaomiApiKey, - defaultModel: XIAOMI_DEFAULT_MODEL_REF, - applyDefaultConfig: applyXiaomiConfig, - applyProviderConfig: applyXiaomiProviderConfig, - noteDefault: XIAOMI_DEFAULT_MODEL_REF, - }, - "mistral-api-key": { - provider: "mistral", - profileId: "mistral:default", - expectedProviders: ["mistral"], - envLabel: "MISTRAL_API_KEY", - promptMessage: "Enter Mistral API key", - setCredential: setMistralApiKey, - defaultModel: MISTRAL_DEFAULT_MODEL_REF, - applyDefaultConfig: applyMistralConfig, - applyProviderConfig: applyMistralProviderConfig, - noteDefault: MISTRAL_DEFAULT_MODEL_REF, - }, - "venice-api-key": { - provider: "venice", - profileId: "venice:default", - expectedProviders: ["venice"], - envLabel: "VENICE_API_KEY", - promptMessage: "Enter Venice AI API key", - setCredential: setVeniceApiKey, - defaultModel: VENICE_DEFAULT_MODEL_REF, - applyDefaultConfig: applyVeniceConfig, - applyProviderConfig: applyVeniceProviderConfig, - noteDefault: VENICE_DEFAULT_MODEL_REF, - noteMessage: [ - "Venice AI provides privacy-focused inference with uncensored 
models.", - "Get your API key at: https://venice.ai/settings/api", - "Supports 'private' (fully private) and 'anonymized' (proxy) modes.", - ].join("\n"), - noteTitle: "Venice AI", - }, - "opencode-zen": { - provider: "opencode", - profileId: "opencode:default", - expectedProviders: ["opencode"], - envLabel: "OPENCODE_API_KEY", - promptMessage: "Enter OpenCode Zen API key", - setCredential: setOpencodeZenApiKey, - defaultModel: OPENCODE_ZEN_DEFAULT_MODEL, - applyDefaultConfig: applyOpencodeZenConfig, - applyProviderConfig: applyOpencodeZenProviderConfig, - noteDefault: OPENCODE_ZEN_DEFAULT_MODEL, - noteMessage: [ - "OpenCode Zen provides access to Claude, GPT, Gemini, and more models.", - "Get your API key at: https://opencode.ai/auth", - "OpenCode Zen bills per request. Check your OpenCode dashboard for details.", - ].join("\n"), - noteTitle: "OpenCode Zen", - }, - "together-api-key": { - provider: "together", - profileId: "together:default", - expectedProviders: ["together"], - envLabel: "TOGETHER_API_KEY", - promptMessage: "Enter Together AI API key", - setCredential: setTogetherApiKey, - defaultModel: TOGETHER_DEFAULT_MODEL_REF, - applyDefaultConfig: applyTogetherConfig, - applyProviderConfig: applyTogetherProviderConfig, - noteDefault: TOGETHER_DEFAULT_MODEL_REF, - noteMessage: [ - "Together AI provides access to leading open-source models including Llama, DeepSeek, Qwen, and more.", - "Get your API key at: https://api.together.xyz/settings/api-keys", - ].join("\n"), - noteTitle: "Together AI", - }, - "qianfan-api-key": { - provider: "qianfan", - profileId: "qianfan:default", - expectedProviders: ["qianfan"], - envLabel: "QIANFAN_API_KEY", - promptMessage: "Enter QIANFAN API key", - setCredential: setQianfanApiKey, - defaultModel: QIANFAN_DEFAULT_MODEL_REF, - applyDefaultConfig: applyQianfanConfig, - applyProviderConfig: applyQianfanProviderConfig, - noteDefault: QIANFAN_DEFAULT_MODEL_REF, - noteMessage: [ - "Get your API key at: 
https://console.bce.baidu.com/qianfan/ais/console/apiKey", - "API key format: bce-v3/ALTAK-...", - ].join("\n"), - noteTitle: "QIANFAN", - }, - "kilocode-api-key": { - provider: "kilocode", - profileId: "kilocode:default", - expectedProviders: ["kilocode"], - envLabel: "KILOCODE_API_KEY", - promptMessage: "Enter Kilo Gateway API key", - setCredential: setKilocodeApiKey, - defaultModel: KILOCODE_DEFAULT_MODEL_REF, - applyDefaultConfig: applyKilocodeConfig, - applyProviderConfig: applyKilocodeProviderConfig, - noteDefault: KILOCODE_DEFAULT_MODEL_REF, - }, - "synthetic-api-key": { - provider: "synthetic", - profileId: "synthetic:default", - expectedProviders: ["synthetic"], - envLabel: "SYNTHETIC_API_KEY", - promptMessage: "Enter Synthetic API key", - setCredential: setSyntheticApiKey, - defaultModel: SYNTHETIC_DEFAULT_MODEL_REF, - applyDefaultConfig: applySyntheticConfig, - applyProviderConfig: applySyntheticProviderConfig, - normalize: (value) => String(value ?? "").trim(), - validate: (value) => (String(value ?? "").trim() ? 
undefined : "Required"), - }, -}; - export async function applyAuthChoiceApiProviders( params: ApplyAuthChoiceParams, ): Promise { @@ -333,152 +86,38 @@ export async function applyAuthChoiceApiProviders( } } - async function applyApiKeyProviderWithDefaultModel({ - provider, - profileId, - expectedProviders, - envLabel, - promptMessage, - setCredential, - defaultModel, - applyDefaultConfig, - applyProviderConfig, - noteMessage, - noteTitle, - tokenProvider = normalizedTokenProvider, - normalize = normalizeApiKeyInput, - validate = validateApiKeyInput, - noteDefault = defaultModel, - }: { - provider: Parameters[0]["provider"]; - profileId: string; - expectedProviders: string[]; - envLabel: string; - promptMessage: string; - setCredential: (apiKey: SecretInput, mode?: SecretInputMode) => void | Promise; - defaultModel: string; - applyDefaultConfig: ( - config: ApplyAuthChoiceParams["config"], - ) => ApplyAuthChoiceParams["config"]; - applyProviderConfig: ( - config: ApplyAuthChoiceParams["config"], - ) => ApplyAuthChoiceParams["config"]; - noteMessage?: string; - noteTitle?: string; - tokenProvider?: string; - normalize?: (value: string) => string; - validate?: (value: string) => string | undefined; - noteDefault?: string; - }): Promise { - await ensureApiKeyFromOptionEnvOrPrompt({ - token: params.opts?.token, - provider, - tokenProvider, - secretInputMode: requestedSecretInputMode, - config: nextConfig, - expectedProviders, - envLabel, - promptMessage, - setCredential: async (apiKey, mode) => { - await setCredential(apiKey, mode); - }, - noteMessage, - noteTitle, - normalize, - validate, - prompter: params.prompter, - }); - - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId, - provider, - mode: "api_key", - }); - await applyProviderDefaultModel({ - defaultModel, - applyDefaultConfig, - applyProviderConfig, - noteDefault, - }); - - return { config: nextConfig, agentModelOverride }; - } - if (authChoice === "openrouter-api-key") { return 
applyAuthChoiceOpenRouter(params); } - if (authChoice === "litellm-api-key") { - const store = ensureAuthProfileStore(params.agentDir, { allowKeychainPrompt: false }); - const profileOrder = resolveAuthProfileOrder({ cfg: nextConfig, store, provider: "litellm" }); - const existingProfileId = profileOrder.find((profileId) => Boolean(store.profiles[profileId])); - const existingCred = existingProfileId ? store.profiles[existingProfileId] : undefined; - let profileId = "litellm:default"; - let hasCredential = Boolean(existingProfileId && existingCred?.type === "api_key"); - if (hasCredential && existingProfileId) { - profileId = existingProfileId; - } - - if (!hasCredential) { - await ensureApiKeyFromOptionEnvOrPrompt({ - token: params.opts?.token, - tokenProvider: normalizedTokenProvider, - secretInputMode: requestedSecretInputMode, - config: nextConfig, - expectedProviders: ["litellm"], - provider: "litellm", - envLabel: "LITELLM_API_KEY", - promptMessage: "Enter LiteLLM API key", - normalize: normalizeApiKeyInput, - validate: validateApiKeyInput, - prompter: params.prompter, - setCredential: async (apiKey, mode) => - setLitellmApiKey(apiKey, params.agentDir, { secretInputMode: mode }), - noteMessage: - "LiteLLM provides a unified API to 100+ LLM providers.\nGet your API key from your LiteLLM proxy or https://litellm.ai\nDefault proxy runs on http://localhost:4000", - noteTitle: "LiteLLM", - }); - hasCredential = true; - } - - if (hasCredential) { - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId, - provider: "litellm", - mode: "api_key", - }); - } - await applyProviderDefaultModel({ - defaultModel: LITELLM_DEFAULT_MODEL_REF, - applyDefaultConfig: applyLitellmConfig, - applyProviderConfig: applyLitellmProviderConfig, - noteDefault: LITELLM_DEFAULT_MODEL_REF, - }); - return { config: nextConfig, agentModelOverride }; + const litellmResult = await applyLiteLlmApiKeyProvider({ + params, + authChoice, + config: nextConfig, + setConfig: (config) => 
(nextConfig = config), + getConfig: () => nextConfig, + normalizedTokenProvider, + requestedSecretInputMode, + applyProviderDefaultModel, + getAgentModelOverride: () => agentModelOverride, + }); + if (litellmResult) { + return litellmResult; } - const simpleApiKeyProviderFlow = SIMPLE_API_KEY_PROVIDER_FLOWS[authChoice]; - if (simpleApiKeyProviderFlow) { - return await applyApiKeyProviderWithDefaultModel({ - provider: simpleApiKeyProviderFlow.provider, - profileId: simpleApiKeyProviderFlow.profileId, - expectedProviders: simpleApiKeyProviderFlow.expectedProviders, - envLabel: simpleApiKeyProviderFlow.envLabel, - promptMessage: simpleApiKeyProviderFlow.promptMessage, - setCredential: async (apiKey, mode) => - simpleApiKeyProviderFlow.setCredential(apiKey, params.agentDir, { - secretInputMode: mode ?? requestedSecretInputMode, - }), - defaultModel: simpleApiKeyProviderFlow.defaultModel, - applyDefaultConfig: simpleApiKeyProviderFlow.applyDefaultConfig, - applyProviderConfig: simpleApiKeyProviderFlow.applyProviderConfig, - noteDefault: simpleApiKeyProviderFlow.noteDefault, - noteMessage: simpleApiKeyProviderFlow.noteMessage, - noteTitle: simpleApiKeyProviderFlow.noteTitle, - tokenProvider: simpleApiKeyProviderFlow.tokenProvider, - normalize: simpleApiKeyProviderFlow.normalize, - validate: simpleApiKeyProviderFlow.validate, - }); + const simpleProviderResult = await applySimpleAuthChoiceApiProvider({ + params, + authChoice, + config: nextConfig, + setConfig: (config) => (nextConfig = config), + getConfig: () => nextConfig, + normalizedTokenProvider, + requestedSecretInputMode, + applyProviderDefaultModel, + getAgentModelOverride: () => agentModelOverride, + }); + if (simpleProviderResult) { + return simpleProviderResult; } if (authChoice === "cloudflare-ai-gateway-api-key") { diff --git a/src/commands/auth-choice.apply.minimax.test.ts b/src/commands/auth-choice.apply.minimax.test.ts index 5998fde9484..9b5442b108c 100644 --- 
a/src/commands/auth-choice.apply.minimax.test.ts +++ b/src/commands/auth-choice.apply.minimax.test.ts @@ -1,6 +1,5 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import { resolveAgentModelPrimaryValue } from "../config/model-input.js"; -import type { WizardPrompter } from "../wizard/prompts.js"; import { applyAuthChoiceMiniMax } from "./auth-choice.apply.minimax.js"; import { createAuthTestLifecycle, @@ -10,23 +9,6 @@ import { setupAuthTestEnv, } from "./test-wizard-helpers.js"; -function createMinimaxPrompter( - params: { - text?: WizardPrompter["text"]; - confirm?: WizardPrompter["confirm"]; - select?: WizardPrompter["select"]; - } = {}, -): WizardPrompter { - return createWizardPrompter( - { - text: params.text, - confirm: params.confirm, - select: params.select, - }, - { defaultSelect: "oauth" }, - ); -} - describe("applyAuthChoiceMiniMax", () => { const lifecycle = createAuthTestLifecycle([ "OPENCLAW_STATE_DIR", @@ -56,27 +38,25 @@ describe("applyAuthChoiceMiniMax", () => { async function runMiniMaxChoice(params: { authChoice: Parameters[0]["authChoice"]; opts?: Parameters[0]["opts"]; - env?: { apiKey?: string; oauthToken?: string }; - prompter?: Parameters[0]; + env?: { apiKey?: string }; + prompterText?: () => Promise; }) { const agentDir = await setupTempState(); resetMiniMaxEnv(); if (params.env?.apiKey !== undefined) { process.env.MINIMAX_API_KEY = params.env.apiKey; } - if (params.env?.oauthToken !== undefined) { - process.env.MINIMAX_OAUTH_TOKEN = params.env.oauthToken; - } const text = vi.fn(async () => "should-not-be-used"); const confirm = vi.fn(async () => true); const result = await applyAuthChoiceMiniMax({ authChoice: params.authChoice, config: {}, - prompter: createMinimaxPrompter({ - text, + // Pass select: undefined so ref-mode uses the non-interactive fallback (same as old test behavior). + prompter: createWizardPrompter({ + text: params.prompterText ?? 
text, confirm, - ...params.prompter, + select: undefined, }), runtime: createExitThrowingRuntime(), setDefaultModel: true, @@ -94,7 +74,7 @@ describe("applyAuthChoiceMiniMax", () => { const result = await applyAuthChoiceMiniMax({ authChoice: "openrouter-api-key", config: {}, - prompter: createMinimaxPrompter(), + prompter: createWizardPrompter({}), runtime: createExitThrowingRuntime(), setDefaultModel: true, }); @@ -104,61 +84,52 @@ describe("applyAuthChoiceMiniMax", () => { it.each([ { - caseName: "uses opts token for minimax-api without prompt", - authChoice: "minimax-api" as const, + caseName: "uses opts token for minimax-global-api without prompt", + authChoice: "minimax-global-api" as const, tokenProvider: "minimax", token: "mm-opts-token", - profileId: "minimax:default", - provider: "minimax", + profileId: "minimax:global", expectedModel: "minimax/MiniMax-M2.5", }, { - caseName: - "uses opts token for minimax-api-key-cn with trimmed/case-insensitive tokenProvider", - authChoice: "minimax-api-key-cn" as const, - tokenProvider: " MINIMAX-CN ", + caseName: "uses opts token for minimax-cn-api with trimmed/case-insensitive tokenProvider", + authChoice: "minimax-cn-api" as const, + tokenProvider: " MINIMAX ", token: "mm-cn-opts-token", - profileId: "minimax-cn:default", - provider: "minimax-cn", - expectedModel: "minimax-cn/MiniMax-M2.5", + profileId: "minimax:cn", + expectedModel: "minimax/MiniMax-M2.5", }, - ])( - "$caseName", - async ({ authChoice, tokenProvider, token, profileId, provider, expectedModel }) => { - const { agentDir, result, text, confirm } = await runMiniMaxChoice({ - authChoice, - opts: { - tokenProvider, - token, - }, - }); + ])("$caseName", async ({ authChoice, tokenProvider, token, profileId, expectedModel }) => { + const { agentDir, result, text, confirm } = await runMiniMaxChoice({ + authChoice, + opts: { tokenProvider, token }, + }); - expect(result).not.toBeNull(); - expect(result?.config.auth?.profiles?.[profileId]).toMatchObject({ - 
provider, - mode: "api_key", - }); - expect(resolveAgentModelPrimaryValue(result?.config.agents?.defaults?.model)).toBe( - expectedModel, - ); - expect(text).not.toHaveBeenCalled(); - expect(confirm).not.toHaveBeenCalled(); + expect(result).not.toBeNull(); + expect(result?.config.auth?.profiles?.[profileId]).toMatchObject({ + provider: "minimax", + mode: "api_key", + }); + expect(resolveAgentModelPrimaryValue(result?.config.agents?.defaults?.model)).toBe( + expectedModel, + ); + expect(text).not.toHaveBeenCalled(); + expect(confirm).not.toHaveBeenCalled(); - const parsed = await readAuthProfiles(agentDir); - expect(parsed.profiles?.[profileId]?.key).toBe(token); - }, - ); + const parsed = await readAuthProfiles(agentDir); + expect(parsed.profiles?.[profileId]?.key).toBe(token); + }); it.each([ { - name: "uses env token for minimax-api-key-cn as plaintext by default", + name: "uses env token for minimax-cn-api as plaintext by default", opts: undefined, expectKey: "mm-env-token", expectKeyRef: undefined, expectConfirmCalls: 1, }, { - name: "uses env token for minimax-api-key-cn as keyRef in ref mode", + name: "uses env token for minimax-cn-api as keyRef in ref mode", opts: { secretInputMode: "ref" as const }, // pragma: allowlist secret expectKey: undefined, expectKeyRef: { @@ -170,54 +141,68 @@ describe("applyAuthChoiceMiniMax", () => { }, ])("$name", async ({ opts, expectKey, expectKeyRef, expectConfirmCalls }) => { const { agentDir, result, text, confirm } = await runMiniMaxChoice({ - authChoice: "minimax-api-key-cn", + authChoice: "minimax-cn-api", opts, env: { apiKey: "mm-env-token" }, // pragma: allowlist secret }); expect(result).not.toBeNull(); if (!opts) { - expect(result?.config.auth?.profiles?.["minimax-cn:default"]).toMatchObject({ - provider: "minimax-cn", + expect(result?.config.auth?.profiles?.["minimax:cn"]).toMatchObject({ + provider: "minimax", mode: "api_key", }); expect(resolveAgentModelPrimaryValue(result?.config.agents?.defaults?.model)).toBe( - 
"minimax-cn/MiniMax-M2.5", + "minimax/MiniMax-M2.5", ); } expect(text).not.toHaveBeenCalled(); expect(confirm).toHaveBeenCalledTimes(expectConfirmCalls); const parsed = await readAuthProfiles(agentDir); - expect(parsed.profiles?.["minimax-cn:default"]?.key).toBe(expectKey); + expect(parsed.profiles?.["minimax:cn"]?.key).toBe(expectKey); if (expectKeyRef) { - expect(parsed.profiles?.["minimax-cn:default"]?.keyRef).toEqual(expectKeyRef); + expect(parsed.profiles?.["minimax:cn"]?.keyRef).toEqual(expectKeyRef); } else { - expect(parsed.profiles?.["minimax-cn:default"]?.keyRef).toBeUndefined(); + expect(parsed.profiles?.["minimax:cn"]?.keyRef).toBeUndefined(); } }); - it("uses minimax-api-lightning default model", async () => { + it("minimax-global-api uses minimax:global profile and minimax/MiniMax-M2.5 model", async () => { const { agentDir, result, text, confirm } = await runMiniMaxChoice({ - authChoice: "minimax-api-lightning", + authChoice: "minimax-global-api", opts: { tokenProvider: "minimax", - token: "mm-lightning-token", + token: "mm-global-token", }, }); expect(result).not.toBeNull(); - expect(result?.config.auth?.profiles?.["minimax:default"]).toMatchObject({ + expect(result?.config.auth?.profiles?.["minimax:global"]).toMatchObject({ provider: "minimax", mode: "api_key", }); expect(resolveAgentModelPrimaryValue(result?.config.agents?.defaults?.model)).toBe( - "minimax/MiniMax-M2.5-highspeed", + "minimax/MiniMax-M2.5", ); + expect(result?.config.models?.providers?.minimax?.baseUrl).toContain("minimax.io"); expect(text).not.toHaveBeenCalled(); expect(confirm).not.toHaveBeenCalled(); const parsed = await readAuthProfiles(agentDir); - expect(parsed.profiles?.["minimax:default"]?.key).toBe("mm-lightning-token"); + expect(parsed.profiles?.["minimax:global"]?.key).toBe("mm-global-token"); + }); + + it("minimax-cn-api sets CN baseUrl", async () => { + const { result } = await runMiniMaxChoice({ + authChoice: "minimax-cn-api", + opts: { + tokenProvider: "minimax", + 
token: "mm-cn-token", + }, + }); + + expect(result).not.toBeNull(); + expect(result?.config.models?.providers?.minimax?.baseUrl).toContain("minimaxi.com"); }); }); diff --git a/src/commands/auth-choice.apply.minimax.ts b/src/commands/auth-choice.apply.minimax.ts index 86e5a485afd..1a381b908b8 100644 --- a/src/commands/auth-choice.apply.minimax.ts +++ b/src/commands/auth-choice.apply.minimax.ts @@ -12,130 +12,93 @@ import { applyMinimaxApiConfigCn, applyMinimaxApiProviderConfig, applyMinimaxApiProviderConfigCn, - applyMinimaxConfig, - applyMinimaxProviderConfig, setMinimaxApiKey, } from "./onboard-auth.js"; export async function applyAuthChoiceMiniMax( params: ApplyAuthChoiceParams, ): Promise { - let nextConfig = params.config; - let agentModelOverride: string | undefined; - const applyProviderDefaultModel = createAuthChoiceDefaultModelApplierForMutableState( - params, - () => nextConfig, - (config) => (nextConfig = config), - () => agentModelOverride, - (model) => (agentModelOverride = model), - ); - const requestedSecretInputMode = normalizeSecretInputModeInput(params.opts?.secretInputMode); - const ensureMinimaxApiKey = async (opts: { - profileId: string; - promptMessage: string; - }): Promise => { + // OAuth paths — delegate to plugin, no API key needed + if (params.authChoice === "minimax-global-oauth") { + return await applyAuthChoicePluginProvider(params, { + authChoice: "minimax-global-oauth", + pluginId: "minimax-portal-auth", + providerId: "minimax-portal", + methodId: "oauth", + label: "MiniMax", + }); + } + + if (params.authChoice === "minimax-cn-oauth") { + return await applyAuthChoicePluginProvider(params, { + authChoice: "minimax-cn-oauth", + pluginId: "minimax-portal-auth", + providerId: "minimax-portal", + methodId: "oauth-cn", + label: "MiniMax CN", + }); + } + + // API key paths + if (params.authChoice === "minimax-global-api" || params.authChoice === "minimax-cn-api") { + const isCn = params.authChoice === "minimax-cn-api"; + const profileId = 
isCn ? "minimax:cn" : "minimax:global"; + const keyLink = isCn + ? "https://platform.minimaxi.com/user-center/basic-information/interface-key" + : "https://platform.minimax.io/user-center/basic-information/interface-key"; + const promptMessage = `Enter MiniMax ${isCn ? "CN " : ""}API key (sk-api- or sk-cp-)\n${keyLink}`; + + let nextConfig = params.config; + let agentModelOverride: string | undefined; + const applyProviderDefaultModel = createAuthChoiceDefaultModelApplierForMutableState( + params, + () => nextConfig, + (config) => (nextConfig = config), + () => agentModelOverride, + (model) => (agentModelOverride = model), + ); + const requestedSecretInputMode = normalizeSecretInputModeInput(params.opts?.secretInputMode); + + // Warn when both Global and CN share the same `minimax` provider entry — configuring one + // overwrites the other's baseUrl. Only show when the other profile is already present. + const otherProfileId = isCn ? "minimax:global" : "minimax:cn"; + const hasOtherProfile = Boolean(nextConfig.auth?.profiles?.[otherProfileId]); + const noteMessage = hasOtherProfile + ? `Note: Global and CN both use the "minimax" provider entry. Saving this key will overwrite the existing ${isCn ? "Global" : "CN"} endpoint (${otherProfileId}).` + : undefined; + await ensureApiKeyFromOptionEnvOrPrompt({ token: params.opts?.token, tokenProvider: params.opts?.tokenProvider, secretInputMode: requestedSecretInputMode, config: nextConfig, - expectedProviders: ["minimax", "minimax-cn"], + // Accept "minimax-cn" as a legacy tokenProvider alias for the CN path. + expectedProviders: isCn ? 
["minimax", "minimax-cn"] : ["minimax"], provider: "minimax", envLabel: "MINIMAX_API_KEY", - promptMessage: opts.promptMessage, + promptMessage, normalize: normalizeApiKeyInput, validate: validateApiKeyInput, prompter: params.prompter, + noteMessage, setCredential: async (apiKey, mode) => - setMinimaxApiKey(apiKey, params.agentDir, opts.profileId, { secretInputMode: mode }), - }); - }; - const applyMinimaxApiVariant = async (opts: { - profileId: string; - provider: "minimax" | "minimax-cn"; - promptMessage: string; - modelRefPrefix: "minimax" | "minimax-cn"; - modelId: string; - applyDefaultConfig: ( - config: ApplyAuthChoiceParams["config"], - modelId: string, - ) => ApplyAuthChoiceParams["config"]; - applyProviderConfig: ( - config: ApplyAuthChoiceParams["config"], - modelId: string, - ) => ApplyAuthChoiceParams["config"]; - }): Promise => { - await ensureMinimaxApiKey({ - profileId: opts.profileId, - promptMessage: opts.promptMessage, + setMinimaxApiKey(apiKey, params.agentDir, profileId, { secretInputMode: mode }), }); + nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: opts.profileId, - provider: opts.provider, + profileId, + provider: "minimax", mode: "api_key", }); - const modelRef = `${opts.modelRefPrefix}/${opts.modelId}`; + await applyProviderDefaultModel({ - defaultModel: modelRef, - applyDefaultConfig: (config) => opts.applyDefaultConfig(config, opts.modelId), - applyProviderConfig: (config) => opts.applyProviderConfig(config, opts.modelId), - }); - return { config: nextConfig, agentModelOverride }; - }; - if (params.authChoice === "minimax-portal") { - // Let user choose between Global/CN endpoints - const endpoint = await params.prompter.select({ - message: "Select MiniMax endpoint", - options: [ - { value: "oauth", label: "Global", hint: "OAuth for international users" }, - { value: "oauth-cn", label: "CN", hint: "OAuth for users in China" }, - ], + defaultModel: "minimax/MiniMax-M2.5", + applyDefaultConfig: (config) => + isCn ? 
applyMinimaxApiConfigCn(config) : applyMinimaxApiConfig(config), + applyProviderConfig: (config) => + isCn ? applyMinimaxApiProviderConfigCn(config) : applyMinimaxApiProviderConfig(config), }); - return await applyAuthChoicePluginProvider(params, { - authChoice: "minimax-portal", - pluginId: "minimax-portal-auth", - providerId: "minimax-portal", - methodId: endpoint, - label: "MiniMax", - }); - } - - if ( - params.authChoice === "minimax-cloud" || - params.authChoice === "minimax-api" || - params.authChoice === "minimax-api-lightning" - ) { - return await applyMinimaxApiVariant({ - profileId: "minimax:default", - provider: "minimax", - promptMessage: "Enter MiniMax API key", - modelRefPrefix: "minimax", - modelId: - params.authChoice === "minimax-api-lightning" ? "MiniMax-M2.5-highspeed" : "MiniMax-M2.5", - applyDefaultConfig: applyMinimaxApiConfig, - applyProviderConfig: applyMinimaxApiProviderConfig, - }); - } - - if (params.authChoice === "minimax-api-key-cn") { - return await applyMinimaxApiVariant({ - profileId: "minimax-cn:default", - provider: "minimax-cn", - promptMessage: "Enter MiniMax China API key", - modelRefPrefix: "minimax-cn", - modelId: "MiniMax-M2.5", - applyDefaultConfig: applyMinimaxApiConfigCn, - applyProviderConfig: applyMinimaxApiProviderConfigCn, - }); - } - - if (params.authChoice === "minimax") { - await applyProviderDefaultModel({ - defaultModel: "lmstudio/minimax-m2.5-gs32", - applyDefaultConfig: applyMinimaxConfig, - applyProviderConfig: applyMinimaxProviderConfig, - }); return { config: nextConfig, agentModelOverride }; } diff --git a/src/commands/auth-choice.apply.plugin-provider.test.ts b/src/commands/auth-choice.apply.plugin-provider.test.ts new file mode 100644 index 00000000000..2557fcd2f5c --- /dev/null +++ b/src/commands/auth-choice.apply.plugin-provider.test.ts @@ -0,0 +1,321 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import type { ProviderPlugin } from "../plugins/types.js"; +import type { 
ProviderAuthMethod } from "../plugins/types.js"; +import type { ApplyAuthChoiceParams } from "./auth-choice.apply.js"; +import { + applyAuthChoiceLoadedPluginProvider, + applyAuthChoicePluginProvider, + runProviderPluginAuthMethod, +} from "./auth-choice.apply.plugin-provider.js"; + +const resolvePluginProviders = vi.hoisted(() => vi.fn<() => ProviderPlugin[]>(() => [])); +vi.mock("../plugins/providers.js", () => ({ + resolvePluginProviders, +})); + +const resolveProviderPluginChoice = vi.hoisted(() => + vi.fn<() => { provider: ProviderPlugin; method: ProviderAuthMethod } | null>(), +); +const runProviderModelSelectedHook = vi.hoisted(() => vi.fn(async () => {})); +vi.mock("../plugins/provider-wizard.js", () => ({ + resolveProviderPluginChoice, + runProviderModelSelectedHook, +})); + +const upsertAuthProfile = vi.hoisted(() => vi.fn()); +vi.mock("../agents/auth-profiles.js", () => ({ + upsertAuthProfile, +})); + +const resolveDefaultAgentId = vi.hoisted(() => vi.fn(() => "default")); +const resolveAgentWorkspaceDir = vi.hoisted(() => vi.fn(() => "/tmp/workspace")); +const resolveAgentDir = vi.hoisted(() => vi.fn(() => "/tmp/agent")); +vi.mock("../agents/agent-scope.js", () => ({ + resolveDefaultAgentId, + resolveAgentDir, + resolveAgentWorkspaceDir, +})); + +const resolveDefaultAgentWorkspaceDir = vi.hoisted(() => vi.fn(() => "/tmp/workspace")); +vi.mock("../agents/workspace.js", () => ({ + resolveDefaultAgentWorkspaceDir, +})); + +const resolveOpenClawAgentDir = vi.hoisted(() => vi.fn(() => "/tmp/agent")); +vi.mock("../agents/agent-paths.js", () => ({ + resolveOpenClawAgentDir, +})); + +const applyAuthProfileConfig = vi.hoisted(() => vi.fn((config) => config)); +vi.mock("./onboard-auth.js", () => ({ + applyAuthProfileConfig, +})); + +const isRemoteEnvironment = vi.hoisted(() => vi.fn(() => false)); +vi.mock("./oauth-env.js", () => ({ + isRemoteEnvironment, +})); + +const createVpsAwareOAuthHandlers = vi.hoisted(() => vi.fn()); +vi.mock("./oauth-flow.js", () => ({ 
+ createVpsAwareOAuthHandlers, +})); + +const openUrl = vi.hoisted(() => vi.fn(async () => {})); +vi.mock("./onboard-helpers.js", () => ({ + openUrl, +})); + +function buildProvider(): ProviderPlugin { + return { + id: "ollama", + label: "Ollama", + auth: [ + { + id: "local", + label: "Ollama", + kind: "custom", + run: async () => ({ + profiles: [ + { + profileId: "ollama:default", + credential: { + type: "api_key", + provider: "ollama", + key: "ollama-local", + }, + }, + ], + defaultModel: "ollama/qwen3:4b", + }), + }, + ], + }; +} + +function buildParams(overrides: Partial = {}): ApplyAuthChoiceParams { + return { + authChoice: "ollama", + config: {}, + prompter: { + note: vi.fn(async () => {}), + } as unknown as ApplyAuthChoiceParams["prompter"], + runtime: {} as ApplyAuthChoiceParams["runtime"], + setDefaultModel: true, + ...overrides, + }; +} + +describe("applyAuthChoiceLoadedPluginProvider", () => { + beforeEach(() => { + vi.clearAllMocks(); + applyAuthProfileConfig.mockImplementation((config) => config); + }); + + it("returns an agent model override when default model application is deferred", async () => { + const provider = buildProvider(); + resolvePluginProviders.mockReturnValue([provider]); + resolveProviderPluginChoice.mockReturnValue({ + provider, + method: provider.auth[0], + }); + + const result = await applyAuthChoiceLoadedPluginProvider( + buildParams({ + setDefaultModel: false, + }), + ); + + expect(result).toEqual({ + config: {}, + agentModelOverride: "ollama/qwen3:4b", + }); + expect(runProviderModelSelectedHook).not.toHaveBeenCalled(); + }); + + it("applies the default model and runs provider post-setup hooks", async () => { + const provider = buildProvider(); + resolvePluginProviders.mockReturnValue([provider]); + resolveProviderPluginChoice.mockReturnValue({ + provider, + method: provider.auth[0], + }); + + const result = await applyAuthChoiceLoadedPluginProvider(buildParams()); + + expect(result?.config.agents?.defaults?.model).toEqual({ + 
primary: "ollama/qwen3:4b", + }); + expect(upsertAuthProfile).toHaveBeenCalledWith({ + profileId: "ollama:default", + credential: { + type: "api_key", + provider: "ollama", + key: "ollama-local", + }, + agentDir: "/tmp/agent", + }); + expect(runProviderModelSelectedHook).toHaveBeenCalledWith({ + config: result?.config, + model: "ollama/qwen3:4b", + prompter: expect.objectContaining({ note: expect.any(Function) }), + agentDir: undefined, + workspaceDir: "/tmp/workspace", + }); + }); + + it("merges provider config patches and emits provider notes", async () => { + applyAuthProfileConfig.mockImplementation((( + config: { + auth?: { + profiles?: Record; + }; + }, + profile: { profileId: string; provider: string; mode: string }, + ) => ({ + ...config, + auth: { + profiles: { + ...config.auth?.profiles, + [profile.profileId]: { + provider: profile.provider, + mode: profile.mode, + }, + }, + }, + })) as never); + + const note = vi.fn(async () => {}); + const method: ProviderAuthMethod = { + id: "local", + label: "Local", + kind: "custom", + run: async () => ({ + profiles: [ + { + profileId: "ollama:default", + credential: { + type: "api_key", + provider: "ollama", + key: "ollama-local", + }, + }, + ], + configPatch: { + models: { + providers: { + ollama: { + api: "ollama", + baseUrl: "http://127.0.0.1:11434", + models: [], + }, + }, + }, + }, + defaultModel: "ollama/qwen3:4b", + notes: ["Detected local Ollama runtime.", "Pulled model metadata."], + }), + }; + + const result = await runProviderPluginAuthMethod({ + config: { + agents: { + defaults: { + model: { primary: "anthropic/claude-sonnet-4-5" }, + }, + }, + }, + runtime: {} as ApplyAuthChoiceParams["runtime"], + prompter: { + note, + } as unknown as ApplyAuthChoiceParams["prompter"], + method, + }); + + expect(result.defaultModel).toBe("ollama/qwen3:4b"); + expect(result.config.models?.providers?.ollama).toEqual({ + api: "ollama", + baseUrl: "http://127.0.0.1:11434", + models: [], + }); + 
expect(result.config.auth?.profiles?.["ollama:default"]).toEqual({ + provider: "ollama", + mode: "api_key", + }); + expect(note).toHaveBeenCalledWith( + "Detected local Ollama runtime.\nPulled model metadata.", + "Provider notes", + ); + }); + + it("returns an agent-scoped override for plugin auth choices when default model application is deferred", async () => { + const provider = buildProvider(); + resolvePluginProviders.mockReturnValue([provider]); + + const note = vi.fn(async () => {}); + const result = await applyAuthChoicePluginProvider( + buildParams({ + authChoice: "provider-plugin:ollama:local", + agentId: "worker", + setDefaultModel: false, + prompter: { + note, + } as unknown as ApplyAuthChoiceParams["prompter"], + }), + { + authChoice: "provider-plugin:ollama:local", + pluginId: "ollama", + providerId: "ollama", + methodId: "local", + label: "Ollama", + }, + ); + + expect(result?.agentModelOverride).toBe("ollama/qwen3:4b"); + expect(result?.config.plugins).toEqual({ + entries: { + ollama: { + enabled: true, + }, + }, + }); + expect(runProviderModelSelectedHook).not.toHaveBeenCalled(); + expect(note).toHaveBeenCalledWith( + 'Default model set to ollama/qwen3:4b for agent "worker".', + "Model configured", + ); + }); + + it("stops early when the plugin is disabled in config", async () => { + const note = vi.fn(async () => {}); + + const result = await applyAuthChoicePluginProvider( + buildParams({ + config: { + plugins: { + enabled: false, + }, + }, + prompter: { + note, + } as unknown as ApplyAuthChoiceParams["prompter"], + }), + { + authChoice: "ollama", + pluginId: "ollama", + providerId: "ollama", + label: "Ollama", + }, + ); + + expect(result).toEqual({ + config: { + plugins: { + enabled: false, + }, + }, + }); + expect(resolvePluginProviders).not.toHaveBeenCalled(); + expect(note).toHaveBeenCalledWith("Ollama plugin is disabled (plugins disabled).", "Ollama"); + }); +}); diff --git a/src/commands/auth-choice.apply.plugin-provider.ts 
b/src/commands/auth-choice.apply.plugin-provider.ts index e1568ca86b0..bd97928db91 100644 --- a/src/commands/auth-choice.apply.plugin-provider.ts +++ b/src/commands/auth-choice.apply.plugin-provider.ts @@ -7,7 +7,12 @@ import { import { upsertAuthProfile } from "../agents/auth-profiles.js"; import { resolveDefaultAgentWorkspaceDir } from "../agents/workspace.js"; import { enablePluginInConfig } from "../plugins/enable.js"; +import { + resolveProviderPluginChoice, + runProviderModelSelectedHook, +} from "../plugins/provider-wizard.js"; import { resolvePluginProviders } from "../plugins/providers.js"; +import type { ProviderAuthMethod } from "../plugins/types.js"; import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.js"; import { isRemoteEnvironment } from "./oauth-env.js"; import { createVpsAwareOAuthHandlers } from "./oauth-flow.js"; @@ -28,6 +33,124 @@ export type PluginProviderAuthChoiceOptions = { label: string; }; +export async function runProviderPluginAuthMethod(params: { + config: ApplyAuthChoiceParams["config"]; + runtime: ApplyAuthChoiceParams["runtime"]; + prompter: ApplyAuthChoiceParams["prompter"]; + method: ProviderAuthMethod; + agentDir?: string; + agentId?: string; + workspaceDir?: string; + emitNotes?: boolean; +}): Promise<{ config: ApplyAuthChoiceParams["config"]; defaultModel?: string }> { + const agentId = params.agentId ?? resolveDefaultAgentId(params.config); + const defaultAgentId = resolveDefaultAgentId(params.config); + const agentDir = + params.agentDir ?? + (agentId === defaultAgentId + ? resolveOpenClawAgentDir() + : resolveAgentDir(params.config, agentId)); + const workspaceDir = + params.workspaceDir ?? + resolveAgentWorkspaceDir(params.config, agentId) ?? 
+ resolveDefaultAgentWorkspaceDir(); + + const isRemote = isRemoteEnvironment(); + const result = await params.method.run({ + config: params.config, + agentDir, + workspaceDir, + prompter: params.prompter, + runtime: params.runtime, + isRemote, + openUrl: async (url) => { + await openUrl(url); + }, + oauth: { + createVpsAwareHandlers: (opts) => createVpsAwareOAuthHandlers(opts), + }, + }); + + let nextConfig = params.config; + if (result.configPatch) { + nextConfig = mergeConfigPatch(nextConfig, result.configPatch); + } + + for (const profile of result.profiles) { + upsertAuthProfile({ + profileId: profile.profileId, + credential: profile.credential, + agentDir, + }); + + nextConfig = applyAuthProfileConfig(nextConfig, { + profileId: profile.profileId, + provider: profile.credential.provider, + mode: profile.credential.type === "token" ? "token" : profile.credential.type, + ...("email" in profile.credential && profile.credential.email + ? { email: profile.credential.email } + : {}), + }); + } + + if (params.emitNotes !== false && result.notes && result.notes.length > 0) { + await params.prompter.note(result.notes.join("\n"), "Provider notes"); + } + + return { + config: nextConfig, + defaultModel: result.defaultModel, + }; +} + +export async function applyAuthChoiceLoadedPluginProvider( + params: ApplyAuthChoiceParams, +): Promise { + const agentId = params.agentId ?? resolveDefaultAgentId(params.config); + const workspaceDir = + resolveAgentWorkspaceDir(params.config, agentId) ?? 
resolveDefaultAgentWorkspaceDir(); + const providers = resolvePluginProviders({ config: params.config, workspaceDir }); + const resolved = resolveProviderPluginChoice({ + providers, + choice: params.authChoice, + }); + if (!resolved) { + return null; + } + + const applied = await runProviderPluginAuthMethod({ + config: params.config, + runtime: params.runtime, + prompter: params.prompter, + method: resolved.method, + agentDir: params.agentDir, + agentId: params.agentId, + workspaceDir, + }); + + let agentModelOverride: string | undefined; + if (applied.defaultModel) { + if (params.setDefaultModel) { + const nextConfig = applyDefaultModel(applied.config, applied.defaultModel); + await runProviderModelSelectedHook({ + config: nextConfig, + model: applied.defaultModel, + prompter: params.prompter, + agentDir: params.agentDir, + workspaceDir, + }); + await params.prompter.note( + `Default model set to ${applied.defaultModel}`, + "Model configured", + ); + return { config: nextConfig }; + } + agentModelOverride = applied.defaultModel; + } + + return { config: applied.config, agentModelOverride }; +} + export async function applyAuthChoicePluginProvider( params: ApplyAuthChoiceParams, options: PluginProviderAuthChoiceOptions, @@ -70,60 +193,40 @@ export async function applyAuthChoicePluginProvider( return { config: nextConfig }; } - const isRemote = isRemoteEnvironment(); - const result = await method.run({ + const applied = await runProviderPluginAuthMethod({ config: nextConfig, - agentDir, - workspaceDir, - prompter: params.prompter, runtime: params.runtime, - isRemote, - openUrl: async (url) => { - await openUrl(url); - }, - oauth: { - createVpsAwareHandlers: (opts) => createVpsAwareOAuthHandlers(opts), - }, + prompter: params.prompter, + method, + agentDir, + agentId, + workspaceDir, }); - - if (result.configPatch) { - nextConfig = mergeConfigPatch(nextConfig, result.configPatch); - } - - for (const profile of result.profiles) { - upsertAuthProfile({ - profileId: 
profile.profileId, - credential: profile.credential, - agentDir, - }); - - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: profile.profileId, - provider: profile.credential.provider, - mode: profile.credential.type === "token" ? "token" : profile.credential.type, - ...("email" in profile.credential && profile.credential.email - ? { email: profile.credential.email } - : {}), - }); - } + nextConfig = applied.config; let agentModelOverride: string | undefined; - if (result.defaultModel) { + if (applied.defaultModel) { if (params.setDefaultModel) { - nextConfig = applyDefaultModel(nextConfig, result.defaultModel); - await params.prompter.note(`Default model set to ${result.defaultModel}`, "Model configured"); - } else if (params.agentId) { - agentModelOverride = result.defaultModel; + nextConfig = applyDefaultModel(nextConfig, applied.defaultModel); + await runProviderModelSelectedHook({ + config: nextConfig, + model: applied.defaultModel, + prompter: params.prompter, + agentDir, + workspaceDir, + }); await params.prompter.note( - `Default model set to ${result.defaultModel} for agent "${params.agentId}".`, + `Default model set to ${applied.defaultModel}`, + "Model configured", + ); + } else if (params.agentId) { + agentModelOverride = applied.defaultModel; + await params.prompter.note( + `Default model set to ${applied.defaultModel} for agent "${params.agentId}".`, "Model configured", ); } } - if (result.notes && result.notes.length > 0) { - await params.prompter.note(result.notes.join("\n"), "Provider notes"); - } - return { config: nextConfig, agentModelOverride }; } diff --git a/src/commands/auth-choice.apply.ts b/src/commands/auth-choice.apply.ts index e6dfa9ed52a..b01fd65c875 100644 --- a/src/commands/auth-choice.apply.ts +++ b/src/commands/auth-choice.apply.ts @@ -10,8 +10,8 @@ import { applyAuthChoiceGoogleGeminiCli } from "./auth-choice.apply.google-gemin import { applyAuthChoiceMiniMax } from "./auth-choice.apply.minimax.js"; import { 
applyAuthChoiceOAuth } from "./auth-choice.apply.oauth.js"; import { applyAuthChoiceOpenAI } from "./auth-choice.apply.openai.js"; +import { applyAuthChoiceLoadedPluginProvider } from "./auth-choice.apply.plugin-provider.js"; import { applyAuthChoiceQwenPortal } from "./auth-choice.apply.qwen-portal.js"; -import { applyAuthChoiceVllm } from "./auth-choice.apply.vllm.js"; import { applyAuthChoiceVolcengine } from "./auth-choice.apply.volcengine.js"; import { applyAuthChoiceXAI } from "./auth-choice.apply.xai.js"; import type { AuthChoice, OnboardOptions } from "./onboard-types.js"; @@ -36,8 +36,8 @@ export async function applyAuthChoice( params: ApplyAuthChoiceParams, ): Promise { const handlers: Array<(p: ApplyAuthChoiceParams) => Promise> = [ + applyAuthChoiceLoadedPluginProvider, applyAuthChoiceAnthropic, - applyAuthChoiceVllm, applyAuthChoiceOpenAI, applyAuthChoiceOAuth, applyAuthChoiceApiProviders, diff --git a/src/commands/auth-choice.apply.vllm.ts b/src/commands/auth-choice.apply.vllm.ts deleted file mode 100644 index 53d44a7cbf8..00000000000 --- a/src/commands/auth-choice.apply.vllm.ts +++ /dev/null @@ -1,46 +0,0 @@ -import type { OpenClawConfig } from "../config/config.js"; -import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.js"; -import { promptAndConfigureVllm } from "./vllm-setup.js"; - -function applyVllmDefaultModel(cfg: OpenClawConfig, modelRef: string): OpenClawConfig { - const existingModel = cfg.agents?.defaults?.model; - const fallbacks = - existingModel && typeof existingModel === "object" && "fallbacks" in existingModel - ? (existingModel as { fallbacks?: string[] }).fallbacks - : undefined; - - return { - ...cfg, - agents: { - ...cfg.agents, - defaults: { - ...cfg.agents?.defaults, - model: { - ...(fallbacks ? 
{ fallbacks } : undefined), - primary: modelRef, - }, - }, - }, - }; -} - -export async function applyAuthChoiceVllm( - params: ApplyAuthChoiceParams, -): Promise { - if (params.authChoice !== "vllm") { - return null; - } - - const { config: nextConfig, modelRef } = await promptAndConfigureVllm({ - cfg: params.config, - prompter: params.prompter, - agentDir: params.agentDir, - }); - - if (!params.setDefaultModel) { - return { config: nextConfig, agentModelOverride: modelRef }; - } - - await params.prompter.note(`Default model set to ${modelRef}`, "Model configured"); - return { config: applyVllmDefaultModel(nextConfig, modelRef) }; -} diff --git a/src/commands/auth-choice.model-check.ts b/src/commands/auth-choice.model-check.ts index ea7da2f9d6d..975fc3521d3 100644 --- a/src/commands/auth-choice.model-check.ts +++ b/src/commands/auth-choice.model-check.ts @@ -1,5 +1,5 @@ import { ensureAuthProfileStore, listProfilesForProvider } from "../agents/auth-profiles.js"; -import { getCustomProviderApiKey, resolveEnvApiKey } from "../agents/model-auth.js"; +import { hasUsableCustomProviderApiKey, resolveEnvApiKey } from "../agents/model-auth.js"; import { loadModelCatalog } from "../agents/model-catalog.js"; import { resolveDefaultModelForAgent } from "../agents/model-selection.js"; import type { OpenClawConfig } from "../config/config.js"; @@ -34,8 +34,8 @@ export async function warnIfModelConfigLooksOff( const store = ensureAuthProfileStore(options?.agentDir); const hasProfile = listProfilesForProvider(store, ref.provider).length > 0; const envKey = resolveEnvApiKey(ref.provider); - const customKey = getCustomProviderApiKey(config, ref.provider); - if (!hasProfile && !envKey && !customKey) { + const hasCustomKey = hasUsableCustomProviderApiKey(config, ref.provider); + if (!hasProfile && !envKey && !hasCustomKey) { warnings.push( `No auth configured for provider "${ref.provider}". 
The agent may fail until credentials are added.`, ); diff --git a/src/commands/auth-choice.preferred-provider.ts b/src/commands/auth-choice.preferred-provider.ts index e56950ea711..959754625bc 100644 --- a/src/commands/auth-choice.preferred-provider.ts +++ b/src/commands/auth-choice.preferred-provider.ts @@ -1,3 +1,6 @@ +import type { OpenClawConfig } from "../config/config.js"; +import { resolveProviderPluginChoice } from "../plugins/provider-wizard.js"; +import { resolvePluginProviders } from "../plugins/providers.js"; import type { AuthChoice } from "./onboard-types.js"; const PREFERRED_PROVIDER_BY_AUTH_CHOICE: Partial> = { @@ -6,7 +9,6 @@ const PREFERRED_PROVIDER_BY_AUTH_CHOICE: Partial> = { "claude-cli": "anthropic", token: "anthropic", apiKey: "anthropic", - vllm: "vllm", "openai-codex": "openai-codex", "codex-cli": "openai-codex", chutes: "chutes", @@ -21,6 +23,8 @@ const PREFERRED_PROVIDER_BY_AUTH_CHOICE: Partial> = { "gemini-api-key": "google", "google-gemini-cli": "google-gemini-cli", "mistral-api-key": "mistral", + ollama: "ollama", + sglang: "sglang", "zai-api-key": "zai", "zai-coding-global": "zai", "zai-coding-cn": "zai", @@ -33,22 +37,40 @@ const PREFERRED_PROVIDER_BY_AUTH_CHOICE: Partial> = { "huggingface-api-key": "huggingface", "github-copilot": "github-copilot", "copilot-proxy": "copilot-proxy", - "minimax-cloud": "minimax", - "minimax-api": "minimax", - "minimax-api-key-cn": "minimax-cn", - "minimax-api-lightning": "minimax", - minimax: "lmstudio", + "minimax-global-oauth": "minimax-portal", + "minimax-global-api": "minimax", + "minimax-cn-oauth": "minimax-portal", + "minimax-cn-api": "minimax", "opencode-zen": "opencode", + "opencode-go": "opencode-go", "xai-api-key": "xai", "litellm-api-key": "litellm", "qwen-portal": "qwen-portal", "volcengine-api-key": "volcengine", "byteplus-api-key": "byteplus", - "minimax-portal": "minimax-portal", "qianfan-api-key": "qianfan", "custom-api-key": "custom", + vllm: "vllm", }; -export function 
resolvePreferredProviderForAuthChoice(choice: AuthChoice): string | undefined { - return PREFERRED_PROVIDER_BY_AUTH_CHOICE[choice]; +export function resolvePreferredProviderForAuthChoice(params: { + choice: AuthChoice; + config?: OpenClawConfig; + workspaceDir?: string; + env?: NodeJS.ProcessEnv; +}): string | undefined { + const preferred = PREFERRED_PROVIDER_BY_AUTH_CHOICE[params.choice]; + if (preferred) { + return preferred; + } + + const providers = resolvePluginProviders({ + config: params.config, + workspaceDir: params.workspaceDir, + env: params.env, + }); + return resolveProviderPluginChoice({ + providers, + choice: params.choice, + })?.provider.id; } diff --git a/src/commands/auth-choice.test.ts b/src/commands/auth-choice.test.ts index 0431e558dac..f77df4a07e4 100644 --- a/src/commands/auth-choice.test.ts +++ b/src/commands/auth-choice.test.ts @@ -192,8 +192,8 @@ describe("applyAuthChoice", () => { it("prompts and writes provider API key for common providers", async () => { const scenarios: Array<{ authChoice: - | "minimax-api" - | "minimax-api-key-cn" + | "minimax-global-api" + | "minimax-cn-api" | "synthetic-api-key" | "huggingface-api-key"; promptContains: string; @@ -204,17 +204,17 @@ describe("applyAuthChoice", () => { expectedModelPrefix?: string; }> = [ { - authChoice: "minimax-api" as const, + authChoice: "minimax-global-api" as const, promptContains: "Enter MiniMax API key", - profileId: "minimax:default", + profileId: "minimax:global", provider: "minimax", token: "sk-minimax-test", }, { - authChoice: "minimax-api-key-cn" as const, - promptContains: "Enter MiniMax China API key", - profileId: "minimax-cn:default", - provider: "minimax-cn", + authChoice: "minimax-cn-api" as const, + promptContains: "Enter MiniMax CN API key", + profileId: "minimax:cn", + provider: "minimax", token: "sk-minimax-test", expectedBaseUrl: MINIMAX_CN_API_BASE_URL, }, @@ -498,6 +498,15 @@ describe("applyAuthChoice", () => { profileId: "opencode:default", provider: 
"opencode", modelPrefix: "opencode/", + extraProfiles: ["opencode-go:default"], + }, + { + authChoice: "opencode-go", + tokenProvider: "opencode-go", + profileId: "opencode-go:default", + provider: "opencode-go", + modelPrefix: "opencode-go/", + extraProfiles: ["opencode:default"], }, { authChoice: "together-api-key", @@ -522,7 +531,7 @@ describe("applyAuthChoice", () => { }, ] as const)( "uses opts token for $authChoice without prompting", - async ({ authChoice, tokenProvider, profileId, provider, modelPrefix }) => { + async ({ authChoice, tokenProvider, profileId, provider, modelPrefix, extraProfiles }) => { await setupTempState(); const text = vi.fn(); @@ -554,6 +563,9 @@ describe("applyAuthChoice", () => { ), ).toBe(true); expect((await readAuthProfile(profileId))?.key).toBe(token); + for (const extraProfile of extraProfiles ?? []) { + expect((await readAuthProfile(extraProfile))?.key).toBe(token); + } }, ); @@ -805,14 +817,15 @@ describe("applyAuthChoice", () => { it("keeps existing default model for explicit provider keys when setDefaultModel=false", async () => { const scenarios: Array<{ - authChoice: "xai-api-key" | "opencode-zen"; + authChoice: "xai-api-key" | "opencode-zen" | "opencode-go"; token: string; promptMessage: string; existingPrimary: string; expectedOverride: string; profileId?: string; profileProvider?: string; - expectProviderConfigUndefined?: "opencode-zen"; + extraProfileId?: string; + expectProviderConfigUndefined?: "opencode" | "opencode-go" | "opencode-zen"; agentId?: string; }> = [ { @@ -828,10 +841,24 @@ describe("applyAuthChoice", () => { { authChoice: "opencode-zen", token: "sk-opencode-zen-test", - promptMessage: "Enter OpenCode Zen API key", + promptMessage: "Enter OpenCode API key", existingPrimary: "anthropic/claude-opus-4-5", expectedOverride: "opencode/claude-opus-4-6", - expectProviderConfigUndefined: "opencode-zen", + profileId: "opencode:default", + profileProvider: "opencode", + extraProfileId: "opencode-go:default", + 
expectProviderConfigUndefined: "opencode", + }, + { + authChoice: "opencode-go", + token: "sk-opencode-go-test", + promptMessage: "Enter OpenCode API key", + existingPrimary: "anthropic/claude-opus-4-5", + expectedOverride: "opencode-go/kimi-k2.5", + profileId: "opencode-go:default", + profileProvider: "opencode-go", + extraProfileId: "opencode:default", + expectProviderConfigUndefined: "opencode-go", }, ]; for (const scenario of scenarios) { @@ -863,6 +890,9 @@ describe("applyAuthChoice", () => { }); expect((await readAuthProfile(scenario.profileId))?.key).toBe(scenario.token); } + if (scenario.extraProfileId) { + expect((await readAuthProfile(scenario.extraProfileId))?.key).toBe(scenario.token); + } if (scenario.expectProviderConfigUndefined) { expect( result.config.models?.providers?.[scenario.expectProviderConfigUndefined], @@ -1197,7 +1227,7 @@ describe("applyAuthChoice", () => { it("writes portal OAuth credentials for plugin providers", async () => { const scenarios: Array<{ - authChoice: "qwen-portal" | "minimax-portal"; + authChoice: "qwen-portal" | "minimax-global-oauth"; label: string; authId: string; authLabel: string; @@ -1222,7 +1252,7 @@ describe("applyAuthChoice", () => { apiKey: "qwen-oauth", // pragma: allowlist secret }, { - authChoice: "minimax-portal", + authChoice: "minimax-global-oauth", label: "MiniMax", authId: "oauth", authLabel: "MiniMax OAuth (Global)", @@ -1232,7 +1262,6 @@ describe("applyAuthChoice", () => { api: "anthropic-messages", defaultModel: "minimax-portal/MiniMax-M2.5", apiKey: "minimax-oauth", // pragma: allowlist secret - selectValue: "oauth", }, ]; for (const scenario of scenarios) { @@ -1320,10 +1349,11 @@ describe("resolvePreferredProviderForAuthChoice", () => { { authChoice: "github-copilot" as const, expectedProvider: "github-copilot" }, { authChoice: "qwen-portal" as const, expectedProvider: "qwen-portal" }, { authChoice: "mistral-api-key" as const, expectedProvider: "mistral" }, + { authChoice: "ollama" as const, 
expectedProvider: "ollama" }, { authChoice: "unknown" as AuthChoice, expectedProvider: undefined }, ] as const; for (const scenario of scenarios) { - expect(resolvePreferredProviderForAuthChoice(scenario.authChoice)).toBe( + expect(resolvePreferredProviderForAuthChoice({ choice: scenario.authChoice })).toBe( scenario.expectedProvider, ); } diff --git a/src/commands/backup-verify.test.ts b/src/commands/backup-verify.test.ts index 9288d2fb8c1..a5f0384e61b 100644 --- a/src/commands/backup-verify.test.ts +++ b/src/commands/backup-verify.test.ts @@ -8,6 +8,92 @@ import { buildBackupArchiveRoot } from "./backup-shared.js"; import { backupVerifyCommand } from "./backup-verify.js"; import { backupCreateCommand } from "./backup.js"; +const TEST_ARCHIVE_ROOT = "2026-03-09T00-00-00.000Z-openclaw-backup"; + +const createBackupVerifyRuntime = () => ({ + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), +}); + +function createBackupManifest(assetArchivePath: string) { + return { + schemaVersion: 1, + createdAt: "2026-03-09T00:00:00.000Z", + archiveRoot: TEST_ARCHIVE_ROOT, + runtimeVersion: "test", + platform: process.platform, + nodeVersion: process.version, + assets: [ + { + kind: "state", + sourcePath: "/tmp/.openclaw", + archivePath: assetArchivePath, + }, + ], + }; +} + +async function withBrokenArchiveFixture( + options: { + tempPrefix: string; + manifestAssetArchivePath: string; + payloads: Array<{ fileName: string; contents: string; archivePath?: string }>; + buildTarEntries?: (paths: { manifestPath: string; payloadPaths: string[] }) => string[]; + }, + run: (archivePath: string) => Promise, +) { + const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), options.tempPrefix)); + const archivePath = path.join(tempDir, "broken.tar.gz"); + const manifestPath = path.join(tempDir, "manifest.json"); + const payloadSpecs = await Promise.all( + options.payloads.map(async (payload) => { + const payloadPath = path.join(tempDir, payload.fileName); + await fs.writeFile(payloadPath, 
payload.contents, "utf8"); + return { + path: payloadPath, + archivePath: payload.archivePath ?? options.manifestAssetArchivePath, + }; + }), + ); + const payloadEntryPathBySource = new Map( + payloadSpecs.map((payload) => [payload.path, payload.archivePath]), + ); + + try { + await fs.writeFile( + manifestPath, + `${JSON.stringify(createBackupManifest(options.manifestAssetArchivePath), null, 2)}\n`, + "utf8", + ); + await tar.c( + { + file: archivePath, + gzip: true, + portable: true, + preservePaths: true, + onWriteEntry: (entry) => { + if (entry.path === manifestPath) { + entry.path = `${TEST_ARCHIVE_ROOT}/manifest.json`; + return; + } + const payloadEntryPath = payloadEntryPathBySource.get(entry.path); + if (payloadEntryPath) { + entry.path = payloadEntryPath; + } + }, + }, + options.buildTarEntries?.({ + manifestPath, + payloadPaths: payloadSpecs.map((payload) => payload.path), + }) ?? [manifestPath, ...payloadSpecs.map((payload) => payload.path)], + ); + await run(archivePath); + } finally { + await fs.rm(tempDir, { recursive: true, force: true }); + } +} + describe("backupVerifyCommand", () => { let tempHome: TempHomeEnv; @@ -26,12 +112,7 @@ describe("backupVerifyCommand", () => { await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); await fs.writeFile(path.join(stateDir, "state.txt"), "hello\n", "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - + const runtime = createBackupVerifyRuntime(); const nowMs = Date.UTC(2026, 2, 9, 0, 0, 0); const created = await backupCreateCommand(runtime, { output: archiveDir, nowMs }); const verified = await backupVerifyCommand(runtime, { archive: created.archivePath }); @@ -53,12 +134,7 @@ describe("backupVerifyCommand", () => { await fs.writeFile(path.join(root, "payload", "data.txt"), "x\n", "utf8"); await tar.c({ file: archivePath, gzip: true, cwd: tempDir }, ["root"]); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - + const 
runtime = createBackupVerifyRuntime(); await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( /expected exactly one backup manifest entry/i, ); @@ -95,12 +171,7 @@ describe("backupVerifyCommand", () => { ); await tar.c({ file: archivePath, gzip: true, cwd: tempDir }, [rootName]); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - + const runtime = createBackupVerifyRuntime(); await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( /missing payload for manifest asset/i, ); @@ -110,119 +181,37 @@ describe("backupVerifyCommand", () => { }); it("fails when archive paths contain traversal segments", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-traversal-")); - const archivePath = path.join(tempDir, "broken.tar.gz"); - const manifestPath = path.join(tempDir, "manifest.json"); - const payloadPath = path.join(tempDir, "payload.txt"); - try { - const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup"; - const traversalPath = `${rootName}/payload/../escaped.txt`; - const manifest = { - schemaVersion: 1, - createdAt: "2026-03-09T00:00:00.000Z", - archiveRoot: rootName, - runtimeVersion: "test", - platform: process.platform, - nodeVersion: process.version, - assets: [ - { - kind: "state", - sourcePath: "/tmp/.openclaw", - archivePath: traversalPath, - }, - ], - }; - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - await fs.writeFile(payloadPath, "payload\n", "utf8"); - await tar.c( - { - file: archivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - if (entry.path === manifestPath) { - entry.path = `${rootName}/manifest.json`; - return; - } - if (entry.path === payloadPath) { - entry.path = traversalPath; - } - }, - }, - [manifestPath, payloadPath], - ); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - await 
expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( - /path traversal segments/i, - ); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + const traversalPath = `${TEST_ARCHIVE_ROOT}/payload/../escaped.txt`; + await withBrokenArchiveFixture( + { + tempPrefix: "openclaw-backup-traversal-", + manifestAssetArchivePath: traversalPath, + payloads: [{ fileName: "payload.txt", contents: "payload\n", archivePath: traversalPath }], + }, + async (archivePath) => { + const runtime = createBackupVerifyRuntime(); + await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( + /path traversal segments/i, + ); + }, + ); }); it("fails when archive paths contain backslashes", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-backslash-")); - const archivePath = path.join(tempDir, "broken.tar.gz"); - const manifestPath = path.join(tempDir, "manifest.json"); - const payloadPath = path.join(tempDir, "payload.txt"); - try { - const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup"; - const invalidPath = `${rootName}/payload\\..\\escaped.txt`; - const manifest = { - schemaVersion: 1, - createdAt: "2026-03-09T00:00:00.000Z", - archiveRoot: rootName, - runtimeVersion: "test", - platform: process.platform, - nodeVersion: process.version, - assets: [ - { - kind: "state", - sourcePath: "/tmp/.openclaw", - archivePath: invalidPath, - }, - ], - }; - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - await fs.writeFile(payloadPath, "payload\n", "utf8"); - await tar.c( - { - file: archivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - if (entry.path === manifestPath) { - entry.path = `${rootName}/manifest.json`; - return; - } - if (entry.path === payloadPath) { - entry.path = invalidPath; - } - }, - }, - [manifestPath, payloadPath], - ); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - 
exit: vi.fn(), - }; - - await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( - /forward slashes/i, - ); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + const invalidPath = `${TEST_ARCHIVE_ROOT}/payload\\..\\escaped.txt`; + await withBrokenArchiveFixture( + { + tempPrefix: "openclaw-backup-backslash-", + manifestAssetArchivePath: invalidPath, + payloads: [{ fileName: "payload.txt", contents: "payload\n", archivePath: invalidPath }], + }, + async (archivePath) => { + const runtime = createBackupVerifyRuntime(); + await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( + /forward slashes/i, + ); + }, + ); }); it("ignores payload manifest.json files when locating the backup manifest", async () => { @@ -251,12 +240,7 @@ describe("backupVerifyCommand", () => { "utf8", ); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - + const runtime = createBackupVerifyRuntime(); const created = await backupCreateCommand(runtime, { output: archiveDir, includeWorkspace: true, @@ -274,119 +258,44 @@ describe("backupVerifyCommand", () => { }); it("fails when the archive contains duplicate root manifest entries", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-duplicate-manifest-")); - const archivePath = path.join(tempDir, "broken.tar.gz"); - const manifestPath = path.join(tempDir, "manifest.json"); - const payloadPath = path.join(tempDir, "payload.txt"); - try { - const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup"; - const manifest = { - schemaVersion: 1, - createdAt: "2026-03-09T00:00:00.000Z", - archiveRoot: rootName, - runtimeVersion: "test", - platform: process.platform, - nodeVersion: process.version, - assets: [ - { - kind: "state", - sourcePath: "/tmp/.openclaw", - archivePath: `${rootName}/payload/posix/tmp/.openclaw/payload.txt`, - }, + const payloadArchivePath = 
`${TEST_ARCHIVE_ROOT}/payload/posix/tmp/.openclaw/payload.txt`; + await withBrokenArchiveFixture( + { + tempPrefix: "openclaw-backup-duplicate-manifest-", + manifestAssetArchivePath: payloadArchivePath, + payloads: [{ fileName: "payload.txt", contents: "payload\n" }], + buildTarEntries: ({ manifestPath, payloadPaths }) => [ + manifestPath, + manifestPath, + ...payloadPaths, ], - }; - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - await fs.writeFile(payloadPath, "payload\n", "utf8"); - await tar.c( - { - file: archivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - if (entry.path === manifestPath) { - entry.path = `${rootName}/manifest.json`; - return; - } - if (entry.path === payloadPath) { - entry.path = `${rootName}/payload/posix/tmp/.openclaw/payload.txt`; - } - }, - }, - [manifestPath, manifestPath, payloadPath], - ); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( - /expected exactly one backup manifest entry, found 2/i, - ); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + }, + async (archivePath) => { + const runtime = createBackupVerifyRuntime(); + await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( + /expected exactly one backup manifest entry, found 2/i, + ); + }, + ); }); it("fails when the archive contains duplicate payload entries", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-duplicate-payload-")); - const archivePath = path.join(tempDir, "broken.tar.gz"); - const manifestPath = path.join(tempDir, "manifest.json"); - const payloadPathA = path.join(tempDir, "payload-a.txt"); - const payloadPathB = path.join(tempDir, "payload-b.txt"); - try { - const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup"; - const payloadArchivePath = 
`${rootName}/payload/posix/tmp/.openclaw/payload.txt`; - const manifest = { - schemaVersion: 1, - createdAt: "2026-03-09T00:00:00.000Z", - archiveRoot: rootName, - runtimeVersion: "test", - platform: process.platform, - nodeVersion: process.version, - assets: [ - { - kind: "state", - sourcePath: "/tmp/.openclaw", - archivePath: payloadArchivePath, - }, + const payloadArchivePath = `${TEST_ARCHIVE_ROOT}/payload/posix/tmp/.openclaw/payload.txt`; + await withBrokenArchiveFixture( + { + tempPrefix: "openclaw-backup-duplicate-payload-", + manifestAssetArchivePath: payloadArchivePath, + payloads: [ + { fileName: "payload-a.txt", contents: "payload-a\n", archivePath: payloadArchivePath }, + { fileName: "payload-b.txt", contents: "payload-b\n", archivePath: payloadArchivePath }, ], - }; - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - await fs.writeFile(payloadPathA, "payload-a\n", "utf8"); - await fs.writeFile(payloadPathB, "payload-b\n", "utf8"); - await tar.c( - { - file: archivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - if (entry.path === manifestPath) { - entry.path = `${rootName}/manifest.json`; - return; - } - if (entry.path === payloadPathA || entry.path === payloadPathB) { - entry.path = payloadArchivePath; - } - }, - }, - [manifestPath, payloadPathA, payloadPathB], - ); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( - /duplicate entry path/i, - ); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + }, + async (archivePath) => { + const runtime = createBackupVerifyRuntime(); + await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( + /duplicate entry path/i, + ); + }, + ); }); }); diff --git a/src/commands/backup.test.ts b/src/commands/backup.test.ts index 349714e4d15..decc55e6c05 100644 --- 
a/src/commands/backup.test.ts +++ b/src/commands/backup.test.ts @@ -3,6 +3,7 @@ import os from "node:os"; import path from "node:path"; import * as tar from "tar"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import type { RuntimeEnv } from "../runtime.js"; import { createTempHomeEnv, type TempHomeEnv } from "../test-utils/temp-home.js"; import { buildBackupArchiveRoot, @@ -41,6 +42,39 @@ describe("backup commands", () => { await tempHome.restore(); }); + function createRuntime(): RuntimeEnv { + return { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), + } satisfies RuntimeEnv; + } + + async function withInvalidWorkspaceBackupConfig(fn: (runtime: RuntimeEnv) => Promise) { + const stateDir = path.join(tempHome.home, ".openclaw"); + const configPath = path.join(tempHome.home, "custom-config.json"); + process.env.OPENCLAW_CONFIG_PATH = configPath; + await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); + await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8"); + const runtime = createRuntime(); + + try { + return await fn(runtime); + } finally { + delete process.env.OPENCLAW_CONFIG_PATH; + } + } + + function expectWorkspaceCoveredByState( + plan: Awaited>, + ) { + expect(plan.included).toHaveLength(1); + expect(plan.included[0]?.kind).toBe("state"); + expect(plan.skipped).toEqual( + expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" })]), + ); + } + it("collapses default config, credentials, and workspace into the state backup root", async () => { const stateDir = path.join(tempHome.home, ".openclaw"); await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); @@ -50,12 +84,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "workspace", "SOUL.md"), "# soul\n", "utf8"); const plan = await resolveBackupPlanFromDisk({ includeWorkspace: true, nowMs: 123 }); - - 
expect(plan.included).toHaveLength(1); - expect(plan.included[0]?.kind).toBe("state"); - expect(plan.skipped).toEqual( - expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" })]), - ); + expectWorkspaceCoveredByState(plan); }); it("orders coverage checks by canonical path so symlinked workspaces do not duplicate state", async () => { @@ -84,12 +113,7 @@ describe("backup commands", () => { ); const plan = await resolveBackupPlanFromDisk({ includeWorkspace: true, nowMs: 123 }); - - expect(plan.included).toHaveLength(1); - expect(plan.included[0]?.kind).toBe("state"); - expect(plan.skipped).toEqual( - expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" })]), - ); + expectWorkspaceCoveredByState(plan); } finally { await fs.rm(symlinkDir, { recursive: true, force: true }); } @@ -116,11 +140,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "state.txt"), "state\n", "utf8"); await fs.writeFile(path.join(externalWorkspace, "SOUL.md"), "# external\n", "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const nowMs = Date.UTC(2026, 2, 9, 0, 0, 0); const result = await backupCreateCommand(runtime, { @@ -189,11 +209,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); await fs.writeFile(path.join(stateDir, "state.txt"), "state\n", "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const result = await backupCreateCommand(runtime, { output: archiveDir, @@ -214,11 +230,7 @@ describe("backup commands", () => { const stateDir = path.join(tempHome.home, ".openclaw"); await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); await expect( 
backupCreateCommand(runtime, { @@ -239,11 +251,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); await fs.symlink(stateDir, symlinkPath); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); await expect( backupCreateCommand(runtime, { @@ -263,11 +271,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(workspaceDir, "SOUL.md"), "# soul\n", "utf8"); process.chdir(workspaceDir); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const nowMs = Date.UTC(2026, 2, 9, 1, 2, 3); const result = await backupCreateCommand(runtime, { nowMs }); @@ -294,11 +298,7 @@ describe("backup commands", () => { await fs.symlink(workspaceDir, workspaceLink); process.chdir(workspaceLink); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const nowMs = Date.UTC(2026, 2, 9, 1, 3, 4); const result = await backupCreateCommand(runtime, { nowMs }); @@ -318,11 +318,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); await fs.writeFile(existingArchive, "already here", "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const result = await backupCreateCommand(runtime, { output: existingArchive, @@ -336,41 +332,15 @@ describe("backup commands", () => { }); it("fails fast when config is invalid and workspace backup is enabled", async () => { - const stateDir = path.join(tempHome.home, ".openclaw"); - const configPath = path.join(tempHome.home, "custom-config.json"); - process.env.OPENCLAW_CONFIG_PATH = configPath; - await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); - await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8"); - - const 
runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - try { + await withInvalidWorkspaceBackupConfig(async (runtime) => { await expect(backupCreateCommand(runtime, { dryRun: true })).rejects.toThrow( /--no-include-workspace/i, ); - } finally { - delete process.env.OPENCLAW_CONFIG_PATH; - } + }); }); it("allows explicit partial backups when config is invalid", async () => { - const stateDir = path.join(tempHome.home, ".openclaw"); - const configPath = path.join(tempHome.home, "custom-config.json"); - process.env.OPENCLAW_CONFIG_PATH = configPath; - await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); - await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8"); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - try { + await withInvalidWorkspaceBackupConfig(async (runtime) => { const result = await backupCreateCommand(runtime, { dryRun: true, includeWorkspace: false, @@ -378,9 +348,7 @@ describe("backup commands", () => { expect(result.includeWorkspace).toBe(false); expect(result.assets.some((asset) => asset.kind === "workspace")).toBe(false); - } finally { - delete process.env.OPENCLAW_CONFIG_PATH; - } + }); }); it("backs up only the active config file when --only-config is requested", async () => { @@ -391,11 +359,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "state.txt"), "state\n", "utf8"); await fs.writeFile(path.join(stateDir, "credentials", "oauth.json"), "{}", "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const result = await backupCreateCommand(runtime, { dryRun: true, @@ -413,11 +377,7 @@ describe("backup commands", () => { process.env.OPENCLAW_CONFIG_PATH = configPath; await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = 
createRuntime(); try { const result = await backupCreateCommand(runtime, { diff --git a/src/commands/backup.ts b/src/commands/backup.ts index 15f0f505d76..ab4397db0f3 100644 --- a/src/commands/backup.ts +++ b/src/commands/backup.ts @@ -1,382 +1,31 @@ -import { randomUUID } from "node:crypto"; -import { constants as fsConstants } from "node:fs"; -import fs from "node:fs/promises"; -import os from "node:os"; -import path from "node:path"; -import * as tar from "tar"; -import type { RuntimeEnv } from "../runtime.js"; -import { resolveHomeDir, resolveUserPath } from "../utils.js"; -import { resolveRuntimeServiceVersion } from "../version.js"; import { - buildBackupArchiveBasename, - buildBackupArchiveRoot, - buildBackupArchivePath, - type BackupAsset, - resolveBackupPlanFromDisk, -} from "./backup-shared.js"; + createBackupArchive, + formatBackupCreateSummary, + type BackupCreateOptions, + type BackupCreateResult, +} from "../infra/backup-create.js"; +import type { RuntimeEnv } from "../runtime.js"; import { backupVerifyCommand } from "./backup-verify.js"; -import { isPathWithin } from "./cleanup-utils.js"; - -export type BackupCreateOptions = { - output?: string; - dryRun?: boolean; - includeWorkspace?: boolean; - onlyConfig?: boolean; - verify?: boolean; - json?: boolean; - nowMs?: number; -}; - -type BackupManifestAsset = { - kind: BackupAsset["kind"]; - sourcePath: string; - archivePath: string; -}; - -type BackupManifest = { - schemaVersion: 1; - createdAt: string; - archiveRoot: string; - runtimeVersion: string; - platform: NodeJS.Platform; - nodeVersion: string; - options: { - includeWorkspace: boolean; - onlyConfig?: boolean; - }; - paths: { - stateDir: string; - configPath: string; - oauthDir: string; - workspaceDirs: string[]; - }; - assets: BackupManifestAsset[]; - skipped: Array<{ - kind: string; - sourcePath: string; - reason: string; - coveredBy?: string; - }>; -}; - -export type BackupCreateResult = { - createdAt: string; - archiveRoot: string; - 
archivePath: string; - dryRun: boolean; - includeWorkspace: boolean; - onlyConfig: boolean; - verified: boolean; - assets: BackupAsset[]; - skipped: Array<{ - kind: string; - sourcePath: string; - displayPath: string; - reason: string; - coveredBy?: string; - }>; -}; - -async function resolveOutputPath(params: { - output?: string; - nowMs: number; - includedAssets: BackupAsset[]; - stateDir: string; -}): Promise { - const basename = buildBackupArchiveBasename(params.nowMs); - const rawOutput = params.output?.trim(); - if (!rawOutput) { - const cwd = path.resolve(process.cwd()); - const canonicalCwd = await fs.realpath(cwd).catch(() => cwd); - const cwdInsideSource = params.includedAssets.some((asset) => - isPathWithin(canonicalCwd, asset.sourcePath), - ); - const defaultDir = cwdInsideSource ? (resolveHomeDir() ?? path.dirname(params.stateDir)) : cwd; - return path.resolve(defaultDir, basename); - } - - const resolved = resolveUserPath(rawOutput); - if (rawOutput.endsWith("/") || rawOutput.endsWith("\\")) { - return path.join(resolved, basename); - } - - try { - const stat = await fs.stat(resolved); - if (stat.isDirectory()) { - return path.join(resolved, basename); - } - } catch { - // Treat as a file path when the target does not exist yet. 
- } - - return resolved; -} - -async function assertOutputPathReady(outputPath: string): Promise { - try { - await fs.access(outputPath); - throw new Error(`Refusing to overwrite existing backup archive: ${outputPath}`); - } catch (err) { - const code = (err as NodeJS.ErrnoException | undefined)?.code; - if (code === "ENOENT") { - return; - } - throw err; - } -} - -function buildTempArchivePath(outputPath: string): string { - return `${outputPath}.${randomUUID()}.tmp`; -} - -function isLinkUnsupportedError(code: string | undefined): boolean { - return code === "ENOTSUP" || code === "EOPNOTSUPP" || code === "EPERM"; -} - -async function publishTempArchive(params: { - tempArchivePath: string; - outputPath: string; -}): Promise { - try { - await fs.link(params.tempArchivePath, params.outputPath); - } catch (err) { - const code = (err as NodeJS.ErrnoException | undefined)?.code; - if (code === "EEXIST") { - throw new Error(`Refusing to overwrite existing backup archive: ${params.outputPath}`, { - cause: err, - }); - } - if (!isLinkUnsupportedError(code)) { - throw err; - } - - try { - // Some backup targets support ordinary files but not hard links. - await fs.copyFile(params.tempArchivePath, params.outputPath, fsConstants.COPYFILE_EXCL); - } catch (copyErr) { - const copyCode = (copyErr as NodeJS.ErrnoException | undefined)?.code; - if (copyCode !== "EEXIST") { - await fs.rm(params.outputPath, { force: true }).catch(() => undefined); - } - if (copyCode === "EEXIST") { - throw new Error(`Refusing to overwrite existing backup archive: ${params.outputPath}`, { - cause: copyErr, - }); - } - throw copyErr; - } - } - await fs.rm(params.tempArchivePath, { force: true }); -} - -async function canonicalizePathForContainment(targetPath: string): Promise { - const resolved = path.resolve(targetPath); - const suffix: string[] = []; - let probe = resolved; - - while (true) { - try { - const realProbe = await fs.realpath(probe); - return suffix.length === 0 ? 
realProbe : path.join(realProbe, ...suffix.toReversed()); - } catch { - const parent = path.dirname(probe); - if (parent === probe) { - return resolved; - } - suffix.push(path.basename(probe)); - probe = parent; - } - } -} - -function buildManifest(params: { - createdAt: string; - archiveRoot: string; - includeWorkspace: boolean; - onlyConfig: boolean; - assets: BackupAsset[]; - skipped: BackupCreateResult["skipped"]; - stateDir: string; - configPath: string; - oauthDir: string; - workspaceDirs: string[]; -}): BackupManifest { - return { - schemaVersion: 1, - createdAt: params.createdAt, - archiveRoot: params.archiveRoot, - runtimeVersion: resolveRuntimeServiceVersion(), - platform: process.platform, - nodeVersion: process.version, - options: { - includeWorkspace: params.includeWorkspace, - onlyConfig: params.onlyConfig, - }, - paths: { - stateDir: params.stateDir, - configPath: params.configPath, - oauthDir: params.oauthDir, - workspaceDirs: params.workspaceDirs, - }, - assets: params.assets.map((asset) => ({ - kind: asset.kind, - sourcePath: asset.sourcePath, - archivePath: asset.archivePath, - })), - skipped: params.skipped.map((entry) => ({ - kind: entry.kind, - sourcePath: entry.sourcePath, - reason: entry.reason, - coveredBy: entry.coveredBy, - })), - }; -} - -function formatTextSummary(result: BackupCreateResult): string[] { - const lines = [`Backup archive: ${result.archivePath}`]; - lines.push(`Included ${result.assets.length} path${result.assets.length === 1 ? "" : "s"}:`); - for (const asset of result.assets) { - lines.push(`- ${asset.kind}: ${asset.displayPath}`); - } - if (result.skipped.length > 0) { - lines.push(`Skipped ${result.skipped.length} path${result.skipped.length === 1 ? 
"" : "s"}:`); - for (const entry of result.skipped) { - if (entry.reason === "covered" && entry.coveredBy) { - lines.push(`- ${entry.kind}: ${entry.displayPath} (${entry.reason} by ${entry.coveredBy})`); - } else { - lines.push(`- ${entry.kind}: ${entry.displayPath} (${entry.reason})`); - } - } - } - if (result.dryRun) { - lines.push("Dry run only; archive was not written."); - } else { - lines.push(`Created ${result.archivePath}`); - if (result.verified) { - lines.push("Archive verification: passed"); - } - } - return lines; -} - -function remapArchiveEntryPath(params: { - entryPath: string; - manifestPath: string; - archiveRoot: string; -}): string { - const normalizedEntry = path.resolve(params.entryPath); - if (normalizedEntry === params.manifestPath) { - return path.posix.join(params.archiveRoot, "manifest.json"); - } - return buildBackupArchivePath(params.archiveRoot, normalizedEntry); -} +export type { BackupCreateOptions, BackupCreateResult } from "../infra/backup-create.js"; export async function backupCreateCommand( runtime: RuntimeEnv, opts: BackupCreateOptions = {}, ): Promise { - const nowMs = opts.nowMs ?? Date.now(); - const archiveRoot = buildBackupArchiveRoot(nowMs); - const onlyConfig = Boolean(opts.onlyConfig); - const includeWorkspace = onlyConfig ? false : (opts.includeWorkspace ?? true); - const plan = await resolveBackupPlanFromDisk({ includeWorkspace, onlyConfig, nowMs }); - const outputPath = await resolveOutputPath({ - output: opts.output, - nowMs, - includedAssets: plan.included, - stateDir: plan.stateDir, - }); - - if (plan.included.length === 0) { - throw new Error( - onlyConfig - ? "No OpenClaw config file was found to back up." 
- : "No local OpenClaw state was found to back up.", + const result = await createBackupArchive(opts); + if (opts.verify && !opts.dryRun) { + await backupVerifyCommand( + { + ...runtime, + log: () => {}, + }, + { archive: result.archivePath, json: false }, ); + result.verified = true; } - - const canonicalOutputPath = await canonicalizePathForContainment(outputPath); - const overlappingAsset = plan.included.find((asset) => - isPathWithin(canonicalOutputPath, asset.sourcePath), - ); - if (overlappingAsset) { - throw new Error( - `Backup output must not be written inside a source path: ${outputPath} is inside ${overlappingAsset.sourcePath}`, - ); - } - - if (!opts.dryRun) { - await assertOutputPathReady(outputPath); - } - - const createdAt = new Date(nowMs).toISOString(); - const result: BackupCreateResult = { - createdAt, - archiveRoot, - archivePath: outputPath, - dryRun: Boolean(opts.dryRun), - includeWorkspace, - onlyConfig, - verified: false, - assets: plan.included, - skipped: plan.skipped, - }; - - if (!opts.dryRun) { - await fs.mkdir(path.dirname(outputPath), { recursive: true }); - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-")); - const manifestPath = path.join(tempDir, "manifest.json"); - const tempArchivePath = buildTempArchivePath(outputPath); - try { - const manifest = buildManifest({ - createdAt, - archiveRoot, - includeWorkspace, - onlyConfig, - assets: result.assets, - skipped: result.skipped, - stateDir: plan.stateDir, - configPath: plan.configPath, - oauthDir: plan.oauthDir, - workspaceDirs: plan.workspaceDirs, - }); - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - - await tar.c( - { - file: tempArchivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - entry.path = remapArchiveEntryPath({ - entryPath: entry.path, - manifestPath, - archiveRoot, - }); - }, - }, - [manifestPath, ...result.assets.map((asset) => asset.sourcePath)], - ); - await 
publishTempArchive({ tempArchivePath, outputPath }); - } finally { - await fs.rm(tempArchivePath, { force: true }).catch(() => undefined); - await fs.rm(tempDir, { recursive: true, force: true }).catch(() => undefined); - } - - if (opts.verify) { - await backupVerifyCommand( - { - ...runtime, - log: () => {}, - }, - { archive: outputPath, json: false }, - ); - result.verified = true; - } - } - - const output = opts.json ? JSON.stringify(result, null, 2) : formatTextSummary(result).join("\n"); + const output = opts.json + ? JSON.stringify(result, null, 2) + : formatBackupCreateSummary(result).join("\n"); runtime.log(output); return result; } diff --git a/src/commands/channels.config-only-status-output.test.ts b/src/commands/channels.config-only-status-output.test.ts index 89ff1cc2614..7019c84bb3a 100644 --- a/src/commands/channels.config-only-status-output.test.ts +++ b/src/commands/channels.config-only-status-output.test.ts @@ -5,24 +5,60 @@ import { makeDirectPlugin } from "../test-utils/channel-plugin-test-fixtures.js" import { createTestRegistry } from "../test-utils/channel-plugins.js"; import { formatConfigChannelsStatusLines } from "./channels/status.js"; +function registerSingleTestPlugin(pluginId: string, plugin: ChannelPlugin) { + setActivePluginRegistry( + createTestRegistry([ + { + pluginId, + source: "test", + plugin, + }, + ]), + ); +} + +async function formatLocalStatusSummary( + cfg: unknown, + options?: { + sourceConfig?: unknown; + }, +) { + const lines = await formatConfigChannelsStatusLines( + cfg as never, + { mode: "local" }, + options?.sourceConfig ? 
{ sourceConfig: options.sourceConfig as never } : undefined, + ); + return lines.join("\n"); +} + +function unresolvedTokenAccount() { + return { + name: "Primary", + enabled: true, + configured: true, + token: "", + tokenSource: "config", + tokenStatus: "configured_unavailable", + } as const; +} + +function tokenOnlyPluginConfig() { + return { + listAccountIds: () => ["primary"], + defaultAccountId: () => "primary", + isConfigured: () => true, + isEnabled: () => true, + } as const; +} + function makeUnavailableTokenPlugin(): ChannelPlugin { return makeDirectPlugin({ id: "token-only", label: "TokenOnly", docsPath: "/channels/token-only", config: { - listAccountIds: () => ["primary"], - defaultAccountId: () => "primary", - resolveAccount: () => ({ - name: "Primary", - enabled: true, - configured: true, - token: "", - tokenSource: "config", - tokenStatus: "configured_unavailable", - }), - isConfigured: () => true, - isEnabled: () => true, + ...tokenOnlyPluginConfig(), + resolveAccount: () => unresolvedTokenAccount(), }, }); } @@ -33,8 +69,7 @@ function makeResolvedTokenPlugin(): ChannelPlugin { label: "TokenOnly", docsPath: "/channels/token-only", config: { - listAccountIds: () => ["primary"], - defaultAccountId: () => "primary", + ...tokenOnlyPluginConfig(), inspectAccount: (cfg) => (cfg as { secretResolved?: boolean }).secretResolved ? 
{ @@ -46,25 +81,8 @@ function makeResolvedTokenPlugin(): ChannelPlugin { tokenSource: "config", tokenStatus: "available", } - : { - accountId: "primary", - name: "Primary", - enabled: true, - configured: true, - token: "", - tokenSource: "config", - tokenStatus: "configured_unavailable", - }, - resolveAccount: () => ({ - name: "Primary", - enabled: true, - configured: true, - token: "", - tokenSource: "config", - tokenStatus: "configured_unavailable", - }), - isConfigured: () => true, - isEnabled: () => true, + : { accountId: "primary", ...unresolvedTokenAccount() }, + resolveAccount: () => unresolvedTokenAccount(), }, }); } @@ -156,92 +174,42 @@ describe("config-only channels status output", () => { }); it("shows configured-but-unavailable credentials distinctly from not configured", async () => { - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "token-only", - source: "test", - plugin: makeUnavailableTokenPlugin(), - }, - ]), - ); + registerSingleTestPlugin("token-only", makeUnavailableTokenPlugin()); - const lines = await formatConfigChannelsStatusLines({ channels: {} } as never, { - mode: "local", - }); - - const joined = lines.join("\n"); + const joined = await formatLocalStatusSummary({ channels: {} }); expect(joined).toContain("TokenOnly"); expect(joined).toContain("configured, secret unavailable in this command path"); expect(joined).toContain("token:config (unavailable)"); }); it("prefers resolved config snapshots when command-local secret resolution succeeds", async () => { - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "token-only", - source: "test", - plugin: makeResolvedTokenPlugin(), - }, - ]), - ); + registerSingleTestPlugin("token-only", makeResolvedTokenPlugin()); - const lines = await formatConfigChannelsStatusLines( - { secretResolved: true, channels: {} } as never, + const joined = await formatLocalStatusSummary( + { secretResolved: true, channels: {} }, { - mode: "local", - }, - { - sourceConfig: { channels: 
{} } as never, + sourceConfig: { channels: {} }, }, ); - - const joined = lines.join("\n"); expectResolvedTokenStatusSummary(joined, { includeUnavailableTokenLine: false }); }); it("does not resolve raw source config for extension channels without inspectAccount", async () => { - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "token-only", - source: "test", - plugin: makeResolvedTokenPluginWithoutInspectAccount(), - }, - ]), - ); + registerSingleTestPlugin("token-only", makeResolvedTokenPluginWithoutInspectAccount()); - const lines = await formatConfigChannelsStatusLines( - { secretResolved: true, channels: {} } as never, + const joined = await formatLocalStatusSummary( + { secretResolved: true, channels: {} }, { - mode: "local", - }, - { - sourceConfig: { channels: {} } as never, + sourceConfig: { channels: {} }, }, ); - - const joined = lines.join("\n"); expectResolvedTokenStatusSummary(joined); }); it("renders Slack HTTP signing-secret availability in config-only status", async () => { - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "slack", - source: "test", - plugin: makeUnavailableHttpSlackPlugin(), - }, - ]), - ); + registerSingleTestPlugin("slack", makeUnavailableHttpSlackPlugin()); - const lines = await formatConfigChannelsStatusLines({ channels: {} } as never, { - mode: "local", - }); - - const joined = lines.join("\n"); + const joined = await formatLocalStatusSummary({ channels: {} }); expect(joined).toContain("Slack"); expect(joined).toContain("configured, secret unavailable in this command path"); expect(joined).toContain("mode:http"); diff --git a/src/commands/channels/add.ts b/src/commands/channels/add.ts index 882e7f16ca5..ebf80e6a735 100644 --- a/src/commands/channels/add.ts +++ b/src/commands/channels/add.ts @@ -1,5 +1,6 @@ import { resolveAgentWorkspaceDir, resolveDefaultAgentId } from "../../agents/agent-scope.js"; import { listChannelPluginCatalogEntries } from "../../channels/plugins/catalog.js"; +import { 
parseOptionalDelimitedEntries } from "../../channels/plugins/helpers.js"; import { getChannelPlugin, normalizeChannelId } from "../../channels/plugins/index.js"; import { moveSingleAccountChannelSectionToDefaultAccount } from "../../channels/plugins/setup-helpers.js"; import type { ChannelId, ChannelSetupInput } from "../../channels/plugins/types.js"; @@ -28,17 +29,6 @@ export type ChannelsAddOptions = { dmAllowlist?: string; } & Omit; -function parseList(value: string | undefined): string[] | undefined { - if (!value?.trim()) { - return undefined; - } - const parsed = value - .split(/[\n,;]+/g) - .map((entry) => entry.trim()) - .filter(Boolean); - return parsed.length > 0 ? parsed : undefined; -} - function resolveCatalogChannelEntry(raw: string, cfg: OpenClawConfig | null) { const trimmed = raw.trim().toLowerCase(); if (!trimmed) { @@ -225,8 +215,8 @@ export async function channelsAddCommand( : typeof opts.initialSyncLimit === "string" && opts.initialSyncLimit.trim() ? Number.parseInt(opts.initialSyncLimit, 10) : undefined; - const groupChannels = parseList(opts.groupChannels); - const dmAllowlist = parseList(opts.dmAllowlist); + const groupChannels = parseOptionalDelimitedEntries(opts.groupChannels); + const dmAllowlist = parseOptionalDelimitedEntries(opts.dmAllowlist); const input: ChannelSetupInput = { name: opts.name, diff --git a/src/commands/configure.daemon.test.ts b/src/commands/configure.daemon.test.ts index 9a7aa76e0c8..11b54dc6b19 100644 --- a/src/commands/configure.daemon.test.ts +++ b/src/commands/configure.daemon.test.ts @@ -1,13 +1,22 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; -const withProgress = vi.hoisted(() => vi.fn(async (_opts, run) => run({ setLabel: vi.fn() }))); +const progressSetLabel = vi.hoisted(() => vi.fn()); +const withProgress = vi.hoisted(() => + vi.fn(async (_opts, run) => run({ setLabel: progressSetLabel })), +); const loadConfig = vi.hoisted(() => vi.fn()); const resolveGatewayInstallToken = vi.hoisted(() 
=> vi.fn()); const buildGatewayInstallPlan = vi.hoisted(() => vi.fn()); const note = vi.hoisted(() => vi.fn()); const serviceIsLoaded = vi.hoisted(() => vi.fn(async () => false)); const serviceInstall = vi.hoisted(() => vi.fn(async () => {})); +const serviceRestart = vi.hoisted(() => + vi.fn<() => Promise<{ outcome: "completed" } | { outcome: "scheduled" }>>(async () => ({ + outcome: "completed", + })), +); const ensureSystemdUserLingerInteractive = vi.hoisted(() => vi.fn(async () => {})); +const select = vi.hoisted(() => vi.fn(async () => "node")); vi.mock("../cli/progress.js", () => ({ withProgress, @@ -32,7 +41,7 @@ vi.mock("../terminal/note.js", () => ({ vi.mock("./configure.shared.js", () => ({ confirm: vi.fn(async () => true), - select: vi.fn(async () => "node"), + select, })); vi.mock("./daemon-runtime.js", () => ({ @@ -40,12 +49,17 @@ vi.mock("./daemon-runtime.js", () => ({ GATEWAY_DAEMON_RUNTIME_OPTIONS: [{ value: "node", label: "Node" }], })); -vi.mock("../daemon/service.js", () => ({ - resolveGatewayService: vi.fn(() => ({ - isLoaded: serviceIsLoaded, - install: serviceInstall, - })), -})); +vi.mock("../daemon/service.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveGatewayService: vi.fn(() => ({ + isLoaded: serviceIsLoaded, + install: serviceInstall, + restart: serviceRestart, + })), + }; +}); vi.mock("./onboard-helpers.js", () => ({ guardCancel: (value: unknown) => value, @@ -60,8 +74,10 @@ const { maybeInstallDaemon } = await import("./configure.daemon.js"); describe("maybeInstallDaemon", () => { beforeEach(() => { vi.clearAllMocks(); + progressSetLabel.mockReset(); serviceIsLoaded.mockResolvedValue(false); serviceInstall.mockResolvedValue(undefined); + serviceRestart.mockResolvedValue({ outcome: "completed" }); loadConfig.mockReturnValue({}); resolveGatewayInstallToken.mockResolvedValue({ token: undefined, @@ -152,4 +168,19 @@ describe("maybeInstallDaemon", () => { 
expect(serviceInstall).toHaveBeenCalledTimes(1); }); + + it("shows restart scheduled when a loaded service defers restart handoff", async () => { + serviceIsLoaded.mockResolvedValue(true); + select.mockResolvedValueOnce("restart"); + serviceRestart.mockResolvedValueOnce({ outcome: "scheduled" }); + + await maybeInstallDaemon({ + runtime: { log: vi.fn(), error: vi.fn(), exit: vi.fn() }, + port: 18789, + }); + + expect(serviceRestart).toHaveBeenCalledTimes(1); + expect(serviceInstall).not.toHaveBeenCalled(); + expect(progressSetLabel).toHaveBeenLastCalledWith("Gateway service restart scheduled."); + }); }); diff --git a/src/commands/configure.daemon.ts b/src/commands/configure.daemon.ts index 4f943982a38..64272c9e2bc 100644 --- a/src/commands/configure.daemon.ts +++ b/src/commands/configure.daemon.ts @@ -1,6 +1,6 @@ import { withProgress } from "../cli/progress.js"; import { loadConfig } from "../config/config.js"; -import { resolveGatewayService } from "../daemon/service.js"; +import { describeGatewayServiceRestart, resolveGatewayService } from "../daemon/service.js"; import { isNonFatalSystemdInstallProbeError } from "../daemon/systemd.js"; import type { RuntimeEnv } from "../runtime.js"; import { note } from "../terminal/note.js"; @@ -50,11 +50,13 @@ export async function maybeInstallDaemon(params: { { label: "Gateway service", indeterminate: true, delayMs: 0 }, async (progress) => { progress.setLabel("Restarting Gateway service…"); - await service.restart({ + const restartResult = await service.restart({ env: process.env, stdout: process.stdout, }); - progress.setLabel("Gateway service restarted."); + progress.setLabel( + describeGatewayServiceRestart("Gateway", restartResult).progressMessage, + ); }, ); shouldCheckLinger = true; diff --git a/src/commands/configure.gateway-auth.ts b/src/commands/configure.gateway-auth.ts index 40cb26bf4e5..78bcc88ca5f 100644 --- a/src/commands/configure.gateway-auth.ts +++ b/src/commands/configure.gateway-auth.ts @@ -1,4 +1,5 @@ 
import { ensureAuthProfileStore } from "../agents/auth-profiles.js"; +import { resolveDefaultAgentWorkspaceDir } from "../agents/workspace.js"; import type { OpenClawConfig, GatewayAuthConfig } from "../config/config.js"; import { isSecretRef, type SecretInput } from "../config/types.secrets.js"; import type { RuntimeEnv } from "../runtime.js"; @@ -86,6 +87,7 @@ export async function promptAuthConfig( allowKeychainPrompt: false, }), includeSkip: true, + config: cfg, }); let next = cfg; @@ -107,7 +109,13 @@ export async function promptAuthConfig( prompter, allowKeep: true, ignoreAllowlist: true, - preferredProvider: resolvePreferredProviderForAuthChoice(authChoice), + includeProviderPluginSetups: true, + preferredProvider: resolvePreferredProviderForAuthChoice({ + choice: authChoice, + config: next, + }), + workspaceDir: resolveDefaultAgentWorkspaceDir(), + runtime, }); if (modelSelection.config) { next = modelSelection.config; diff --git a/src/commands/daemon-install-helpers.test.ts b/src/commands/daemon-install-helpers.test.ts index 54c5ef7e704..931a983a8ee 100644 --- a/src/commands/daemon-install-helpers.test.ts +++ b/src/commands/daemon-install-helpers.test.ts @@ -1,6 +1,7 @@ import { afterEach, describe, expect, it, vi } from "vitest"; const mocks = vi.hoisted(() => ({ + loadAuthProfileStoreForSecretsRuntime: vi.fn(), resolvePreferredNodePath: vi.fn(), resolveGatewayProgramArguments: vi.fn(), resolveSystemNodeInfo: vi.fn(), @@ -8,6 +9,10 @@ const mocks = vi.hoisted(() => ({ buildServiceEnvironment: vi.fn(), })); +vi.mock("../agents/auth-profiles.js", () => ({ + loadAuthProfileStoreForSecretsRuntime: mocks.loadAuthProfileStoreForSecretsRuntime, +})); + vi.mock("../daemon/runtime-paths.js", () => ({ resolvePreferredNodePath: mocks.resolvePreferredNodePath, resolveSystemNodeInfo: mocks.resolveSystemNodeInfo, @@ -63,6 +68,10 @@ function mockNodeGatewayPlanFixture( programArguments: ["node", "gateway"], workingDirectory, }); + 
mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({ + version: 1, + profiles: {}, + }); mocks.resolveSystemNodeInfo.mockResolvedValue({ path: "/opt/node", version, @@ -232,11 +241,73 @@ describe("buildGatewayInstallPlan", () => { expect(plan.environment.HOME).toBe("/Users/service"); expect(plan.environment.OPENCLAW_PORT).toBe("3000"); }); + + it("merges env-backed auth-profile refs into the service environment", async () => { + mockNodeGatewayPlanFixture({ + serviceEnvironment: { + OPENCLAW_PORT: "3000", + }, + }); + mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({ + version: 1, + profiles: { + "openai:default": { + type: "api_key", + provider: "openai", + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + "anthropic:default": { + type: "token", + provider: "anthropic", + tokenRef: { source: "env", provider: "default", id: "ANTHROPIC_TOKEN" }, + }, + }, + }); + + const plan = await buildGatewayInstallPlan({ + env: { + OPENAI_API_KEY: "sk-openai-test", // pragma: allowlist secret + ANTHROPIC_TOKEN: "ant-test-token", + }, + port: 3000, + runtime: "node", + }); + + expect(plan.environment.OPENAI_API_KEY).toBe("sk-openai-test"); + expect(plan.environment.ANTHROPIC_TOKEN).toBe("ant-test-token"); + }); + + it("skips unresolved auth-profile env refs", async () => { + mockNodeGatewayPlanFixture({ + serviceEnvironment: { + OPENCLAW_PORT: "3000", + }, + }); + mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({ + version: 1, + profiles: { + "openai:default": { + type: "api_key", + provider: "openai", + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + }, + }); + + const plan = await buildGatewayInstallPlan({ + env: {}, + port: 3000, + runtime: "node", + }); + + expect(plan.environment.OPENAI_API_KEY).toBeUndefined(); + }); }); describe("gatewayInstallErrorHint", () => { it("returns platform-specific hints", () => { - expect(gatewayInstallErrorHint("win32")).toContain("Run as administrator"); 
+ expect(gatewayInstallErrorHint("win32")).toContain("Startup-folder login item"); + expect(gatewayInstallErrorHint("win32")).toContain("elevated PowerShell"); expect(gatewayInstallErrorHint("linux")).toMatch( /(?:openclaw|openclaw)( --profile isolated)? gateway install/, ); diff --git a/src/commands/daemon-install-helpers.ts b/src/commands/daemon-install-helpers.ts index 68b78630ffe..91248cb86a7 100644 --- a/src/commands/daemon-install-helpers.ts +++ b/src/commands/daemon-install-helpers.ts @@ -1,3 +1,7 @@ +import { + loadAuthProfileStoreForSecretsRuntime, + type AuthProfileStore, +} from "../agents/auth-profiles.js"; import { formatCliCommand } from "../cli/command-format.js"; import { collectConfigServiceEnvVars } from "../config/env-vars.js"; import type { OpenClawConfig } from "../config/types.js"; @@ -19,6 +23,33 @@ export type GatewayInstallPlan = { environment: Record; }; +function collectAuthProfileServiceEnvVars(params: { + env: Record; + authStore?: AuthProfileStore; +}): Record { + const authStore = params.authStore ?? loadAuthProfileStoreForSecretsRuntime(); + const entries: Record = {}; + + for (const credential of Object.values(authStore.profiles)) { + const ref = + credential.type === "api_key" + ? credential.keyRef + : credential.type === "token" + ? credential.tokenRef + : undefined; + if (!ref || ref.source !== "env") { + continue; + } + const value = params.env[ref.id]?.trim(); + if (!value) { + continue; + } + entries[ref.id] = value; + } + + return entries; +} + export async function buildGatewayInstallPlan(params: { env: Record; port: number; @@ -28,6 +59,7 @@ export async function buildGatewayInstallPlan(params: { warn?: DaemonInstallWarnFn; /** Full config to extract env vars from (env vars + inline env keys). 
*/ config?: OpenClawConfig; + authStore?: AuthProfileStore; }): Promise { const { devMode, nodePath } = await resolveDaemonInstallRuntimeInputs({ env: params.env, @@ -61,6 +93,10 @@ export async function buildGatewayInstallPlan(params: { // Config env vars are added first so service-specific vars take precedence. const environment: Record = { ...collectConfigServiceEnvVars(params.config), + ...collectAuthProfileServiceEnvVars({ + env: params.env, + authStore: params.authStore, + }), }; Object.assign(environment, serviceEnvironment); @@ -69,6 +105,6 @@ export async function buildGatewayInstallPlan(params: { export function gatewayInstallErrorHint(platform = process.platform): string { return platform === "win32" - ? "Tip: rerun from an elevated PowerShell (Start → type PowerShell → right-click → Run as administrator) or skip service install." + ? "Tip: native Windows now falls back to a per-user Startup-folder login item when Scheduled Task creation is denied; if install still fails, rerun from an elevated PowerShell or skip service install." : `Tip: rerun \`${formatCliCommand("openclaw gateway install")}\` after fixing the error.`; } diff --git a/src/commands/doctor-config-analysis.ts b/src/commands/doctor-config-analysis.ts index dea3fa1b3f2..994bac5f863 100644 --- a/src/commands/doctor-config-analysis.ts +++ b/src/commands/doctor-config-analysis.ts @@ -105,18 +105,22 @@ export function noteOpencodeProviderOverrides(cfg: OpenClawConfig): void { if (providers["opencode-zen"]) { overrides.push("opencode-zen"); } + if (providers["opencode-go"]) { + overrides.push("opencode-go"); + } if (overrides.length === 0) { return; } const lines = overrides.flatMap((id) => { + const providerLabel = id === "opencode-go" ? "OpenCode Go" : "OpenCode Zen"; const providerEntry = providers[id]; const api = isRecord(providerEntry) && typeof providerEntry.api === "string" ? 
providerEntry.api : undefined; return [ - `- models.providers.${id} is set; this overrides the built-in OpenCode Zen catalog.`, + `- models.providers.${id} is set; this overrides the built-in ${providerLabel} catalog.`, api ? `- models.providers.${id}.api=${api}` : null, ].filter((line): line is string => Boolean(line)); }); @@ -124,7 +128,7 @@ export function noteOpencodeProviderOverrides(cfg: OpenClawConfig): void { lines.push( "- Remove these entries to restore per-model API routing + costs (then re-run onboarding if needed).", ); - note(lines.join("\n"), "OpenCode Zen"); + note(lines.join("\n"), "OpenCode"); } export function noteIncludeConfinementWarning(snapshot: { diff --git a/src/commands/doctor-config-flow.test.ts b/src/commands/doctor-config-flow.test.ts index 2ce46adeb29..265c90197e2 100644 --- a/src/commands/doctor-config-flow.test.ts +++ b/src/commands/doctor-config-flow.test.ts @@ -107,6 +107,40 @@ describe("doctor config flow", () => { ).toBe(false); }); + it("warns on mutable Zalouser group entries when dangerous name matching is disabled", async () => { + const doctorWarnings = await collectDoctorWarnings({ + channels: { + zalouser: { + groups: { + "Ops Room": { allow: true }, + }, + }, + }, + }); + + expect( + doctorWarnings.some( + (line) => + line.includes("mutable allowlist") && line.includes("channels.zalouser.groups: Ops Room"), + ), + ).toBe(true); + }); + + it("does not warn on mutable Zalouser group entries when dangerous name matching is enabled", async () => { + const doctorWarnings = await collectDoctorWarnings({ + channels: { + zalouser: { + dangerouslyAllowNameMatching: true, + groups: { + "Ops Room": { allow: true }, + }, + }, + }, + }); + + expect(doctorWarnings.some((line) => line.includes("channels.zalouser.groups"))).toBe(false); + }); + it("warns when imessage group allowlist is empty even if allowFrom is set", async () => { const doctorWarnings = await collectDoctorWarnings({ channels: { diff --git 
a/src/commands/doctor-config-flow.ts b/src/commands/doctor-config-flow.ts index ff97c001f07..71cd6926417 100644 --- a/src/commands/doctor-config-flow.ts +++ b/src/commands/doctor-config-flow.ts @@ -44,6 +44,7 @@ import { isMSTeamsMutableAllowEntry, isMattermostMutableAllowEntry, isSlackMutableAllowEntry, + isZalouserMutableGroupEntry, } from "../security/mutable-allowlist-detectors.js"; import { inspectTelegramAccount } from "../telegram/account-inspect.js"; import { listTelegramAccountIds, resolveTelegramAccount } from "../telegram/accounts.js"; @@ -885,6 +886,27 @@ function scanMutableAllowlistEntries(cfg: OpenClawConfig): MutableAllowlistHit[] } } + for (const scope of collectProviderDangerousNameMatchingScopes(cfg, "zalouser")) { + if (scope.dangerousNameMatchingEnabled) { + continue; + } + const groups = asObjectRecord(scope.account.groups); + if (!groups) { + continue; + } + for (const entry of Object.keys(groups)) { + if (!isZalouserMutableGroupEntry(entry)) { + continue; + } + hits.push({ + channel: "zalouser", + path: `${scope.prefix}.groups`, + entry, + dangerousFlagPath: scope.dangerousFlagPath, + }); + } + } + return hits; } diff --git a/src/commands/doctor-cron.test.ts b/src/commands/doctor-cron.test.ts new file mode 100644 index 00000000000..3ad4f2811ed --- /dev/null +++ b/src/commands/doctor-cron.test.ts @@ -0,0 +1,249 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import type { OpenClawConfig } from "../config/config.js"; +import * as noteModule from "../terminal/note.js"; +import { maybeRepairLegacyCronStore } from "./doctor-cron.js"; + +let tempRoot: string | null = null; + +async function makeTempStorePath() { + tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-doctor-cron-")); + return path.join(tempRoot, "cron", "jobs.json"); +} + +afterEach(async () => { + vi.restoreAllMocks(); + if (tempRoot) { + await fs.rm(tempRoot, { 
recursive: true, force: true }); + tempRoot = null; + } +}); + +function makePrompter(confirmResult = true) { + return { + confirm: vi.fn().mockResolvedValue(confirmResult), + }; +} + +function createCronConfig(storePath: string): OpenClawConfig { + return { + cron: { + store: storePath, + webhook: "https://example.invalid/cron-finished", + }, + }; +} + +function createLegacyCronJob(overrides: Record = {}) { + return { + jobId: "legacy-job", + name: "Legacy job", + notify: true, + createdAtMs: Date.parse("2026-02-01T00:00:00.000Z"), + updatedAtMs: Date.parse("2026-02-02T00:00:00.000Z"), + schedule: { kind: "cron", cron: "0 7 * * *", tz: "UTC" }, + payload: { + kind: "systemEvent", + text: "Morning brief", + }, + state: {}, + ...overrides, + }; +} + +async function writeCronStore(storePath: string, jobs: Array>) { + await fs.mkdir(path.dirname(storePath), { recursive: true }); + await fs.writeFile( + storePath, + JSON.stringify( + { + version: 1, + jobs, + }, + null, + 2, + ), + "utf-8", + ); +} + +describe("maybeRepairLegacyCronStore", () => { + it("repairs legacy cron store fields and migrates notify fallback to webhook delivery", async () => { + const storePath = await makeTempStorePath(); + await writeCronStore(storePath, [createLegacyCronJob()]); + + const noteSpy = vi.spyOn(noteModule, "note").mockImplementation(() => {}); + const cfg = createCronConfig(storePath); + + await maybeRepairLegacyCronStore({ + cfg, + options: {}, + prompter: makePrompter(true), + }); + + const persisted = JSON.parse(await fs.readFile(storePath, "utf-8")) as { + jobs: Array>; + }; + const [job] = persisted.jobs; + expect(job?.jobId).toBeUndefined(); + expect(job?.id).toBe("legacy-job"); + expect(job?.notify).toBeUndefined(); + expect(job?.schedule).toMatchObject({ + kind: "cron", + expr: "0 7 * * *", + tz: "UTC", + }); + expect(job?.delivery).toMatchObject({ + mode: "webhook", + to: "https://example.invalid/cron-finished", + }); + expect(job?.payload).toMatchObject({ + kind: 
"systemEvent", + text: "Morning brief", + }); + + expect(noteSpy).toHaveBeenCalledWith( + expect.stringContaining("Legacy cron job storage detected"), + "Cron", + ); + expect(noteSpy).toHaveBeenCalledWith( + expect.stringContaining("Cron store normalized"), + "Doctor changes", + ); + }); + + it("warns instead of replacing announce delivery for notify fallback jobs", async () => { + const storePath = await makeTempStorePath(); + await fs.mkdir(path.dirname(storePath), { recursive: true }); + await fs.writeFile( + storePath, + JSON.stringify( + { + version: 1, + jobs: [ + { + id: "notify-and-announce", + name: "Notify and announce", + notify: true, + createdAtMs: Date.parse("2026-02-01T00:00:00.000Z"), + updatedAtMs: Date.parse("2026-02-02T00:00:00.000Z"), + schedule: { kind: "every", everyMs: 60_000 }, + sessionTarget: "isolated", + wakeMode: "now", + payload: { kind: "agentTurn", message: "Status" }, + delivery: { mode: "announce", channel: "telegram", to: "123" }, + state: {}, + }, + ], + }, + null, + 2, + ), + "utf-8", + ); + + const noteSpy = vi.spyOn(noteModule, "note").mockImplementation(() => {}); + + await maybeRepairLegacyCronStore({ + cfg: { + cron: { + store: storePath, + webhook: "https://example.invalid/cron-finished", + }, + }, + options: { nonInteractive: true }, + prompter: makePrompter(true), + }); + + const persisted = JSON.parse(await fs.readFile(storePath, "utf-8")) as { + jobs: Array>; + }; + expect(persisted.jobs[0]?.notify).toBe(true); + expect(noteSpy).toHaveBeenCalledWith( + expect.stringContaining('uses legacy notify fallback alongside delivery mode "announce"'), + "Doctor warnings", + ); + }); + + it("does not auto-repair in non-interactive mode without explicit repair approval", async () => { + const storePath = await makeTempStorePath(); + await writeCronStore(storePath, [createLegacyCronJob()]); + + const noteSpy = vi.spyOn(noteModule, "note").mockImplementation(() => {}); + const prompter = makePrompter(false); + + await 
maybeRepairLegacyCronStore({ + cfg: createCronConfig(storePath), + options: { nonInteractive: true }, + prompter, + }); + + const persisted = JSON.parse(await fs.readFile(storePath, "utf-8")) as { + jobs: Array>; + }; + expect(prompter.confirm).toHaveBeenCalledWith({ + message: "Repair legacy cron jobs now?", + initialValue: true, + }); + expect(persisted.jobs[0]?.jobId).toBe("legacy-job"); + expect(persisted.jobs[0]?.notify).toBe(true); + expect(noteSpy).not.toHaveBeenCalledWith( + expect.stringContaining("Cron store normalized"), + "Doctor changes", + ); + }); + + it("migrates notify fallback none delivery jobs to cron.webhook", async () => { + const storePath = await makeTempStorePath(); + await fs.mkdir(path.dirname(storePath), { recursive: true }); + await fs.writeFile( + storePath, + JSON.stringify( + { + version: 1, + jobs: [ + { + id: "notify-none", + name: "Notify none", + notify: true, + createdAtMs: Date.parse("2026-02-01T00:00:00.000Z"), + updatedAtMs: Date.parse("2026-02-02T00:00:00.000Z"), + schedule: { kind: "every", everyMs: 60_000 }, + payload: { + kind: "systemEvent", + text: "Status", + }, + delivery: { mode: "none", to: "123456789" }, + state: {}, + }, + ], + }, + null, + 2, + ), + "utf-8", + ); + + await maybeRepairLegacyCronStore({ + cfg: { + cron: { + store: storePath, + webhook: "https://example.invalid/cron-finished", + }, + }, + options: {}, + prompter: makePrompter(true), + }); + + const persisted = JSON.parse(await fs.readFile(storePath, "utf-8")) as { + jobs: Array>; + }; + expect(persisted.jobs[0]?.notify).toBeUndefined(); + expect(persisted.jobs[0]?.delivery).toMatchObject({ + mode: "webhook", + to: "https://example.invalid/cron-finished", + }); + }); +}); diff --git a/src/commands/doctor-cron.ts b/src/commands/doctor-cron.ts new file mode 100644 index 00000000000..53963cb0d14 --- /dev/null +++ b/src/commands/doctor-cron.ts @@ -0,0 +1,183 @@ +import { formatCliCommand } from "../cli/command-format.js"; +import type { OpenClawConfig } 
from "../config/config.js"; +import { normalizeStoredCronJobs } from "../cron/store-migration.js"; +import { resolveCronStorePath, loadCronStore, saveCronStore } from "../cron/store.js"; +import type { CronJob } from "../cron/types.js"; +import { note } from "../terminal/note.js"; +import { shortenHomePath } from "../utils.js"; +import type { DoctorPrompter, DoctorOptions } from "./doctor-prompter.js"; + +type CronDoctorOutcome = { + changed: boolean; + warnings: string[]; +}; + +function pluralize(count: number, noun: string) { + return `${count} ${noun}${count === 1 ? "" : "s"}`; +} + +function formatLegacyIssuePreview(issues: Partial<Record<string, number>>): string[] { + const lines: string[] = []; + if (issues.jobId) { + lines.push(`- ${pluralize(issues.jobId, "job")} still uses legacy \`jobId\``); + } + if (issues.legacyScheduleString) { + lines.push( + `- ${pluralize(issues.legacyScheduleString, "job")} stores schedule as a bare string`, + ); + } + if (issues.legacyScheduleCron) { + lines.push(`- ${pluralize(issues.legacyScheduleCron, "job")} still uses \`schedule.cron\``); + } + if (issues.legacyPayloadKind) { + lines.push(`- ${pluralize(issues.legacyPayloadKind, "job")} needs payload kind normalization`); + } + if (issues.legacyPayloadProvider) { + lines.push( + `- ${pluralize(issues.legacyPayloadProvider, "job")} still uses payload \`provider\` as a delivery alias`, + ); + } + if (issues.legacyTopLevelPayloadFields) { + lines.push( + `- ${pluralize(issues.legacyTopLevelPayloadFields, "job")} still uses top-level payload fields`, + ); + } + if (issues.legacyTopLevelDeliveryFields) { + lines.push( + `- ${pluralize(issues.legacyTopLevelDeliveryFields, "job")} still uses top-level delivery fields`, + ); + } + if (issues.legacyDeliveryMode) { + lines.push( + `- ${pluralize(issues.legacyDeliveryMode, "job")} still uses delivery mode \`deliver\``, + ); + } + return lines; +} + +function trimString(value: unknown): string | undefined { + return typeof value === "string" && value.trim()
? value.trim() : undefined; } +function migrateLegacyNotifyFallback(params: { + jobs: Array<Record<string, unknown>>; + legacyWebhook?: string; +}): CronDoctorOutcome { + let changed = false; + const warnings: string[] = []; + + for (const raw of params.jobs) { + if (!("notify" in raw)) { + continue; + } + + const jobName = trimString(raw.name) ?? trimString(raw.id) ?? ""; + const notify = raw.notify === true; + if (!notify) { + delete raw.notify; + changed = true; + continue; + } + + const delivery = + raw.delivery && typeof raw.delivery === "object" && !Array.isArray(raw.delivery) + ? (raw.delivery as Record<string, unknown>) + : null; + const mode = trimString(delivery?.mode)?.toLowerCase(); + const to = trimString(delivery?.to); + + if (mode === "webhook" && to) { + delete raw.notify; + changed = true; + continue; + } + + if ((mode === undefined || mode === "none" || mode === "webhook") && params.legacyWebhook) { + raw.delivery = { + ...delivery, + mode: "webhook", + to: mode === "none" ? params.legacyWebhook : (to ?? params.legacyWebhook), + }; + delete raw.notify; + changed = true; + continue; + } + + if (!params.legacyWebhook) { + warnings.push( + `Cron job "${jobName}" still uses legacy notify fallback, but cron.webhook is unset so doctor cannot migrate it automatically.`, + ); + continue; + } + + warnings.push( + `Cron job "${jobName}" uses legacy notify fallback alongside delivery mode "${mode}". Migrate it manually so webhook delivery does not replace existing announce behavior.`, + ); + } + + return { changed, warnings }; +} + +export async function maybeRepairLegacyCronStore(params: { + cfg: OpenClawConfig; + options: DoctorOptions; + prompter: Pick<DoctorPrompter, "confirm">; +}) { + const storePath = resolveCronStorePath(params.cfg.cron?.store); + const store = await loadCronStore(storePath); + const rawJobs = (store.jobs ??
[]) as unknown as Array<Record<string, unknown>>; + if (rawJobs.length === 0) { + return; + } + + const normalized = normalizeStoredCronJobs(rawJobs); + const legacyWebhook = trimString(params.cfg.cron?.webhook); + const notifyCount = rawJobs.filter((job) => job.notify === true).length; + const previewLines = formatLegacyIssuePreview(normalized.issues); + if (notifyCount > 0) { + previewLines.push( + `- ${pluralize(notifyCount, "job")} still uses legacy \`notify: true\` webhook fallback`, + ); + } + if (previewLines.length === 0) { + return; + } + + note( + [ + `Legacy cron job storage detected at ${shortenHomePath(storePath)}.`, + ...previewLines, + `Repair with ${formatCliCommand("openclaw doctor --fix")} to normalize the store before the next scheduler run.`, + ].join("\n"), + "Cron", + ); + + const shouldRepair = await params.prompter.confirm({ + message: "Repair legacy cron jobs now?", + initialValue: true, + }); + if (!shouldRepair) { + return; + } + + const notifyMigration = migrateLegacyNotifyFallback({ + jobs: rawJobs, + legacyWebhook, + }); + const changed = normalized.mutated || notifyMigration.changed; + if (!changed && notifyMigration.warnings.length === 0) { + return; + } + + if (changed) { + await saveCronStore(storePath, { + version: 1, + jobs: rawJobs as unknown as CronJob[], + }); + note(`Cron store normalized at ${shortenHomePath(storePath)}.`, "Doctor changes"); + } + + if (notifyMigration.warnings.length > 0) { + note(notifyMigration.warnings.join("\n"), "Doctor warnings"); + } +} diff --git a/src/commands/doctor-gateway-daemon-flow.test.ts b/src/commands/doctor-gateway-daemon-flow.test.ts new file mode 100644 index 00000000000..02c0b885bb0 --- /dev/null +++ b/src/commands/doctor-gateway-daemon-flow.test.ts @@ -0,0 +1,194 @@ +import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; + +const service = vi.hoisted(() => ({ + isLoaded: vi.fn(), + readRuntime: vi.fn(), + restart: vi.fn(), + install: vi.fn(), + readCommand: vi.fn(), +})); +const
note = vi.hoisted(() => vi.fn()); +const sleep = vi.hoisted(() => vi.fn(async () => {})); +const healthCommand = vi.hoisted(() => vi.fn(async () => {})); +const inspectPortUsage = vi.hoisted(() => vi.fn()); +const readLastGatewayErrorLine = vi.hoisted(() => vi.fn(async () => null)); + +vi.mock("../config/config.js", () => ({ + resolveGatewayPort: vi.fn(() => 18789), +})); + +vi.mock("../daemon/constants.js", () => ({ + resolveGatewayLaunchAgentLabel: vi.fn(() => "ai.openclaw.gateway"), + resolveNodeLaunchAgentLabel: vi.fn(() => "ai.openclaw.node"), +})); + +vi.mock("../daemon/diagnostics.js", () => ({ + readLastGatewayErrorLine, +})); + +vi.mock("../daemon/launchd.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + isLaunchAgentListed: vi.fn(async () => false), + isLaunchAgentLoaded: vi.fn(async () => false), + launchAgentPlistExists: vi.fn(async () => false), + repairLaunchAgentBootstrap: vi.fn(async () => ({ ok: true })), + }; +}); + +vi.mock("../daemon/service.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveGatewayService: () => service, + }; +}); + +vi.mock("../daemon/systemd-hints.js", () => ({ + renderSystemdUnavailableHints: vi.fn(() => []), +})); + +vi.mock("../daemon/systemd.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + isSystemdUserServiceAvailable: vi.fn(async () => true), + }; +}); + +vi.mock("../infra/ports.js", () => ({ + inspectPortUsage, + formatPortDiagnostics: vi.fn(() => []), +})); + +vi.mock("../infra/wsl.js", () => ({ + isWSL: vi.fn(async () => false), +})); + +vi.mock("../terminal/note.js", () => ({ + note, +})); + +vi.mock("../utils.js", () => ({ + sleep, +})); + +vi.mock("./daemon-install-helpers.js", () => ({ + buildGatewayInstallPlan: vi.fn(), + gatewayInstallErrorHint: vi.fn(() => "hint"), +})); + +vi.mock("./doctor-format.js", () => ({ + buildGatewayRuntimeHints: vi.fn(() => []), 
+ formatGatewayRuntimeSummary: vi.fn(() => null), +})); + +vi.mock("./gateway-install-token.js", () => ({ + resolveGatewayInstallToken: vi.fn(), +})); + +vi.mock("./health-format.js", () => ({ + formatHealthCheckFailure: vi.fn(() => "health failed"), +})); + +vi.mock("./health.js", () => ({ + healthCommand, +})); + +describe("maybeRepairGatewayDaemon", () => { + let maybeRepairGatewayDaemon: typeof import("./doctor-gateway-daemon-flow.js").maybeRepairGatewayDaemon; + const originalPlatformDescriptor = Object.getOwnPropertyDescriptor(process, "platform"); + + beforeAll(async () => { + ({ maybeRepairGatewayDaemon } = await import("./doctor-gateway-daemon-flow.js")); + }); + + beforeEach(() => { + vi.clearAllMocks(); + service.isLoaded.mockResolvedValue(true); + service.readRuntime.mockResolvedValue({ status: "running" }); + service.restart.mockResolvedValue({ outcome: "completed" }); + inspectPortUsage.mockResolvedValue({ + port: 18789, + status: "free", + listeners: [], + hints: [], + }); + }); + + afterEach(() => { + if (originalPlatformDescriptor) { + Object.defineProperty(process, "platform", originalPlatformDescriptor); + } + }); + + function setPlatform(platform: NodeJS.Platform) { + if (!originalPlatformDescriptor) { + return; + } + Object.defineProperty(process, "platform", { + ...originalPlatformDescriptor, + value: platform, + }); + } + + function createPrompter(confirmImpl: (message: string) => boolean) { + return { + confirm: vi.fn(), + confirmRepair: vi.fn(), + confirmAggressive: vi.fn(), + confirmSkipInNonInteractive: vi.fn(async ({ message }: { message: string }) => + confirmImpl(message), + ), + select: vi.fn(), + shouldRepair: false, + shouldForce: false, + }; + } + + it("skips restart verification when a running service restart is only scheduled", async () => { + setPlatform("linux"); + service.restart.mockResolvedValueOnce({ outcome: "scheduled" }); + + await maybeRepairGatewayDaemon({ + cfg: { gateway: {} }, + runtime: { log: vi.fn(), error: 
vi.fn(), exit: vi.fn() }, + prompter: createPrompter((message) => message === "Restart gateway service now?"), + options: { deep: false }, + gatewayDetailsMessage: "details", + healthOk: false, + }); + + expect(service.restart).toHaveBeenCalledTimes(1); + expect(note).toHaveBeenCalledWith( + "restart scheduled, gateway will restart momentarily", + "Gateway", + ); + expect(sleep).not.toHaveBeenCalled(); + expect(healthCommand).not.toHaveBeenCalled(); + }); + + it("skips start verification when a stopped service start is only scheduled", async () => { + setPlatform("linux"); + service.readRuntime.mockResolvedValue({ status: "stopped" }); + service.restart.mockResolvedValueOnce({ outcome: "scheduled" }); + + await maybeRepairGatewayDaemon({ + cfg: { gateway: {} }, + runtime: { log: vi.fn(), error: vi.fn(), exit: vi.fn() }, + prompter: createPrompter((message) => message === "Start gateway service now?"), + options: { deep: false }, + gatewayDetailsMessage: "details", + healthOk: false, + }); + + expect(service.restart).toHaveBeenCalledTimes(1); + expect(note).toHaveBeenCalledWith( + "restart scheduled, gateway will restart momentarily", + "Gateway", + ); + expect(sleep).not.toHaveBeenCalled(); + expect(healthCommand).not.toHaveBeenCalled(); + }); +}); diff --git a/src/commands/doctor-gateway-daemon-flow.ts b/src/commands/doctor-gateway-daemon-flow.ts index 4fd8df3490b..c476efa615f 100644 --- a/src/commands/doctor-gateway-daemon-flow.ts +++ b/src/commands/doctor-gateway-daemon-flow.ts @@ -12,7 +12,7 @@ import { launchAgentPlistExists, repairLaunchAgentBootstrap, } from "../daemon/launchd.js"; -import { resolveGatewayService } from "../daemon/service.js"; +import { describeGatewayServiceRestart, resolveGatewayService } from "../daemon/service.js"; import { renderSystemdUnavailableHints } from "../daemon/systemd-hints.js"; import { isSystemdUserServiceAvailable } from "../daemon/systemd.js"; import { formatPortDiagnostics, inspectPortUsage } from "../infra/ports.js"; @@ 
-235,11 +235,16 @@ export async function maybeRepairGatewayDaemon(params: { initialValue: true, }); if (start) { - await service.restart({ + const restartResult = await service.restart({ env: process.env, stdout: process.stdout, }); - await sleep(1500); + const restartStatus = describeGatewayServiceRestart("Gateway", restartResult); + if (!restartStatus.scheduled) { + await sleep(1500); + } else { + note(restartStatus.message, "Gateway"); + } } } @@ -257,10 +262,15 @@ export async function maybeRepairGatewayDaemon(params: { initialValue: true, }); if (restart) { - await service.restart({ + const restartResult = await service.restart({ env: process.env, stdout: process.stdout, }); + const restartStatus = describeGatewayServiceRestart("Gateway", restartResult); + if (restartStatus.scheduled) { + note(restartStatus.message, "Gateway"); + return; + } await sleep(1500); try { await healthCommand({ json: false, timeoutMs: 10_000 }, params.runtime); diff --git a/src/commands/doctor-gateway-services.test.ts b/src/commands/doctor-gateway-services.test.ts index 66dd090f2b8..7809f6b003d 100644 --- a/src/commands/doctor-gateway-services.test.ts +++ b/src/commands/doctor-gateway-services.test.ts @@ -2,6 +2,22 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; import { withEnvAsync } from "../test-utils/env.js"; +const fsMocks = vi.hoisted(() => ({ + realpath: vi.fn(), +})); + +vi.mock("node:fs/promises", async () => { + const actual = await vi.importActual("node:fs/promises"); + return { + ...actual, + default: { + ...actual, + realpath: fsMocks.realpath, + }, + realpath: fsMocks.realpath, + }; +}); + const mocks = vi.hoisted(() => ({ readCommand: vi.fn(), install: vi.fn(), @@ -137,6 +153,7 @@ function setupGatewayTokenRepairScenario() { describe("maybeRepairGatewayServiceConfig", () => { beforeEach(() => { vi.clearAllMocks(); + fsMocks.realpath.mockImplementation(async (value: string) => value); 
mocks.resolveGatewayAuthTokenForService.mockImplementation(async (cfg: OpenClawConfig, env) => { const configToken = typeof cfg.gateway?.auth?.token === "string" ? cfg.gateway.auth.token.trim() : undefined; @@ -218,6 +235,121 @@ describe("maybeRepairGatewayServiceConfig", () => { }); }); + it("does not flag entrypoint mismatch when symlink and realpath match", async () => { + mocks.readCommand.mockResolvedValue({ + programArguments: [ + "/usr/bin/node", + "/Users/test/Library/pnpm/global/5/node_modules/openclaw/dist/index.js", + "gateway", + "--port", + "18789", + ], + environment: {}, + }); + mocks.auditGatewayServiceConfig.mockResolvedValue({ + ok: true, + issues: [], + }); + mocks.buildGatewayInstallPlan.mockResolvedValue({ + programArguments: [ + "/usr/bin/node", + "/Users/test/Library/pnpm/global/5/node_modules/.pnpm/openclaw@2026.3.12/node_modules/openclaw/dist/index.js", + "gateway", + "--port", + "18789", + ], + environment: {}, + }); + fsMocks.realpath.mockImplementation(async (value: string) => { + if (value.includes("/global/5/node_modules/openclaw/")) { + return value.replace( + "/global/5/node_modules/openclaw/", + "/global/5/node_modules/.pnpm/openclaw@2026.3.12/node_modules/openclaw/", + ); + } + return value; + }); + + await runRepair({ gateway: {} }); + + expect(mocks.note).not.toHaveBeenCalledWith( + expect.stringContaining("Gateway service entrypoint does not match the current install."), + "Gateway service config", + ); + expect(mocks.install).not.toHaveBeenCalled(); + }); + + it("does not flag entrypoint mismatch when realpath fails but normalized absolute paths match", async () => { + mocks.readCommand.mockResolvedValue({ + programArguments: [ + "/usr/bin/node", + "/opt/openclaw/../openclaw/dist/index.js", + "gateway", + "--port", + "18789", + ], + environment: {}, + }); + mocks.auditGatewayServiceConfig.mockResolvedValue({ + ok: true, + issues: [], + }); + mocks.buildGatewayInstallPlan.mockResolvedValue({ + programArguments: [ + 
"/usr/bin/node", + "/opt/openclaw/dist/index.js", + "gateway", + "--port", + "18789", + ], + environment: {}, + }); + fsMocks.realpath.mockRejectedValue(new Error("no realpath")); + + await runRepair({ gateway: {} }); + + expect(mocks.note).not.toHaveBeenCalledWith( + expect.stringContaining("Gateway service entrypoint does not match the current install."), + "Gateway service config", + ); + expect(mocks.install).not.toHaveBeenCalled(); + }); + + it("still flags entrypoint mismatch when canonicalized paths differ", async () => { + mocks.readCommand.mockResolvedValue({ + programArguments: [ + "/usr/bin/node", + "/Users/test/.nvm/versions/node/v22.0.0/lib/node_modules/openclaw/dist/index.js", + "gateway", + "--port", + "18789", + ], + environment: {}, + }); + mocks.auditGatewayServiceConfig.mockResolvedValue({ + ok: true, + issues: [], + }); + mocks.buildGatewayInstallPlan.mockResolvedValue({ + programArguments: [ + "/usr/bin/node", + "/Users/test/Library/pnpm/global/5/node_modules/openclaw/dist/index.js", + "gateway", + "--port", + "18789", + ], + environment: {}, + }); + + await runRepair({ gateway: {} }); + + expect(mocks.note).toHaveBeenCalledWith( + expect.stringContaining("Gateway service entrypoint does not match the current install."), + "Gateway service config", + ); + expect(mocks.install).toHaveBeenCalledTimes(1); + }); + it("treats SecretRef-managed gateway token as non-persisted service state", async () => { mocks.readCommand.mockResolvedValue({ programArguments: gatewayProgramArguments, diff --git a/src/commands/doctor-gateway-services.ts b/src/commands/doctor-gateway-services.ts index 68adf9374c6..ba9b032b4ec 100644 --- a/src/commands/doctor-gateway-services.ts +++ b/src/commands/doctor-gateway-services.ts @@ -54,8 +54,13 @@ function findGatewayEntrypoint(programArguments?: string[]): string | null { return programArguments[gatewayIndex - 1] ?? 
null; } -function normalizeExecutablePath(value: string): string { - return path.resolve(value); +async function normalizeExecutablePath(value: string): Promise<string> { + const resolvedPath = path.resolve(value); + try { + return await fs.realpath(resolvedPath); + } catch { + return resolvedPath; + } } function extractDetailPath(detail: string, prefix: string): string | null { @@ -252,7 +257,7 @@ export async function maybeRepairGatewayServiceConfig( note(warning, "Gateway runtime"); } note( - "System Node 22+ not found. Install via Homebrew/apt/choco and rerun doctor to migrate off Bun/version managers.", + "System Node 22 LTS (22.16+) or Node 24 not found. Install via Homebrew/apt/choco and rerun doctor to migrate off Bun/version managers.", "Gateway runtime", ); } @@ -269,10 +274,16 @@ export async function maybeRepairGatewayServiceConfig( }); const expectedEntrypoint = findGatewayEntrypoint(programArguments); const currentEntrypoint = findGatewayEntrypoint(command.programArguments); + const normalizedExpectedEntrypoint = expectedEntrypoint + ? await normalizeExecutablePath(expectedEntrypoint) + : null; + const normalizedCurrentEntrypoint = currentEntrypoint + ?
await normalizeExecutablePath(currentEntrypoint) + : null; if ( - expectedEntrypoint && - currentEntrypoint && - normalizeExecutablePath(expectedEntrypoint) !== normalizeExecutablePath(currentEntrypoint) + normalizedExpectedEntrypoint && + normalizedCurrentEntrypoint && + normalizedExpectedEntrypoint !== normalizedCurrentEntrypoint ) { audit.issues.push({ code: SERVICE_AUDIT_CODES.gatewayEntrypointMismatch, diff --git a/src/commands/doctor-state-migrations.test.ts b/src/commands/doctor-state-migrations.test.ts index 4116a6fca6e..ec465632cfa 100644 --- a/src/commands/doctor-state-migrations.test.ts +++ b/src/commands/doctor-state-migrations.test.ts @@ -26,6 +26,32 @@ async function makeRootWithEmptyCfg() { return { root, cfg }; } +function writeLegacyTelegramAllowFromStore(oauthDir: string) { + fs.writeFileSync( + path.join(oauthDir, "telegram-allowFrom.json"), + JSON.stringify( + { + version: 1, + allowFrom: ["123456"], + }, + null, + 2, + ) + "\n", + "utf-8", + ); +} + +async function runTelegramAllowFromMigration(params: { root: string; cfg: OpenClawConfig }) { + const oauthDir = ensureCredentialsDir(params.root); + writeLegacyTelegramAllowFromStore(oauthDir); + const detected = await detectLegacyStateMigrations({ + cfg: params.cfg, + env: { OPENCLAW_STATE_DIR: params.root } as NodeJS.ProcessEnv, + }); + const result = await runLegacyStateMigrations({ detected, now: () => 123 }); + return { oauthDir, detected, result }; +} + afterEach(async () => { resetAutoMigrateLegacyStateForTest(); resetAutoMigrateLegacyStateDirForTest(); @@ -277,30 +303,11 @@ describe("doctor legacy state migrations", () => { it("migrates legacy Telegram pairing allowFrom store to account-scoped default file", async () => { const { root, cfg } = await makeRootWithEmptyCfg(); - const oauthDir = ensureCredentialsDir(root); - fs.writeFileSync( - path.join(oauthDir, "telegram-allowFrom.json"), - JSON.stringify( - { - version: 1, - allowFrom: ["123456"], - }, - null, - 2, - ) + "\n", - "utf-8", - 
); - - const detected = await detectLegacyStateMigrations({ - cfg, - env: { OPENCLAW_STATE_DIR: root } as NodeJS.ProcessEnv, - }); + const { oauthDir, detected, result } = await runTelegramAllowFromMigration({ root, cfg }); expect(detected.pairingAllowFrom.hasLegacyTelegram).toBe(true); expect( detected.pairingAllowFrom.copyPlans.map((plan) => path.basename(plan.targetPath)), ).toEqual(["telegram-default-allowFrom.json"]); - - const result = await runLegacyStateMigrations({ detected, now: () => 123 }); expect(result.warnings).toEqual([]); const target = path.join(oauthDir, "telegram-default-allowFrom.json"); @@ -323,30 +330,11 @@ describe("doctor legacy state migrations", () => { }, }, }; - const oauthDir = ensureCredentialsDir(root); - fs.writeFileSync( - path.join(oauthDir, "telegram-allowFrom.json"), - JSON.stringify( - { - version: 1, - allowFrom: ["123456"], - }, - null, - 2, - ) + "\n", - "utf-8", - ); - - const detected = await detectLegacyStateMigrations({ - cfg, - env: { OPENCLAW_STATE_DIR: root } as NodeJS.ProcessEnv, - }); + const { oauthDir, detected, result } = await runTelegramAllowFromMigration({ root, cfg }); expect(detected.pairingAllowFrom.hasLegacyTelegram).toBe(true); expect( detected.pairingAllowFrom.copyPlans.map((plan) => path.basename(plan.targetPath)).toSorted(), ).toEqual(["telegram-bot1-allowFrom.json", "telegram-bot2-allowFrom.json"]); - - const result = await runLegacyStateMigrations({ detected, now: () => 123 }); expect(result.warnings).toEqual([]); const bot1Target = path.join(oauthDir, "telegram-bot1-allowFrom.json"); diff --git a/src/commands/doctor.ts b/src/commands/doctor.ts index 2688774b8bb..bdde2781ff9 100644 --- a/src/commands/doctor.ts +++ b/src/commands/doctor.ts @@ -31,6 +31,7 @@ import { import { noteBootstrapFileSize } from "./doctor-bootstrap-size.js"; import { doctorShellCompletion } from "./doctor-completion.js"; import { loadAndMaybeMigrateDoctorConfig } from "./doctor-config-flow.js"; +import { 
maybeRepairLegacyCronStore } from "./doctor-cron.js"; import { maybeRepairGatewayDaemon } from "./doctor-gateway-daemon-flow.js"; import { checkGatewayHealth, probeGatewayMemoryStatus } from "./doctor-gateway-health.js"; import { @@ -220,6 +221,11 @@ export async function doctorCommand( await noteStateIntegrity(cfg, prompter, configResult.path ?? CONFIG_PATH); await noteSessionLockHealth({ shouldRepair: prompter.shouldRepair }); + await maybeRepairLegacyCronStore({ + cfg, + options, + prompter, + }); cfg = await maybeRepairSandboxImages(cfg, runtime, prompter); noteSandboxScopeWarnings(cfg); diff --git a/src/commands/doctor.warns-state-directory-is-missing.e2e.test.ts b/src/commands/doctor.warns-state-directory-is-missing.e2e.test.ts index 69c9da9d579..68d865996d2 100644 --- a/src/commands/doctor.warns-state-directory-is-missing.e2e.test.ts +++ b/src/commands/doctor.warns-state-directory-is-missing.e2e.test.ts @@ -41,6 +41,10 @@ describe("doctor command", () => { api: "openai-completions", baseUrl: "https://opencode.ai/zen/v1", }, + "opencode-go": { + api: "openai-completions", + baseUrl: "https://opencode.ai/zen/go/v1", + }, }, }, }, @@ -53,7 +57,9 @@ describe("doctor command", () => { const warned = note.mock.calls.some( ([message, title]) => - title === "OpenCode Zen" && String(message).includes("models.providers.opencode"), + title === "OpenCode" && + String(message).includes("models.providers.opencode") && + String(message).includes("models.providers.opencode-go"), ); expect(warned).toBe(true); }); diff --git a/src/commands/gateway-status.test.ts b/src/commands/gateway-status.test.ts index 64d515c0b4d..452bcb3691b 100644 --- a/src/commands/gateway-status.test.ts +++ b/src/commands/gateway-status.test.ts @@ -1,4 +1,5 @@ import { describe, expect, it, vi } from "vitest"; +import type { GatewayProbeResult } from "../gateway/probe.js"; import type { RuntimeEnv } from "../runtime.js"; import { withEnvAsync } from "../test-utils/env.js"; @@ -33,7 +34,7 @@ const 
startSshPortForward = vi.fn(async (_opts?: unknown) => ({ stderr: [], stop: sshStop, })); -const probeGateway = vi.fn(async (opts: { url: string }) => { +const probeGateway = vi.fn(async (opts: { url: string }): Promise<GatewayProbeResult> => { const { url } = opts; if (url.includes("127.0.0.1")) { return { @@ -52,7 +53,16 @@ const probeGateway = vi.fn(async (opts: { url: string }) => { }, sessions: { count: 0 }, }, - presence: [{ mode: "gateway", reason: "self", host: "local", ip: "127.0.0.1" }], + presence: [ + { + mode: "gateway", + reason: "self", + host: "local", + ip: "127.0.0.1", + text: "Gateway: local (127.0.0.1) · app test · mode gateway · reason self", + ts: Date.now(), + }, + ], configSnapshot: { path: "/tmp/cfg.json", exists: true, @@ -81,7 +91,16 @@ const probeGateway = vi.fn(async (opts: { url: string }) => { }, sessions: { count: 2 }, }, - presence: [{ mode: "gateway", reason: "self", host: "remote", ip: "100.64.0.2" }], + presence: [ + { + mode: "gateway", + reason: "self", + host: "remote", + ip: "100.64.0.2", + text: "Gateway: remote (100.64.0.2) · app test · mode gateway · reason self", + ts: Date.now(), + }, + ], configSnapshot: { path: "/tmp/remote.json", exists: true, @@ -201,6 +220,54 @@ describe("gateway-status command", () => { expect(targets[0]?.summary).toBeTruthy(); }); + + it("treats missing-scope RPC probe failures as degraded but reachable", async () => { + const { runtime, runtimeLogs, runtimeErrors } = createRuntimeCapture(); + readBestEffortConfig.mockResolvedValueOnce({ + gateway: { + mode: "local", + auth: { mode: "token", token: "ltok" }, + }, + } as never); + probeGateway.mockResolvedValueOnce({ + ok: false, + url: "ws://127.0.0.1:18789", + connectLatencyMs: 51, + error: "missing scope: operator.read", + close: null, + health: null, + status: null, + presence: null, + configSnapshot: null, + }); + + await runGatewayStatus(runtime, { timeout: "1000", json: true }); + + expect(runtimeErrors).toHaveLength(0); + const parsed =
JSON.parse(runtimeLogs.join("\n")) as { + ok?: boolean; + degraded?: boolean; + warnings?: Array<{ code?: string; targetIds?: string[] }>; + targets?: Array<{ + connect?: { + ok?: boolean; + rpcOk?: boolean; + scopeLimited?: boolean; + }; + }>; + }; + expect(parsed.ok).toBe(true); + expect(parsed.degraded).toBe(true); + expect(parsed.targets?.[0]?.connect).toMatchObject({ + ok: true, + rpcOk: false, + scopeLimited: true, + }); + const scopeLimitedWarning = parsed.warnings?.find( + (warning) => warning.code === "probe_scope_limited", + ); + expect(scopeLimitedWarning?.targetIds).toContain("localLoopback"); + }); + it("surfaces unresolved SecretRef auth diagnostics in warnings", async () => { const { runtime, runtimeLogs, runtimeErrors } = createRuntimeCapture(); await withEnvAsync({ MISSING_GATEWAY_TOKEN: undefined }, async () => { @@ -361,7 +428,16 @@ describe("gateway-status command", () => { }, sessions: { count: 1 }, }, - presence: [{ mode: "gateway", reason: "self", host: "remote", ip: "100.64.0.2" }], + presence: [ + { + mode: "gateway", + reason: "self", + host: "remote", + ip: "100.64.0.2", + text: "Gateway: remote (100.64.0.2) · app test · mode gateway · reason self", + ts: Date.now(), + }, + ], configSnapshot: { path: "/tmp/secretref-config.json", exists: true, diff --git a/src/commands/gateway-status.ts b/src/commands/gateway-status.ts index 4ac54eca0c4..be0b9abf69a 100644 --- a/src/commands/gateway-status.ts +++ b/src/commands/gateway-status.ts @@ -10,6 +10,8 @@ import { colorize, isRich, theme } from "../terminal/theme.js"; import { buildNetworkHints, extractConfigSummary, + isProbeReachable, + isScopeLimitedProbeFailure, type GatewayStatusTarget, parseTimeoutMs, pickGatewaySelfPresence, @@ -193,8 +195,10 @@ export async function gatewayStatusCommand( }, ); - const reachable = probed.filter((p) => p.probe.ok); + const reachable = probed.filter((p) => isProbeReachable(p.probe)); const ok = reachable.length > 0; + const degradedScopeLimited = 
probed.filter((p) => isScopeLimitedProbeFailure(p.probe)); + const degraded = degradedScopeLimited.length > 0; const multipleGateways = reachable.length > 1; const primary = reachable.find((p) => p.target.kind === "explicit") ?? @@ -236,12 +240,21 @@ export async function gatewayStatusCommand( }); } } + for (const result of degradedScopeLimited) { + warnings.push({ + code: "probe_scope_limited", + message: + "Probe diagnostics are limited by gateway scopes (missing operator.read). Connection succeeded, but status details may be incomplete. Hint: pair device identity or use credentials with operator.read.", + targetIds: [result.target.id], + }); + } if (opts.json) { runtime.log( JSON.stringify( { ok, + degraded, ts: Date.now(), durationMs: Date.now() - startedAt, timeoutMs: overallTimeoutMs, @@ -274,7 +287,9 @@ export async function gatewayStatusCommand( active: p.target.active, tunnel: p.target.tunnel ?? null, connect: { - ok: p.probe.ok, + ok: isProbeReachable(p.probe), + rpcOk: p.probe.ok, + scopeLimited: isScopeLimitedProbeFailure(p.probe), latencyMs: p.probe.connectLatencyMs, error: p.probe.error, close: p.probe.close, diff --git a/src/commands/gateway-status/helpers.test.ts b/src/commands/gateway-status/helpers.test.ts index c726db00829..e0c1ecee763 100644 --- a/src/commands/gateway-status/helpers.test.ts +++ b/src/commands/gateway-status/helpers.test.ts @@ -1,6 +1,12 @@ import { describe, expect, it } from "vitest"; import { withEnvAsync } from "../../test-utils/env.js"; -import { extractConfigSummary, resolveAuthForTarget } from "./helpers.js"; +import { + extractConfigSummary, + isProbeReachable, + isScopeLimitedProbeFailure, + renderProbeSummaryLine, + resolveAuthForTarget, +} from "./helpers.js"; describe("extractConfigSummary", () => { it("marks SecretRef-backed gateway auth credentials as configured", () => { @@ -67,6 +73,37 @@ describe("extractConfigSummary", () => { }); describe("resolveAuthForTarget", () => { + function createConfigRemoteTarget() { + 
return { + id: "configRemote", + kind: "configRemote" as const, + url: "wss://remote.example:18789", + active: true, + }; + } + + function createRemoteGatewayTargetConfig(params?: { mode?: "none" | "password" | "token" }) { + return { + secrets: { + providers: { + default: { source: "env" as const }, + }, + }, + gateway: { + ...(params?.mode + ? { + auth: { + mode: params.mode, + }, + } + : {}), + remote: { + token: { source: "env" as const, provider: "default", id: "REMOTE_GATEWAY_TOKEN" }, + }, + }, + }; + } + it("resolves local auth token SecretRef before probing local targets", async () => { await withEnvAsync( { @@ -109,24 +146,8 @@ describe("resolveAuthForTarget", () => { }, async () => { const auth = await resolveAuthForTarget( - { - secrets: { - providers: { - default: { source: "env" }, - }, - }, - gateway: { - remote: { - token: { source: "env", provider: "default", id: "REMOTE_GATEWAY_TOKEN" }, - }, - }, - }, - { - id: "configRemote", - kind: "configRemote", - url: "wss://remote.example:18789", - active: true, - }, + createRemoteGatewayTargetConfig(), + createConfigRemoteTarget(), {}, ); @@ -142,27 +163,8 @@ describe("resolveAuthForTarget", () => { }, async () => { const auth = await resolveAuthForTarget( - { - secrets: { - providers: { - default: { source: "env" }, - }, - }, - gateway: { - auth: { - mode: "none", - }, - remote: { - token: { source: "env", provider: "default", id: "REMOTE_GATEWAY_TOKEN" }, - }, - }, - }, - { - id: "configRemote", - kind: "configRemote", - url: "wss://remote.example:18789", - active: true, - }, + createRemoteGatewayTargetConfig({ mode: "none" }), + createConfigRemoteTarget(), {}, ); @@ -233,3 +235,41 @@ describe("resolveAuthForTarget", () => { ); }); }); + +describe("probe reachability classification", () => { + it("treats missing-scope RPC failures as scope-limited and reachable", () => { + const probe = { + ok: false, + url: "ws://127.0.0.1:18789", + connectLatencyMs: 51, + error: "missing scope: operator.read", + 
close: null, + health: null, + status: null, + presence: null, + configSnapshot: null, + }; + + expect(isScopeLimitedProbeFailure(probe)).toBe(true); + expect(isProbeReachable(probe)).toBe(true); + expect(renderProbeSummaryLine(probe, false)).toContain("RPC: limited"); + }); + + it("keeps non-scope RPC failures as unreachable", () => { + const probe = { + ok: false, + url: "ws://127.0.0.1:18789", + connectLatencyMs: 43, + error: "unknown method: status", + close: null, + health: null, + status: null, + presence: null, + configSnapshot: null, + }; + + expect(isScopeLimitedProbeFailure(probe)).toBe(false); + expect(isProbeReachable(probe)).toBe(false); + expect(renderProbeSummaryLine(probe, false)).toContain("RPC: failed"); + }); +}); diff --git a/src/commands/gateway-status/helpers.ts b/src/commands/gateway-status/helpers.ts index 24519e6e8be..5f1a5e2f5ee 100644 --- a/src/commands/gateway-status/helpers.ts +++ b/src/commands/gateway-status/helpers.ts @@ -1,3 +1,4 @@ +import { parseTimeoutMsWithFallback } from "../../cli/parse-timeout.js"; import { resolveGatewayPort } from "../../config/config.js"; import type { OpenClawConfig, ConfigFileSnapshot } from "../../config/types.js"; import { hasConfiguredSecretInput } from "../../config/types.secrets.js"; @@ -8,6 +9,8 @@ import { pickPrimaryTailnetIPv4 } from "../../infra/tailnet.js"; import { colorize, theme } from "../../terminal/theme.js"; import { pickGatewaySelfPresence } from "../gateway-presence.js"; +const MISSING_SCOPE_PATTERN = /\bmissing scope:\s*[a-z0-9._-]+/i; + type TargetKind = "explicit" | "configRemote" | "localLoopback" | "sshTunnel"; export type GatewayStatusTarget = { @@ -64,20 +67,7 @@ function parseIntOrNull(value: unknown): number | null { } export function parseTimeoutMs(raw: unknown, fallbackMs: number): number { - const value = - typeof raw === "string" - ? raw.trim() - : typeof raw === "number" || typeof raw === "bigint" - ? 
String(raw) - : ""; - if (!value) { - return fallbackMs; - } - const parsed = Number.parseInt(value, 10); - if (!Number.isFinite(parsed) || parsed <= 0) { - throw new Error(`invalid --timeout: ${value}`); - } - return parsed; + return parseTimeoutMsWithFallback(raw, fallbackMs); } function normalizeWsUrl(value: string): string | null { @@ -336,6 +326,17 @@ export function renderTargetHeader(target: GatewayStatusTarget, rich: boolean) { return `${colorize(rich, theme.heading, kindLabel)} ${colorize(rich, theme.muted, target.url)}`; } +export function isScopeLimitedProbeFailure(probe: GatewayProbeResult): boolean { + if (probe.ok || probe.connectLatencyMs == null) { + return false; + } + return MISSING_SCOPE_PATTERN.test(probe.error ?? ""); +} + +export function isProbeReachable(probe: GatewayProbeResult): boolean { + return probe.ok || isScopeLimitedProbeFailure(probe); +} + export function renderProbeSummaryLine(probe: GatewayProbeResult, rich: boolean) { if (probe.ok) { const latency = @@ -347,7 +348,10 @@ export function renderProbeSummaryLine(probe: GatewayProbeResult, rich: boolean) if (probe.connectLatencyMs != null) { const latency = typeof probe.connectLatencyMs === "number" ? `${probe.connectLatencyMs}ms` : "unknown"; - return `${colorize(rich, theme.success, "Connect: ok")} (${latency}) · ${colorize(rich, theme.error, "RPC: failed")}${detail}`; + const rpcStatus = isScopeLimitedProbeFailure(probe) + ? 
colorize(rich, theme.warn, "RPC: limited") + : colorize(rich, theme.error, "RPC: failed"); + return `${colorize(rich, theme.success, "Connect: ok")} (${latency}) · ${rpcStatus}${detail}`; } return `${colorize(rich, theme.error, "Connect: failed")}${detail}`; diff --git a/src/commands/message-format.ts b/src/commands/message-format.ts index aafe570287c..8f4fe9bd08c 100644 --- a/src/commands/message-format.ts +++ b/src/commands/message-format.ts @@ -4,7 +4,7 @@ import type { OutboundDeliveryResult } from "../infra/outbound/deliver.js"; import { formatGatewaySummary, formatOutboundDeliverySummary } from "../infra/outbound/format.js"; import type { MessageActionRunResult } from "../infra/outbound/message-action-runner.js"; import { formatTargetDisplay } from "../infra/outbound/target-resolver.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { isRich, theme } from "../terminal/theme.js"; import { shortenText } from "./text-format.js"; @@ -257,7 +257,7 @@ export function formatMessageCliText(result: MessageActionRunResult): string[] { const muted = (text: string) => (rich ? theme.muted(text) : text); const heading = (text: string) => (rich ? theme.heading(text) : text); - const width = Math.max(60, (process.stdout.columns ?? 
120) - 1); + const width = getTerminalTableWidth(); const opts: FormatOpts = { width }; if (result.handledBy === "dry-run") { diff --git a/src/commands/model-picker.test.ts b/src/commands/model-picker.test.ts index 5cf0fd57547..ef8b6a3887b 100644 --- a/src/commands/model-picker.test.ts +++ b/src/commands/model-picker.test.ts @@ -21,19 +21,36 @@ const ensureAuthProfileStore = vi.hoisted(() => ); const listProfilesForProvider = vi.hoisted(() => vi.fn(() => [])); const upsertAuthProfile = vi.hoisted(() => vi.fn()); -const upsertAuthProfileWithLock = vi.hoisted(() => vi.fn(async () => {})); vi.mock("../agents/auth-profiles.js", () => ({ ensureAuthProfileStore, listProfilesForProvider, upsertAuthProfile, - upsertAuthProfileWithLock, })); const resolveEnvApiKey = vi.hoisted(() => vi.fn(() => undefined)); -const getCustomProviderApiKey = vi.hoisted(() => vi.fn(() => undefined)); +const hasUsableCustomProviderApiKey = vi.hoisted(() => vi.fn(() => false)); vi.mock("../agents/model-auth.js", () => ({ resolveEnvApiKey, - getCustomProviderApiKey, + hasUsableCustomProviderApiKey, +})); + +const resolveProviderModelPickerEntries = vi.hoisted(() => vi.fn(() => [])); +const resolveProviderPluginChoice = vi.hoisted(() => vi.fn()); +const runProviderModelSelectedHook = vi.hoisted(() => vi.fn(async () => {})); +vi.mock("../plugins/provider-wizard.js", () => ({ + resolveProviderModelPickerEntries, + resolveProviderPluginChoice, + runProviderModelSelectedHook, +})); + +const resolvePluginProviders = vi.hoisted(() => vi.fn(() => [])); +vi.mock("../plugins/providers.js", () => ({ + resolvePluginProviders, +})); + +const runProviderPluginAuthMethod = vi.hoisted(() => vi.fn()); +vi.mock("./auth-choice.apply.plugin-provider.js", () => ({ + runProviderPluginAuthMethod, })); const OPENROUTER_CATALOG = [ @@ -69,17 +86,40 @@ describe("promptDefaultModel", () => { name: "Claude Sonnet 4.5", }, ]); + resolveProviderModelPickerEntries.mockReturnValue([ + { value: "vllm", label: "vLLM (custom)", 
hint: "Enter vLLM URL + API key + model" }, + ] as never); + resolvePluginProviders.mockReturnValue([{ id: "vllm" }] as never); + resolveProviderPluginChoice.mockReturnValue({ + provider: { id: "vllm", label: "vLLM", auth: [] }, + method: { id: "custom", label: "vLLM", kind: "custom" }, + }); + runProviderPluginAuthMethod.mockResolvedValue({ + config: { + models: { + providers: { + vllm: { + baseUrl: "http://127.0.0.1:8000/v1", + api: "openai-completions", + apiKey: "VLLM_API_KEY", + models: [ + { + id: "meta-llama/Meta-Llama-3-8B-Instruct", + name: "meta-llama/Meta-Llama-3-8B-Instruct", + }, + ], + }, + }, + }, + }, + defaultModel: "vllm/meta-llama/Meta-Llama-3-8B-Instruct", + }); const select = vi.fn(async (params) => { - const vllm = params.options.find((opt: { value: string }) => opt.value === "__vllm__"); + const vllm = params.options.find((opt: { value: string }) => opt.value === "vllm"); return (vllm?.value ?? "") as never; }); - const text = vi - .fn() - .mockResolvedValueOnce("http://127.0.0.1:8000/v1") - .mockResolvedValueOnce("sk-vllm-test") - .mockResolvedValueOnce("meta-llama/Meta-Llama-3-8B-Instruct"); - const prompter = makePrompter({ select, text: text as never }); + const prompter = makePrompter({ select }); const config = { agents: { defaults: {} } } as OpenClawConfig; const result = await promptDefaultModel({ @@ -87,17 +127,13 @@ describe("promptDefaultModel", () => { prompter, allowKeep: false, includeManual: false, - includeVllm: true, + includeProviderPluginSetups: true, ignoreAllowlist: true, agentDir: "/tmp/openclaw-agent", + runtime: {} as never, }); - expect(upsertAuthProfileWithLock).toHaveBeenCalledWith( - expect.objectContaining({ - profileId: "vllm:default", - credential: expect.objectContaining({ provider: "vllm" }), - }), - ); + expect(runProviderPluginAuthMethod).toHaveBeenCalledOnce(); expect(result.model).toBe("vllm/meta-llama/Meta-Llama-3-8B-Instruct"); expect(result.config?.models?.providers?.vllm).toMatchObject({ baseUrl: 
"http://127.0.0.1:8000/v1", diff --git a/src/commands/model-picker.ts b/src/commands/model-picker.ts index db794210354..2e97a01a977 100644 --- a/src/commands/model-picker.ts +++ b/src/commands/model-picker.ts @@ -1,6 +1,6 @@ import { ensureAuthProfileStore, listProfilesForProvider } from "../agents/auth-profiles.js"; import { DEFAULT_MODEL, DEFAULT_PROVIDER } from "../agents/defaults.js"; -import { getCustomProviderApiKey, resolveEnvApiKey } from "../agents/model-auth.js"; +import { hasUsableCustomProviderApiKey, resolveEnvApiKey } from "../agents/model-auth.js"; import { loadModelCatalog } from "../agents/model-catalog.js"; import { buildAllowedModelSet, @@ -11,14 +11,19 @@ import { } from "../agents/model-selection.js"; import type { OpenClawConfig } from "../config/config.js"; import { resolveAgentModelPrimaryValue } from "../config/model-input.js"; +import { + resolveProviderPluginChoice, + resolveProviderModelPickerEntries, + runProviderModelSelectedHook, +} from "../plugins/provider-wizard.js"; +import { resolvePluginProviders } from "../plugins/providers.js"; import type { WizardPrompter, WizardSelectOption } from "../wizard/prompts.js"; +import { runProviderPluginAuthMethod } from "./auth-choice.apply.plugin-provider.js"; import { formatTokenK } from "./models/shared.js"; import { OPENAI_CODEX_DEFAULT_MODEL } from "./openai-codex-model-default.js"; -import { promptAndConfigureVllm } from "./vllm-setup.js"; const KEEP_VALUE = "__keep__"; const MANUAL_VALUE = "__manual__"; -const VLLM_VALUE = "__vllm__"; const PROVIDER_FILTER_THRESHOLD = 30; // Models that are internal routing features and should not be shown in selection lists. 
@@ -31,10 +36,13 @@ type PromptDefaultModelParams = { prompter: WizardPrompter; allowKeep?: boolean; includeManual?: boolean; - includeVllm?: boolean; + includeProviderPluginSetups?: boolean; ignoreAllowlist?: boolean; preferredProvider?: string; agentDir?: string; + workspaceDir?: string; + env?: NodeJS.ProcessEnv; + runtime?: import("../runtime.js").RuntimeEnv; message?: string; }; @@ -52,7 +60,7 @@ function hasAuthForProvider( if (resolveEnvApiKey(provider)) { return true; } - if (getCustomProviderApiKey(cfg, provider)) { + if (hasUsableCustomProviderApiKey(cfg, provider)) { return true; } return false; @@ -180,7 +188,7 @@ export async function promptDefaultModel( const cfg = params.config; const allowKeep = params.allowKeep ?? true; const includeManual = params.includeManual ?? true; - const includeVllm = params.includeVllm ?? false; + const includeProviderPluginSetups = params.includeProviderPluginSetups ?? false; const ignoreAllowlist = params.ignoreAllowlist ?? false; const preferredProviderRaw = params.preferredProvider?.trim(); const preferredProvider = preferredProviderRaw @@ -227,19 +235,19 @@ export async function promptDefaultModel( }); } - const providers = Array.from(new Set(models.map((entry) => entry.provider))).toSorted((a, b) => + const providerIds = Array.from(new Set(models.map((entry) => entry.provider))).toSorted((a, b) => a.localeCompare(b), ); - const hasPreferredProvider = preferredProvider ? providers.includes(preferredProvider) : false; + const hasPreferredProvider = preferredProvider ? 
providerIds.includes(preferredProvider) : false; const shouldPromptProvider = - !hasPreferredProvider && providers.length > 1 && models.length > PROVIDER_FILTER_THRESHOLD; + !hasPreferredProvider && providerIds.length > 1 && models.length > PROVIDER_FILTER_THRESHOLD; if (shouldPromptProvider) { const selection = await params.prompter.select({ message: "Filter models by provider", options: [ { value: "*", label: "All providers" }, - ...providers.map((provider) => { + ...providerIds.map((provider) => { const count = models.filter((entry) => entry.provider === provider).length; return { value: provider, @@ -286,12 +294,14 @@ export async function promptDefaultModel( if (includeManual) { options.push({ value: MANUAL_VALUE, label: "Enter model manually" }); } - if (includeVllm && agentDir) { - options.push({ - value: VLLM_VALUE, - label: "vLLM (custom)", - hint: "Enter vLLM URL + API key + model", - }); + if (includeProviderPluginSetups && agentDir) { + options.push( + ...resolveProviderModelPickerEntries({ + config: cfg, + workspaceDir: params.workspaceDir, + env: params.env, + }), + ); } const seen = new Set(); @@ -337,23 +347,65 @@ export async function promptDefaultModel( initialValue: configuredRaw || resolvedKey || undefined, }); } - if (selection === VLLM_VALUE) { - if (!agentDir) { + const pluginProviders = resolvePluginProviders({ + config: cfg, + workspaceDir: params.workspaceDir, + env: params.env, + }); + const pluginResolution = selection.startsWith("provider-plugin:") + ? selection + : selection.includes("/") + ? null + : pluginProviders.some( + (provider) => normalizeProviderId(provider.id) === normalizeProviderId(selection), + ) + ? 
selection + : null; + if (pluginResolution) { + if (!agentDir || !params.runtime) { await params.prompter.note( - "vLLM setup requires an agent directory context.", - "vLLM not available", + "Provider setup requires agent and runtime context.", + "Provider setup unavailable", ); return {}; } - const { config: nextConfig, modelRef } = await promptAndConfigureVllm({ - cfg, - prompter: params.prompter, - agentDir, + const resolved = resolveProviderPluginChoice({ + providers: pluginProviders, + choice: pluginResolution, }); - - return { model: modelRef, config: nextConfig }; + if (!resolved) { + return {}; + } + const applied = await runProviderPluginAuthMethod({ + config: cfg, + runtime: params.runtime, + prompter: params.prompter, + method: resolved.method, + agentDir, + workspaceDir: params.workspaceDir, + }); + if (applied.defaultModel) { + await runProviderModelSelectedHook({ + config: applied.config, + model: applied.defaultModel, + prompter: params.prompter, + agentDir, + workspaceDir: params.workspaceDir, + env: params.env, + }); + } + return { model: applied.defaultModel, config: applied.config }; } - return { model: String(selection) }; + const model = String(selection); + await runProviderModelSelectedHook({ + config: cfg, + model, + prompter: params.prompter, + agentDir, + workspaceDir: params.workspaceDir, + env: params.env, + }); + return { model }; } export async function promptModelAllowlist(params: { diff --git a/src/commands/models.list.e2e.test.ts b/src/commands/models.list.e2e.test.ts index e7d55e00b3c..f3d6dce4406 100644 --- a/src/commands/models.list.e2e.test.ts +++ b/src/commands/models.list.e2e.test.ts @@ -21,6 +21,8 @@ const resolveAuthStorePathForDisplay = vi const resolveProfileUnusableUntilForDisplay = vi.fn().mockReturnValue(null); const resolveEnvApiKey = vi.fn().mockReturnValue(undefined); const resolveAwsSdkEnvVarName = vi.fn().mockReturnValue(undefined); +const hasUsableCustomProviderApiKey = vi.fn().mockReturnValue(false); +const 
resolveUsableCustomProviderApiKey = vi.fn().mockReturnValue(null); const getCustomProviderApiKey = vi.fn().mockReturnValue(undefined); const modelRegistryState = { models: [] as Array>, @@ -57,6 +59,8 @@ vi.mock("../agents/auth-profiles.js", () => ({ vi.mock("../agents/model-auth.js", () => ({ resolveEnvApiKey, resolveAwsSdkEnvVarName, + hasUsableCustomProviderApiKey, + resolveUsableCustomProviderApiKey, getCustomProviderApiKey, })); @@ -159,6 +163,30 @@ describe("models list/status", () => { baseUrl: "https://api.openai.com/v1", contextWindow: 128000, }; + const OPENAI_SPARK_MODEL = { + provider: "openai", + id: "gpt-5.3-codex-spark", + name: "GPT-5.3 Codex Spark", + input: ["text", "image"], + baseUrl: "https://api.openai.com/v1", + contextWindow: 128000, + }; + const OPENAI_CODEX_SPARK_MODEL = { + provider: "openai-codex", + id: "gpt-5.3-codex-spark", + name: "GPT-5.3 Codex Spark", + input: ["text"], + baseUrl: "https://chatgpt.com/backend-api", + contextWindow: 128000, + }; + const AZURE_OPENAI_SPARK_MODEL = { + provider: "azure-openai-responses", + id: "gpt-5.3-codex-spark", + name: "GPT-5.3 Codex Spark", + input: ["text", "image"], + baseUrl: "https://example.openai.azure.com/openai/v1", + contextWindow: 128000, + }; const GOOGLE_ANTIGRAVITY_TEMPLATE_BASE = { provider: "google-antigravity", api: "google-gemini-cli", @@ -269,6 +297,29 @@ describe("models list/status", () => { expect(runtime.log.mock.calls[0]?.[0]).toBe("zai/glm-4.7"); }); + it("models list plain keeps canonical OpenRouter native ids", async () => { + loadConfig.mockReturnValue({ + agents: { defaults: { model: "openrouter/hunter-alpha" } }, + }); + const runtime = makeRuntime(); + + modelRegistryState.models = [ + { + provider: "openrouter", + id: "openrouter/hunter-alpha", + name: "Hunter Alpha", + input: ["text"], + baseUrl: "https://openrouter.ai/api/v1", + contextWindow: 1048576, + }, + ]; + modelRegistryState.available = modelRegistryState.models; + await modelsListCommand({ plain: true }, 
runtime); + + expect(runtime.log).toHaveBeenCalledTimes(1); + expect(runtime.log.mock.calls[0]?.[0]).toBe("openrouter/hunter-alpha"); + }); + it.each(["z.ai", "Z.AI", "z-ai"] as const)( "models list provider filter normalizes %s alias", async (provider) => { @@ -336,6 +387,34 @@ describe("models list/status", () => { expect(ensureOpenClawModelsJson).not.toHaveBeenCalled(); }); + it("filters stale direct OpenAI spark rows from models list and registry views", async () => { + setDefaultModel("openai-codex/gpt-5.3-codex-spark"); + modelRegistryState.models = [ + OPENAI_SPARK_MODEL, + AZURE_OPENAI_SPARK_MODEL, + OPENAI_CODEX_SPARK_MODEL, + ]; + modelRegistryState.available = [ + OPENAI_SPARK_MODEL, + AZURE_OPENAI_SPARK_MODEL, + OPENAI_CODEX_SPARK_MODEL, + ]; + const runtime = makeRuntime(); + + await modelsListCommand({ all: true, json: true }, runtime); + + const payload = parseJsonLog(runtime); + expect(payload.models.map((model: { key: string }) => model.key)).toEqual([ + "openai-codex/gpt-5.3-codex-spark", + ]); + + const loaded = await loadModelRegistry({} as never); + expect(loaded.models.map((model) => `${model.provider}/${model.id}`)).toEqual([ + "openai-codex/gpt-5.3-codex-spark", + ]); + expect(Array.from(loaded.availableKeys ?? 
[])).toEqual(["openai-codex/gpt-5.3-codex-spark"]); + }); + it("modelsListCommand persists using the write snapshot config when provided", async () => { modelRegistryState.models = [OPENAI_MODEL]; modelRegistryState.available = [OPENAI_MODEL]; diff --git a/src/commands/models.set.e2e.test.ts b/src/commands/models.set.e2e.test.ts index 6671c6bb1f0..f544a1fc383 100644 --- a/src/commands/models.set.e2e.test.ts +++ b/src/commands/models.set.e2e.test.ts @@ -110,6 +110,45 @@ describe("models set + fallbacks", () => { expectWrittenPrimaryModel("zai/glm-4.7"); }); + it("keeps canonical OpenRouter native ids in models set", async () => { + mockConfigSnapshot({}); + const runtime = makeRuntime(); + + await modelsSetCommand("openrouter/hunter-alpha", runtime); + + expectWrittenPrimaryModel("openrouter/hunter-alpha"); + }); + + it("migrates legacy duplicated OpenRouter keys on write", async () => { + mockConfigSnapshot({ + agents: { + defaults: { + models: { + "openrouter/openrouter/hunter-alpha": { + params: { thinking: "high" }, + }, + }, + }, + }, + }); + const runtime = makeRuntime(); + + await modelsSetCommand("openrouter/hunter-alpha", runtime); + + expect(writeConfigFile).toHaveBeenCalledTimes(1); + const written = getWrittenConfig(); + expect(written.agents).toEqual({ + defaults: { + model: { primary: "openrouter/hunter-alpha" }, + models: { + "openrouter/hunter-alpha": { + params: { thinking: "high" }, + }, + }, + }, + }); + }); + it("rewrites string defaults.model to object form when setting primary", async () => { mockConfigSnapshot({ agents: { defaults: { model: "openai/gpt-4.1-mini" } } }); const runtime = makeRuntime(); diff --git a/src/commands/models/auth.test.ts b/src/commands/models/auth.test.ts index d5e383d775e..e59e7fd021e 100644 --- a/src/commands/models/auth.test.ts +++ b/src/commands/models/auth.test.ts @@ -183,7 +183,7 @@ describe("modelsAuthLoginCommand", () => { "Auth profile: openai-codex:user@example.com (openai-codex/oauth)", ); 
expect(runtime.log).toHaveBeenCalledWith( - "Default model available: openai-codex/gpt-5.3-codex (use --set-default to apply)", + "Default model available: openai-codex/gpt-5.4 (use --set-default to apply)", ); }); @@ -193,9 +193,9 @@ describe("modelsAuthLoginCommand", () => { await modelsAuthLoginCommand({ provider: "openai-codex", setDefault: true }, runtime); expect(lastUpdatedConfig?.agents?.defaults?.model).toEqual({ - primary: "openai-codex/gpt-5.3-codex", + primary: "openai-codex/gpt-5.4", }); - expect(runtime.log).toHaveBeenCalledWith("Default model set to openai-codex/gpt-5.3-codex"); + expect(runtime.log).toHaveBeenCalledWith("Default model set to openai-codex/gpt-5.4"); }); it("keeps existing plugin error behavior for non built-in providers", async () => { diff --git a/src/commands/models/fallbacks-shared.ts b/src/commands/models/fallbacks-shared.ts index eb1401edd86..b7ffb79f222 100644 --- a/src/commands/models/fallbacks-shared.ts +++ b/src/commands/models/fallbacks-shared.ts @@ -2,6 +2,7 @@ import { buildModelAliasIndex, resolveModelRefFromString } from "../../agents/mo import type { OpenClawConfig } from "../../config/config.js"; import { logConfigUpdated } from "../../config/logging.js"; import { resolveAgentModelFallbackValues, toAgentModelListLike } from "../../config/model-input.js"; +import type { AgentModelEntryConfig } from "../../config/types.agent-defaults.js"; import type { RuntimeEnv } from "../../runtime.js"; import { loadModelsConfig } from "./load-config.js"; import { @@ -11,6 +12,7 @@ import { modelKey, resolveModelTarget, resolveModelKeysFromEntries, + upsertCanonicalModelConfigEntry, updateConfig, } from "./shared.js"; @@ -79,11 +81,10 @@ export async function addFallbackCommand( ) { const updated = await updateConfig((cfg) => { const resolved = resolveModelTarget({ raw: modelRaw, cfg }); - const targetKey = modelKey(resolved.provider, resolved.model); - const nextModels = { ...cfg.agents?.defaults?.models } as Record; - if 
(!nextModels[targetKey]) { - nextModels[targetKey] = {}; - } + const nextModels = { + ...cfg.agents?.defaults?.models, + } as Record; + const targetKey = upsertCanonicalModelConfigEntry(nextModels, resolved); const existing = getFallbacks(cfg, params.key); const existingKeys = resolveModelKeysFromEntries({ cfg, entries: existing }); if (existingKeys.includes(targetKey)) { diff --git a/src/commands/models/list.auth-overview.test.ts b/src/commands/models/list.auth-overview.test.ts index 98906ced281..65c324d4b42 100644 --- a/src/commands/models/list.auth-overview.test.ts +++ b/src/commands/models/list.auth-overview.test.ts @@ -1,7 +1,28 @@ import { describe, expect, it } from "vitest"; import { NON_ENV_SECRETREF_MARKER } from "../../agents/model-auth-markers.js"; +import { withEnv } from "../../test-utils/env.js"; import { resolveProviderAuthOverview } from "./list.auth-overview.js"; +function resolveOpenAiOverview(apiKey: string) { + return resolveProviderAuthOverview({ + provider: "openai", + cfg: { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions", + apiKey, + models: [], + }, + }, + }, + } as never, + store: { version: 1, profiles: {} } as never, + modelsPath: "/tmp/models.json", + }); +} + describe("resolveProviderAuthOverview", () => { it("does not throw when token profile only has tokenRef", () => { const overview = resolveProviderAuthOverview({ @@ -24,50 +45,39 @@ describe("resolveProviderAuthOverview", () => { }); it("renders marker-backed models.json auth as marker detail", () => { - const overview = resolveProviderAuthOverview({ - provider: "openai", - cfg: { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: NON_ENV_SECRETREF_MARKER, - models: [], - }, - }, - }, - } as never, - store: { version: 1, profiles: {} } as never, - modelsPath: "/tmp/models.json", - }); + const overview = withEnv({ OPENAI_API_KEY: undefined }, () => + 
resolveOpenAiOverview(NON_ENV_SECRETREF_MARKER), + ); - expect(overview.effective.kind).toBe("models.json"); - expect(overview.effective.detail).toContain(`marker(${NON_ENV_SECRETREF_MARKER})`); + expect(overview.effective.kind).toBe("missing"); + expect(overview.effective.detail).toBe("missing"); expect(overview.modelsJson?.value).toContain(`marker(${NON_ENV_SECRETREF_MARKER})`); }); it("keeps env-var-shaped models.json values masked to avoid accidental plaintext exposure", () => { - const overview = resolveProviderAuthOverview({ - provider: "openai", - cfg: { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: "OPENAI_API_KEY", // pragma: allowlist secret - models: [], - }, - }, - }, - } as never, - store: { version: 1, profiles: {} } as never, - modelsPath: "/tmp/models.json", - }); + const overview = withEnv({ OPENAI_API_KEY: undefined }, () => + resolveOpenAiOverview("OPENAI_API_KEY"), + ); - expect(overview.effective.kind).toBe("models.json"); - expect(overview.effective.detail).not.toContain("marker("); - expect(overview.effective.detail).not.toContain("OPENAI_API_KEY"); + expect(overview.effective.kind).toBe("missing"); + expect(overview.effective.detail).toBe("missing"); + expect(overview.modelsJson?.value).not.toContain("marker("); + expect(overview.modelsJson?.value).not.toContain("OPENAI_API_KEY"); + }); + + it("treats env-var marker as usable only when the env key is currently resolvable", () => { + const prior = process.env.OPENAI_API_KEY; + process.env.OPENAI_API_KEY = "sk-openai-from-env"; // pragma: allowlist secret + try { + const overview = resolveOpenAiOverview("OPENAI_API_KEY"); + expect(overview.effective.kind).toBe("env"); + expect(overview.effective.detail).not.toContain("OPENAI_API_KEY"); + } finally { + if (prior === undefined) { + delete process.env.OPENAI_API_KEY; + } else { + process.env.OPENAI_API_KEY = prior; + } + } }); }); diff --git 
a/src/commands/models/list.auth-overview.ts b/src/commands/models/list.auth-overview.ts index 28880415eeb..17803153c42 100644 --- a/src/commands/models/list.auth-overview.ts +++ b/src/commands/models/list.auth-overview.ts @@ -7,7 +7,11 @@ import { resolveProfileUnusableUntilForDisplay, } from "../../agents/auth-profiles.js"; import { isNonSecretApiKeyMarker } from "../../agents/model-auth-markers.js"; -import { getCustomProviderApiKey, resolveEnvApiKey } from "../../agents/model-auth.js"; +import { + getCustomProviderApiKey, + resolveEnvApiKey, + resolveUsableCustomProviderApiKey, +} from "../../agents/model-auth.js"; import type { OpenClawConfig } from "../../config/config.js"; import { shortenHomePath } from "../../utils.js"; import { maskApiKey } from "./list.format.js"; @@ -99,6 +103,7 @@ export function resolveProviderAuthOverview(params: { const envKey = resolveEnvApiKey(provider); const customKey = getCustomProviderApiKey(cfg, provider); + const usableCustomKey = resolveUsableCustomProviderApiKey({ cfg, provider }); const effective: ProviderAuthOverview["effective"] = (() => { if (profiles.length > 0) { @@ -115,8 +120,8 @@ export function resolveProviderAuthOverview(params: { detail: isOAuthEnv ? 
"OAuth (env)" : maskApiKey(envKey.apiKey), }; } - if (customKey) { - return { kind: "models.json", detail: formatMarkerOrSecret(customKey) }; + if (usableCustomKey) { + return { kind: "models.json", detail: formatMarkerOrSecret(usableCustomKey.apiKey) }; } return { kind: "missing", detail: "missing" }; })(); diff --git a/src/commands/models/list.configured.ts b/src/commands/models/list.configured.ts index fed70a4fe47..d83dd9d7f1b 100644 --- a/src/commands/models/list.configured.ts +++ b/src/commands/models/list.configured.ts @@ -39,6 +39,17 @@ export function resolveConfiguredEntries(cfg: OpenClawConfig) { tagsByKey.get(key)?.add(tag); }; + const addResolvedModelRef = (raw: string, tag: string) => { + const resolved = resolveModelRefFromString({ + raw, + defaultProvider: DEFAULT_PROVIDER, + aliasIndex, + }); + if (resolved) { + addEntry(resolved.ref, tag); + } + }; + addEntry(resolvedDefault, "default"); const modelFallbacks = resolveAgentModelFallbackValues(cfg.agents?.defaults?.model); @@ -46,38 +57,15 @@ export function resolveConfiguredEntries(cfg: OpenClawConfig) { const imagePrimary = resolveAgentModelPrimaryValue(cfg.agents?.defaults?.imageModel) ?? ""; modelFallbacks.forEach((raw, idx) => { - const resolved = resolveModelRefFromString({ - raw: String(raw ?? ""), - defaultProvider: DEFAULT_PROVIDER, - aliasIndex, - }); - if (!resolved) { - return; - } - addEntry(resolved.ref, `fallback#${idx + 1}`); + addResolvedModelRef(String(raw ?? ""), `fallback#${idx + 1}`); }); if (imagePrimary) { - const resolved = resolveModelRefFromString({ - raw: imagePrimary, - defaultProvider: DEFAULT_PROVIDER, - aliasIndex, - }); - if (resolved) { - addEntry(resolved.ref, "image"); - } + addResolvedModelRef(imagePrimary, "image"); } imageFallbacks.forEach((raw, idx) => { - const resolved = resolveModelRefFromString({ - raw: String(raw ?? 
""), - defaultProvider: DEFAULT_PROVIDER, - aliasIndex, - }); - if (!resolved) { - return; - } - addEntry(resolved.ref, `img-fallback#${idx + 1}`); + addResolvedModelRef(String(raw ?? ""), `img-fallback#${idx + 1}`); }); for (const key of Object.keys(cfg.agents?.defaults?.models ?? {})) { diff --git a/src/commands/models/list.list-command.forward-compat.test.ts b/src/commands/models/list.list-command.forward-compat.test.ts index eafe6a1cb01..f0cc594ab35 100644 --- a/src/commands/models/list.list-command.forward-compat.test.ts +++ b/src/commands/models/list.list-command.forward-compat.test.ts @@ -96,6 +96,23 @@ function lastPrintedRows() { return (mocks.printModelTable.mock.calls.at(-1)?.[0] ?? []) as T[]; } +function mockDiscoveredCodex53Registry() { + mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] }); + mocks.loadModelRegistry.mockResolvedValueOnce({ + models: [{ ...OPENAI_CODEX_53_MODEL }], + availableKeys: new Set(["openai-codex/gpt-5.3-codex"]), + registry: { + getAll: () => [{ ...OPENAI_CODEX_53_MODEL }], + }, + }); +} + +async function runAllOpenAiCodexCommand() { + const runtime = createRuntime(); + await modelsListCommand({ all: true, provider: "openai-codex", json: true }, runtime as never); + expect(mocks.printModelTable).toHaveBeenCalled(); +} + vi.mock("../../config/config.js", () => ({ loadConfig: mocks.loadConfig, getRuntimeConfigSnapshot: vi.fn().mockReturnValue(null), @@ -261,14 +278,7 @@ describe("modelsListCommand forward-compat", () => { describe("--all catalog supplementation", () => { it("includes synthetic codex gpt-5.4 in --all output when catalog supports it", async () => { - mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] }); - mocks.loadModelRegistry.mockResolvedValueOnce({ - models: [{ ...OPENAI_CODEX_53_MODEL }], - availableKeys: new Set(["openai-codex/gpt-5.3-codex"]), - registry: { - getAll: () => [{ ...OPENAI_CODEX_53_MODEL }], - }, - }); + mockDiscoveredCodex53Registry(); 
mocks.loadModelCatalog.mockResolvedValueOnce([ { provider: "openai-codex", @@ -304,14 +314,7 @@ describe("modelsListCommand forward-compat", () => { return undefined; }, ); - const runtime = createRuntime(); - - await modelsListCommand( - { all: true, provider: "openai-codex", json: true }, - runtime as never, - ); - - expect(mocks.printModelTable).toHaveBeenCalled(); + await runAllOpenAiCodexCommand(); expect(lastPrintedRows<{ key: string; available: boolean }>()).toEqual([ expect.objectContaining({ key: "openai-codex/gpt-5.3-codex", @@ -324,10 +327,49 @@ describe("modelsListCommand forward-compat", () => { }); it("keeps discovered rows in --all output when catalog lookup is empty", async () => { + mockDiscoveredCodex53Registry(); + mocks.loadModelCatalog.mockResolvedValueOnce([]); + await runAllOpenAiCodexCommand(); + expect(lastPrintedRows<{ key: string }>()).toEqual([ + expect.objectContaining({ + key: "openai-codex/gpt-5.3-codex", + }), + ]); + }); + + it("suppresses direct openai gpt-5.3-codex-spark rows in --all output", async () => { mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] }); mocks.loadModelRegistry.mockResolvedValueOnce({ - models: [{ ...OPENAI_CODEX_53_MODEL }], - availableKeys: new Set(["openai-codex/gpt-5.3-codex"]), + models: [ + { + provider: "openai", + id: "gpt-5.3-codex-spark", + name: "GPT-5.3 Codex Spark", + api: "openai-responses", + baseUrl: "https://api.openai.com/v1", + input: ["text", "image"], + contextWindow: 128000, + maxTokens: 32000, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + }, + { + provider: "azure-openai-responses", + id: "gpt-5.3-codex-spark", + name: "GPT-5.3 Codex Spark", + api: "azure-openai-responses", + baseUrl: "https://example.openai.azure.com/openai/v1", + input: ["text", "image"], + contextWindow: 128000, + maxTokens: 32000, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + }, + { ...OPENAI_CODEX_53_MODEL }, + ], + availableKeys: new Set([ + 
"openai/gpt-5.3-codex-spark", + "azure-openai-responses/gpt-5.3-codex-spark", + "openai-codex/gpt-5.3-codex", + ]), registry: { getAll: () => [{ ...OPENAI_CODEX_53_MODEL }], }, @@ -335,10 +377,7 @@ describe("modelsListCommand forward-compat", () => { mocks.loadModelCatalog.mockResolvedValueOnce([]); const runtime = createRuntime(); - await modelsListCommand( - { all: true, provider: "openai-codex", json: true }, - runtime as never, - ); + await modelsListCommand({ all: true, json: true }, runtime as never); expect(mocks.printModelTable).toHaveBeenCalled(); expect(lastPrintedRows<{ key: string }>()).toEqual([ diff --git a/src/commands/models/list.list-command.ts b/src/commands/models/list.list-command.ts index d99a84199aa..57d0af32b95 100644 --- a/src/commands/models/list.list-command.ts +++ b/src/commands/models/list.list-command.ts @@ -25,7 +25,7 @@ export async function modelsListCommand( runtime: RuntimeEnv, ) { ensureFlagCompatibility(opts); - const { ensureAuthProfileStore } = await import("../../agents/auth-profiles.js"); + const { ensureAuthProfileStore } = await import("../../agents/auth-profiles.runtime.js"); const { ensureOpenClawModelsJson } = await import("../../agents/models-config.js"); const { sourceConfig, resolvedConfig: cfg } = await loadModelsConfigWithSource({ commandName: "models list", diff --git a/src/commands/models/list.probe.ts b/src/commands/models/list.probe.ts index 40eb6b99b9b..7b75d1be726 100644 --- a/src/commands/models/list.probe.ts +++ b/src/commands/models/list.probe.ts @@ -12,8 +12,7 @@ import { resolveAuthProfileOrder, } from "../../agents/auth-profiles.js"; import { describeFailoverError } from "../../agents/failover-error.js"; -import { isNonSecretApiKeyMarker } from "../../agents/model-auth-markers.js"; -import { getCustomProviderApiKey, resolveEnvApiKey } from "../../agents/model-auth.js"; +import { hasUsableCustomProviderApiKey, resolveEnvApiKey } from "../../agents/model-auth.js"; import { loadModelCatalog } from 
"../../agents/model-catalog.js"; import { findNormalizedProviderValue, @@ -373,8 +372,7 @@ export async function buildProbeTargets(params: { } const envKey = resolveEnvApiKey(providerKey); - const customKey = getCustomProviderApiKey(cfg, providerKey); - const hasUsableModelsJsonKey = Boolean(customKey && !isNonSecretApiKeyMarker(customKey)); + const hasUsableModelsJsonKey = hasUsableCustomProviderApiKey(cfg, providerKey); if (!envKey && !hasUsableModelsJsonKey) { continue; } @@ -433,12 +431,24 @@ async function probeTarget(params: { error: "No model available for probe", }; } + const model = target.model; const sessionId = `probe-${target.provider}-${crypto.randomUUID()}`; const sessionFile = resolveSessionTranscriptPath(sessionId, agentId); await fs.mkdir(sessionDir, { recursive: true }); const start = Date.now(); + const buildResult = (status: AuthProbeResult["status"], error?: string): AuthProbeResult => ({ + provider: target.provider, + model: `${model.provider}/${model.model}`, + profileId: target.profileId, + label: target.label, + source: target.source, + mode: target.mode, + status, + ...(error ? 
{ error } : {}), + latencyMs: Date.now() - start, + }); try { await runEmbeddedPiAgent({ sessionId, @@ -460,29 +470,13 @@ async function probeTarget(params: { verboseLevel: "off", streamParams: { maxTokens }, }); - return { - provider: target.provider, - model: `${target.model.provider}/${target.model.model}`, - profileId: target.profileId, - label: target.label, - source: target.source, - mode: target.mode, - status: "ok", - latencyMs: Date.now() - start, - }; + return buildResult("ok"); } catch (err) { const described = describeFailoverError(err); - return { - provider: target.provider, - model: `${target.model.provider}/${target.model.model}`, - profileId: target.profileId, - label: target.label, - source: target.source, - mode: target.mode, - status: mapFailoverReasonToProbeStatus(described.reason), - error: redactSecrets(described.message), - latencyMs: Date.now() - start, - }; + return buildResult( + mapFailoverReasonToProbeStatus(described.reason), + redactSecrets(described.message), + ); } } diff --git a/src/commands/models/list.registry.ts b/src/commands/models/list.registry.ts index 340d49155df..0b68d9685e3 100644 --- a/src/commands/models/list.registry.ts +++ b/src/commands/models/list.registry.ts @@ -4,10 +4,11 @@ import { resolveOpenClawAgentDir } from "../../agents/agent-paths.js"; import type { AuthProfileStore } from "../../agents/auth-profiles.js"; import { listProfilesForProvider } from "../../agents/auth-profiles.js"; import { - getCustomProviderApiKey, + hasUsableCustomProviderApiKey, resolveAwsSdkEnvVarName, resolveEnvApiKey, } from "../../agents/model-auth.js"; +import { shouldSuppressBuiltInModel } from "../../agents/model-suppression.js"; import { discoverAuthStorage, discoverModels } from "../../agents/pi-model-discovery.js"; import type { OpenClawConfig } from "../../config/config.js"; import { @@ -35,7 +36,7 @@ const hasAuthForProvider = ( if (resolveEnvApiKey(provider)) { return true; } - if (getCustomProviderApiKey(cfg, provider)) { + 
if (hasUsableCustomProviderApiKey(cfg, provider)) { return true; } return false; @@ -87,7 +88,9 @@ function loadAvailableModels(registry: ModelRegistry): Model[] { throw normalizeAvailabilityError(err); } try { - return validateAvailableModels(availableModels); + return validateAvailableModels(availableModels).filter( + (model) => !shouldSuppressBuiltInModel({ provider: model.provider, id: model.id }), + ); } catch (err) { throw normalizeAvailabilityError(err); } @@ -100,7 +103,9 @@ export async function loadModelRegistry( const agentDir = resolveOpenClawAgentDir(); const authStorage = discoverAuthStorage(agentDir); const registry = discoverModels(authStorage, agentDir); - const models = registry.getAll(); + const models = registry + .getAll() + .filter((model) => !shouldSuppressBuiltInModel({ provider: model.provider, id: model.id })); let availableKeys: Set | undefined; let availabilityErrorMessage: string | undefined; diff --git a/src/commands/models/list.rows.ts b/src/commands/models/list.rows.ts index c00d21fd6df..7abf7861914 100644 --- a/src/commands/models/list.rows.ts +++ b/src/commands/models/list.rows.ts @@ -2,6 +2,7 @@ import type { Api, Model } from "@mariozechner/pi-ai"; import type { ModelRegistry } from "@mariozechner/pi-coding-agent"; import type { AuthProfileStore } from "../../agents/auth-profiles.js"; import { loadModelCatalog } from "../../agents/model-catalog.js"; +import { shouldSuppressBuiltInModel } from "../../agents/model-suppression.js"; import { resolveModelWithRegistry } from "../../agents/pi-embedded-runner/model.js"; import type { OpenClawConfig } from "../../config/config.js"; import { loadModelRegistry, toModelRow } from "./list.registry.js"; @@ -79,6 +80,9 @@ export function appendDiscoveredRows(params: { }); for (const model of sorted) { + if (shouldSuppressBuiltInModel({ provider: model.provider, id: model.id })) { + continue; + } if (!matchesRowFilter(params.context.filter, model)) { continue; } diff --git 
a/src/commands/models/list.status-command.ts b/src/commands/models/list.status-command.ts index 59614e3f866..156860bb960 100644 --- a/src/commands/models/list.status-command.ts +++ b/src/commands/models/list.status-command.ts @@ -38,7 +38,7 @@ import { } from "../../infra/provider-usage.js"; import { getShellEnvAppliedKeys, shouldEnableShellEnvFallback } from "../../infra/shell-env.js"; import type { RuntimeEnv } from "../../runtime.js"; -import { renderTable } from "../../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../../terminal/table.js"; import { colorize, theme } from "../../terminal/theme.js"; import { shortenHomePath } from "../../utils.js"; import { resolveProviderAuthOverview } from "./list.auth-overview.js"; @@ -631,7 +631,7 @@ export async function modelsStatusCommand( if (probeSummary.results.length === 0) { runtime.log(colorize(rich, theme.muted, "- none")); } else { - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); const sorted = sortProbeResults(probeSummary.results); const statusColor = (status: string) => { if (status === "ok") { diff --git a/src/commands/models/list.status.test.ts b/src/commands/models/list.status.test.ts index 6f06e63f4b8..9b408f50d93 100644 --- a/src/commands/models/list.status.test.ts +++ b/src/commands/models/list.status.test.ts @@ -61,6 +61,8 @@ const mocks = vi.hoisted(() => { } return null; }), + hasUsableCustomProviderApiKey: vi.fn().mockReturnValue(false), + resolveUsableCustomProviderApiKey: vi.fn().mockReturnValue(null), getCustomProviderApiKey: vi.fn().mockReturnValue(undefined), getShellEnvAppliedKeys: vi.fn().mockReturnValue(["OPENAI_API_KEY", "ANTHROPIC_OAUTH_TOKEN"]), shouldEnableShellEnvFallback: vi.fn().mockReturnValue(true), @@ -106,6 +108,8 @@ vi.mock("../../agents/auth-profiles.js", async (importOriginal) => { vi.mock("../../agents/model-auth.js", () => ({ resolveEnvApiKey: mocks.resolveEnvApiKey, + 
hasUsableCustomProviderApiKey: mocks.hasUsableCustomProviderApiKey, + resolveUsableCustomProviderApiKey: mocks.resolveUsableCustomProviderApiKey, getCustomProviderApiKey: mocks.getCustomProviderApiKey, })); diff --git a/src/commands/models/load-config.test.ts b/src/commands/models/load-config.test.ts index b8969fd4681..2d35c012a49 100644 --- a/src/commands/models/load-config.test.ts +++ b/src/commands/models/load-config.test.ts @@ -25,6 +25,27 @@ vi.mock("../../cli/command-secret-targets.js", () => ({ import { loadModelsConfig, loadModelsConfigWithSource } from "./load-config.js"; describe("models load-config", () => { + const runtimeConfig = { + models: { providers: { openai: { apiKey: "sk-runtime" } } }, // pragma: allowlist secret + }; + const resolvedConfig = { + models: { providers: { openai: { apiKey: "sk-resolved" } } }, // pragma: allowlist secret + }; + const targetIds = new Set(["models.providers.*.apiKey"]); + + function mockResolvedConfigFlow(params: { sourceConfig: unknown; diagnostics: string[] }) { + mocks.loadConfig.mockReturnValue(runtimeConfig); + mocks.readConfigFileSnapshotForWrite.mockResolvedValue({ + snapshot: { valid: true, resolved: params.sourceConfig }, + writeOptions: {}, + }); + mocks.getModelsCommandSecretTargetIds.mockReturnValue(targetIds); + mocks.resolveCommandSecretRefsViaGateway.mockResolvedValue({ + resolvedConfig, + diagnostics: params.diagnostics, + }); + } + beforeEach(() => { vi.clearAllMocks(); }); @@ -39,25 +60,9 @@ describe("models load-config", () => { }, }, }; - const runtimeConfig = { - models: { providers: { openai: { apiKey: "sk-runtime" } } }, // pragma: allowlist secret - }; - const resolvedConfig = { - models: { providers: { openai: { apiKey: "sk-resolved" } } }, // pragma: allowlist secret - }; - const targetIds = new Set(["models.providers.*.apiKey"]); const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() }; - mocks.loadConfig.mockReturnValue(runtimeConfig); - 
mocks.readConfigFileSnapshotForWrite.mockResolvedValue({ - snapshot: { valid: true, resolved: sourceConfig }, - writeOptions: {}, - }); - mocks.getModelsCommandSecretTargetIds.mockReturnValue(targetIds); - mocks.resolveCommandSecretRefsViaGateway.mockResolvedValue({ - resolvedConfig, - diagnostics: ["diag-one", "diag-two"], - }); + mockResolvedConfigFlow({ sourceConfig, diagnostics: ["diag-one", "diag-two"] }); const result = await loadModelsConfigWithSource({ commandName: "models list", runtime }); @@ -78,24 +83,7 @@ describe("models load-config", () => { it("loadModelsConfig returns resolved config while preserving runtime snapshot behavior", async () => { const sourceConfig = { models: { providers: {} } }; - const runtimeConfig = { - models: { providers: { openai: { apiKey: "sk-runtime" } } }, // pragma: allowlist secret - }; - const resolvedConfig = { - models: { providers: { openai: { apiKey: "sk-resolved" } } }, // pragma: allowlist secret - }; - const targetIds = new Set(["models.providers.*.apiKey"]); - - mocks.loadConfig.mockReturnValue(runtimeConfig); - mocks.readConfigFileSnapshotForWrite.mockResolvedValue({ - snapshot: { valid: true, resolved: sourceConfig }, - writeOptions: {}, - }); - mocks.getModelsCommandSecretTargetIds.mockReturnValue(targetIds); - mocks.resolveCommandSecretRefsViaGateway.mockResolvedValue({ - resolvedConfig, - diagnostics: [], - }); + mockResolvedConfigFlow({ sourceConfig, diagnostics: [] }); await expect(loadModelsConfig({ commandName: "models list" })).resolves.toBe(resolvedConfig); expect(mocks.setRuntimeConfigSnapshot).toHaveBeenCalledWith(resolvedConfig, sourceConfig); diff --git a/src/commands/models/shared.ts b/src/commands/models/shared.ts index 793e7e4b8e3..604b594b613 100644 --- a/src/commands/models/shared.ts +++ b/src/commands/models/shared.ts @@ -2,6 +2,7 @@ import { listAgentIds } from "../../agents/agent-scope.js"; import { DEFAULT_MODEL, DEFAULT_PROVIDER } from "../../agents/defaults.js"; import { 
buildModelAliasIndex, + legacyModelKey, modelKey, parseModelRef, resolveModelRefFromString, @@ -14,6 +15,7 @@ import { } from "../../config/config.js"; import { formatConfigIssueLines } from "../../config/issue-format.js"; import { toAgentModelListLike } from "../../config/model-input.js"; +import type { AgentModelEntryConfig } from "../../config/types.agent-defaults.js"; import type { AgentModelConfig } from "../../config/types.agents-shared.js"; import { normalizeAgentId } from "../../routing/session-key.js"; @@ -163,6 +165,25 @@ export function resolveKnownAgentId(params: { export type PrimaryFallbackConfig = { primary?: string; fallbacks?: string[] }; +export function upsertCanonicalModelConfigEntry( + models: Record, + params: { provider: string; model: string }, +) { + const key = modelKey(params.provider, params.model); + const legacyKey = legacyModelKey(params.provider, params.model); + if (!models[key]) { + if (legacyKey && models[legacyKey]) { + models[key] = models[legacyKey]; + } else { + models[key] = {}; + } + } + if (legacyKey) { + delete models[legacyKey]; + } + return key; +} + export function mergePrimaryFallbackConfig( existing: PrimaryFallbackConfig | undefined, patch: { primary?: string; fallbacks?: string[] }, @@ -184,12 +205,10 @@ export function applyDefaultModelPrimaryUpdate(params: { field: "model" | "imageModel"; }): OpenClawConfig { const resolved = resolveModelTarget({ raw: params.modelRaw, cfg: params.cfg }); - const key = `${resolved.provider}/${resolved.model}`; - - const nextModels = { ...params.cfg.agents?.defaults?.models }; - if (!nextModels[key]) { - nextModels[key] = {}; - } + const nextModels = { + ...params.cfg.agents?.defaults?.models, + } as Record; + const key = upsertCanonicalModelConfigEntry(nextModels, resolved); const defaults = params.cfg.agents?.defaults ?? 
{}; const existing = toAgentModelListLike( diff --git a/src/commands/ollama-setup.test.ts b/src/commands/ollama-setup.test.ts new file mode 100644 index 00000000000..0b9b5d0e414 --- /dev/null +++ b/src/commands/ollama-setup.test.ts @@ -0,0 +1,399 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import type { RuntimeEnv } from "../runtime.js"; +import { jsonResponse, requestBodyText, requestUrl } from "../test-helpers/http.js"; +import type { WizardPrompter } from "../wizard/prompts.js"; +import { + configureOllamaNonInteractive, + ensureOllamaModelPulled, + promptAndConfigureOllama, +} from "./ollama-setup.js"; + +const upsertAuthProfileWithLock = vi.hoisted(() => vi.fn(async () => {})); +vi.mock("../agents/auth-profiles.js", () => ({ + upsertAuthProfileWithLock, +})); + +const openUrlMock = vi.hoisted(() => vi.fn(async () => false)); +vi.mock("./onboard-helpers.js", async (importOriginal) => { + const original = await importOriginal(); + return { ...original, openUrl: openUrlMock }; +}); + +const isRemoteEnvironmentMock = vi.hoisted(() => vi.fn(() => false)); +vi.mock("./oauth-env.js", () => ({ + isRemoteEnvironment: isRemoteEnvironmentMock, +})); + +function createOllamaFetchMock(params: { + tags?: string[]; + show?: Record; + meResponses?: Response[]; + pullResponse?: Response; + tagsError?: Error; +}) { + const meResponses = [...(params.meResponses ?? [])]; + return vi.fn(async (input: string | URL | Request, init?: RequestInit) => { + const url = requestUrl(input); + if (url.endsWith("/api/tags")) { + if (params.tagsError) { + throw params.tagsError; + } + return jsonResponse({ models: (params.tags ?? []).map((name) => ({ name })) }); + } + if (url.endsWith("/api/show")) { + const body = JSON.parse(requestBodyText(init?.body)) as { name?: string }; + const contextWindow = body.name ? params.show?.[body.name] : undefined; + return contextWindow + ? 
jsonResponse({ model_info: { "llama.context_length": contextWindow } }) + : jsonResponse({}); + } + if (url.endsWith("/api/me")) { + return meResponses.shift() ?? jsonResponse({ username: "testuser" }); + } + if (url.endsWith("/api/pull")) { + return params.pullResponse ?? new Response('{"status":"success"}\n', { status: 200 }); + } + throw new Error(`Unexpected fetch: ${url}`); + }); +} + +function createModePrompter( + mode: "local" | "remote", + params?: { confirm?: boolean }, +): WizardPrompter { + return { + text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), + select: vi.fn().mockResolvedValueOnce(mode), + ...(params?.confirm !== undefined + ? { confirm: vi.fn().mockResolvedValueOnce(params.confirm) } + : {}), + note: vi.fn(async () => undefined), + } as unknown as WizardPrompter; +} + +function createSignedOutRemoteFetchMock() { + return createOllamaFetchMock({ + tags: ["llama3:8b"], + meResponses: [ + jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401), + jsonResponse({ username: "testuser" }), + ], + }); +} + +function createDefaultOllamaConfig(primary: string) { + return { + agents: { defaults: { model: { primary } } }, + models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } }, + }; +} + +function createRuntime() { + return { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), + } as unknown as RuntimeEnv; +} + +describe("ollama setup", () => { + afterEach(() => { + vi.unstubAllGlobals(); + upsertAuthProfileWithLock.mockClear(); + openUrlMock.mockClear(); + isRemoteEnvironmentMock.mockReset().mockReturnValue(false); + }); + + it("returns suggested default model for local mode", async () => { + const prompter = createModePrompter("local"); + + const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); + vi.stubGlobal("fetch", fetchMock); + + const result = await promptAndConfigureOllama({ cfg: {}, prompter }); + + expect(result.defaultModelId).toBe("glm-4.7-flash"); + }); + + 
it("returns suggested default model for remote mode", async () => { + const prompter = createModePrompter("remote"); + + const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); + vi.stubGlobal("fetch", fetchMock); + + const result = await promptAndConfigureOllama({ cfg: {}, prompter }); + + expect(result.defaultModelId).toBe("kimi-k2.5:cloud"); + }); + + it("mode selection affects model ordering (local)", async () => { + const prompter = createModePrompter("local"); + + const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b", "glm-4.7-flash"] }); + vi.stubGlobal("fetch", fetchMock); + + const result = await promptAndConfigureOllama({ cfg: {}, prompter }); + + expect(result.defaultModelId).toBe("glm-4.7-flash"); + const modelIds = result.config.models?.providers?.ollama?.models?.map((m) => m.id); + expect(modelIds?.[0]).toBe("glm-4.7-flash"); + expect(modelIds).toContain("llama3:8b"); + }); + + it("cloud+local mode triggers /api/me check and opens sign-in URL", async () => { + const prompter = createModePrompter("remote", { confirm: true }); + const fetchMock = createSignedOutRemoteFetchMock(); + vi.stubGlobal("fetch", fetchMock); + + await promptAndConfigureOllama({ cfg: {}, prompter }); + + expect(openUrlMock).toHaveBeenCalledWith("https://ollama.com/signin"); + expect(prompter.confirm).toHaveBeenCalled(); + }); + + it("cloud+local mode does not open browser in remote environment", async () => { + isRemoteEnvironmentMock.mockReturnValue(true); + const prompter = createModePrompter("remote", { confirm: true }); + const fetchMock = createSignedOutRemoteFetchMock(); + vi.stubGlobal("fetch", fetchMock); + + await promptAndConfigureOllama({ cfg: {}, prompter }); + + expect(openUrlMock).not.toHaveBeenCalled(); + }); + + it("local mode does not trigger cloud auth", async () => { + const prompter = createModePrompter("local"); + + const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); + vi.stubGlobal("fetch", fetchMock); + + await 
promptAndConfigureOllama({ cfg: {}, prompter }); + + expect(fetchMock).toHaveBeenCalledTimes(2); + expect(fetchMock.mock.calls[0]?.[0]).toContain("/api/tags"); + expect(fetchMock.mock.calls.some((call) => requestUrl(call[0]).includes("/api/me"))).toBe( + false, + ); + }); + + it("suggested models appear first in model list (cloud+local)", async () => { + const prompter = { + text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), + select: vi.fn().mockResolvedValueOnce("remote"), + note: vi.fn(async () => undefined), + } as unknown as WizardPrompter; + + const fetchMock = createOllamaFetchMock({ + tags: ["llama3:8b", "glm-4.7-flash", "deepseek-r1:14b"], + }); + vi.stubGlobal("fetch", fetchMock); + + const result = await promptAndConfigureOllama({ cfg: {}, prompter }); + const modelIds = result.config.models?.providers?.ollama?.models?.map((m) => m.id); + + expect(modelIds).toEqual([ + "kimi-k2.5:cloud", + "minimax-m2.5:cloud", + "glm-5:cloud", + "llama3:8b", + "glm-4.7-flash", + "deepseek-r1:14b", + ]); + }); + + it("uses /api/show context windows when building Ollama model configs", async () => { + const prompter = { + text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), + select: vi.fn().mockResolvedValueOnce("local"), + note: vi.fn(async () => undefined), + } as unknown as WizardPrompter; + + const fetchMock = createOllamaFetchMock({ + tags: ["llama3:8b"], + show: { "llama3:8b": 65536 }, + }); + vi.stubGlobal("fetch", fetchMock); + + const result = await promptAndConfigureOllama({ cfg: {}, prompter }); + const model = result.config.models?.providers?.ollama?.models?.find( + (m) => m.id === "llama3:8b", + ); + + expect(model?.contextWindow).toBe(65536); + }); + + describe("ensureOllamaModelPulled", () => { + it("pulls model when not available locally", async () => { + const progress = { update: vi.fn(), stop: vi.fn() }; + const prompter = { + progress: vi.fn(() => progress), + } as unknown as WizardPrompter; + + const fetchMock = 
createOllamaFetchMock({ + tags: ["llama3:8b"], + pullResponse: new Response('{"status":"success"}\n', { status: 200 }), + }); + vi.stubGlobal("fetch", fetchMock); + + await ensureOllamaModelPulled({ + config: createDefaultOllamaConfig("ollama/glm-4.7-flash"), + prompter, + }); + + expect(fetchMock).toHaveBeenCalledTimes(2); + expect(fetchMock.mock.calls[1][0]).toContain("/api/pull"); + }); + + it("skips pull when model is already available", async () => { + const prompter = {} as unknown as WizardPrompter; + + const fetchMock = createOllamaFetchMock({ tags: ["glm-4.7-flash"] }); + vi.stubGlobal("fetch", fetchMock); + + await ensureOllamaModelPulled({ + config: createDefaultOllamaConfig("ollama/glm-4.7-flash"), + prompter, + }); + + expect(fetchMock).toHaveBeenCalledTimes(1); + }); + + it("skips pull for cloud models", async () => { + const prompter = {} as unknown as WizardPrompter; + const fetchMock = vi.fn(); + vi.stubGlobal("fetch", fetchMock); + + await ensureOllamaModelPulled({ + config: createDefaultOllamaConfig("ollama/kimi-k2.5:cloud"), + prompter, + }); + + expect(fetchMock).not.toHaveBeenCalled(); + }); + + it("skips when model is not an ollama model", async () => { + const prompter = {} as unknown as WizardPrompter; + const fetchMock = vi.fn(); + vi.stubGlobal("fetch", fetchMock); + + await ensureOllamaModelPulled({ + config: { + agents: { defaults: { model: { primary: "openai/gpt-4o" } } }, + }, + prompter, + }); + + expect(fetchMock).not.toHaveBeenCalled(); + }); + }); + + it("uses discovered model when requested non-interactive download fails", async () => { + const fetchMock = createOllamaFetchMock({ + tags: ["qwen2.5-coder:7b"], + pullResponse: new Response('{"error":"disk full"}\n', { status: 200 }), + }); + vi.stubGlobal("fetch", fetchMock); + const runtime = createRuntime(); + + const result = await configureOllamaNonInteractive({ + nextConfig: { + agents: { + defaults: { + model: { + primary: "openai/gpt-4o-mini", + fallbacks: 
["anthropic/claude-sonnet-4-5"], + }, + }, + }, + }, + opts: { + customBaseUrl: "http://127.0.0.1:11434", + customModelId: "missing-model", + }, + runtime, + }); + + expect(runtime.error).toHaveBeenCalledWith("Download failed: disk full"); + expect(result.agents?.defaults?.model).toEqual({ + primary: "ollama/qwen2.5-coder:7b", + fallbacks: ["anthropic/claude-sonnet-4-5"], + }); + }); + + it("normalizes ollama/ prefix in non-interactive custom model download", async () => { + const fetchMock = createOllamaFetchMock({ + tags: [], + pullResponse: new Response('{"status":"success"}\n', { status: 200 }), + }); + vi.stubGlobal("fetch", fetchMock); + const runtime = createRuntime(); + + const result = await configureOllamaNonInteractive({ + nextConfig: {}, + opts: { + customBaseUrl: "http://127.0.0.1:11434", + customModelId: "ollama/llama3.2:latest", + }, + runtime, + }); + + const pullRequest = fetchMock.mock.calls[1]?.[1]; + expect(JSON.parse(requestBodyText(pullRequest?.body))).toEqual({ name: "llama3.2:latest" }); + expect(result.agents?.defaults?.model).toEqual( + expect.objectContaining({ primary: "ollama/llama3.2:latest" }), + ); + }); + + it("accepts cloud models in non-interactive mode without pulling", async () => { + const fetchMock = createOllamaFetchMock({ tags: [] }); + vi.stubGlobal("fetch", fetchMock); + const runtime = createRuntime(); + + const result = await configureOllamaNonInteractive({ + nextConfig: {}, + opts: { + customBaseUrl: "http://127.0.0.1:11434", + customModelId: "kimi-k2.5:cloud", + }, + runtime, + }); + + expect(fetchMock).toHaveBeenCalledTimes(1); + expect(result.models?.providers?.ollama?.models?.map((model) => model.id)).toContain( + "kimi-k2.5:cloud", + ); + expect(result.agents?.defaults?.model).toEqual( + expect.objectContaining({ primary: "ollama/kimi-k2.5:cloud" }), + ); + }); + + it("exits when Ollama is unreachable", async () => { + const fetchMock = createOllamaFetchMock({ + tagsError: new Error("connect ECONNREFUSED"), + }); + 
vi.stubGlobal("fetch", fetchMock); + + const runtime = { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), + } as unknown as RuntimeEnv; + const nextConfig = {}; + + const result = await configureOllamaNonInteractive({ + nextConfig, + opts: { + customBaseUrl: "http://127.0.0.1:11435", + customModelId: "llama3.2:latest", + }, + runtime, + }); + + expect(runtime.error).toHaveBeenCalledWith( + expect.stringContaining("Ollama could not be reached at http://127.0.0.1:11435."), + ); + expect(runtime.exit).toHaveBeenCalledWith(1); + expect(result).toBe(nextConfig); + }); +}); diff --git a/src/commands/ollama-setup.ts b/src/commands/ollama-setup.ts new file mode 100644 index 00000000000..3308dfcf067 --- /dev/null +++ b/src/commands/ollama-setup.ts @@ -0,0 +1,531 @@ +import { upsertAuthProfileWithLock } from "../agents/auth-profiles.js"; +import { + OLLAMA_DEFAULT_BASE_URL, + buildOllamaModelDefinition, + enrichOllamaModelsWithContext, + fetchOllamaModels, + resolveOllamaApiBase, + type OllamaModelWithContext, +} from "../agents/ollama-models.js"; +import type { OpenClawConfig } from "../config/config.js"; +import type { RuntimeEnv } from "../runtime.js"; +import { WizardCancelledError, type WizardPrompter } from "../wizard/prompts.js"; +import { isRemoteEnvironment } from "./oauth-env.js"; +import { applyAgentDefaultModelPrimary } from "./onboard-auth.config-shared.js"; +import { openUrl } from "./onboard-helpers.js"; +import type { OnboardMode, OnboardOptions } from "./onboard-types.js"; + +export { OLLAMA_DEFAULT_BASE_URL } from "../agents/ollama-models.js"; +export const OLLAMA_DEFAULT_MODEL = "glm-4.7-flash"; + +const OLLAMA_SUGGESTED_MODELS_LOCAL = ["glm-4.7-flash"]; +const OLLAMA_SUGGESTED_MODELS_CLOUD = ["kimi-k2.5:cloud", "minimax-m2.5:cloud", "glm-5:cloud"]; + +function normalizeOllamaModelName(value: string | undefined): string | undefined { + const trimmed = value?.trim(); + if (!trimmed) { + return undefined; + } + if 
(trimmed.toLowerCase().startsWith("ollama/")) { + const withoutPrefix = trimmed.slice("ollama/".length).trim(); + return withoutPrefix || undefined; + } + return trimmed; +} + +function isOllamaCloudModel(modelName: string | undefined): boolean { + return Boolean(modelName?.trim().toLowerCase().endsWith(":cloud")); +} + +function formatOllamaPullStatus(status: string): { text: string; hidePercent: boolean } { + const trimmed = status.trim(); + const partStatusMatch = trimmed.match(/^([a-z-]+)\s+(?:sha256:)?[a-f0-9]{8,}$/i); + if (partStatusMatch) { + return { text: `${partStatusMatch[1]} part`, hidePercent: false }; + } + if (/^verifying\b.*\bdigest\b/i.test(trimmed)) { + return { text: "verifying digest", hidePercent: true }; + } + return { text: trimmed, hidePercent: false }; +} + +type OllamaCloudAuthResult = { + signedIn: boolean; + signinUrl?: string; +}; + +/** Check if the user is signed in to Ollama cloud via /api/me. */ +async function checkOllamaCloudAuth(baseUrl: string): Promise { + try { + const apiBase = resolveOllamaApiBase(baseUrl); + const response = await fetch(`${apiBase}/api/me`, { + method: "POST", + signal: AbortSignal.timeout(5000), + }); + if (response.status === 401) { + // 401 body contains { error, signin_url } + const data = (await response.json()) as { signin_url?: string }; + return { signedIn: false, signinUrl: data.signin_url }; + } + if (!response.ok) { + return { signedIn: false }; + } + return { signedIn: true }; + } catch { + // /api/me not supported or unreachable — fail closed so cloud mode + // doesn't silently skip auth; the caller handles the fallback. 
+ return { signedIn: false }; + } +} + +type OllamaPullChunk = { + status?: string; + total?: number; + completed?: number; + error?: string; +}; + +type OllamaPullFailureKind = "http" | "no-body" | "chunk-error" | "network"; +type OllamaPullResult = + | { ok: true } + | { + ok: false; + kind: OllamaPullFailureKind; + message: string; + }; + +async function pullOllamaModelCore(params: { + baseUrl: string; + modelName: string; + onStatus?: (status: string, percent: number | null) => void; +}): Promise { + const { onStatus } = params; + const baseUrl = resolveOllamaApiBase(params.baseUrl); + const modelName = normalizeOllamaModelName(params.modelName) ?? params.modelName.trim(); + try { + const response = await fetch(`${baseUrl}/api/pull`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ name: modelName }), + }); + if (!response.ok) { + return { + ok: false, + kind: "http", + message: `Failed to download ${modelName} (HTTP ${response.status})`, + }; + } + if (!response.body) { + return { + ok: false, + kind: "no-body", + message: `Failed to download ${modelName} (no response body)`, + }; + } + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + const layers = new Map(); + + const parseLine = (line: string): OllamaPullResult => { + const trimmed = line.trim(); + if (!trimmed) { + return { ok: true }; + } + try { + const chunk = JSON.parse(trimmed) as OllamaPullChunk; + if (chunk.error) { + return { + ok: false, + kind: "chunk-error", + message: `Download failed: ${chunk.error}`, + }; + } + if (!chunk.status) { + return { ok: true }; + } + if (chunk.total && chunk.completed !== undefined) { + layers.set(chunk.status, { total: chunk.total, completed: chunk.completed }); + let totalSum = 0; + let completedSum = 0; + for (const layer of layers.values()) { + totalSum += layer.total; + completedSum += layer.completed; + } + const percent = totalSum > 0 ? 
Math.round((completedSum / totalSum) * 100) : null; + onStatus?.(chunk.status, percent); + } else { + onStatus?.(chunk.status, null); + } + } catch { + // Ignore malformed lines from streaming output. + } + return { ok: true }; + }; + + for (;;) { + const { done, value } = await reader.read(); + if (done) { + break; + } + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop() ?? ""; + for (const line of lines) { + const parsed = parseLine(line); + if (!parsed.ok) { + return parsed; + } + } + } + + const trailing = buffer.trim(); + if (trailing) { + const parsed = parseLine(trailing); + if (!parsed.ok) { + return parsed; + } + } + + return { ok: true }; + } catch (err) { + const reason = err instanceof Error ? err.message : String(err); + return { + ok: false, + kind: "network", + message: `Failed to download ${modelName}: ${reason}`, + }; + } +} + +/** Pull a model from Ollama, streaming progress updates. */ +async function pullOllamaModel( + baseUrl: string, + modelName: string, + prompter: WizardPrompter, +): Promise { + const spinner = prompter.progress(`Downloading ${modelName}...`); + const result = await pullOllamaModelCore({ + baseUrl, + modelName, + onStatus: (status, percent) => { + const displayStatus = formatOllamaPullStatus(status); + if (displayStatus.hidePercent) { + spinner.update(`Downloading ${modelName} - ${displayStatus.text}`); + } else { + spinner.update(`Downloading ${modelName} - ${displayStatus.text} - ${percent ?? 
0}%`); + } + }, + }); + if (!result.ok) { + spinner.stop(result.message); + return false; + } + spinner.stop(`Downloaded ${modelName}`); + return true; +} + +async function pullOllamaModelNonInteractive( + baseUrl: string, + modelName: string, + runtime: RuntimeEnv, +): Promise { + runtime.log(`Downloading ${modelName}...`); + const result = await pullOllamaModelCore({ baseUrl, modelName }); + if (!result.ok) { + runtime.error(result.message); + return false; + } + runtime.log(`Downloaded ${modelName}`); + return true; +} + +function buildOllamaModelsConfig( + modelNames: string[], + discoveredModelsByName?: Map, +) { + return modelNames.map((name) => + buildOllamaModelDefinition(name, discoveredModelsByName?.get(name)?.contextWindow), + ); +} + +function applyOllamaProviderConfig( + cfg: OpenClawConfig, + baseUrl: string, + modelNames: string[], + discoveredModelsByName?: Map, +): OpenClawConfig { + return { + ...cfg, + models: { + ...cfg.models, + mode: cfg.models?.mode ?? "merge", + providers: { + ...cfg.models?.providers, + ollama: { + baseUrl, + api: "ollama", + apiKey: "OLLAMA_API_KEY", // pragma: allowlist secret + models: buildOllamaModelsConfig(modelNames, discoveredModelsByName), + }, + }, + }, + }; +} + +async function storeOllamaCredential(agentDir?: string): Promise { + await upsertAuthProfileWithLock({ + profileId: "ollama:default", + credential: { type: "api_key", provider: "ollama", key: "ollama-local" }, + agentDir, + }); +} + +/** + * Interactive: prompt for base URL, discover models, configure provider. + * Model selection is handled by the standard model picker downstream. + */ +export async function promptAndConfigureOllama(params: { + cfg: OpenClawConfig; + prompter: WizardPrompter; +}): Promise<{ config: OpenClawConfig; defaultModelId: string }> { + const { prompter } = params; + + // 1. 
Prompt base URL + const baseUrlRaw = await prompter.text({ + message: "Ollama base URL", + initialValue: OLLAMA_DEFAULT_BASE_URL, + placeholder: OLLAMA_DEFAULT_BASE_URL, + validate: (value) => (value?.trim() ? undefined : "Required"), + }); + const configuredBaseUrl = String(baseUrlRaw ?? "") + .trim() + .replace(/\/+$/, ""); + const baseUrl = resolveOllamaApiBase(configuredBaseUrl); + + // 2. Check reachability + const { reachable, models } = await fetchOllamaModels(baseUrl); + + if (!reachable) { + await prompter.note( + [ + `Ollama could not be reached at ${baseUrl}.`, + "Download it at https://ollama.com/download", + "", + "Start Ollama and re-run onboarding.", + ].join("\n"), + "Ollama", + ); + throw new WizardCancelledError("Ollama not reachable"); + } + + const enrichedModels = await enrichOllamaModelsWithContext(baseUrl, models.slice(0, 50)); + const discoveredModelsByName = new Map(enrichedModels.map((model) => [model.name, model])); + const modelNames = models.map((m) => m.name); + + // 3. Mode selection + const mode = (await prompter.select({ + message: "Ollama mode", + options: [ + { value: "remote", label: "Cloud + Local", hint: "Ollama cloud models + local models" }, + { value: "local", label: "Local", hint: "Local models only" }, + ], + })) as OnboardMode; + + // 4. 
Cloud auth — check /api/me upfront for remote (cloud+local) mode + let cloudAuthVerified = false; + if (mode === "remote") { + const authResult = await checkOllamaCloudAuth(baseUrl); + if (!authResult.signedIn) { + if (authResult.signinUrl) { + if (!isRemoteEnvironment()) { + await openUrl(authResult.signinUrl); + } + await prompter.note( + ["Sign in to Ollama Cloud:", authResult.signinUrl].join("\n"), + "Ollama Cloud", + ); + const confirmed = await prompter.confirm({ + message: "Have you signed in?", + }); + if (!confirmed) { + throw new WizardCancelledError("Ollama cloud sign-in cancelled"); + } + // Re-check after user claims sign-in + const recheck = await checkOllamaCloudAuth(baseUrl); + if (!recheck.signedIn) { + throw new WizardCancelledError("Ollama cloud sign-in required"); + } + cloudAuthVerified = true; + } else { + // No signin URL available (older server, unreachable /api/me, or custom gateway). + await prompter.note( + [ + "Could not verify Ollama Cloud authentication.", + "Cloud models may not work until you sign in at https://ollama.com.", + ].join("\n"), + "Ollama Cloud", + ); + const continueAnyway = await prompter.confirm({ + message: "Continue without cloud auth?", + }); + if (!continueAnyway) { + throw new WizardCancelledError("Ollama cloud auth could not be verified"); + } + // Cloud auth unverified — fall back to local defaults so the model + // picker doesn't steer toward cloud models that may fail. + } + } else { + cloudAuthVerified = true; + } + } + + // 5. Model ordering — suggested models first. + // Use cloud defaults only when auth was actually verified; otherwise fall + // back to local defaults so the user isn't steered toward cloud models + // that may fail at runtime. + const suggestedModels = + mode === "local" || !cloudAuthVerified + ? 
OLLAMA_SUGGESTED_MODELS_LOCAL
+      : OLLAMA_SUGGESTED_MODELS_CLOUD;
+  const orderedModelNames = [
+    ...suggestedModels,
+    ...modelNames.filter((name) => !suggestedModels.includes(name)),
+  ];
+
+  const defaultModelId = suggestedModels[0] ?? OLLAMA_DEFAULT_MODEL;
+  const config = applyOllamaProviderConfig(
+    params.cfg,
+    baseUrl,
+    orderedModelNames,
+    discoveredModelsByName,
+  );
+  return { config, defaultModelId };
+}
+
+/** Non-interactive: auto-discover models and configure provider. */
+export async function configureOllamaNonInteractive(params: {
+  nextConfig: OpenClawConfig;
+  opts: OnboardOptions;
+  runtime: RuntimeEnv;
+}): Promise<OpenClawConfig> {
+  const { opts, runtime } = params;
+  const configuredBaseUrl = (opts.customBaseUrl?.trim() || OLLAMA_DEFAULT_BASE_URL).replace(
+    /\/+$/,
+    "",
+  );
+  const baseUrl = resolveOllamaApiBase(configuredBaseUrl);
+
+  const { reachable, models } = await fetchOllamaModels(baseUrl);
+  const explicitModel = normalizeOllamaModelName(opts.customModelId);
+
+  if (!reachable) {
+    runtime.error(
+      [
+        `Ollama could not be reached at ${baseUrl}.`,
+        "Download it at https://ollama.com/download",
+      ].join("\n"),
+    );
+    runtime.exit(1);
+    return params.nextConfig;
+  }
+
+  await storeOllamaCredential();
+
+  const enrichedModels = await enrichOllamaModelsWithContext(baseUrl, models.slice(0, 50));
+  const discoveredModelsByName = new Map(enrichedModels.map((model) => [model.name, model]));
+  const modelNames = models.map((m) => m.name);
+
+  // Apply local suggested model ordering.
+  const suggestedModels = OLLAMA_SUGGESTED_MODELS_LOCAL;
+  const orderedModelNames = [
+    ...suggestedModels,
+    ...modelNames.filter((name) => !suggestedModels.includes(name)),
+  ];
+
+  const requestedDefaultModelId = explicitModel ?? 
suggestedModels[0]; + let pulledRequestedModel = false; + const availableModelNames = new Set(modelNames); + const requestedCloudModel = isOllamaCloudModel(requestedDefaultModelId); + + if (requestedCloudModel) { + availableModelNames.add(requestedDefaultModelId); + } + + // Pull if model not in discovered list and Ollama is reachable + if (!requestedCloudModel && !modelNames.includes(requestedDefaultModelId)) { + pulledRequestedModel = await pullOllamaModelNonInteractive( + baseUrl, + requestedDefaultModelId, + runtime, + ); + if (pulledRequestedModel) { + availableModelNames.add(requestedDefaultModelId); + } + } + + let allModelNames = orderedModelNames; + let defaultModelId = requestedDefaultModelId; + if ( + (pulledRequestedModel || requestedCloudModel) && + !allModelNames.includes(requestedDefaultModelId) + ) { + allModelNames = [...allModelNames, requestedDefaultModelId]; + } + if (!availableModelNames.has(requestedDefaultModelId)) { + if (availableModelNames.size > 0) { + const firstAvailableModel = + allModelNames.find((name) => availableModelNames.has(name)) ?? + Array.from(availableModelNames)[0]; + defaultModelId = firstAvailableModel; + runtime.log( + `Ollama model ${requestedDefaultModelId} was not available; using ${defaultModelId} instead.`, + ); + } else { + runtime.error( + [ + `No Ollama models are available at ${baseUrl}.`, + "Pull a model first, then re-run onboarding.", + ].join("\n"), + ); + runtime.exit(1); + return params.nextConfig; + } + } + + const config = applyOllamaProviderConfig( + params.nextConfig, + baseUrl, + allModelNames, + discoveredModelsByName, + ); + const modelRef = `ollama/${defaultModelId}`; + runtime.log(`Default Ollama model: ${defaultModelId}`); + return applyAgentDefaultModelPrimary(config, modelRef); +} + +/** Pull the configured default Ollama model if it isn't already available locally. 
*/
+export async function ensureOllamaModelPulled(params: {
+  config: OpenClawConfig;
+  prompter: WizardPrompter;
+}): Promise<void> {
+  const modelCfg = params.config.agents?.defaults?.model;
+  const modelId = typeof modelCfg === "string" ? modelCfg : modelCfg?.primary;
+  if (!modelId?.startsWith("ollama/")) {
+    return;
+  }
+  const baseUrl = params.config.models?.providers?.ollama?.baseUrl ?? OLLAMA_DEFAULT_BASE_URL;
+  const modelName = modelId.slice("ollama/".length);
+  if (isOllamaCloudModel(modelName)) {
+    return;
+  }
+  const { models } = await fetchOllamaModels(baseUrl);
+  if (models.some((m) => m.name === modelName)) {
+    return;
+  }
+  const pulled = await pullOllamaModel(baseUrl, modelName, params.prompter);
+  if (!pulled) {
+    throw new WizardCancelledError("Failed to download selected Ollama model");
+  }
+}
diff --git a/src/commands/onboard-auth.config-core.ts b/src/commands/onboard-auth.config-core.ts
index 103343d5914..8c41bfb939c 100644
--- a/src/commands/onboard-auth.config-core.ts
+++ b/src/commands/onboard-auth.config-core.ts
@@ -65,6 +65,7 @@ import {
   buildZaiModelDefinition,
   buildMoonshotModelDefinition,
   buildXaiModelDefinition,
+  buildModelStudioModelDefinition,
   MISTRAL_BASE_URL,
   MISTRAL_DEFAULT_MODEL_ID,
   QIANFAN_BASE_URL,
@@ -79,8 +80,34 @@ import {
   resolveZaiBaseUrl,
   XAI_BASE_URL,
   XAI_DEFAULT_MODEL_ID,
+  MODELSTUDIO_CN_BASE_URL,
+  MODELSTUDIO_GLOBAL_BASE_URL,
+  MODELSTUDIO_DEFAULT_MODEL_REF,
 } from "./onboard-auth.models.js";
 
+function mergeProviderModels<T extends { id: string }>(
+  existingProvider: Record<string, unknown> | undefined,
+  defaultModels: T[],
+): T[] {
+  const existingModels = Array.isArray(existingProvider?.models)
+    ? 
(existingProvider.models as T[]) + : []; + const mergedModels = [...existingModels]; + const seen = new Set(existingModels.map((model) => model.id)); + for (const model of defaultModels) { + if (!seen.has(model.id)) { + mergedModels.push(model); + seen.add(model.id); + } + } + return mergedModels; +} + +function getNormalizedProviderApiKey(existingProvider: Record | undefined) { + const { apiKey } = (existingProvider ?? {}) as { apiKey?: string }; + return typeof apiKey === "string" ? apiKey.trim() || undefined : undefined; +} + export function applyZaiProviderConfig( cfg: OpenClawConfig, params?: { endpoint?: string; modelId?: string }, @@ -96,7 +123,6 @@ export function applyZaiProviderConfig( const providers = { ...cfg.models?.providers }; const existingProvider = providers.zai; - const existingModels = Array.isArray(existingProvider?.models) ? existingProvider.models : []; const defaultModels = [ buildZaiModelDefinition({ id: "glm-5" }), @@ -105,21 +131,13 @@ export function applyZaiProviderConfig( buildZaiModelDefinition({ id: "glm-4.7-flashx" }), ]; - const mergedModels = [...existingModels]; - const seen = new Set(existingModels.map((m) => m.id)); - for (const model of defaultModels) { - if (!seen.has(model.id)) { - mergedModels.push(model); - seen.add(model.id); - } - } + const mergedModels = mergeProviderModels(existingProvider, defaultModels); - const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< + const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< string, unknown > as { apiKey?: string }; - const resolvedApiKey = typeof existingApiKey === "string" ? existingApiKey : undefined; - const normalizedApiKey = resolvedApiKey?.trim(); + const normalizedApiKey = getNormalizedProviderApiKey(existingProvider); const baseUrl = params?.endpoint ? 
resolveZaiBaseUrl(params.endpoint) @@ -252,12 +270,11 @@ export function applySyntheticProviderConfig(cfg: OpenClawConfig): OpenClawConfi (model) => !existingModels.some((existing) => existing.id === model.id), ), ]; - const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< + const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< string, unknown > as { apiKey?: string }; - const resolvedApiKey = typeof existingApiKey === "string" ? existingApiKey : undefined; - const normalizedApiKey = resolvedApiKey?.trim(); + const normalizedApiKey = getNormalizedProviderApiKey(existingProvider); providers.synthetic = { ...existingProviderRest, baseUrl: SYNTHETIC_BASE_URL, @@ -573,3 +590,83 @@ export function applyQianfanConfig(cfg: OpenClawConfig): OpenClawConfig { const next = applyQianfanProviderConfig(cfg); return applyAgentDefaultModelPrimary(next, QIANFAN_DEFAULT_MODEL_REF); } + +// Alibaba Cloud Model Studio Coding Plan + +function applyModelStudioProviderConfigWithBaseUrl( + cfg: OpenClawConfig, + baseUrl: string, +): OpenClawConfig { + const models = { ...cfg.agents?.defaults?.models }; + + const modelStudioModelIds = [ + "qwen3.5-plus", + "qwen3-max-2026-01-23", + "qwen3-coder-next", + "qwen3-coder-plus", + "MiniMax-M2.5", + "glm-5", + "glm-4.7", + "kimi-k2.5", + ]; + for (const modelId of modelStudioModelIds) { + const modelRef = `modelstudio/${modelId}`; + if (!models[modelRef]) { + models[modelRef] = {}; + } + } + models[MODELSTUDIO_DEFAULT_MODEL_REF] = { + ...models[MODELSTUDIO_DEFAULT_MODEL_REF], + alias: models[MODELSTUDIO_DEFAULT_MODEL_REF]?.alias ?? 
"Qwen", + }; + + const providers = { ...cfg.models?.providers }; + const existingProvider = providers.modelstudio; + + const defaultModels = [ + buildModelStudioModelDefinition({ id: "qwen3.5-plus" }), + buildModelStudioModelDefinition({ id: "qwen3-max-2026-01-23" }), + buildModelStudioModelDefinition({ id: "qwen3-coder-next" }), + buildModelStudioModelDefinition({ id: "qwen3-coder-plus" }), + buildModelStudioModelDefinition({ id: "MiniMax-M2.5" }), + buildModelStudioModelDefinition({ id: "glm-5" }), + buildModelStudioModelDefinition({ id: "glm-4.7" }), + buildModelStudioModelDefinition({ id: "kimi-k2.5" }), + ]; + + const mergedModels = mergeProviderModels(existingProvider, defaultModels); + + const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< + string, + unknown + > as { apiKey?: string }; + const normalizedApiKey = getNormalizedProviderApiKey(existingProvider); + + providers.modelstudio = { + ...existingProviderRest, + baseUrl, + api: "openai-completions", + ...(normalizedApiKey ? { apiKey: normalizedApiKey } : {}), + models: mergedModels.length > 0 ? 
mergedModels : defaultModels, + }; + + return applyOnboardAuthAgentModelsAndProviders(cfg, { agentModels: models, providers }); +} + +export function applyModelStudioProviderConfig(cfg: OpenClawConfig): OpenClawConfig { + return applyModelStudioProviderConfigWithBaseUrl(cfg, MODELSTUDIO_GLOBAL_BASE_URL); +} + +export function applyModelStudioProviderConfigCn(cfg: OpenClawConfig): OpenClawConfig { + return applyModelStudioProviderConfigWithBaseUrl(cfg, MODELSTUDIO_CN_BASE_URL); +} + +export function applyModelStudioConfig(cfg: OpenClawConfig): OpenClawConfig { + const next = applyModelStudioProviderConfig(cfg); + return applyAgentDefaultModelPrimary(next, MODELSTUDIO_DEFAULT_MODEL_REF); +} + +export function applyModelStudioConfigCn(cfg: OpenClawConfig): OpenClawConfig { + const next = applyModelStudioProviderConfigCn(cfg); + return applyAgentDefaultModelPrimary(next, MODELSTUDIO_DEFAULT_MODEL_REF); +} diff --git a/src/commands/onboard-auth.config-minimax.ts b/src/commands/onboard-auth.config-minimax.ts index 04c109f7e56..14ec734592b 100644 --- a/src/commands/onboard-auth.config-minimax.ts +++ b/src/commands/onboard-auth.config-minimax.ts @@ -1,5 +1,4 @@ import type { OpenClawConfig } from "../config/config.js"; -import { toAgentModelListLike } from "../config/model-input.js"; import type { ModelProviderConfig } from "../config/types.models.js"; import { applyAgentDefaultModelPrimary, @@ -7,154 +6,10 @@ import { } from "./onboard-auth.config-shared.js"; import { buildMinimaxApiModelDefinition, - buildMinimaxModelDefinition, - DEFAULT_MINIMAX_BASE_URL, - DEFAULT_MINIMAX_CONTEXT_WINDOW, - DEFAULT_MINIMAX_MAX_TOKENS, MINIMAX_API_BASE_URL, MINIMAX_CN_API_BASE_URL, - MINIMAX_HOSTED_COST, - MINIMAX_HOSTED_MODEL_ID, - MINIMAX_HOSTED_MODEL_REF, - MINIMAX_LM_STUDIO_COST, } from "./onboard-auth.models.js"; -export function applyMinimaxProviderConfig(cfg: OpenClawConfig): OpenClawConfig { - const models = { ...cfg.agents?.defaults?.models }; - 
models["anthropic/claude-opus-4-6"] = { - ...models["anthropic/claude-opus-4-6"], - alias: models["anthropic/claude-opus-4-6"]?.alias ?? "Opus", - }; - models["lmstudio/minimax-m2.5-gs32"] = { - ...models["lmstudio/minimax-m2.5-gs32"], - alias: models["lmstudio/minimax-m2.5-gs32"]?.alias ?? "Minimax", - }; - - const providers = { ...cfg.models?.providers }; - if (!providers.lmstudio) { - providers.lmstudio = { - baseUrl: "http://127.0.0.1:1234/v1", - apiKey: "lmstudio", - api: "openai-responses", - models: [ - buildMinimaxModelDefinition({ - id: "minimax-m2.5-gs32", - name: "MiniMax M2.5 GS32", - reasoning: false, - cost: MINIMAX_LM_STUDIO_COST, - contextWindow: 196608, - maxTokens: 8192, - }), - ], - }; - } - - return applyOnboardAuthAgentModelsAndProviders(cfg, { agentModels: models, providers }); -} - -export function applyMinimaxHostedProviderConfig( - cfg: OpenClawConfig, - params?: { baseUrl?: string }, -): OpenClawConfig { - const models = { ...cfg.agents?.defaults?.models }; - models[MINIMAX_HOSTED_MODEL_REF] = { - ...models[MINIMAX_HOSTED_MODEL_REF], - alias: models[MINIMAX_HOSTED_MODEL_REF]?.alias ?? "Minimax", - }; - - const providers = { ...cfg.models?.providers }; - const hostedModel = buildMinimaxModelDefinition({ - id: MINIMAX_HOSTED_MODEL_ID, - cost: MINIMAX_HOSTED_COST, - contextWindow: DEFAULT_MINIMAX_CONTEXT_WINDOW, - maxTokens: DEFAULT_MINIMAX_MAX_TOKENS, - }); - const existingProvider = providers.minimax; - const existingModels = Array.isArray(existingProvider?.models) ? existingProvider.models : []; - const hasHostedModel = existingModels.some((model) => model.id === MINIMAX_HOSTED_MODEL_ID); - const mergedModels = hasHostedModel ? existingModels : [...existingModels, hostedModel]; - providers.minimax = { - ...existingProvider, - baseUrl: params?.baseUrl?.trim() || DEFAULT_MINIMAX_BASE_URL, - apiKey: "minimax", - api: "openai-completions", - models: mergedModels.length > 0 ? 
mergedModels : [hostedModel], - }; - - return applyOnboardAuthAgentModelsAndProviders(cfg, { agentModels: models, providers }); -} - -export function applyMinimaxConfig(cfg: OpenClawConfig): OpenClawConfig { - const next = applyMinimaxProviderConfig(cfg); - return applyAgentDefaultModelPrimary(next, "lmstudio/minimax-m2.5-gs32"); -} - -export function applyMinimaxHostedConfig( - cfg: OpenClawConfig, - params?: { baseUrl?: string }, -): OpenClawConfig { - const next = applyMinimaxHostedProviderConfig(cfg, params); - return { - ...next, - agents: { - ...next.agents, - defaults: { - ...next.agents?.defaults, - model: { - ...toAgentModelListLike(next.agents?.defaults?.model), - primary: MINIMAX_HOSTED_MODEL_REF, - }, - }, - }, - }; -} - -// MiniMax Anthropic-compatible API (platform.minimax.io/anthropic) -export function applyMinimaxApiProviderConfig( - cfg: OpenClawConfig, - modelId: string = "MiniMax-M2.5", -): OpenClawConfig { - return applyMinimaxApiProviderConfigWithBaseUrl(cfg, { - providerId: "minimax", - modelId, - baseUrl: MINIMAX_API_BASE_URL, - }); -} - -export function applyMinimaxApiConfig( - cfg: OpenClawConfig, - modelId: string = "MiniMax-M2.5", -): OpenClawConfig { - return applyMinimaxApiConfigWithBaseUrl(cfg, { - providerId: "minimax", - modelId, - baseUrl: MINIMAX_API_BASE_URL, - }); -} - -// MiniMax China API (api.minimaxi.com) -export function applyMinimaxApiProviderConfigCn( - cfg: OpenClawConfig, - modelId: string = "MiniMax-M2.5", -): OpenClawConfig { - return applyMinimaxApiProviderConfigWithBaseUrl(cfg, { - providerId: "minimax-cn", - modelId, - baseUrl: MINIMAX_CN_API_BASE_URL, - }); -} - -export function applyMinimaxApiConfigCn( - cfg: OpenClawConfig, - modelId: string = "MiniMax-M2.5", -): OpenClawConfig { - return applyMinimaxApiConfigWithBaseUrl(cfg, { - providerId: "minimax-cn", - modelId, - baseUrl: MINIMAX_CN_API_BASE_URL, - }); -} - type MinimaxApiProviderConfigParams = { providerId: string; modelId: string; @@ -193,17 +48,7 @@ 
function applyMinimaxApiProviderConfigWithBaseUrl( alias: "Minimax", }; - return { - ...cfg, - agents: { - ...cfg.agents, - defaults: { - ...cfg.agents?.defaults, - models, - }, - }, - models: { mode: cfg.models?.mode ?? "merge", providers }, - }; + return applyOnboardAuthAgentModelsAndProviders(cfg, { agentModels: models, providers }); } function applyMinimaxApiConfigWithBaseUrl( @@ -213,3 +58,49 @@ function applyMinimaxApiConfigWithBaseUrl( const next = applyMinimaxApiProviderConfigWithBaseUrl(cfg, params); return applyAgentDefaultModelPrimary(next, `${params.providerId}/${params.modelId}`); } + +// MiniMax Global API (platform.minimax.io/anthropic) +export function applyMinimaxApiProviderConfig( + cfg: OpenClawConfig, + modelId: string = "MiniMax-M2.5", +): OpenClawConfig { + return applyMinimaxApiProviderConfigWithBaseUrl(cfg, { + providerId: "minimax", + modelId, + baseUrl: MINIMAX_API_BASE_URL, + }); +} + +export function applyMinimaxApiConfig( + cfg: OpenClawConfig, + modelId: string = "MiniMax-M2.5", +): OpenClawConfig { + return applyMinimaxApiConfigWithBaseUrl(cfg, { + providerId: "minimax", + modelId, + baseUrl: MINIMAX_API_BASE_URL, + }); +} + +// MiniMax CN API (api.minimaxi.com/anthropic) — same provider id, different baseUrl +export function applyMinimaxApiProviderConfigCn( + cfg: OpenClawConfig, + modelId: string = "MiniMax-M2.5", +): OpenClawConfig { + return applyMinimaxApiProviderConfigWithBaseUrl(cfg, { + providerId: "minimax", + modelId, + baseUrl: MINIMAX_CN_API_BASE_URL, + }); +} + +export function applyMinimaxApiConfigCn( + cfg: OpenClawConfig, + modelId: string = "MiniMax-M2.5", +): OpenClawConfig { + return applyMinimaxApiConfigWithBaseUrl(cfg, { + providerId: "minimax", + modelId, + baseUrl: MINIMAX_CN_API_BASE_URL, + }); +} diff --git a/src/commands/onboard-auth.config-opencode-go.ts b/src/commands/onboard-auth.config-opencode-go.ts new file mode 100644 index 00000000000..25be5ffa18f --- /dev/null +++ 
b/src/commands/onboard-auth.config-opencode-go.ts @@ -0,0 +1,36 @@ +import type { OpenClawConfig } from "../config/config.js"; +import { applyAgentDefaultModelPrimary } from "./onboard-auth.config-shared.js"; +import { OPENCODE_GO_DEFAULT_MODEL_REF } from "./opencode-go-model-default.js"; + +const OPENCODE_GO_ALIAS_DEFAULTS: Record = { + "opencode-go/kimi-k2.5": "Kimi", + "opencode-go/glm-5": "GLM", + "opencode-go/minimax-m2.5": "MiniMax", +}; + +export function applyOpencodeGoProviderConfig(cfg: OpenClawConfig): OpenClawConfig { + // Use the built-in opencode-go provider from pi-ai; only seed allowlist aliases. + const models = { ...cfg.agents?.defaults?.models }; + for (const [modelRef, alias] of Object.entries(OPENCODE_GO_ALIAS_DEFAULTS)) { + models[modelRef] = { + ...models[modelRef], + alias: models[modelRef]?.alias ?? alias, + }; + } + + return { + ...cfg, + agents: { + ...cfg.agents, + defaults: { + ...cfg.agents?.defaults, + models, + }, + }, + }; +} + +export function applyOpencodeGoConfig(cfg: OpenClawConfig): OpenClawConfig { + const next = applyOpencodeGoProviderConfig(cfg); + return applyAgentDefaultModelPrimary(next, OPENCODE_GO_DEFAULT_MODEL_REF); +} diff --git a/src/commands/onboard-auth.credentials.test.ts b/src/commands/onboard-auth.credentials.test.ts index 5ff2c57461d..e844ac501c2 100644 --- a/src/commands/onboard-auth.credentials.test.ts +++ b/src/commands/onboard-auth.credentials.test.ts @@ -3,6 +3,7 @@ import { setByteplusApiKey, setCloudflareAiGatewayConfig, setMoonshotApiKey, + setOpencodeZenApiKey, setOpenaiApiKey, setVolcengineApiKey, } from "./onboard-auth.js"; @@ -22,6 +23,7 @@ describe("onboard auth credentials secret refs", () => { "CLOUDFLARE_AI_GATEWAY_API_KEY", "VOLCANO_ENGINE_API_KEY", "BYTEPLUS_API_KEY", + "OPENCODE_API_KEY", ]); afterEach(async () => { @@ -207,4 +209,25 @@ describe("onboard auth credentials secret refs", () => { }); expect(parsed.profiles?.["byteplus:default"]?.key).toBeUndefined(); }); + + it("stores shared 
OpenCode credentials for both runtime providers", async () => { + const env = await setupAuthTestEnv("openclaw-onboard-auth-credentials-opencode-"); + lifecycle.setStateDir(env.stateDir); + process.env.OPENCODE_API_KEY = "sk-opencode-env"; // pragma: allowlist secret + + await setOpencodeZenApiKey("sk-opencode-env", env.agentDir, { + secretInputMode: "ref", // pragma: allowlist secret + }); + + const parsed = await readAuthProfilesForAgent<{ + profiles?: Record; + }>(env.agentDir); + + expect(parsed.profiles?.["opencode:default"]).toMatchObject({ + keyRef: { source: "env", provider: "default", id: "OPENCODE_API_KEY" }, + }); + expect(parsed.profiles?.["opencode-go:default"]).toMatchObject({ + keyRef: { source: "env", provider: "default", id: "OPENCODE_API_KEY" }, + }); + }); }); diff --git a/src/commands/onboard-auth.credentials.ts b/src/commands/onboard-auth.credentials.ts index c32a3ea9ae6..92e1170b010 100644 --- a/src/commands/onboard-auth.credentials.ts +++ b/src/commands/onboard-auth.credentials.ts @@ -15,7 +15,11 @@ import { PROVIDER_ENV_VARS } from "../secrets/provider-env-vars.js"; import { normalizeSecretInput } from "../utils/normalize-secret-input.js"; import type { SecretInputMode } from "./onboard-types.js"; export { CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF } from "../agents/cloudflare-ai-gateway.js"; -export { MISTRAL_DEFAULT_MODEL_REF, XAI_DEFAULT_MODEL_REF } from "./onboard-auth.models.js"; +export { + MISTRAL_DEFAULT_MODEL_REF, + XAI_DEFAULT_MODEL_REF, + MODELSTUDIO_DEFAULT_MODEL_REF, +} from "./onboard-auth.models.js"; export { KILOCODE_DEFAULT_MODEL_REF }; const resolveAuthAgentDir = (agentDir?: string) => agentDir ?? 
resolveOpenClawAgentDir(); @@ -429,11 +433,30 @@ export async function setOpencodeZenApiKey( agentDir?: string, options?: ApiKeyStorageOptions, ) { - upsertAuthProfile({ - profileId: "opencode:default", - credential: buildApiKeyCredential("opencode", key, undefined, options), - agentDir: resolveAuthAgentDir(agentDir), - }); + await setSharedOpencodeApiKey(key, agentDir, options); +} + +export async function setOpencodeGoApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { + await setSharedOpencodeApiKey(key, agentDir, options); +} + +async function setSharedOpencodeApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { + const resolvedAgentDir = resolveAuthAgentDir(agentDir); + for (const provider of ["opencode", "opencode-go"] as const) { + upsertAuthProfile({ + profileId: `${provider}:default`, + credential: buildApiKeyCredential(provider, key, undefined, options), + agentDir: resolvedAgentDir, + }); + } } export async function setTogetherApiKey( @@ -472,6 +495,18 @@ export function setQianfanApiKey( }); } +export function setModelStudioApiKey( + key: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, +) { + upsertAuthProfile({ + profileId: "modelstudio:default", + credential: buildApiKeyCredential("modelstudio", key, undefined, options), + agentDir: resolveAuthAgentDir(agentDir), + }); +} + export function setXaiApiKey(key: SecretInput, agentDir?: string, options?: ApiKeyStorageOptions) { upsertAuthProfile({ profileId: "xai:default", diff --git a/src/commands/onboard-auth.models.ts b/src/commands/onboard-auth.models.ts index 36ae85dadac..2945e7b4461 100644 --- a/src/commands/onboard-auth.models.ts +++ b/src/commands/onboard-auth.models.ts @@ -224,3 +224,105 @@ export function buildKilocodeModelDefinition(): ModelDefinitionConfig { maxTokens: KILOCODE_DEFAULT_MAX_TOKENS, }; } + +// Alibaba Cloud Model Studio Coding Plan +export const MODELSTUDIO_CN_BASE_URL = 
"https://coding.dashscope.aliyuncs.com/v1"; +export const MODELSTUDIO_GLOBAL_BASE_URL = "https://coding-intl.dashscope.aliyuncs.com/v1"; +export const MODELSTUDIO_DEFAULT_MODEL_ID = "qwen3.5-plus"; +export const MODELSTUDIO_DEFAULT_MODEL_REF = `modelstudio/${MODELSTUDIO_DEFAULT_MODEL_ID}`; +export const MODELSTUDIO_DEFAULT_COST = { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, +}; + +const MODELSTUDIO_MODEL_CATALOG = { + "qwen3.5-plus": { + name: "qwen3.5-plus", + reasoning: false, + input: ["text", "image"], + contextWindow: 1000000, + maxTokens: 65536, + }, + "qwen3-max-2026-01-23": { + name: "qwen3-max-2026-01-23", + reasoning: false, + input: ["text"], + contextWindow: 262144, + maxTokens: 65536, + }, + "qwen3-coder-next": { + name: "qwen3-coder-next", + reasoning: false, + input: ["text"], + contextWindow: 262144, + maxTokens: 65536, + }, + "qwen3-coder-plus": { + name: "qwen3-coder-plus", + reasoning: false, + input: ["text"], + contextWindow: 1000000, + maxTokens: 65536, + }, + "MiniMax-M2.5": { + name: "MiniMax-M2.5", + reasoning: false, + input: ["text"], + contextWindow: 1000000, + maxTokens: 65536, + }, + "glm-5": { + name: "glm-5", + reasoning: false, + input: ["text"], + contextWindow: 202752, + maxTokens: 16384, + }, + "glm-4.7": { + name: "glm-4.7", + reasoning: false, + input: ["text"], + contextWindow: 202752, + maxTokens: 16384, + }, + "kimi-k2.5": { + name: "kimi-k2.5", + reasoning: false, + input: ["text", "image"], + contextWindow: 262144, + maxTokens: 32768, + }, +} as const; + +type ModelStudioCatalogId = keyof typeof MODELSTUDIO_MODEL_CATALOG; + +export function buildModelStudioModelDefinition(params: { + id: string; + name?: string; + reasoning?: boolean; + input?: string[]; + cost?: ModelDefinitionConfig["cost"]; + contextWindow?: number; + maxTokens?: number; +}): ModelDefinitionConfig { + const catalog = MODELSTUDIO_MODEL_CATALOG[params.id as ModelStudioCatalogId]; + return { + id: params.id, + name: params.name ?? 
catalog?.name ?? params.id, + reasoning: params.reasoning ?? catalog?.reasoning ?? false, + input: + (params.input as ("text" | "image")[]) ?? + ([...(catalog?.input ?? ["text"])] as ("text" | "image")[]), + cost: params.cost ?? MODELSTUDIO_DEFAULT_COST, + contextWindow: params.contextWindow ?? catalog?.contextWindow ?? 262144, + maxTokens: params.maxTokens ?? catalog?.maxTokens ?? 65536, + }; +} + +export function buildModelStudioDefaultModelDefinition(): ModelDefinitionConfig { + return buildModelStudioModelDefinition({ + id: MODELSTUDIO_DEFAULT_MODEL_ID, + }); +} diff --git a/src/commands/onboard-auth.test.ts b/src/commands/onboard-auth.test.ts index a79eb1d970a..fa2c9f4f10d 100644 --- a/src/commands/onboard-auth.test.ts +++ b/src/commands/onboard-auth.test.ts @@ -16,6 +16,8 @@ import { applyMistralProviderConfig, applyMinimaxApiConfig, applyMinimaxApiProviderConfig, + applyOpencodeGoConfig, + applyOpencodeGoProviderConfig, applyOpencodeZenConfig, applyOpencodeZenProviderConfig, applyOpenrouterConfig, @@ -675,6 +677,11 @@ describe("allowlist provider helpers", () => { modelRef: "opencode/claude-opus-4-6", alias: "My Opus", }, + { + applyConfig: applyOpencodeGoProviderConfig, + modelRef: "opencode-go/kimi-k2.5", + alias: "Kimi", + }, { applyConfig: applyOpenrouterProviderConfig, modelRef: OPENROUTER_DEFAULT_MODEL_REF, @@ -729,6 +736,10 @@ describe("default-model config helpers", () => { applyConfig: applyOpencodeZenConfig, primaryModel: "opencode/claude-opus-4-6", }, + { + applyConfig: applyOpencodeGoConfig, + primaryModel: "opencode-go/kimi-k2.5", + }, { applyConfig: applyOpenrouterConfig, primaryModel: OPENROUTER_DEFAULT_MODEL_REF, diff --git a/src/commands/onboard-auth.ts b/src/commands/onboard-auth.ts index 13d2cf75bf0..f51e61a8cee 100644 --- a/src/commands/onboard-auth.ts +++ b/src/commands/onboard-auth.ts @@ -39,6 +39,10 @@ export { applyXiaomiProviderConfig, applyZaiConfig, applyZaiProviderConfig, + applyModelStudioConfig, + applyModelStudioConfigCn, + 
applyModelStudioProviderConfig, + applyModelStudioProviderConfigCn, KILOCODE_BASE_URL, } from "./onboard-auth.config-core.js"; export { @@ -46,16 +50,16 @@ export { applyMinimaxApiConfigCn, applyMinimaxApiProviderConfig, applyMinimaxApiProviderConfigCn, - applyMinimaxConfig, - applyMinimaxHostedConfig, - applyMinimaxHostedProviderConfig, - applyMinimaxProviderConfig, } from "./onboard-auth.config-minimax.js"; export { applyOpencodeZenConfig, applyOpencodeZenProviderConfig, } from "./onboard-auth.config-opencode.js"; +export { + applyOpencodeGoConfig, + applyOpencodeGoProviderConfig, +} from "./onboard-auth.config-opencode-go.js"; export { CLOUDFLARE_AI_GATEWAY_DEFAULT_MODEL_REF, KILOCODE_DEFAULT_MODEL_REF, @@ -73,6 +77,7 @@ export { setMinimaxApiKey, setMistralApiKey, setMoonshotApiKey, + setOpencodeGoApiKey, setOpencodeZenApiKey, setOpenrouterApiKey, setSyntheticApiKey, @@ -84,6 +89,7 @@ export { setVolcengineApiKey, setZaiApiKey, setXaiApiKey, + setModelStudioApiKey, writeOAuthCredentials, HUGGINGFACE_DEFAULT_MODEL_REF, VERCEL_AI_GATEWAY_DEFAULT_MODEL_REF, @@ -92,6 +98,7 @@ export { TOGETHER_DEFAULT_MODEL_REF, MISTRAL_DEFAULT_MODEL_REF, XAI_DEFAULT_MODEL_REF, + MODELSTUDIO_DEFAULT_MODEL_REF, } from "./onboard-auth.credentials.js"; export { buildKilocodeModelDefinition, diff --git a/src/commands/onboard-custom.test.ts b/src/commands/onboard-custom.test.ts index b04f7bc08ab..bc1a1927bdc 100644 --- a/src/commands/onboard-custom.test.ts +++ b/src/commands/onboard-custom.test.ts @@ -1,5 +1,6 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import { CONTEXT_WINDOW_HARD_MIN_TOKENS } from "../agents/context-window-guard.js"; +import { OLLAMA_DEFAULT_BASE_URL } from "../agents/ollama-models.js"; import type { OpenClawConfig } from "../config/config.js"; import { defaultRuntime } from "../runtime.js"; import { @@ -133,6 +134,23 @@ describe("promptCustomApiConfig", () => { 
expect(result.config.agents?.defaults?.models?.["custom/llama3"]?.alias).toBe("local"); }); + it("defaults custom onboarding to the native Ollama base URL", async () => { + const prompter = createTestPrompter({ + text: ["http://localhost:11434", "", "llama3", "custom", ""], + select: ["plaintext", "openai"], + }); + stubFetchSequence([{ ok: true }]); + + await runPromptCustomApi(prompter); + + expect(prompter.text).toHaveBeenCalledWith( + expect.objectContaining({ + message: "API Base URL", + initialValue: OLLAMA_DEFAULT_BASE_URL, + }), + ); + }); + it("retries when verification fails", async () => { const prompter = createTestPrompter({ text: ["http://localhost:11434/v1", "", "bad-model", "good-model", "custom", ""], diff --git a/src/commands/onboard-custom.ts b/src/commands/onboard-custom.ts index a05922aafe0..874018a74ea 100644 --- a/src/commands/onboard-custom.ts +++ b/src/commands/onboard-custom.ts @@ -1,6 +1,7 @@ import { CONTEXT_WINDOW_HARD_MIN_TOKENS } from "../agents/context-window-guard.js"; import { DEFAULT_PROVIDER } from "../agents/defaults.js"; import { buildModelAliasIndex, modelKey } from "../agents/model-selection.js"; +import { OLLAMA_DEFAULT_BASE_URL } from "../agents/ollama-models.js"; import type { OpenClawConfig } from "../config/config.js"; import type { ModelProviderConfig } from "../config/types.models.js"; import { isSecretRef, type SecretInput } from "../config/types.secrets.js"; @@ -16,7 +17,6 @@ import { applyPrimaryModel } from "./model-picker.js"; import { normalizeAlias } from "./models/shared.js"; import type { SecretInputMode } from "./onboard-types.js"; -const DEFAULT_OLLAMA_BASE_URL = "http://127.0.0.1:11434/v1"; const DEFAULT_CONTEXT_WINDOW = CONTEXT_WINDOW_HARD_MIN_TOKENS; const DEFAULT_MAX_TOKENS = 4096; const VERIFY_TIMEOUT_MS = 30_000; @@ -389,7 +389,7 @@ async function promptBaseUrlAndKey(params: { }): Promise<{ baseUrl: string; apiKey?: SecretInput; resolvedApiKey: string }> { const baseUrlInput = await 
params.prompter.text({ message: "API Base URL", - initialValue: params.initialBaseUrl ?? DEFAULT_OLLAMA_BASE_URL, + initialValue: params.initialBaseUrl ?? OLLAMA_DEFAULT_BASE_URL, placeholder: "https://api.example.com/v1", validate: (val) => { try { diff --git a/src/commands/onboard-non-interactive.gateway.test.ts b/src/commands/onboard-non-interactive.gateway.test.ts index c5d29a12177..83a81f340b3 100644 --- a/src/commands/onboard-non-interactive.gateway.test.ts +++ b/src/commands/onboard-non-interactive.gateway.test.ts @@ -1,9 +1,11 @@ import fs from "node:fs/promises"; import path from "node:path"; -import { afterAll, beforeAll, describe, expect, it, vi } from "vitest"; +import { afterAll, afterEach, beforeAll, describe, expect, it, vi } from "vitest"; +import type { RuntimeEnv } from "../runtime.js"; import { makeTempWorkspace } from "../test-helpers/workspace.js"; import { captureEnv } from "../test-utils/env.js"; import { createThrowingRuntime, readJsonFile } from "./onboard-non-interactive.test-helpers.js"; +import type { installGatewayDaemonNonInteractive } from "./onboard-non-interactive/local/daemon-install.js"; const gatewayClientCalls: Array<{ url?: string; @@ -13,6 +15,29 @@ const gatewayClientCalls: Array<{ onClose?: (code: number, reason: string) => void; }> = []; const ensureWorkspaceAndSessionsMock = vi.fn(async (..._args: unknown[]) => {}); +type InstallGatewayDaemonResult = Awaited>; +const installGatewayDaemonNonInteractiveMock = vi.hoisted(() => + vi.fn(async (): Promise => ({ installed: true })), +); +const gatewayServiceMock = vi.hoisted(() => ({ + label: "LaunchAgent", + loadedText: "loaded", + isLoaded: vi.fn(async () => true), + readRuntime: vi.fn(async () => ({ + status: "running", + state: "active", + pid: 4242, + })), +})); +const readLastGatewayErrorLineMock = vi.hoisted(() => + vi.fn(async () => "Gateway failed to start: required secrets are unavailable."), +); +let waitForGatewayReachableMock: + | ((params: { url: string; token?: 
string; password?: string; deadlineMs?: number }) => Promise<{ + ok: boolean; + detail?: string; + }>) + | undefined; vi.mock("../gateway/client.js", () => ({ GatewayClient: class { @@ -46,9 +71,25 @@ vi.mock("./onboard-helpers.js", async (importOriginal) => { return { ...actual, ensureWorkspaceAndSessions: ensureWorkspaceAndSessionsMock, + waitForGatewayReachable: (...args: Parameters) => + waitForGatewayReachableMock + ? waitForGatewayReachableMock(args[0]) + : actual.waitForGatewayReachable(...args), }; }); +vi.mock("./onboard-non-interactive/local/daemon-install.js", () => ({ + installGatewayDaemonNonInteractive: installGatewayDaemonNonInteractiveMock, +})); + +vi.mock("../daemon/service.js", () => ({ + resolveGatewayService: () => gatewayServiceMock, +})); + +vi.mock("../daemon/diagnostics.js", () => ({ + readLastGatewayErrorLine: readLastGatewayErrorLineMock, +})); + const { runNonInteractiveOnboarding } = await import("./onboard-non-interactive.js"); const { resolveConfigPath: resolveStateConfigPath } = await import("../config/paths.js"); const { resolveConfigPath } = await import("../config/config.js"); @@ -116,6 +157,14 @@ describe("onboard (non-interactive): gateway and remote auth", () => { envSnapshot.restore(); }); + afterEach(() => { + waitForGatewayReachableMock = undefined; + installGatewayDaemonNonInteractiveMock.mockClear(); + gatewayServiceMock.isLoaded.mockClear(); + gatewayServiceMock.readRuntime.mockClear(); + readLastGatewayErrorLineMock.mockClear(); + }); + it("writes gateway token auth into config", async () => { await withStateDir("state-noninteractive-", async (stateDir) => { const token = "tok_test_123"; @@ -302,6 +351,211 @@ describe("onboard (non-interactive): gateway and remote auth", () => { }); }, 60_000); + it("explains local health failure when no daemon was requested", async () => { + await withStateDir("state-local-health-hint-", async (stateDir) => { + waitForGatewayReachableMock = vi.fn(async () => ({ + ok: false, + detail: 
"socket closed: 1006 abnormal closure", + })); + + await expect( + runNonInteractiveOnboarding( + { + nonInteractive: true, + mode: "local", + workspace: path.join(stateDir, "openclaw"), + authChoice: "skip", + skipSkills: true, + skipHealth: false, + installDaemon: false, + gatewayBind: "loopback", + }, + runtime, + ), + ).rejects.toThrow( + /only waits for an already-running gateway unless you pass --install-daemon[\s\S]*--skip-health/, + ); + }); + }, 60_000); + + it("uses a longer health deadline when daemon install was requested", async () => { + await withStateDir("state-local-daemon-health-", async (stateDir) => { + let capturedDeadlineMs: number | undefined; + waitForGatewayReachableMock = vi.fn(async (params: { deadlineMs?: number }) => { + capturedDeadlineMs = params.deadlineMs; + return { ok: true }; + }); + + await runNonInteractiveOnboarding( + { + nonInteractive: true, + mode: "local", + workspace: path.join(stateDir, "openclaw"), + authChoice: "skip", + skipSkills: true, + skipHealth: false, + installDaemon: true, + gatewayBind: "loopback", + }, + runtime, + ); + + expect(installGatewayDaemonNonInteractiveMock).toHaveBeenCalledTimes(1); + expect(capturedDeadlineMs).toBe(45_000); + }); + }, 60_000); + + it("emits a daemon-install failure when Linux user systemd is unavailable", async () => { + await withStateDir("state-local-daemon-install-json-fail-", async (stateDir) => { + installGatewayDaemonNonInteractiveMock.mockResolvedValueOnce({ + installed: false, + skippedReason: "systemd-user-unavailable", + }); + + let capturedError = ""; + const runtimeWithCapture: RuntimeEnv = { + log: () => {}, + error: (...args: unknown[]) => { + const firstArg = args[0]; + capturedError = + typeof firstArg === "string" + ? firstArg + : firstArg instanceof Error + ? firstArg.message + : (JSON.stringify(firstArg) ?? 
""); + throw new Error(capturedError); + }, + exit: (_code: number) => { + throw new Error("exit should not be reached after runtime.error"); + }, + }; + + const originalPlatform = process.platform; + Object.defineProperty(process, "platform", { + configurable: true, + value: "linux", + }); + + try { + await expect( + runNonInteractiveOnboarding( + { + nonInteractive: true, + mode: "local", + workspace: path.join(stateDir, "openclaw"), + authChoice: "skip", + skipSkills: true, + skipHealth: false, + installDaemon: true, + gatewayBind: "loopback", + json: true, + }, + runtimeWithCapture, + ), + ).rejects.toThrow(/"phase": "daemon-install"/); + } finally { + Object.defineProperty(process, "platform", { + configurable: true, + value: originalPlatform, + }); + } + + const parsed = JSON.parse(capturedError) as { + ok: boolean; + phase: string; + daemonInstall?: { + requested?: boolean; + installed?: boolean; + skippedReason?: string; + }; + hints?: string[]; + }; + expect(parsed.ok).toBe(false); + expect(parsed.phase).toBe("daemon-install"); + expect(parsed.daemonInstall).toEqual({ + requested: true, + installed: false, + skippedReason: "systemd-user-unavailable", + }); + expect(parsed.hints).toContain( + "Fix: rerun without `--install-daemon` for one-shot setup, or enable a working user-systemd session and retry.", + ); + }); + }, 60_000); + + it("emits structured JSON diagnostics when daemon health fails", async () => { + await withStateDir("state-local-daemon-health-json-fail-", async (stateDir) => { + waitForGatewayReachableMock = vi.fn(async () => ({ + ok: false, + detail: "gateway closed (1006 abnormal closure (no close frame)): no close reason", + })); + + let capturedError = ""; + const runtimeWithCapture: RuntimeEnv = { + log: () => {}, + error: (...args: unknown[]) => { + const firstArg = args[0]; + capturedError = + typeof firstArg === "string" + ? firstArg + : firstArg instanceof Error + ? firstArg.message + : (JSON.stringify(firstArg) ?? 
""); + throw new Error(capturedError); + }, + exit: (_code: number) => { + throw new Error("exit should not be reached after runtime.error"); + }, + }; + + await expect( + runNonInteractiveOnboarding( + { + nonInteractive: true, + mode: "local", + workspace: path.join(stateDir, "openclaw"), + authChoice: "skip", + skipSkills: true, + skipHealth: false, + installDaemon: true, + gatewayBind: "loopback", + json: true, + }, + runtimeWithCapture, + ), + ).rejects.toThrow(/"phase": "gateway-health"/); + + const parsed = JSON.parse(capturedError) as { + ok: boolean; + phase: string; + installDaemon: boolean; + detail?: string; + gateway?: { wsUrl?: string }; + hints?: string[]; + diagnostics?: { + service?: { + label?: string; + loaded?: boolean; + runtimeStatus?: string; + pid?: number; + }; + lastGatewayError?: string; + }; + }; + expect(parsed.ok).toBe(false); + expect(parsed.phase).toBe("gateway-health"); + expect(parsed.installDaemon).toBe(true); + expect(parsed.detail).toContain("1006 abnormal closure"); + expect(parsed.gateway?.wsUrl).toContain("ws://127.0.0.1:"); + expect(parsed.hints).toContain("Run `openclaw gateway status --deep` for more detail."); + expect(parsed.diagnostics?.service?.label).toBe("LaunchAgent"); + expect(parsed.diagnostics?.service?.loaded).toBe(true); + expect(parsed.diagnostics?.service?.runtimeStatus).toBe("running"); + expect(parsed.diagnostics?.service?.pid).toBe(4242); + expect(parsed.diagnostics?.lastGatewayError).toContain("required secrets are unavailable"); + }); + }, 60_000); + it("auto-generates token auth when binding LAN and persists the token", async () => { if (process.platform === "win32") { // Windows runner occasionally drops the temp config write in this flow; skip to keep CI green. 
diff --git a/src/commands/onboard-non-interactive.provider-auth.test.ts b/src/commands/onboard-non-interactive.provider-auth.test.ts index d72de28a61d..d1eb0a7749f 100644 --- a/src/commands/onboard-non-interactive.provider-auth.test.ts +++ b/src/commands/onboard-non-interactive.provider-auth.test.ts @@ -17,7 +17,7 @@ type OnboardEnv = { runtime: NonInteractiveRuntime; }; -const ensureWorkspaceAndSessionsMock = vi.fn(async (..._args: unknown[]) => {}); +const ensureWorkspaceAndSessionsMock = vi.hoisted(() => vi.fn(async (..._args: unknown[]) => {})); vi.mock("./onboard-helpers.js", async (importOriginal) => { const actual = await importOriginal(); @@ -42,11 +42,6 @@ let upsertAuthProfile: typeof import("../agents/auth-profiles.js").upsertAuthPro type ProviderAuthConfigSnapshot = { auth?: { profiles?: Record }; agents?: { defaults?: { model?: { primary?: string } } }; - talk?: { - provider?: string; - apiKey?: string | { source?: string; id?: string }; - providers?: Record; - }; models?: { providers?: Record< string, @@ -188,16 +183,16 @@ describe("onboard (non-interactive): provider auth", () => { it("stores MiniMax API key and uses global baseUrl by default", async () => { await withOnboardEnv("openclaw-onboard-minimax-", async (env) => { const cfg = await runOnboardingAndReadConfig(env, { - authChoice: "minimax-api", + authChoice: "minimax-global-api", minimaxApiKey: "sk-minimax-test", // pragma: allowlist secret }); - expect(cfg.auth?.profiles?.["minimax:default"]?.provider).toBe("minimax"); - expect(cfg.auth?.profiles?.["minimax:default"]?.mode).toBe("api_key"); + expect(cfg.auth?.profiles?.["minimax:global"]?.provider).toBe("minimax"); + expect(cfg.auth?.profiles?.["minimax:global"]?.mode).toBe("api_key"); expect(cfg.models?.providers?.minimax?.baseUrl).toBe(MINIMAX_API_BASE_URL); expect(cfg.agents?.defaults?.model?.primary).toBe("minimax/MiniMax-M2.5"); await expectApiKeyProfile({ - profileId: "minimax:default", + profileId: "minimax:global", provider: 
"minimax", key: "sk-minimax-test", }); @@ -207,17 +202,17 @@ describe("onboard (non-interactive): provider auth", () => { it("supports MiniMax CN API endpoint auth choice", async () => { await withOnboardEnv("openclaw-onboard-minimax-cn-", async (env) => { const cfg = await runOnboardingAndReadConfig(env, { - authChoice: "minimax-api-key-cn", + authChoice: "minimax-cn-api", minimaxApiKey: "sk-minimax-test", // pragma: allowlist secret }); - expect(cfg.auth?.profiles?.["minimax-cn:default"]?.provider).toBe("minimax-cn"); - expect(cfg.auth?.profiles?.["minimax-cn:default"]?.mode).toBe("api_key"); - expect(cfg.models?.providers?.["minimax-cn"]?.baseUrl).toBe(MINIMAX_CN_API_BASE_URL); - expect(cfg.agents?.defaults?.model?.primary).toBe("minimax-cn/MiniMax-M2.5"); + expect(cfg.auth?.profiles?.["minimax:cn"]?.provider).toBe("minimax"); + expect(cfg.auth?.profiles?.["minimax:cn"]?.mode).toBe("api_key"); + expect(cfg.models?.providers?.minimax?.baseUrl).toBe(MINIMAX_CN_API_BASE_URL); + expect(cfg.agents?.defaults?.model?.primary).toBe("minimax/MiniMax-M2.5"); await expectApiKeyProfile({ - profileId: "minimax-cn:default", - provider: "minimax-cn", + profileId: "minimax:cn", + provider: "minimax", key: "sk-minimax-test", }); }); @@ -362,38 +357,6 @@ describe("onboard (non-interactive): provider auth", () => { }); }); - it("does not persist talk fallback secrets when OpenAI ref onboarding starts from an empty config", async () => { - await withOnboardEnv("openclaw-onboard-openai-ref-no-talk-leak-", async (env) => { - await withEnvAsync( - { - OPENAI_API_KEY: "sk-openai-env-key", // pragma: allowlist secret - ELEVENLABS_API_KEY: "elevenlabs-env-key", // pragma: allowlist secret - }, - async () => { - const cfg = await runOnboardingAndReadConfig(env, { - authChoice: "openai-api-key", - secretInputMode: "ref", // pragma: allowlist secret - }); - - expect(cfg.agents?.defaults?.model?.primary).toBe(OPENAI_DEFAULT_MODEL); - expect(cfg.talk).toBeUndefined(); - - const store = 
ensureAuthProfileStore(); - const profile = store.profiles["openai:default"]; - expect(profile?.type).toBe("api_key"); - if (profile?.type === "api_key") { - expect(profile.key).toBeUndefined(); - expect(profile.keyRef).toEqual({ - source: "env", - provider: "default", - id: "OPENAI_API_KEY", - }); - } - }, - ); - }); - }); - it.each([ { name: "anthropic", @@ -479,7 +442,7 @@ describe("onboard (non-interactive): provider auth", () => { }, ); - it("stores the detected env alias as keyRef for opencode ref mode", async () => { + it("stores the detected env alias as keyRef for both OpenCode runtime providers", async () => { await withOnboardEnv("openclaw-onboard-ref-opencode-alias-", async ({ runtime }) => { await withEnvAsync( { @@ -494,29 +457,80 @@ describe("onboard (non-interactive): provider auth", () => { }); const store = ensureAuthProfileStore(); - const profile = store.profiles["opencode:default"]; - expect(profile?.type).toBe("api_key"); - if (profile?.type === "api_key") { - expect(profile.key).toBeUndefined(); - expect(profile.keyRef).toEqual({ - source: "env", - provider: "default", - id: "OPENCODE_ZEN_API_KEY", - }); + for (const profileId of ["opencode:default", "opencode-go:default"]) { + const profile = store.profiles[profileId]; + expect(profile?.type).toBe("api_key"); + if (profile?.type === "api_key") { + expect(profile.key).toBeUndefined(); + expect(profile.keyRef).toEqual({ + source: "env", + provider: "default", + id: "OPENCODE_ZEN_API_KEY", + }); + } } }, ); }); }); - it("rejects vLLM auth choice in non-interactive mode", async () => { - await withOnboardEnv("openclaw-onboard-vllm-non-interactive-", async ({ runtime }) => { - await expect( - runNonInteractiveOnboardingWithDefaults(runtime, { - authChoice: "vllm", - skipSkills: true, - }), - ).rejects.toThrow('Auth choice "vllm" requires interactive mode.'); + it("configures vLLM via the provider plugin in non-interactive mode", async () => { + await 
withOnboardEnv("openclaw-onboard-vllm-non-interactive-", async (env) => { + const cfg = await runOnboardingAndReadConfig(env, { + authChoice: "vllm", + customBaseUrl: "http://127.0.0.1:8100/v1", + customApiKey: "vllm-test-key", // pragma: allowlist secret + customModelId: "Qwen/Qwen3-8B", + }); + + expect(cfg.auth?.profiles?.["vllm:default"]?.provider).toBe("vllm"); + expect(cfg.auth?.profiles?.["vllm:default"]?.mode).toBe("api_key"); + expect(cfg.models?.providers?.vllm).toEqual({ + baseUrl: "http://127.0.0.1:8100/v1", + api: "openai-completions", + apiKey: "VLLM_API_KEY", + models: [ + expect.objectContaining({ + id: "Qwen/Qwen3-8B", + }), + ], + }); + expect(cfg.agents?.defaults?.model?.primary).toBe("vllm/Qwen/Qwen3-8B"); + await expectApiKeyProfile({ + profileId: "vllm:default", + provider: "vllm", + key: "vllm-test-key", + }); + }); + }); + + it("configures SGLang via the provider plugin in non-interactive mode", async () => { + await withOnboardEnv("openclaw-onboard-sglang-non-interactive-", async (env) => { + const cfg = await runOnboardingAndReadConfig(env, { + authChoice: "sglang", + customBaseUrl: "http://127.0.0.1:31000/v1", + customApiKey: "sglang-test-key", // pragma: allowlist secret + customModelId: "Qwen/Qwen3-32B", + }); + + expect(cfg.auth?.profiles?.["sglang:default"]?.provider).toBe("sglang"); + expect(cfg.auth?.profiles?.["sglang:default"]?.mode).toBe("api_key"); + expect(cfg.models?.providers?.sglang).toEqual({ + baseUrl: "http://127.0.0.1:31000/v1", + api: "openai-completions", + apiKey: "SGLANG_API_KEY", + models: [ + expect.objectContaining({ + id: "Qwen/Qwen3-32B", + }), + ], + }); + expect(cfg.agents?.defaults?.model?.primary).toBe("sglang/Qwen/Qwen3-32B"); + await expectApiKeyProfile({ + profileId: "sglang:default", + provider: "sglang", + key: "sglang-test-key", + }); }); }); @@ -611,6 +625,26 @@ describe("onboard (non-interactive): provider auth", () => { }); }); + it("infers Model Studio auth choice from --modelstudio-api-key and 
sets default model", async () => { + await withOnboardEnv("openclaw-onboard-modelstudio-infer-", async (env) => { + const cfg = await runOnboardingAndReadConfig(env, { + modelstudioApiKey: "modelstudio-test-key", // pragma: allowlist secret + }); + + expect(cfg.auth?.profiles?.["modelstudio:default"]?.provider).toBe("modelstudio"); + expect(cfg.auth?.profiles?.["modelstudio:default"]?.mode).toBe("api_key"); + expect(cfg.models?.providers?.modelstudio?.baseUrl).toBe( + "https://coding-intl.dashscope.aliyuncs.com/v1", + ); + expect(cfg.agents?.defaults?.model?.primary).toBe("modelstudio/qwen3.5-plus"); + await expectApiKeyProfile({ + profileId: "modelstudio:default", + provider: "modelstudio", + key: "modelstudio-test-key", + }); + }); + }); + it("configures a custom provider from non-interactive flags", async () => { await withOnboardEnv("openclaw-onboard-custom-provider-", async ({ configPath, runtime }) => { await runNonInteractiveOnboardingWithDefaults(runtime, { diff --git a/src/commands/onboard-non-interactive/local.ts b/src/commands/onboard-non-interactive/local.ts index 4e0482ae2c8..5e26bf50d24 100644 --- a/src/commands/onboard-non-interactive/local.ts +++ b/src/commands/onboard-non-interactive/local.ts @@ -15,10 +15,58 @@ import { import type { OnboardOptions } from "../onboard-types.js"; import { inferAuthChoiceFromFlags } from "./local/auth-choice-inference.js"; import { applyNonInteractiveGatewayConfig } from "./local/gateway-config.js"; -import { logNonInteractiveOnboardingJson } from "./local/output.js"; +import { + type GatewayHealthFailureDiagnostics, + logNonInteractiveOnboardingFailure, + logNonInteractiveOnboardingJson, +} from "./local/output.js"; import { applyNonInteractiveSkillsConfig } from "./local/skills-config.js"; import { resolveNonInteractiveWorkspaceDir } from "./local/workspace.js"; +const INSTALL_DAEMON_HEALTH_DEADLINE_MS = 45_000; +const ATTACH_EXISTING_GATEWAY_HEALTH_DEADLINE_MS = 15_000; + +async function 
collectGatewayHealthFailureDiagnostics(): Promise< + GatewayHealthFailureDiagnostics | undefined +> { + const diagnostics: GatewayHealthFailureDiagnostics = {}; + + try { + const { resolveGatewayService } = await import("../../daemon/service.js"); + const service = resolveGatewayService(); + const env = process.env as Record; + const [loaded, runtime] = await Promise.all([ + service.isLoaded({ env }).catch(() => false), + service.readRuntime(env).catch(() => undefined), + ]); + diagnostics.service = { + label: service.label, + loaded, + loadedText: service.loadedText, + runtimeStatus: runtime?.status, + state: runtime?.state, + pid: runtime?.pid, + lastExitStatus: runtime?.lastExitStatus, + lastExitReason: runtime?.lastExitReason, + }; + } catch (err) { + diagnostics.inspectError = `service diagnostics failed: ${String(err)}`; + } + + try { + const { readLastGatewayErrorLine } = await import("../../daemon/diagnostics.js"); + diagnostics.lastGatewayError = (await readLastGatewayErrorLine(process.env)) ?? undefined; + } catch (err) { + diagnostics.inspectError = diagnostics.inspectError + ? `${diagnostics.inspectError}; log diagnostics failed: ${String(err)}` + : `log diagnostics failed: ${String(err)}`; + } + + return diagnostics.service || diagnostics.lastGatewayError || diagnostics.inspectError + ? diagnostics + : undefined; +} + export async function runNonInteractiveOnboardingLocal(params: { opts: OnboardOptions; runtime: RuntimeEnv; @@ -85,17 +133,62 @@ export async function runNonInteractiveOnboardingLocal(params: { skipBootstrap: Boolean(nextConfig.agents?.defaults?.skipBootstrap), }); + const daemonRuntimeRaw = opts.daemonRuntime ?? 
DEFAULT_GATEWAY_DAEMON_RUNTIME; + let daemonInstallStatus: + | { + requested: boolean; + installed: boolean; + skippedReason?: "systemd-user-unavailable"; + } + | undefined; if (opts.installDaemon) { const { installGatewayDaemonNonInteractive } = await import("./local/daemon-install.js"); - await installGatewayDaemonNonInteractive({ + const daemonInstall = await installGatewayDaemonNonInteractive({ nextConfig, opts, runtime, port: gatewayResult.port, }); + daemonInstallStatus = daemonInstall.installed + ? { + requested: true, + installed: true, + } + : { + requested: true, + installed: false, + skippedReason: daemonInstall.skippedReason, + }; + if (!daemonInstall.installed && !opts.skipHealth) { + logNonInteractiveOnboardingFailure({ + opts, + runtime, + mode, + phase: "daemon-install", + message: + daemonInstall.skippedReason === "systemd-user-unavailable" + ? "Gateway service install is unavailable because systemd user services are not reachable in this Linux session." + : "Gateway service install did not complete successfully.", + installDaemon: true, + daemonInstall: { + requested: true, + installed: false, + skippedReason: daemonInstall.skippedReason, + }, + daemonRuntime: daemonRuntimeRaw, + hints: + daemonInstall.skippedReason === "systemd-user-unavailable" + ? [ + "Fix: rerun without `--install-daemon` for one-shot setup, or enable a working user-systemd session and retry.", + "If your auth profile uses env-backed refs, keep those env vars set in the shell that runs `openclaw gateway run` or `openclaw agent --local`.", + ] + : [`Run \`${formatCliCommand("openclaw gateway status --deep")}\` for more detail.`], + }); + runtime.exit(1); + return; + } } - const daemonRuntimeRaw = opts.daemonRuntime ?? 
DEFAULT_GATEWAY_DAEMON_RUNTIME; if (!opts.skipHealth) { const { healthCommand } = await import("../health.js"); const links = resolveControlUiLinks({ @@ -104,11 +197,45 @@ export async function runNonInteractiveOnboardingLocal(params: { customBindHost: nextConfig.gateway?.customBindHost, basePath: undefined, }); - await waitForGatewayReachable({ + const probe = await waitForGatewayReachable({ url: links.wsUrl, token: gatewayResult.gatewayToken, - deadlineMs: 15_000, + deadlineMs: opts.installDaemon + ? INSTALL_DAEMON_HEALTH_DEADLINE_MS + : ATTACH_EXISTING_GATEWAY_HEALTH_DEADLINE_MS, }); + if (!probe.ok) { + const diagnostics = opts.installDaemon + ? await collectGatewayHealthFailureDiagnostics() + : undefined; + logNonInteractiveOnboardingFailure({ + opts, + runtime, + mode, + phase: "gateway-health", + message: `Gateway did not become reachable at ${links.wsUrl}.`, + detail: probe.detail, + gateway: { + wsUrl: links.wsUrl, + httpUrl: links.httpUrl, + }, + installDaemon: Boolean(opts.installDaemon), + daemonInstall: daemonInstallStatus, + daemonRuntime: opts.installDaemon ? daemonRuntimeRaw : undefined, + diagnostics, + hints: !opts.installDaemon + ? [ + "Non-interactive local onboarding only waits for an already-running gateway unless you pass --install-daemon.", + `Fix: start \`${formatCliCommand("openclaw gateway run")}\`, re-run with \`--install-daemon\`, or use \`--skip-health\`.`, + process.platform === "win32" + ? "Native Windows managed gateway install tries Scheduled Tasks first and falls back to a per-user Startup-folder login item when task creation is denied." 
+ : undefined, + ].filter((value): value is string => Boolean(value)) + : [`Run \`${formatCliCommand("openclaw gateway status --deep")}\` for more detail.`], + }); + runtime.exit(1); + return; + } await healthCommand({ json: false, timeoutMs: 10_000 }, runtime); } @@ -125,6 +252,7 @@ export async function runNonInteractiveOnboardingLocal(params: { tailscaleMode: gatewayResult.tailscaleMode, }, installDaemon: Boolean(opts.installDaemon), + daemonInstall: daemonInstallStatus, daemonRuntime: opts.installDaemon ? daemonRuntimeRaw : undefined, skipSkills: Boolean(opts.skipSkills), skipHealth: Boolean(opts.skipHealth), diff --git a/src/commands/onboard-non-interactive/local/auth-choice-inference.ts b/src/commands/onboard-non-interactive/local/auth-choice-inference.ts index aecab3ba489..212bb9dd890 100644 --- a/src/commands/onboard-non-interactive/local/auth-choice-inference.ts +++ b/src/commands/onboard-non-interactive/local/auth-choice-inference.ts @@ -27,9 +27,12 @@ type AuthChoiceFlagOptions = Pick< | "xiaomiApiKey" | "minimaxApiKey" | "opencodeZenApiKey" + | "opencodeGoApiKey" | "xaiApiKey" | "litellmApiKey" | "qianfanApiKey" + | "modelstudioApiKeyCn" + | "modelstudioApiKey" | "volcengineApiKey" | "byteplusApiKey" | "customBaseUrl" diff --git a/src/commands/onboard-non-interactive/local/auth-choice.api-key-providers.ts b/src/commands/onboard-non-interactive/local/auth-choice.api-key-providers.ts new file mode 100644 index 00000000000..a04dda68fd1 --- /dev/null +++ b/src/commands/onboard-non-interactive/local/auth-choice.api-key-providers.ts @@ -0,0 +1,543 @@ +import type { OpenClawConfig } from "../../../config/config.js"; +import type { SecretInput } from "../../../config/types.secrets.js"; +import type { RuntimeEnv } from "../../../runtime.js"; +import { applyGoogleGeminiModelDefault } from "../../google-gemini-model-default.js"; +import { applyPrimaryModel } from "../../model-picker.js"; +import { + applyAuthProfileConfig, + applyHuggingfaceConfig, + 
applyKilocodeConfig, + applyKimiCodeConfig, + applyLitellmConfig, + applyMistralConfig, + applyModelStudioConfig, + applyModelStudioConfigCn, + applyMoonshotConfig, + applyMoonshotConfigCn, + applyOpencodeGoConfig, + applyOpencodeZenConfig, + applyOpenrouterConfig, + applyQianfanConfig, + applySyntheticConfig, + applyTogetherConfig, + applyVeniceConfig, + applyVercelAiGatewayConfig, + applyXaiConfig, + applyXiaomiConfig, + setAnthropicApiKey, + setGeminiApiKey, + setHuggingfaceApiKey, + setKilocodeApiKey, + setKimiCodingApiKey, + setLitellmApiKey, + setMistralApiKey, + setModelStudioApiKey, + setMoonshotApiKey, + setOpenaiApiKey, + setOpencodeGoApiKey, + setOpencodeZenApiKey, + setOpenrouterApiKey, + setQianfanApiKey, + setSyntheticApiKey, + setTogetherApiKey, + setVeniceApiKey, + setVercelAiGatewayApiKey, + setVolcengineApiKey, + setXaiApiKey, + setXiaomiApiKey, + setByteplusApiKey, +} from "../../onboard-auth.js"; +import type { AuthChoice, OnboardOptions } from "../../onboard-types.js"; +import { applyOpenAIConfig } from "../../openai-model-default.js"; + +type ApiKeyStorageOptions = { + secretInputMode: "plaintext" | "ref"; +}; + +type SimpleApiKeyAuthChoice = { + authChoices: AuthChoice[]; + provider: string; + flagValue?: string; + flagName: `--${string}`; + envVar: string; + profileId: string; + setCredential: (value: SecretInput, options?: ApiKeyStorageOptions) => Promise | void; + applyConfig: (cfg: OpenClawConfig) => OpenClawConfig; +}; + +type ResolvedNonInteractiveApiKey = { + key: string; + source: "profile" | "env" | "flag"; +}; + +function buildSimpleApiKeyAuthChoices(params: { opts: OnboardOptions }): SimpleApiKeyAuthChoice[] { + const withStorage = + ( + setter: ( + value: SecretInput, + agentDir?: string, + options?: ApiKeyStorageOptions, + ) => Promise | void, + ) => + (value: SecretInput, options?: ApiKeyStorageOptions) => + setter(value, undefined, options); + + return [ + { + authChoices: ["apiKey"], + provider: "anthropic", + flagValue: 
params.opts.anthropicApiKey, + flagName: "--anthropic-api-key", + envVar: "ANTHROPIC_API_KEY", + profileId: "anthropic:default", + setCredential: withStorage(setAnthropicApiKey), + applyConfig: (cfg) => + applyAuthProfileConfig(cfg, { + profileId: "anthropic:default", + provider: "anthropic", + mode: "api_key", + }), + }, + { + authChoices: ["gemini-api-key"], + provider: "google", + flagValue: params.opts.geminiApiKey, + flagName: "--gemini-api-key", + envVar: "GEMINI_API_KEY", + profileId: "google:default", + setCredential: withStorage(setGeminiApiKey), + applyConfig: (cfg) => + applyGoogleGeminiModelDefault( + applyAuthProfileConfig(cfg, { + profileId: "google:default", + provider: "google", + mode: "api_key", + }), + ).next, + }, + { + authChoices: ["xiaomi-api-key"], + provider: "xiaomi", + flagValue: params.opts.xiaomiApiKey, + flagName: "--xiaomi-api-key", + envVar: "XIAOMI_API_KEY", + profileId: "xiaomi:default", + setCredential: withStorage(setXiaomiApiKey), + applyConfig: (cfg) => + applyXiaomiConfig( + applyAuthProfileConfig(cfg, { + profileId: "xiaomi:default", + provider: "xiaomi", + mode: "api_key", + }), + ), + }, + { + authChoices: ["xai-api-key"], + provider: "xai", + flagValue: params.opts.xaiApiKey, + flagName: "--xai-api-key", + envVar: "XAI_API_KEY", + profileId: "xai:default", + setCredential: withStorage(setXaiApiKey), + applyConfig: (cfg) => + applyXaiConfig( + applyAuthProfileConfig(cfg, { + profileId: "xai:default", + provider: "xai", + mode: "api_key", + }), + ), + }, + { + authChoices: ["mistral-api-key"], + provider: "mistral", + flagValue: params.opts.mistralApiKey, + flagName: "--mistral-api-key", + envVar: "MISTRAL_API_KEY", + profileId: "mistral:default", + setCredential: withStorage(setMistralApiKey), + applyConfig: (cfg) => + applyMistralConfig( + applyAuthProfileConfig(cfg, { + profileId: "mistral:default", + provider: "mistral", + mode: "api_key", + }), + ), + }, + { + authChoices: ["volcengine-api-key"], + provider: 
"volcengine", + flagValue: params.opts.volcengineApiKey, + flagName: "--volcengine-api-key", + envVar: "VOLCANO_ENGINE_API_KEY", + profileId: "volcengine:default", + setCredential: withStorage(setVolcengineApiKey), + applyConfig: (cfg) => + applyPrimaryModel( + applyAuthProfileConfig(cfg, { + profileId: "volcengine:default", + provider: "volcengine", + mode: "api_key", + }), + "volcengine-plan/ark-code-latest", + ), + }, + { + authChoices: ["byteplus-api-key"], + provider: "byteplus", + flagValue: params.opts.byteplusApiKey, + flagName: "--byteplus-api-key", + envVar: "BYTEPLUS_API_KEY", + profileId: "byteplus:default", + setCredential: withStorage(setByteplusApiKey), + applyConfig: (cfg) => + applyPrimaryModel( + applyAuthProfileConfig(cfg, { + profileId: "byteplus:default", + provider: "byteplus", + mode: "api_key", + }), + "byteplus-plan/ark-code-latest", + ), + }, + { + authChoices: ["qianfan-api-key"], + provider: "qianfan", + flagValue: params.opts.qianfanApiKey, + flagName: "--qianfan-api-key", + envVar: "QIANFAN_API_KEY", + profileId: "qianfan:default", + setCredential: withStorage(setQianfanApiKey), + applyConfig: (cfg) => + applyQianfanConfig( + applyAuthProfileConfig(cfg, { + profileId: "qianfan:default", + provider: "qianfan", + mode: "api_key", + }), + ), + }, + { + authChoices: ["modelstudio-api-key-cn"], + provider: "modelstudio", + flagValue: params.opts.modelstudioApiKeyCn, + flagName: "--modelstudio-api-key-cn", + envVar: "MODELSTUDIO_API_KEY", + profileId: "modelstudio:default", + setCredential: withStorage(setModelStudioApiKey), + applyConfig: (cfg) => + applyModelStudioConfigCn( + applyAuthProfileConfig(cfg, { + profileId: "modelstudio:default", + provider: "modelstudio", + mode: "api_key", + }), + ), + }, + { + authChoices: ["modelstudio-api-key"], + provider: "modelstudio", + flagValue: params.opts.modelstudioApiKey, + flagName: "--modelstudio-api-key", + envVar: "MODELSTUDIO_API_KEY", + profileId: "modelstudio:default", + setCredential: 
withStorage(setModelStudioApiKey), + applyConfig: (cfg) => + applyModelStudioConfig( + applyAuthProfileConfig(cfg, { + profileId: "modelstudio:default", + provider: "modelstudio", + mode: "api_key", + }), + ), + }, + { + authChoices: ["openai-api-key"], + provider: "openai", + flagValue: params.opts.openaiApiKey, + flagName: "--openai-api-key", + envVar: "OPENAI_API_KEY", + profileId: "openai:default", + setCredential: withStorage(setOpenaiApiKey), + applyConfig: (cfg) => + applyOpenAIConfig( + applyAuthProfileConfig(cfg, { + profileId: "openai:default", + provider: "openai", + mode: "api_key", + }), + ), + }, + { + authChoices: ["openrouter-api-key"], + provider: "openrouter", + flagValue: params.opts.openrouterApiKey, + flagName: "--openrouter-api-key", + envVar: "OPENROUTER_API_KEY", + profileId: "openrouter:default", + setCredential: withStorage(setOpenrouterApiKey), + applyConfig: (cfg) => + applyOpenrouterConfig( + applyAuthProfileConfig(cfg, { + profileId: "openrouter:default", + provider: "openrouter", + mode: "api_key", + }), + ), + }, + { + authChoices: ["kilocode-api-key"], + provider: "kilocode", + flagValue: params.opts.kilocodeApiKey, + flagName: "--kilocode-api-key", + envVar: "KILOCODE_API_KEY", + profileId: "kilocode:default", + setCredential: withStorage(setKilocodeApiKey), + applyConfig: (cfg) => + applyKilocodeConfig( + applyAuthProfileConfig(cfg, { + profileId: "kilocode:default", + provider: "kilocode", + mode: "api_key", + }), + ), + }, + { + authChoices: ["litellm-api-key"], + provider: "litellm", + flagValue: params.opts.litellmApiKey, + flagName: "--litellm-api-key", + envVar: "LITELLM_API_KEY", + profileId: "litellm:default", + setCredential: withStorage(setLitellmApiKey), + applyConfig: (cfg) => + applyLitellmConfig( + applyAuthProfileConfig(cfg, { + profileId: "litellm:default", + provider: "litellm", + mode: "api_key", + }), + ), + }, + { + authChoices: ["ai-gateway-api-key"], + provider: "vercel-ai-gateway", + flagValue: 
params.opts.aiGatewayApiKey, + flagName: "--ai-gateway-api-key", + envVar: "AI_GATEWAY_API_KEY", + profileId: "vercel-ai-gateway:default", + setCredential: withStorage(setVercelAiGatewayApiKey), + applyConfig: (cfg) => + applyVercelAiGatewayConfig( + applyAuthProfileConfig(cfg, { + profileId: "vercel-ai-gateway:default", + provider: "vercel-ai-gateway", + mode: "api_key", + }), + ), + }, + { + authChoices: ["moonshot-api-key"], + provider: "moonshot", + flagValue: params.opts.moonshotApiKey, + flagName: "--moonshot-api-key", + envVar: "MOONSHOT_API_KEY", + profileId: "moonshot:default", + setCredential: withStorage(setMoonshotApiKey), + applyConfig: (cfg) => + applyMoonshotConfig( + applyAuthProfileConfig(cfg, { + profileId: "moonshot:default", + provider: "moonshot", + mode: "api_key", + }), + ), + }, + { + authChoices: ["moonshot-api-key-cn"], + provider: "moonshot", + flagValue: params.opts.moonshotApiKey, + flagName: "--moonshot-api-key", + envVar: "MOONSHOT_API_KEY", + profileId: "moonshot:default", + setCredential: withStorage(setMoonshotApiKey), + applyConfig: (cfg) => + applyMoonshotConfigCn( + applyAuthProfileConfig(cfg, { + profileId: "moonshot:default", + provider: "moonshot", + mode: "api_key", + }), + ), + }, + { + authChoices: ["kimi-code-api-key"], + provider: "kimi-coding", + flagValue: params.opts.kimiCodeApiKey, + flagName: "--kimi-code-api-key", + envVar: "KIMI_API_KEY", + profileId: "kimi-coding:default", + setCredential: withStorage(setKimiCodingApiKey), + applyConfig: (cfg) => + applyKimiCodeConfig( + applyAuthProfileConfig(cfg, { + profileId: "kimi-coding:default", + provider: "kimi-coding", + mode: "api_key", + }), + ), + }, + { + authChoices: ["synthetic-api-key"], + provider: "synthetic", + flagValue: params.opts.syntheticApiKey, + flagName: "--synthetic-api-key", + envVar: "SYNTHETIC_API_KEY", + profileId: "synthetic:default", + setCredential: withStorage(setSyntheticApiKey), + applyConfig: (cfg) => + applySyntheticConfig( + 
applyAuthProfileConfig(cfg, { + profileId: "synthetic:default", + provider: "synthetic", + mode: "api_key", + }), + ), + }, + { + authChoices: ["venice-api-key"], + provider: "venice", + flagValue: params.opts.veniceApiKey, + flagName: "--venice-api-key", + envVar: "VENICE_API_KEY", + profileId: "venice:default", + setCredential: withStorage(setVeniceApiKey), + applyConfig: (cfg) => + applyVeniceConfig( + applyAuthProfileConfig(cfg, { + profileId: "venice:default", + provider: "venice", + mode: "api_key", + }), + ), + }, + { + authChoices: ["opencode-zen"], + provider: "opencode", + flagValue: params.opts.opencodeZenApiKey, + flagName: "--opencode-zen-api-key", + envVar: "OPENCODE_API_KEY (or OPENCODE_ZEN_API_KEY)", + profileId: "opencode:default", + setCredential: withStorage(setOpencodeZenApiKey), + applyConfig: (cfg) => + applyOpencodeZenConfig( + applyAuthProfileConfig(cfg, { + profileId: "opencode:default", + provider: "opencode", + mode: "api_key", + }), + ), + }, + { + authChoices: ["opencode-go"], + provider: "opencode-go", + flagValue: params.opts.opencodeGoApiKey, + flagName: "--opencode-go-api-key", + envVar: "OPENCODE_API_KEY", + profileId: "opencode-go:default", + setCredential: withStorage(setOpencodeGoApiKey), + applyConfig: (cfg) => + applyOpencodeGoConfig( + applyAuthProfileConfig(cfg, { + profileId: "opencode-go:default", + provider: "opencode-go", + mode: "api_key", + }), + ), + }, + { + authChoices: ["together-api-key"], + provider: "together", + flagValue: params.opts.togetherApiKey, + flagName: "--together-api-key", + envVar: "TOGETHER_API_KEY", + profileId: "together:default", + setCredential: withStorage(setTogetherApiKey), + applyConfig: (cfg) => + applyTogetherConfig( + applyAuthProfileConfig(cfg, { + profileId: "together:default", + provider: "together", + mode: "api_key", + }), + ), + }, + { + authChoices: ["huggingface-api-key"], + provider: "huggingface", + flagValue: params.opts.huggingfaceApiKey, + flagName: "--huggingface-api-key", 
+ envVar: "HF_TOKEN", + profileId: "huggingface:default", + setCredential: withStorage(setHuggingfaceApiKey), + applyConfig: (cfg) => + applyHuggingfaceConfig( + applyAuthProfileConfig(cfg, { + profileId: "huggingface:default", + provider: "huggingface", + mode: "api_key", + }), + ), + }, + ]; +} + +export async function applySimpleNonInteractiveApiKeyChoice(params: { + authChoice: AuthChoice; + nextConfig: OpenClawConfig; + baseConfig: OpenClawConfig; + opts: OnboardOptions; + runtime: RuntimeEnv; + apiKeyStorageOptions?: ApiKeyStorageOptions; + resolveApiKey: (input: { + provider: string; + cfg: OpenClawConfig; + flagValue?: string; + flagName: `--${string}`; + envVar: string; + runtime: RuntimeEnv; + }) => Promise; + maybeSetResolvedApiKey: ( + resolved: ResolvedNonInteractiveApiKey, + setter: (value: SecretInput) => Promise | void, + ) => Promise; +}): Promise { + const definition = buildSimpleApiKeyAuthChoices({ + opts: params.opts, + }).find((entry) => entry.authChoices.includes(params.authChoice)); + if (!definition) { + return undefined; + } + + const resolved = await params.resolveApiKey({ + provider: definition.provider, + cfg: params.baseConfig, + flagValue: definition.flagValue, + flagName: definition.flagName, + envVar: definition.envVar, + runtime: params.runtime, + }); + if (!resolved) { + return null; + } + if ( + !(await params.maybeSetResolvedApiKey(resolved, (value) => + definition.setCredential(value, params.apiKeyStorageOptions), + )) + ) { + return null; + } + return definition.applyConfig(params.nextConfig); +} diff --git a/src/commands/onboard-non-interactive/local/auth-choice.plugin-providers.ts b/src/commands/onboard-non-interactive/local/auth-choice.plugin-providers.ts new file mode 100644 index 00000000000..01007aa7aa2 --- /dev/null +++ b/src/commands/onboard-non-interactive/local/auth-choice.plugin-providers.ts @@ -0,0 +1,121 @@ +import { resolveDefaultAgentId, resolveAgentWorkspaceDir } from "../../../agents/agent-scope.js"; +import 
type { ApiKeyCredential } from "../../../agents/auth-profiles/types.js"; +import { resolveDefaultAgentWorkspaceDir } from "../../../agents/workspace.js"; +import type { OpenClawConfig } from "../../../config/config.js"; +import { enablePluginInConfig } from "../../../plugins/enable.js"; +import { + PROVIDER_PLUGIN_CHOICE_PREFIX, + resolveProviderPluginChoice, +} from "../../../plugins/provider-wizard.js"; +import { resolvePluginProviders } from "../../../plugins/providers.js"; +import type { + ProviderNonInteractiveApiKeyCredentialParams, + ProviderResolveNonInteractiveApiKeyParams, +} from "../../../plugins/types.js"; +import type { RuntimeEnv } from "../../../runtime.js"; +import { resolvePreferredProviderForAuthChoice } from "../../auth-choice.preferred-provider.js"; +import type { OnboardOptions } from "../../onboard-types.js"; + +function buildIsolatedProviderResolutionConfig( + cfg: OpenClawConfig, + providerId: string | undefined, +): OpenClawConfig { + if (!providerId) { + return cfg; + } + const allow = new Set(cfg.plugins?.allow ?? []); + allow.add(providerId); + return { + ...cfg, + plugins: { + ...cfg.plugins, + allow: Array.from(allow), + entries: { + ...cfg.plugins?.entries, + [providerId]: { + ...cfg.plugins?.entries?.[providerId], + enabled: true, + }, + }, + }, + }; +} + +export async function applyNonInteractivePluginProviderChoice(params: { + nextConfig: OpenClawConfig; + authChoice: string; + opts: OnboardOptions; + runtime: RuntimeEnv; + baseConfig: OpenClawConfig; + resolveApiKey: (input: ProviderResolveNonInteractiveApiKeyParams) => Promise<{ + key: string; + source: "profile" | "env" | "flag"; + envVarName?: string; + } | null>; + toApiKeyCredential: ( + input: ProviderNonInteractiveApiKeyCredentialParams, + ) => ApiKeyCredential | null; +}): Promise { + const agentId = resolveDefaultAgentId(params.nextConfig); + const workspaceDir = + resolveAgentWorkspaceDir(params.nextConfig, agentId) ?? 
resolveDefaultAgentWorkspaceDir(); + const prefixedProviderId = params.authChoice.startsWith(PROVIDER_PLUGIN_CHOICE_PREFIX) + ? params.authChoice.slice(PROVIDER_PLUGIN_CHOICE_PREFIX.length).split(":", 1)[0]?.trim() + : undefined; + const preferredProviderId = + prefixedProviderId || + resolvePreferredProviderForAuthChoice({ + choice: params.authChoice, + config: params.nextConfig, + workspaceDir, + }); + const resolutionConfig = buildIsolatedProviderResolutionConfig( + params.nextConfig, + preferredProviderId, + ); + const providerChoice = resolveProviderPluginChoice({ + providers: resolvePluginProviders({ + config: resolutionConfig, + workspaceDir, + }), + choice: params.authChoice, + }); + if (!providerChoice) { + return undefined; + } + + const enableResult = enablePluginInConfig( + params.nextConfig, + providerChoice.provider.pluginId ?? providerChoice.provider.id, + ); + if (!enableResult.enabled) { + params.runtime.error( + `${providerChoice.provider.label} plugin is disabled (${enableResult.reason ?? 
"blocked"}).`, + ); + params.runtime.exit(1); + return null; + } + + const method = providerChoice.method; + if (!method.runNonInteractive) { + params.runtime.error( + [ + `Auth choice "${params.authChoice}" requires interactive mode.`, + `The ${providerChoice.provider.label} provider plugin does not implement non-interactive setup.`, + ].join("\n"), + ); + params.runtime.exit(1); + return null; + } + + return method.runNonInteractive({ + authChoice: params.authChoice, + config: enableResult.config, + baseConfig: params.baseConfig, + opts: params.opts, + runtime: params.runtime, + workspaceDir, + resolveApiKey: params.resolveApiKey, + toApiKeyCredential: params.toApiKeyCredential, + }); +} diff --git a/src/commands/onboard-non-interactive/local/auth-choice.ts b/src/commands/onboard-non-interactive/local/auth-choice.ts index 98eef51dd20..d435771d720 100644 --- a/src/commands/onboard-non-interactive/local/auth-choice.ts +++ b/src/commands/onboard-non-interactive/local/auth-choice.ts @@ -1,4 +1,5 @@ import { upsertAuthProfile } from "../../../agents/auth-profiles.js"; +import type { ApiKeyCredential } from "../../../agents/auth-profiles/types.js"; import { normalizeProviderId } from "../../../agents/model-selection.js"; import { parseDurationMs } from "../../../cli/parse-duration.js"; import type { OpenClawConfig } from "../../../config/config.js"; @@ -8,53 +9,14 @@ import { resolveDefaultSecretProviderAlias } from "../../../secrets/ref-contract import { normalizeSecretInput } from "../../../utils/normalize-secret-input.js"; import { normalizeSecretInputModeInput } from "../../auth-choice.apply-helpers.js"; import { buildTokenProfileId, validateAnthropicSetupToken } from "../../auth-token.js"; -import { applyGoogleGeminiModelDefault } from "../../google-gemini-model-default.js"; -import { applyPrimaryModel } from "../../model-picker.js"; import { applyAuthProfileConfig, applyCloudflareAiGatewayConfig, - applyKilocodeConfig, - applyQianfanConfig, - applyKimiCodeConfig, 
applyMinimaxApiConfig, applyMinimaxApiConfigCn, - applyMinimaxConfig, - applyMoonshotConfig, - applyMoonshotConfigCn, - applyOpencodeZenConfig, - applyOpenrouterConfig, - applySyntheticConfig, - applyVeniceConfig, - applyTogetherConfig, - applyHuggingfaceConfig, - applyVercelAiGatewayConfig, - applyLitellmConfig, - applyMistralConfig, - applyXaiConfig, - applyXiaomiConfig, applyZaiConfig, - setAnthropicApiKey, setCloudflareAiGatewayConfig, - setByteplusApiKey, - setQianfanApiKey, - setGeminiApiKey, - setKilocodeApiKey, - setKimiCodingApiKey, - setLitellmApiKey, - setMistralApiKey, setMinimaxApiKey, - setMoonshotApiKey, - setOpenaiApiKey, - setOpencodeZenApiKey, - setOpenrouterApiKey, - setSyntheticApiKey, - setVolcengineApiKey, - setXaiApiKey, - setVeniceApiKey, - setTogetherApiKey, - setHuggingfaceApiKey, - setVercelAiGatewayApiKey, - setXiaomiApiKey, setZaiApiKey, } from "../../onboard-auth.js"; import { @@ -64,9 +26,10 @@ import { resolveCustomProviderId, } from "../../onboard-custom.js"; import type { AuthChoice, OnboardOptions } from "../../onboard-types.js"; -import { applyOpenAIConfig } from "../../openai-model-default.js"; import { detectZaiEndpoint } from "../../zai-endpoint-detect.js"; import { resolveNonInteractiveApiKey } from "../api-keys.js"; +import { applySimpleNonInteractiveApiKeyChoice } from "./auth-choice.api-key-providers.js"; +import { applyNonInteractivePluginProviderChoice } from "./auth-choice.plugin-providers.js"; type ResolvedNonInteractiveApiKey = NonNullable< Awaited> @@ -121,6 +84,46 @@ export async function applyNonInteractiveAuthChoice(params: { ...input, secretInputMode: requestedSecretInputMode, }); + const toApiKeyCredential = (params: { + provider: string; + resolved: ResolvedNonInteractiveApiKey; + email?: string; + metadata?: Record; + }): ApiKeyCredential | null => { + const storeSecretRef = requestedSecretInputMode === "ref" && params.resolved.source === "env"; // pragma: allowlist secret + if (storeSecretRef) { + if 
(!params.resolved.envVarName) { + runtime.error( + [ + `--secret-input-mode ref requires an explicit environment variable for provider "${params.provider}".`, + "Set the provider API key env var and retry, or use --secret-input-mode plaintext.", + ].join("\n"), + ); + runtime.exit(1); + return null; + } + return { + type: "api_key", + provider: params.provider, + keyRef: { + source: "env", + provider: resolveDefaultSecretProviderAlias(baseConfig, "env", { + preferFirstProviderForSource: true, + }), + id: params.resolved.envVarName, + }, + ...(params.email ? { email: params.email } : {}), + ...(params.metadata ? { metadata: params.metadata } : {}), + }; + } + return { + type: "api_key", + provider: params.provider, + key: params.resolved.key, + ...(params.email ? { email: params.email } : {}), + ...(params.metadata ? { metadata: params.metadata } : {}), + }; + }; const maybeSetResolvedApiKey = async ( resolved: ResolvedNonInteractiveApiKey, setter: (value: SecretInput) => Promise | void, @@ -158,41 +161,22 @@ export async function applyNonInteractiveAuthChoice(params: { return null; } - if (authChoice === "vllm") { - runtime.error( - [ - 'Auth choice "vllm" requires interactive mode.', - "Use interactive onboard/configure to enter base URL, API key, and model ID.", - ].join("\n"), - ); - runtime.exit(1); - return null; - } - - if (authChoice === "apiKey") { - const resolved = await resolveApiKey({ - provider: "anthropic", - cfg: baseConfig, - flagValue: opts.anthropicApiKey, - flagName: "--anthropic-api-key", - envVar: "ANTHROPIC_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setAnthropicApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - return applyAuthProfileConfig(nextConfig, { - profileId: "anthropic:default", - provider: "anthropic", - mode: "api_key", - }); + const pluginProviderChoice = await applyNonInteractivePluginProviderChoice({ + nextConfig, + 
authChoice, + opts, + runtime, + baseConfig, + resolveApiKey: (input) => + resolveApiKey({ + ...input, + cfg: baseConfig, + runtime, + }), + toApiKeyCredential, + }); + if (pluginProviderChoice !== undefined) { + return pluginProviderChoice; } if (authChoice === "token") { @@ -250,31 +234,18 @@ export async function applyNonInteractiveAuthChoice(params: { }); } - if (authChoice === "gemini-api-key") { - const resolved = await resolveApiKey({ - provider: "google", - cfg: baseConfig, - flagValue: opts.geminiApiKey, - flagName: "--gemini-api-key", - envVar: "GEMINI_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setGeminiApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "google:default", - provider: "google", - mode: "api_key", - }); - return applyGoogleGeminiModelDefault(nextConfig).next; + const simpleApiKeyChoice = await applySimpleNonInteractiveApiKeyChoice({ + authChoice, + nextConfig, + baseConfig, + opts, + runtime, + apiKeyStorageOptions, + resolveApiKey, + maybeSetResolvedApiKey, + }); + if (simpleApiKeyChoice !== undefined) { + return simpleApiKeyChoice; } if ( @@ -336,303 +307,6 @@ export async function applyNonInteractiveAuthChoice(params: { }); } - if (authChoice === "xiaomi-api-key") { - const resolved = await resolveApiKey({ - provider: "xiaomi", - cfg: baseConfig, - flagValue: opts.xiaomiApiKey, - flagName: "--xiaomi-api-key", - envVar: "XIAOMI_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setXiaomiApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "xiaomi:default", - provider: "xiaomi", - mode: "api_key", - }); - return applyXiaomiConfig(nextConfig); - } - - if (authChoice === "xai-api-key") { - const resolved 
= await resolveApiKey({ - provider: "xai", - cfg: baseConfig, - flagValue: opts.xaiApiKey, - flagName: "--xai-api-key", - envVar: "XAI_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setXaiApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "xai:default", - provider: "xai", - mode: "api_key", - }); - return applyXaiConfig(nextConfig); - } - - if (authChoice === "mistral-api-key") { - const resolved = await resolveApiKey({ - provider: "mistral", - cfg: baseConfig, - flagValue: opts.mistralApiKey, - flagName: "--mistral-api-key", - envVar: "MISTRAL_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setMistralApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "mistral:default", - provider: "mistral", - mode: "api_key", - }); - return applyMistralConfig(nextConfig); - } - - if (authChoice === "volcengine-api-key") { - const resolved = await resolveApiKey({ - provider: "volcengine", - cfg: baseConfig, - flagValue: opts.volcengineApiKey, - flagName: "--volcengine-api-key", - envVar: "VOLCANO_ENGINE_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setVolcengineApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "volcengine:default", - provider: "volcengine", - mode: "api_key", - }); - return applyPrimaryModel(nextConfig, "volcengine-plan/ark-code-latest"); - } - - if (authChoice === "byteplus-api-key") { - const resolved = await resolveApiKey({ - provider: "byteplus", - cfg: baseConfig, - flagValue: opts.byteplusApiKey, - flagName: "--byteplus-api-key", - envVar: 
"BYTEPLUS_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setByteplusApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "byteplus:default", - provider: "byteplus", - mode: "api_key", - }); - return applyPrimaryModel(nextConfig, "byteplus-plan/ark-code-latest"); - } - - if (authChoice === "qianfan-api-key") { - const resolved = await resolveApiKey({ - provider: "qianfan", - cfg: baseConfig, - flagValue: opts.qianfanApiKey, - flagName: "--qianfan-api-key", - envVar: "QIANFAN_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setQianfanApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "qianfan:default", - provider: "qianfan", - mode: "api_key", - }); - return applyQianfanConfig(nextConfig); - } - - if (authChoice === "openai-api-key") { - const resolved = await resolveApiKey({ - provider: "openai", - cfg: baseConfig, - flagValue: opts.openaiApiKey, - flagName: "--openai-api-key", - envVar: "OPENAI_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setOpenaiApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "openai:default", - provider: "openai", - mode: "api_key", - }); - return applyOpenAIConfig(nextConfig); - } - - if (authChoice === "openrouter-api-key") { - const resolved = await resolveApiKey({ - provider: "openrouter", - cfg: baseConfig, - flagValue: opts.openrouterApiKey, - flagName: "--openrouter-api-key", - envVar: "OPENROUTER_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - 
setOpenrouterApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "openrouter:default", - provider: "openrouter", - mode: "api_key", - }); - return applyOpenrouterConfig(nextConfig); - } - - if (authChoice === "kilocode-api-key") { - const resolved = await resolveApiKey({ - provider: "kilocode", - cfg: baseConfig, - flagValue: opts.kilocodeApiKey, - flagName: "--kilocode-api-key", - envVar: "KILOCODE_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setKilocodeApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "kilocode:default", - provider: "kilocode", - mode: "api_key", - }); - return applyKilocodeConfig(nextConfig); - } - - if (authChoice === "litellm-api-key") { - const resolved = await resolveApiKey({ - provider: "litellm", - cfg: baseConfig, - flagValue: opts.litellmApiKey, - flagName: "--litellm-api-key", - envVar: "LITELLM_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setLitellmApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "litellm:default", - provider: "litellm", - mode: "api_key", - }); - return applyLitellmConfig(nextConfig); - } - - if (authChoice === "ai-gateway-api-key") { - const resolved = await resolveApiKey({ - provider: "vercel-ai-gateway", - cfg: baseConfig, - flagValue: opts.aiGatewayApiKey, - flagName: "--ai-gateway-api-key", - envVar: "AI_GATEWAY_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setVercelAiGatewayApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = 
applyAuthProfileConfig(nextConfig, { - profileId: "vercel-ai-gateway:default", - provider: "vercel-ai-gateway", - mode: "api_key", - }); - return applyVercelAiGatewayConfig(nextConfig); - } - if (authChoice === "cloudflare-ai-gateway-api-key") { const accountId = opts.cloudflareAiGatewayAccountId?.trim() ?? ""; const gatewayId = opts.cloudflareAiGatewayGatewayId?.trim() ?? ""; @@ -681,140 +355,37 @@ export async function applyNonInteractiveAuthChoice(params: { }); } - const applyMoonshotApiKeyChoice = async ( - applyConfig: (cfg: OpenClawConfig) => OpenClawConfig, - ): Promise => { - const resolved = await resolveApiKey({ - provider: "moonshot", - cfg: baseConfig, - flagValue: opts.moonshotApiKey, - flagName: "--moonshot-api-key", - envVar: "MOONSHOT_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setMoonshotApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "moonshot:default", - provider: "moonshot", - mode: "api_key", - }); - return applyConfig(nextConfig); + // Legacy aliases: these choice values were removed; fail with an actionable message so + // existing CI automation gets a clear error instead of silently exiting 0 with no auth. + const REMOVED_MINIMAX_CHOICES: Record = { + minimax: "minimax-global-api", + "minimax-api": "minimax-global-api", + "minimax-cloud": "minimax-global-api", + "minimax-api-lightning": "minimax-global-api", + "minimax-api-key-cn": "minimax-cn-api", }; - - if (authChoice === "moonshot-api-key") { - return await applyMoonshotApiKeyChoice(applyMoonshotConfig); + if (Object.prototype.hasOwnProperty.call(REMOVED_MINIMAX_CHOICES, authChoice as string)) { + const replacement = REMOVED_MINIMAX_CHOICES[authChoice as string]; + runtime.error( + `"${authChoice as string}" is no longer supported. 
Use --auth-choice ${replacement} instead.`, + ); + runtime.exit(1); + return null; } - if (authChoice === "moonshot-api-key-cn") { - return await applyMoonshotApiKeyChoice(applyMoonshotConfigCn); - } - - if (authChoice === "kimi-code-api-key") { + if (authChoice === "minimax-global-api" || authChoice === "minimax-cn-api") { + const isCn = authChoice === "minimax-cn-api"; + const profileId = isCn ? "minimax:cn" : "minimax:global"; const resolved = await resolveApiKey({ - provider: "kimi-coding", - cfg: baseConfig, - flagValue: opts.kimiCodeApiKey, - flagName: "--kimi-code-api-key", - envVar: "KIMI_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setKimiCodingApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "kimi-coding:default", - provider: "kimi-coding", - mode: "api_key", - }); - return applyKimiCodeConfig(nextConfig); - } - - if (authChoice === "synthetic-api-key") { - const resolved = await resolveApiKey({ - provider: "synthetic", - cfg: baseConfig, - flagValue: opts.syntheticApiKey, - flagName: "--synthetic-api-key", - envVar: "SYNTHETIC_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setSyntheticApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "synthetic:default", - provider: "synthetic", - mode: "api_key", - }); - return applySyntheticConfig(nextConfig); - } - - if (authChoice === "venice-api-key") { - const resolved = await resolveApiKey({ - provider: "venice", - cfg: baseConfig, - flagValue: opts.veniceApiKey, - flagName: "--venice-api-key", - envVar: "VENICE_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setVeniceApiKey(value, 
undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "venice:default", - provider: "venice", - mode: "api_key", - }); - return applyVeniceConfig(nextConfig); - } - - if ( - authChoice === "minimax-cloud" || - authChoice === "minimax-api" || - authChoice === "minimax-api-key-cn" || - authChoice === "minimax-api-lightning" - ) { - const isCn = authChoice === "minimax-api-key-cn"; - const providerId = isCn ? "minimax-cn" : "minimax"; - const profileId = `${providerId}:default`; - const resolved = await resolveApiKey({ - provider: providerId, + provider: "minimax", cfg: baseConfig, flagValue: opts.minimaxApiKey, flagName: "--minimax-api-key", envVar: "MINIMAX_API_KEY", runtime, + // Disable profile fallback: both regions share provider "minimax", so an existing + // Global profile key must not be silently reused when configuring CN (and vice versa). + allowProfile: false, }); if (!resolved) { return null; @@ -828,99 +399,10 @@ export async function applyNonInteractiveAuthChoice(params: { } nextConfig = applyAuthProfileConfig(nextConfig, { profileId, - provider: providerId, + provider: "minimax", mode: "api_key", }); - const modelId = - authChoice === "minimax-api-lightning" ? "MiniMax-M2.5-highspeed" : "MiniMax-M2.5"; - return isCn - ? 
applyMinimaxApiConfigCn(nextConfig, modelId) - : applyMinimaxApiConfig(nextConfig, modelId); - } - - if (authChoice === "minimax") { - return applyMinimaxConfig(nextConfig); - } - - if (authChoice === "opencode-zen") { - const resolved = await resolveApiKey({ - provider: "opencode", - cfg: baseConfig, - flagValue: opts.opencodeZenApiKey, - flagName: "--opencode-zen-api-key", - envVar: "OPENCODE_API_KEY (or OPENCODE_ZEN_API_KEY)", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setOpencodeZenApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "opencode:default", - provider: "opencode", - mode: "api_key", - }); - return applyOpencodeZenConfig(nextConfig); - } - - if (authChoice === "together-api-key") { - const resolved = await resolveApiKey({ - provider: "together", - cfg: baseConfig, - flagValue: opts.togetherApiKey, - flagName: "--together-api-key", - envVar: "TOGETHER_API_KEY", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setTogetherApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "together:default", - provider: "together", - mode: "api_key", - }); - return applyTogetherConfig(nextConfig); - } - - if (authChoice === "huggingface-api-key") { - const resolved = await resolveApiKey({ - provider: "huggingface", - cfg: baseConfig, - flagValue: opts.huggingfaceApiKey, - flagName: "--huggingface-api-key", - envVar: "HF_TOKEN", - runtime, - }); - if (!resolved) { - return null; - } - if ( - !(await maybeSetResolvedApiKey(resolved, (value) => - setHuggingfaceApiKey(value, undefined, apiKeyStorageOptions), - )) - ) { - return null; - } - nextConfig = applyAuthProfileConfig(nextConfig, { - profileId: "huggingface:default", - provider: "huggingface", - 
mode: "api_key", - }); - return applyHuggingfaceConfig(nextConfig); + return isCn ? applyMinimaxApiConfigCn(nextConfig) : applyMinimaxApiConfig(nextConfig); } if (authChoice === "custom-api-key") { @@ -1000,7 +482,8 @@ export async function applyNonInteractiveAuthChoice(params: { authChoice === "chutes" || authChoice === "openai-codex" || authChoice === "qwen-portal" || - authChoice === "minimax-portal" + authChoice === "minimax-global-oauth" || + authChoice === "minimax-cn-oauth" ) { runtime.error("OAuth requires interactive mode."); runtime.exit(1); diff --git a/src/commands/onboard-non-interactive/local/daemon-install.test.ts b/src/commands/onboard-non-interactive/local/daemon-install.test.ts index c3e87a1d48d..d45cf4cafad 100644 --- a/src/commands/onboard-non-interactive/local/daemon-install.test.ts +++ b/src/commands/onboard-non-interactive/local/daemon-install.test.ts @@ -6,6 +6,7 @@ const gatewayInstallErrorHint = vi.hoisted(() => vi.fn(() => "hint")); const resolveGatewayInstallToken = vi.hoisted(() => vi.fn()); const serviceInstall = vi.hoisted(() => vi.fn(async () => {})); const ensureSystemdUserLingerNonInteractive = vi.hoisted(() => vi.fn(async () => {})); +const isSystemdUserServiceAvailable = vi.hoisted(() => vi.fn(async () => true)); vi.mock("../../daemon-install-helpers.js", () => ({ buildGatewayInstallPlan, @@ -23,7 +24,7 @@ vi.mock("../../../daemon/service.js", () => ({ })); vi.mock("../../../daemon/systemd.js", () => ({ - isSystemdUserServiceAvailable: vi.fn(async () => true), + isSystemdUserServiceAvailable, })); vi.mock("../../daemon-runtime.js", () => ({ @@ -40,6 +41,7 @@ const { installGatewayDaemonNonInteractive } = await import("./daemon-install.js describe("installGatewayDaemonNonInteractive", () => { beforeEach(() => { vi.clearAllMocks(); + isSystemdUserServiceAvailable.mockResolvedValue(true); resolveGatewayInstallToken.mockResolvedValue({ token: undefined, tokenRefConfigured: true, @@ -100,4 +102,39 @@ 
describe("installGatewayDaemonNonInteractive", () => { expect(buildGatewayInstallPlan).not.toHaveBeenCalled(); expect(serviceInstall).not.toHaveBeenCalled(); }); + + it("returns a skipped result when Linux user systemd is unavailable", async () => { + const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() }; + const originalPlatform = process.platform; + + isSystemdUserServiceAvailable.mockResolvedValue(false); + Object.defineProperty(process, "platform", { + configurable: true, + value: "linux", + }); + + try { + const result = await installGatewayDaemonNonInteractive({ + nextConfig: {} as OpenClawConfig, + opts: { installDaemon: true }, + runtime, + port: 18789, + }); + + expect(result).toEqual({ + installed: false, + skippedReason: "systemd-user-unavailable", + }); + expect(runtime.log).toHaveBeenCalledWith( + expect.stringContaining("Systemd user services are unavailable"), + ); + expect(buildGatewayInstallPlan).not.toHaveBeenCalled(); + expect(serviceInstall).not.toHaveBeenCalled(); + } finally { + Object.defineProperty(process, "platform", { + configurable: true, + value: originalPlatform, + }); + } + }); }); diff --git a/src/commands/onboard-non-interactive/local/daemon-install.ts b/src/commands/onboard-non-interactive/local/daemon-install.ts index d3b759227d6..6236b410f75 100644 --- a/src/commands/onboard-non-interactive/local/daemon-install.ts +++ b/src/commands/onboard-non-interactive/local/daemon-install.ts @@ -13,24 +13,34 @@ export async function installGatewayDaemonNonInteractive(params: { opts: OnboardOptions; runtime: RuntimeEnv; port: number; -}) { +}): Promise< + | { + installed: true; + } + | { + installed: false; + skippedReason?: "systemd-user-unavailable"; + } +> { const { opts, runtime, port } = params; if (!opts.installDaemon) { - return; + return { installed: false }; } const daemonRuntimeRaw = opts.daemonRuntime ?? DEFAULT_GATEWAY_DAEMON_RUNTIME; const systemdAvailable = process.platform === "linux" ? 
await isSystemdUserServiceAvailable() : true; if (process.platform === "linux" && !systemdAvailable) { - runtime.log("Systemd user services are unavailable; skipping service install."); - return; + runtime.log( + "Systemd user services are unavailable; skipping service install. Use a direct shell run (`openclaw gateway run`) or rerun without --install-daemon on this session.", + ); + return { installed: false, skippedReason: "systemd-user-unavailable" }; } if (!isGatewayDaemonRuntime(daemonRuntimeRaw)) { runtime.error("Invalid --daemon-runtime (use node or bun)"); runtime.exit(1); - return; + return { installed: false }; } const service = resolveGatewayService(); @@ -50,7 +60,7 @@ export async function installGatewayDaemonNonInteractive(params: { ].join(" "), ); runtime.exit(1); - return; + return { installed: false }; } const { programArguments, workingDirectory, environment } = await buildGatewayInstallPlan({ env: process.env, @@ -70,7 +80,8 @@ export async function installGatewayDaemonNonInteractive(params: { } catch (err) { runtime.error(`Gateway service install failed: ${String(err)}`); runtime.log(gatewayInstallErrorHint()); - return; + return { installed: false }; } await ensureSystemdUserLingerNonInteractive({ runtime }); + return { installed: true }; } diff --git a/src/commands/onboard-non-interactive/local/output.ts b/src/commands/onboard-non-interactive/local/output.ts index d4296e3500c..a91df06aee6 100644 --- a/src/commands/onboard-non-interactive/local/output.ts +++ b/src/commands/onboard-non-interactive/local/output.ts @@ -1,6 +1,21 @@ import type { RuntimeEnv } from "../../../runtime.js"; import type { OnboardOptions } from "../../onboard-types.js"; +export type GatewayHealthFailureDiagnostics = { + service?: { + label: string; + loaded: boolean; + loadedText: string; + runtimeStatus?: string; + state?: string; + pid?: number; + lastExitStatus?: number; + lastExitReason?: string; + }; + lastGatewayError?: string; + inspectError?: string; +}; + export 
function logNonInteractiveOnboardingJson(params: { opts: OnboardOptions; runtime: RuntimeEnv; @@ -14,6 +29,11 @@ export function logNonInteractiveOnboardingJson(params: { tailscaleMode: string; }; installDaemon?: boolean; + daemonInstall?: { + requested: boolean; + installed: boolean; + skippedReason?: string; + }; daemonRuntime?: string; skipSkills?: boolean; skipHealth?: boolean; @@ -24,11 +44,13 @@ export function logNonInteractiveOnboardingJson(params: { params.runtime.log( JSON.stringify( { + ok: true, mode: params.mode, workspace: params.workspaceDir, authChoice: params.authChoice, gateway: params.gateway, installDaemon: Boolean(params.installDaemon), + daemonInstall: params.daemonInstall, daemonRuntime: params.daemonRuntime, skipSkills: Boolean(params.skipSkills), skipHealth: Boolean(params.skipHealth), @@ -38,3 +60,94 @@ export function logNonInteractiveOnboardingJson(params: { ), ); } + +function formatGatewayRuntimeSummary( + diagnostics: GatewayHealthFailureDiagnostics | undefined, +): string | undefined { + const service = diagnostics?.service; + if (!service?.runtimeStatus) { + return undefined; + } + const parts = [service.runtimeStatus]; + if (typeof service.pid === "number") { + parts.push(`pid ${service.pid}`); + } + if (service.state) { + parts.push(`state ${service.state}`); + } + if (typeof service.lastExitStatus === "number") { + parts.push(`last exit ${service.lastExitStatus}`); + } + if (service.lastExitReason) { + parts.push(`reason ${service.lastExitReason}`); + } + return parts.join(", "); +} + +export function logNonInteractiveOnboardingFailure(params: { + opts: OnboardOptions; + runtime: RuntimeEnv; + mode: "local" | "remote"; + phase: string; + message: string; + detail?: string; + hints?: string[]; + gateway?: { + wsUrl?: string; + httpUrl?: string; + }; + installDaemon?: boolean; + daemonInstall?: { + requested: boolean; + installed: boolean; + skippedReason?: string; + }; + daemonRuntime?: string; + diagnostics?: 
GatewayHealthFailureDiagnostics; +}) { + const hints = params.hints?.filter(Boolean) ?? []; + const gatewayRuntime = formatGatewayRuntimeSummary(params.diagnostics); + + if (params.opts.json) { + params.runtime.error( + JSON.stringify( + { + ok: false, + mode: params.mode, + phase: params.phase, + message: params.message, + detail: params.detail, + gateway: params.gateway, + installDaemon: Boolean(params.installDaemon), + daemonInstall: params.daemonInstall, + daemonRuntime: params.daemonRuntime, + diagnostics: params.diagnostics, + hints: hints.length > 0 ? hints : undefined, + }, + null, + 2, + ), + ); + return; + } + + const lines = [ + params.message, + params.detail ? `Last probe: ${params.detail}` : undefined, + params.diagnostics?.service + ? `Service: ${params.diagnostics.service.label} (${params.diagnostics.service.loaded ? params.diagnostics.service.loadedText : "not loaded"})` + : undefined, + gatewayRuntime ? `Runtime: ${gatewayRuntime}` : undefined, + params.diagnostics?.lastGatewayError + ? `Last gateway error: ${params.diagnostics.lastGatewayError}` + : undefined, + params.diagnostics?.inspectError + ? `Diagnostics warning: ${params.diagnostics.inspectError}` + : undefined, + hints.length > 0 ? 
hints.join("\n") : undefined, + ] + .filter(Boolean) + .join("\n"); + + params.runtime.error(lines); +} diff --git a/src/commands/onboard-provider-auth-flags.ts b/src/commands/onboard-provider-auth-flags.ts index a1038625a78..53df8cdc4c8 100644 --- a/src/commands/onboard-provider-auth-flags.ts +++ b/src/commands/onboard-provider-auth-flags.ts @@ -20,9 +20,12 @@ type OnboardProviderAuthOptionKey = keyof Pick< | "togetherApiKey" | "huggingfaceApiKey" | "opencodeZenApiKey" + | "opencodeGoApiKey" | "xaiApiKey" | "litellmApiKey" | "qianfanApiKey" + | "modelstudioApiKeyCn" + | "modelstudioApiKey" | "volcengineApiKey" | "byteplusApiKey" >; @@ -123,7 +126,7 @@ export const ONBOARD_PROVIDER_AUTH_FLAGS: ReadonlyArray }, { optionKey: "minimaxApiKey", - authChoice: "minimax-api", + authChoice: "minimax-global-api", cliFlag: "--minimax-api-key", cliOption: "--minimax-api-key ", description: "MiniMax API key", @@ -161,7 +164,14 @@ export const ONBOARD_PROVIDER_AUTH_FLAGS: ReadonlyArray authChoice: "opencode-zen", cliFlag: "--opencode-zen-api-key", cliOption: "--opencode-zen-api-key ", - description: "OpenCode Zen API key", + description: "OpenCode API key (Zen catalog)", + }, + { + optionKey: "opencodeGoApiKey", + authChoice: "opencode-go", + cliFlag: "--opencode-go-api-key", + cliOption: "--opencode-go-api-key ", + description: "OpenCode API key (Go catalog)", }, { optionKey: "xaiApiKey", @@ -184,6 +194,20 @@ export const ONBOARD_PROVIDER_AUTH_FLAGS: ReadonlyArray cliOption: "--qianfan-api-key ", description: "QIANFAN API key", }, + { + optionKey: "modelstudioApiKeyCn", + authChoice: "modelstudio-api-key-cn", + cliFlag: "--modelstudio-api-key-cn", + cliOption: "--modelstudio-api-key-cn ", + description: "Alibaba Cloud Model Studio Coding Plan API key (China)", + }, + { + optionKey: "modelstudioApiKey", + authChoice: "modelstudio-api-key", + cliFlag: "--modelstudio-api-key", + cliOption: "--modelstudio-api-key ", + description: "Alibaba Cloud Model Studio Coding Plan API key 
(Global/Intl)", + }, { optionKey: "volcengineApiKey", authChoice: "volcengine-api-key", diff --git a/src/commands/onboard-types.ts b/src/commands/onboard-types.ts index 7e938430517..f7a89a8b971 100644 --- a/src/commands/onboard-types.ts +++ b/src/commands/onboard-types.ts @@ -2,14 +2,13 @@ import type { ChannelId } from "../channels/plugins/types.js"; import type { GatewayDaemonRuntime } from "./daemon-runtime.js"; export type OnboardMode = "local" | "remote"; -export type AuthChoice = +export type BuiltInAuthChoice = // Legacy alias for `setup-token` (kept for backwards CLI compatibility). | "oauth" | "setup-token" | "claude-cli" | "token" | "chutes" - | "vllm" | "openai-codex" | "openai-api-key" | "openrouter-api-key" @@ -34,13 +33,12 @@ export type AuthChoice = | "zai-global" | "zai-cn" | "xiaomi-api-key" - | "minimax-cloud" - | "minimax" - | "minimax-api" - | "minimax-api-key-cn" - | "minimax-api-lightning" - | "minimax-portal" + | "minimax-global-oauth" + | "minimax-global-api" + | "minimax-cn-oauth" + | "minimax-cn-api" | "opencode-zen" + | "opencode-go" | "github-copilot" | "copilot-proxy" | "qwen-portal" @@ -49,13 +47,16 @@ export type AuthChoice = | "volcengine-api-key" | "byteplus-api-key" | "qianfan-api-key" + | "modelstudio-api-key-cn" + | "modelstudio-api-key" | "custom-api-key" | "skip"; -export type AuthChoiceGroupId = +export type AuthChoice = BuiltInAuthChoice | (string & {}); + +export type BuiltInAuthChoiceGroupId = | "openai" | "anthropic" | "chutes" - | "vllm" | "google" | "copilot" | "openrouter" @@ -66,7 +67,7 @@ export type AuthChoiceGroupId = | "moonshot" | "zai" | "xiaomi" - | "opencode-zen" + | "opencode" | "minimax" | "synthetic" | "venice" @@ -75,10 +76,12 @@ export type AuthChoiceGroupId = | "together" | "huggingface" | "qianfan" + | "modelstudio" | "xai" | "volcengine" | "byteplus" | "custom"; +export type AuthChoiceGroupId = BuiltInAuthChoiceGroupId | (string & {}); export type GatewayAuthChoice = "token" | "password"; export type 
ResetScope = "config" | "config+creds+sessions" | "full"; export type GatewayBind = "loopback" | "lan" | "auto" | "custom" | "tailnet"; @@ -131,10 +134,13 @@ export type OnboardOptions = { togetherApiKey?: string; huggingfaceApiKey?: string; opencodeZenApiKey?: string; + opencodeGoApiKey?: string; xaiApiKey?: string; volcengineApiKey?: string; byteplusApiKey?: string; qianfanApiKey?: string; + modelstudioApiKeyCn?: string; + modelstudioApiKey?: string; customBaseUrl?: string; customApiKey?: string; customModelId?: string; diff --git a/src/commands/onboard.test.ts b/src/commands/onboard.test.ts index 1233222bf54..5d1dc20634d 100644 --- a/src/commands/onboard.test.ts +++ b/src/commands/onboard.test.ts @@ -60,6 +60,26 @@ describe("onboardCommand", () => { expect(mocks.runNonInteractiveOnboarding).not.toHaveBeenCalled(); }); + it("logs ASCII-safe Windows guidance before onboarding", async () => { + const runtime = makeRuntime(); + const platformSpy = vi.spyOn(process, "platform", "get").mockReturnValue("win32"); + + try { + await onboardCommand({}, runtime); + + expect(runtime.log).toHaveBeenCalledWith( + [ + "Windows detected - OpenClaw runs great on WSL2!", + "Native Windows might be trickier.", + "Quick setup: wsl --install (one command, one reboot)", + "Guide: https://docs.openclaw.ai/windows", + ].join("\n"), + ); + } finally { + platformSpy.mockRestore(); + } + }); + it("defaults --reset to config+creds+sessions scope", async () => { const runtime = makeRuntime(); diff --git a/src/commands/onboard.ts b/src/commands/onboard.ts index 9c55bddf1d6..6762998f815 100644 --- a/src/commands/onboard.ts +++ b/src/commands/onboard.ts @@ -77,7 +77,7 @@ export async function onboardCommand(opts: OnboardOptions, runtime: RuntimeEnv = if (process.platform === "win32") { runtime.log( [ - "Windows detected — OpenClaw runs great on WSL2!", + "Windows detected - OpenClaw runs great on WSL2!", "Native Windows might be trickier.", "Quick setup: wsl --install (one command, one 
reboot)", "Guide: https://docs.openclaw.ai/windows", diff --git a/src/commands/openai-codex-oauth.test.ts b/src/commands/openai-codex-oauth.test.ts index abe71d0bd42..43f1ac41f8a 100644 --- a/src/commands/openai-codex-oauth.test.ts +++ b/src/commands/openai-codex-oauth.test.ts @@ -9,7 +9,7 @@ const mocks = vi.hoisted(() => ({ formatOpenAIOAuthTlsPreflightFix: vi.fn(), })); -vi.mock("@mariozechner/pi-ai", () => ({ +vi.mock("@mariozechner/pi-ai/oauth", () => ({ loginOpenAICodex: mocks.loginOpenAICodex, })); diff --git a/src/commands/openai-codex-oauth.ts b/src/commands/openai-codex-oauth.ts index 683354bf7a8..a868217750b 100644 --- a/src/commands/openai-codex-oauth.ts +++ b/src/commands/openai-codex-oauth.ts @@ -1,5 +1,4 @@ -import type { OAuthCredentials } from "@mariozechner/pi-ai/oauth"; -import { loginOpenAICodex } from "@mariozechner/pi-ai/oauth"; +import { loginOpenAICodex, type OAuthCredentials } from "@mariozechner/pi-ai/oauth"; import type { RuntimeEnv } from "../runtime.js"; import type { WizardPrompter } from "../wizard/prompts.js"; import { createVpsAwareOAuthHandlers } from "./oauth-flow.js"; diff --git a/src/commands/opencode-go-model-default.ts b/src/commands/opencode-go-model-default.ts new file mode 100644 index 00000000000..c959f23ff2e --- /dev/null +++ b/src/commands/opencode-go-model-default.ts @@ -0,0 +1,11 @@ +import type { OpenClawConfig } from "../config/config.js"; +import { applyAgentDefaultPrimaryModel } from "./model-default.js"; + +export const OPENCODE_GO_DEFAULT_MODEL_REF = "opencode-go/kimi-k2.5"; + +export function applyOpencodeGoModelDefault(cfg: OpenClawConfig): { + next: OpenClawConfig; + changed: boolean; +} { + return applyAgentDefaultPrimaryModel({ cfg, model: OPENCODE_GO_DEFAULT_MODEL_REF }); +} diff --git a/src/commands/self-hosted-provider-setup.ts b/src/commands/self-hosted-provider-setup.ts new file mode 100644 index 00000000000..c067d797f15 --- /dev/null +++ b/src/commands/self-hosted-provider-setup.ts @@ -0,0 +1,302 @@ 
+import { upsertAuthProfileWithLock } from "../agents/auth-profiles.js"; +import type { ApiKeyCredential, AuthProfileCredential } from "../agents/auth-profiles/types.js"; +import type { OpenClawConfig } from "../config/config.js"; +import type { + ProviderDiscoveryContext, + ProviderAuthResult, + ProviderAuthMethodNonInteractiveContext, + ProviderNonInteractiveApiKeyResult, +} from "../plugins/types.js"; +import type { WizardPrompter } from "../wizard/prompts.js"; +import { applyAuthProfileConfig } from "./onboard-auth.js"; + +export const SELF_HOSTED_DEFAULT_CONTEXT_WINDOW = 128000; +export const SELF_HOSTED_DEFAULT_MAX_TOKENS = 8192; +export const SELF_HOSTED_DEFAULT_COST = { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, +}; + +export function applyProviderDefaultModel(cfg: OpenClawConfig, modelRef: string): OpenClawConfig { + const existingModel = cfg.agents?.defaults?.model; + const fallbacks = + existingModel && typeof existingModel === "object" && "fallbacks" in existingModel + ? (existingModel as { fallbacks?: string[] }).fallbacks + : undefined; + + return { + ...cfg, + agents: { + ...cfg.agents, + defaults: { + ...cfg.agents?.defaults, + model: { + ...(fallbacks ? { fallbacks } : undefined), + primary: modelRef, + }, + }, + }, + }; +} + +function buildOpenAICompatibleSelfHostedProviderConfig(params: { + cfg: OpenClawConfig; + providerId: string; + baseUrl: string; + providerApiKey: string; + modelId: string; + input?: Array<"text" | "image">; + reasoning?: boolean; + contextWindow?: number; + maxTokens?: number; +}): { config: OpenClawConfig; modelId: string; modelRef: string; profileId: string } { + const modelRef = `${params.providerId}/${params.modelId}`; + const profileId = `${params.providerId}:default`; + return { + config: { + ...params.cfg, + models: { + ...params.cfg.models, + mode: params.cfg.models?.mode ?? 
"merge", + providers: { + ...params.cfg.models?.providers, + [params.providerId]: { + baseUrl: params.baseUrl, + api: "openai-completions", + apiKey: params.providerApiKey, + models: [ + { + id: params.modelId, + name: params.modelId, + reasoning: params.reasoning ?? false, + input: params.input ?? ["text"], + cost: SELF_HOSTED_DEFAULT_COST, + contextWindow: params.contextWindow ?? SELF_HOSTED_DEFAULT_CONTEXT_WINDOW, + maxTokens: params.maxTokens ?? SELF_HOSTED_DEFAULT_MAX_TOKENS, + }, + ], + }, + }, + }, + }, + modelId: params.modelId, + modelRef, + profileId, + }; +} + +type OpenAICompatibleSelfHostedProviderSetupParams = { + cfg: OpenClawConfig; + prompter: WizardPrompter; + providerId: string; + providerLabel: string; + defaultBaseUrl: string; + defaultApiKeyEnvVar: string; + modelPlaceholder: string; + input?: Array<"text" | "image">; + reasoning?: boolean; + contextWindow?: number; + maxTokens?: number; +}; + +type OpenAICompatibleSelfHostedProviderPromptResult = { + config: OpenClawConfig; + credential: AuthProfileCredential; + modelId: string; + modelRef: string; + profileId: string; +}; + +function buildSelfHostedProviderAuthResult( + result: OpenAICompatibleSelfHostedProviderPromptResult, +): ProviderAuthResult { + return { + profiles: [ + { + profileId: result.profileId, + credential: result.credential, + }, + ], + configPatch: result.config, + defaultModel: result.modelRef, + }; +} + +export async function promptAndConfigureOpenAICompatibleSelfHostedProvider( + params: OpenAICompatibleSelfHostedProviderSetupParams, +): Promise { + const baseUrlRaw = await params.prompter.text({ + message: `${params.providerLabel} base URL`, + initialValue: params.defaultBaseUrl, + placeholder: params.defaultBaseUrl, + validate: (value) => (value?.trim() ? undefined : "Required"), + }); + const apiKeyRaw = await params.prompter.text({ + message: `${params.providerLabel} API key`, + placeholder: "sk-... (or any non-empty string)", + validate: (value) => (value?.trim() ? 
undefined : "Required"), + }); + const modelIdRaw = await params.prompter.text({ + message: `${params.providerLabel} model`, + placeholder: params.modelPlaceholder, + validate: (value) => (value?.trim() ? undefined : "Required"), + }); + + const baseUrl = String(baseUrlRaw ?? "") + .trim() + .replace(/\/+$/, ""); + const apiKey = String(apiKeyRaw ?? "").trim(); + const modelId = String(modelIdRaw ?? "").trim(); + const credential: AuthProfileCredential = { + type: "api_key", + provider: params.providerId, + key: apiKey, + }; + const configured = buildOpenAICompatibleSelfHostedProviderConfig({ + cfg: params.cfg, + providerId: params.providerId, + baseUrl, + providerApiKey: params.defaultApiKeyEnvVar, + modelId, + input: params.input, + reasoning: params.reasoning, + contextWindow: params.contextWindow, + maxTokens: params.maxTokens, + }); + + return { + config: configured.config, + credential, + modelId: configured.modelId, + modelRef: configured.modelRef, + profileId: configured.profileId, + }; +} + +export async function promptAndConfigureOpenAICompatibleSelfHostedProviderAuth( + params: OpenAICompatibleSelfHostedProviderSetupParams, +): Promise { + const result = await promptAndConfigureOpenAICompatibleSelfHostedProvider(params); + return buildSelfHostedProviderAuthResult(result); +} + +export async function discoverOpenAICompatibleSelfHostedProvider< + T extends Record, +>(params: { + ctx: ProviderDiscoveryContext; + providerId: string; + buildProvider: (params: { apiKey?: string }) => Promise; +}): Promise<{ provider: T & { apiKey: string } } | null> { + if (params.ctx.config.models?.providers?.[params.providerId]) { + return null; + } + const { apiKey, discoveryApiKey } = params.ctx.resolveProviderApiKey(params.providerId); + if (!apiKey) { + return null; + } + return { + provider: { + ...(await params.buildProvider({ apiKey: discoveryApiKey })), + apiKey, + }, + }; +} + +function buildMissingNonInteractiveModelIdMessage(params: { + authChoice: string; + 
providerLabel: string; + modelPlaceholder: string; +}): string { + return [ + `Missing --custom-model-id for --auth-choice ${params.authChoice}.`, + `Pass the ${params.providerLabel} model id to use, for example ${params.modelPlaceholder}.`, + ].join("\n"); +} + +function buildSelfHostedProviderCredential(params: { + ctx: ProviderAuthMethodNonInteractiveContext; + providerId: string; + resolved: ProviderNonInteractiveApiKeyResult; +}): ApiKeyCredential | null { + return params.ctx.toApiKeyCredential({ + provider: params.providerId, + resolved: params.resolved, + }); +} + +export async function configureOpenAICompatibleSelfHostedProviderNonInteractive(params: { + ctx: ProviderAuthMethodNonInteractiveContext; + providerId: string; + providerLabel: string; + defaultBaseUrl: string; + defaultApiKeyEnvVar: string; + modelPlaceholder: string; + input?: Array<"text" | "image">; + reasoning?: boolean; + contextWindow?: number; + maxTokens?: number; +}): Promise { + const baseUrl = (params.ctx.opts.customBaseUrl?.trim() || params.defaultBaseUrl).replace( + /\/+$/, + "", + ); + const modelId = params.ctx.opts.customModelId?.trim(); + if (!modelId) { + params.ctx.runtime.error( + buildMissingNonInteractiveModelIdMessage({ + authChoice: params.ctx.authChoice, + providerLabel: params.providerLabel, + modelPlaceholder: params.modelPlaceholder, + }), + ); + params.ctx.runtime.exit(1); + return null; + } + + const resolved = await params.ctx.resolveApiKey({ + provider: params.providerId, + flagValue: params.ctx.opts.customApiKey, + flagName: "--custom-api-key", + envVar: params.defaultApiKeyEnvVar, + envVarName: params.defaultApiKeyEnvVar, + }); + if (!resolved) { + return null; + } + + const credential = buildSelfHostedProviderCredential({ + ctx: params.ctx, + providerId: params.providerId, + resolved, + }); + if (!credential) { + return null; + } + + const configured = buildOpenAICompatibleSelfHostedProviderConfig({ + cfg: params.ctx.config, + providerId: params.providerId, + 
baseUrl, + providerApiKey: params.defaultApiKeyEnvVar, + modelId, + input: params.input, + reasoning: params.reasoning, + contextWindow: params.contextWindow, + maxTokens: params.maxTokens, + }); + await upsertAuthProfileWithLock({ + profileId: configured.profileId, + credential, + agentDir: params.ctx.agentDir, + }); + + const withProfile = applyAuthProfileConfig(configured.config, { + profileId: configured.profileId, + provider: params.providerId, + mode: "api_key", + }); + params.ctx.runtime.log(`Default ${params.providerLabel} model: ${modelId}`); + return applyProviderDefaultModel(withProfile, configured.modelRef); +} diff --git a/src/commands/session-store-targets.test.ts b/src/commands/session-store-targets.test.ts index 62ccab8d3cd..3f3a87b09db 100644 --- a/src/commands/session-store-targets.test.ts +++ b/src/commands/session-store-targets.test.ts @@ -1,17 +1,10 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; import { resolveSessionStoreTargets } from "./session-store-targets.js"; -const resolveStorePathMock = vi.hoisted(() => vi.fn()); -const resolveDefaultAgentIdMock = vi.hoisted(() => vi.fn()); -const listAgentIdsMock = vi.hoisted(() => vi.fn()); +const resolveSessionStoreTargetsMock = vi.hoisted(() => vi.fn()); vi.mock("../config/sessions.js", () => ({ - resolveStorePath: resolveStorePathMock, -})); - -vi.mock("../agents/agent-scope.js", () => ({ - resolveDefaultAgentId: resolveDefaultAgentIdMock, - listAgentIds: listAgentIdsMock, + resolveSessionStoreTargets: resolveSessionStoreTargetsMock, })); describe("resolveSessionStoreTargets", () => { @@ -19,61 +12,14 @@ describe("resolveSessionStoreTargets", () => { vi.clearAllMocks(); }); - it("resolves the default agent store when no selector is provided", () => { - resolveDefaultAgentIdMock.mockReturnValue("main"); - resolveStorePathMock.mockReturnValue("/tmp/main-sessions.json"); + it("delegates session store target resolution to the shared config helper", () => { + 
resolveSessionStoreTargetsMock.mockReturnValue([ + { agentId: "main", storePath: "/tmp/main-sessions.json" }, + ]); const targets = resolveSessionStoreTargets({}, {}); expect(targets).toEqual([{ agentId: "main", storePath: "/tmp/main-sessions.json" }]); - expect(resolveStorePathMock).toHaveBeenCalledWith(undefined, { agentId: "main" }); - }); - - it("resolves all configured agent stores", () => { - listAgentIdsMock.mockReturnValue(["main", "work"]); - resolveStorePathMock - .mockReturnValueOnce("/tmp/main-sessions.json") - .mockReturnValueOnce("/tmp/work-sessions.json"); - - const targets = resolveSessionStoreTargets( - { - session: { store: "~/.openclaw/agents/{agentId}/sessions/sessions.json" }, - }, - { allAgents: true }, - ); - - expect(targets).toEqual([ - { agentId: "main", storePath: "/tmp/main-sessions.json" }, - { agentId: "work", storePath: "/tmp/work-sessions.json" }, - ]); - }); - - it("dedupes shared store paths for --all-agents", () => { - listAgentIdsMock.mockReturnValue(["main", "work"]); - resolveStorePathMock.mockReturnValue("/tmp/shared-sessions.json"); - - const targets = resolveSessionStoreTargets( - { - session: { store: "/tmp/shared-sessions.json" }, - }, - { allAgents: true }, - ); - - expect(targets).toEqual([{ agentId: "main", storePath: "/tmp/shared-sessions.json" }]); - expect(resolveStorePathMock).toHaveBeenCalledTimes(2); - }); - - it("rejects unknown agent ids", () => { - listAgentIdsMock.mockReturnValue(["main", "work"]); - expect(() => resolveSessionStoreTargets({}, { agent: "ghost" })).toThrow(/Unknown agent id/); - }); - - it("rejects conflicting selectors", () => { - expect(() => resolveSessionStoreTargets({}, { agent: "main", allAgents: true })).toThrow( - /cannot be used together/i, - ); - expect(() => - resolveSessionStoreTargets({}, { store: "/tmp/sessions.json", allAgents: true }), - ).toThrow(/cannot be combined/i); + expect(resolveSessionStoreTargetsMock).toHaveBeenCalledWith({}, {}); }); }); diff --git 
a/src/commands/session-store-targets.ts b/src/commands/session-store-targets.ts index c9e91006e53..c01197c6f88 100644 --- a/src/commands/session-store-targets.ts +++ b/src/commands/session-store-targets.ts @@ -1,84 +1,11 @@ -import { listAgentIds, resolveDefaultAgentId } from "../agents/agent-scope.js"; -import { resolveStorePath } from "../config/sessions.js"; +import { + resolveSessionStoreTargets, + type SessionStoreSelectionOptions, + type SessionStoreTarget, +} from "../config/sessions.js"; import type { OpenClawConfig } from "../config/types.openclaw.js"; -import { normalizeAgentId } from "../routing/session-key.js"; import type { RuntimeEnv } from "../runtime.js"; - -export type SessionStoreSelectionOptions = { - store?: string; - agent?: string; - allAgents?: boolean; -}; - -export type SessionStoreTarget = { - agentId: string; - storePath: string; -}; - -function dedupeTargetsByStorePath(targets: SessionStoreTarget[]): SessionStoreTarget[] { - const deduped = new Map(); - for (const target of targets) { - if (!deduped.has(target.storePath)) { - deduped.set(target.storePath, target); - } - } - return [...deduped.values()]; -} - -export function resolveSessionStoreTargets( - cfg: OpenClawConfig, - opts: SessionStoreSelectionOptions, -): SessionStoreTarget[] { - const defaultAgentId = resolveDefaultAgentId(cfg); - const hasAgent = Boolean(opts.agent?.trim()); - const allAgents = opts.allAgents === true; - if (hasAgent && allAgents) { - throw new Error("--agent and --all-agents cannot be used together"); - } - if (opts.store && (hasAgent || allAgents)) { - throw new Error("--store cannot be combined with --agent or --all-agents"); - } - - if (opts.store) { - return [ - { - agentId: defaultAgentId, - storePath: resolveStorePath(opts.store, { agentId: defaultAgentId }), - }, - ]; - } - - if (allAgents) { - const targets = listAgentIds(cfg).map((agentId) => ({ - agentId, - storePath: resolveStorePath(cfg.session?.store, { agentId }), - })); - return 
dedupeTargetsByStorePath(targets); - } - - if (hasAgent) { - const knownAgents = listAgentIds(cfg); - const requested = normalizeAgentId(opts.agent ?? ""); - if (!knownAgents.includes(requested)) { - throw new Error( - `Unknown agent id "${opts.agent}". Use "openclaw agents list" to see configured agents.`, - ); - } - return [ - { - agentId: requested, - storePath: resolveStorePath(cfg.session?.store, { agentId: requested }), - }, - ]; - } - - return [ - { - agentId: defaultAgentId, - storePath: resolveStorePath(cfg.session?.store, { agentId: defaultAgentId }), - }, - ]; -} +export { resolveSessionStoreTargets, type SessionStoreSelectionOptions, type SessionStoreTarget }; export function resolveSessionStoreTargetsOrExit(params: { cfg: OpenClawConfig; diff --git a/src/commands/status-all/channels.mattermost-token-summary.test.ts b/src/commands/status-all/channels.mattermost-token-summary.test.ts index a797d028d9f..a012a3a3647 100644 --- a/src/commands/status-all/channels.mattermost-token-summary.test.ts +++ b/src/commands/status-all/channels.mattermost-token-summary.test.ts @@ -37,139 +37,94 @@ function makeMattermostPlugin(): ChannelPlugin { }; } -function makeSlackPlugin(params?: { botToken?: string; appToken?: string }): ChannelPlugin { - return { - id: "slack", - meta: { - id: "slack", - label: "Slack", - selectionLabel: "Slack", - docsPath: "/channels/slack", - blurb: "test", - }, - capabilities: { chatTypes: ["direct"] }, - config: { - listAccountIds: () => ["primary"], - defaultAccountId: () => "primary", - inspectAccount: () => ({ - name: "Primary", - enabled: true, - botToken: params?.botToken ?? "bot-token", - appToken: params?.appToken ?? "app-token", - }), - resolveAccount: () => ({ - name: "Primary", - enabled: true, - botToken: params?.botToken ?? "bot-token", - appToken: params?.appToken ?? 
"app-token", - }), - isConfigured: () => true, - isEnabled: () => true, - }, - actions: { - listActions: () => ["send"], - }, - }; -} +type TestTable = Awaited>; -function makeUnavailableSlackPlugin(): ChannelPlugin { - return { - id: "slack", - meta: { - id: "slack", - label: "Slack", - selectionLabel: "Slack", - docsPath: "/channels/slack", - blurb: "test", - }, - capabilities: { chatTypes: ["direct"] }, - config: { - listAccountIds: () => ["primary"], - defaultAccountId: () => "primary", - inspectAccount: () => ({ - name: "Primary", - enabled: true, - configured: true, - botToken: "", - appToken: "", - botTokenSource: "config", - appTokenSource: "config", - botTokenStatus: "configured_unavailable", - appTokenStatus: "configured_unavailable", - }), - resolveAccount: () => ({ - name: "Primary", - enabled: true, - configured: true, - botToken: "", - appToken: "", - botTokenSource: "config", - appTokenSource: "config", - botTokenStatus: "configured_unavailable", - appTokenStatus: "configured_unavailable", - }), - isConfigured: () => true, - isEnabled: () => true, - }, - actions: { - listActions: () => ["send"], - }, - }; -} - -function makeSourceAwareUnavailablePlugin(): ChannelPlugin { +function makeSlackDirectPlugin(config: ChannelPlugin["config"]): ChannelPlugin { return makeDirectPlugin({ id: "slack", label: "Slack", docsPath: "/channels/slack", - config: { - listAccountIds: () => ["primary"], - defaultAccountId: () => "primary", - inspectAccount: (cfg) => - (cfg as { marker?: string }).marker === "source" - ? 
{ - name: "Primary", - enabled: true, - configured: true, - botToken: "", - appToken: "", - botTokenSource: "config", - appTokenSource: "config", - botTokenStatus: "configured_unavailable", - appTokenStatus: "configured_unavailable", - } - : { - name: "Primary", - enabled: true, - configured: false, - botToken: "", - appToken: "", - botTokenSource: "none", - appTokenSource: "none", - }, - resolveAccount: () => ({ - name: "Primary", - enabled: true, - botToken: "", - appToken: "", - }), - isConfigured: (account) => Boolean((account as { configured?: boolean }).configured), - isEnabled: () => true, - }, + config, + }); +} + +function createSlackTokenAccount(params?: { botToken?: string; appToken?: string }) { + return { + name: "Primary", + enabled: true, + botToken: params?.botToken ?? "bot-token", + appToken: params?.appToken ?? "app-token", + }; +} + +function createUnavailableSlackTokenAccount() { + return { + name: "Primary", + enabled: true, + configured: true, + botToken: "", + appToken: "", + botTokenSource: "config", + appTokenSource: "config", + botTokenStatus: "configured_unavailable", + appTokenStatus: "configured_unavailable", + }; +} + +function makeSlackPlugin(params?: { botToken?: string; appToken?: string }): ChannelPlugin { + return makeSlackDirectPlugin({ + listAccountIds: () => ["primary"], + defaultAccountId: () => "primary", + inspectAccount: () => createSlackTokenAccount(params), + resolveAccount: () => createSlackTokenAccount(params), + isConfigured: () => true, + isEnabled: () => true, + }); +} + +function makeUnavailableSlackPlugin(): ChannelPlugin { + return makeSlackDirectPlugin({ + listAccountIds: () => ["primary"], + defaultAccountId: () => "primary", + inspectAccount: () => createUnavailableSlackTokenAccount(), + resolveAccount: () => createUnavailableSlackTokenAccount(), + isConfigured: () => true, + isEnabled: () => true, + }); +} + +function makeSourceAwareUnavailablePlugin(): ChannelPlugin { + return makeSlackDirectPlugin({ + 
listAccountIds: () => ["primary"], + defaultAccountId: () => "primary", + inspectAccount: (cfg) => + (cfg as { marker?: string }).marker === "source" + ? createUnavailableSlackTokenAccount() + : { + name: "Primary", + enabled: true, + configured: false, + botToken: "", + appToken: "", + botTokenSource: "none", + appTokenSource: "none", + }, + resolveAccount: () => ({ + name: "Primary", + enabled: true, + botToken: "", + appToken: "", + }), + isConfigured: (account) => Boolean((account as { configured?: boolean }).configured), + isEnabled: () => true, }); } function makeSourceUnavailableResolvedAvailablePlugin(): ChannelPlugin { - return { + return makeDirectPlugin({ id: "discord", - meta: { - id: "discord", - label: "Discord", - selectionLabel: "Discord", - docsPath: "/channels/discord", - blurb: "test", - }, - capabilities: { chatTypes: ["direct"] }, + label: "Discord", + docsPath: "/channels/discord", config: { listAccountIds: () => ["primary"], defaultAccountId: () => "primary", @@ -199,10 +154,7 @@ function makeSourceUnavailableResolvedAvailablePlugin(): ChannelPlugin { isConfigured: (account) => Boolean((account as { configured?: boolean }).configured), isEnabled: () => true, }, - actions: { - listActions: () => ["send"], - }, - }; + }); } function makeHttpSlackUnavailablePlugin(): ChannelPlugin { @@ -263,64 +215,76 @@ function makeTokenPlugin(): ChannelPlugin { }); } +async function buildTestTable( + plugins: ChannelPlugin[], + params?: { cfg?: Record; sourceConfig?: Record }, +) { + vi.mocked(listChannelPlugins).mockReturnValue(plugins); + return await buildChannelsTable((params?.cfg ?? 
{ channels: {} }) as never, { + showSecrets: false, + sourceConfig: params?.sourceConfig as never, + }); +} + +function expectTableRow( + table: TestTable, + params: { id: string; state: string; detailContains?: string; detailEquals?: string }, +) { + const row = table.rows.find((entry) => entry.id === params.id); + expect(row).toBeDefined(); + expect(row?.state).toBe(params.state); + if (params.detailContains) { + expect(row?.detail).toContain(params.detailContains); + } + if (params.detailEquals) { + expect(row?.detail).toBe(params.detailEquals); + } + return row; +} + +function expectTableDetailRows( + table: TestTable, + title: string, + rows: Array>, +) { + const detail = table.details.find((entry) => entry.title === title); + expect(detail).toBeDefined(); + expect(detail?.rows).toEqual(rows); +} + describe("buildChannelsTable - mattermost token summary", () => { it("does not require appToken for mattermost accounts", async () => { - vi.mocked(listChannelPlugins).mockReturnValue([makeMattermostPlugin()]); - - const table = await buildChannelsTable({ channels: {} } as never, { - showSecrets: false, - }); - - const mattermostRow = table.rows.find((row) => row.id === "mattermost"); - expect(mattermostRow).toBeDefined(); - expect(mattermostRow?.state).toBe("ok"); + const table = await buildTestTable([makeMattermostPlugin()]); + const mattermostRow = expectTableRow(table, { id: "mattermost", state: "ok" }); expect(mattermostRow?.detail).not.toContain("need bot+app"); }); it("keeps bot+app requirement when both fields exist", async () => { - vi.mocked(listChannelPlugins).mockReturnValue([ - makeSlackPlugin({ botToken: "bot-token", appToken: "" }), - ]); - - const table = await buildChannelsTable({ channels: {} } as never, { - showSecrets: false, - }); - - const slackRow = table.rows.find((row) => row.id === "slack"); - expect(slackRow).toBeDefined(); - expect(slackRow?.state).toBe("warn"); - expect(slackRow?.detail).toContain("need bot+app"); + const table = await 
buildTestTable([makeSlackPlugin({ botToken: "bot-token", appToken: "" })]); + expectTableRow(table, { id: "slack", state: "warn", detailContains: "need bot+app" }); }); it("reports configured-but-unavailable Slack credentials as warn", async () => { - vi.mocked(listChannelPlugins).mockReturnValue([makeUnavailableSlackPlugin()]); - - const table = await buildChannelsTable({ channels: {} } as never, { - showSecrets: false, + const table = await buildTestTable([makeUnavailableSlackPlugin()]); + expectTableRow(table, { + id: "slack", + state: "warn", + detailContains: "unavailable in this command path", }); - - const slackRow = table.rows.find((row) => row.id === "slack"); - expect(slackRow).toBeDefined(); - expect(slackRow?.state).toBe("warn"); - expect(slackRow?.detail).toContain("unavailable in this command path"); }); it("preserves unavailable credential state from the source config snapshot", async () => { - vi.mocked(listChannelPlugins).mockReturnValue([makeSourceAwareUnavailablePlugin()]); - - const table = await buildChannelsTable({ marker: "resolved", channels: {} } as never, { - showSecrets: false, - sourceConfig: { marker: "source", channels: {} } as never, + const table = await buildTestTable([makeSourceAwareUnavailablePlugin()], { + cfg: { marker: "resolved", channels: {} }, + sourceConfig: { marker: "source", channels: {} }, }); - const slackRow = table.rows.find((row) => row.id === "slack"); - expect(slackRow).toBeDefined(); - expect(slackRow?.state).toBe("warn"); - expect(slackRow?.detail).toContain("unavailable in this command path"); - - const slackDetails = table.details.find((detail) => detail.title === "Slack accounts"); - expect(slackDetails).toBeDefined(); - expect(slackDetails?.rows).toEqual([ + expectTableRow(table, { + id: "slack", + state: "warn", + detailContains: "unavailable in this command path", + }); + expectTableDetailRows(table, "Slack accounts", [ { Account: "primary (Primary)", Notes: "bot:config · app:config · secret unavailable in 
this command path", @@ -330,21 +294,13 @@ describe("buildChannelsTable - mattermost token summary", () => { }); it("treats status-only available credentials as resolved", async () => { - vi.mocked(listChannelPlugins).mockReturnValue([makeSourceUnavailableResolvedAvailablePlugin()]); - - const table = await buildChannelsTable({ marker: "resolved", channels: {} } as never, { - showSecrets: false, - sourceConfig: { marker: "source", channels: {} } as never, + const table = await buildTestTable([makeSourceUnavailableResolvedAvailablePlugin()], { + cfg: { marker: "resolved", channels: {} }, + sourceConfig: { marker: "source", channels: {} }, }); - const discordRow = table.rows.find((row) => row.id === "discord"); - expect(discordRow).toBeDefined(); - expect(discordRow?.state).toBe("ok"); - expect(discordRow?.detail).toBe("configured"); - - const discordDetails = table.details.find((detail) => detail.title === "Discord accounts"); - expect(discordDetails).toBeDefined(); - expect(discordDetails?.rows).toEqual([ + expectTableRow(table, { id: "discord", state: "ok", detailEquals: "configured" }); + expectTableDetailRows(table, "Discord accounts", [ { Account: "primary (Primary)", Notes: "token:config", @@ -354,20 +310,13 @@ describe("buildChannelsTable - mattermost token summary", () => { }); it("treats Slack HTTP signing-secret availability as required config", async () => { - vi.mocked(listChannelPlugins).mockReturnValue([makeHttpSlackUnavailablePlugin()]); - - const table = await buildChannelsTable({ channels: {} } as never, { - showSecrets: false, + const table = await buildTestTable([makeHttpSlackUnavailablePlugin()]); + expectTableRow(table, { + id: "slack", + state: "warn", + detailContains: "configured http credentials unavailable", }); - - const slackRow = table.rows.find((row) => row.id === "slack"); - expect(slackRow).toBeDefined(); - expect(slackRow?.state).toBe("warn"); - expect(slackRow?.detail).toContain("configured http credentials unavailable"); - - const 
slackDetails = table.details.find((detail) => detail.title === "Slack accounts"); - expect(slackDetails).toBeDefined(); - expect(slackDetails?.rows).toEqual([ + expectTableDetailRows(table, "Slack accounts", [ { Account: "primary (Primary)", Notes: "bot:config · signing:config · secret unavailable in this command path", @@ -377,15 +326,7 @@ describe("buildChannelsTable - mattermost token summary", () => { }); it("still reports single-token channels as ok", async () => { - vi.mocked(listChannelPlugins).mockReturnValue([makeTokenPlugin()]); - - const table = await buildChannelsTable({ channels: {} } as never, { - showSecrets: false, - }); - - const tokenRow = table.rows.find((row) => row.id === "token-only"); - expect(tokenRow).toBeDefined(); - expect(tokenRow?.state).toBe("ok"); - expect(tokenRow?.detail).toContain("token"); + const table = await buildTestTable([makeTokenPlugin()]); + expectTableRow(table, { id: "token-only", state: "ok", detailContains: "token" }); }); }); diff --git a/src/commands/status-all/report-lines.ts b/src/commands/status-all/report-lines.ts index 152918029b5..751237360b4 100644 --- a/src/commands/status-all/report-lines.ts +++ b/src/commands/status-all/report-lines.ts @@ -1,5 +1,5 @@ import type { ProgressReporter } from "../../cli/progress.js"; -import { renderTable } from "../../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../../terminal/table.js"; import { isRich, theme } from "../../terminal/theme.js"; import { groupChannelIssuesByChannel } from "./channel-issues.js"; import { appendStatusAllDiagnosis } from "./diagnosis.js"; @@ -57,7 +57,7 @@ export async function buildStatusAllReportLines(params: { const fail = (text: string) => (rich ? theme.error(text) : text); const muted = (text: string) => (rich ? theme.muted(text) : text); - const tableWidth = Math.max(60, (process.stdout.columns ?? 
120) - 1); + const tableWidth = getTerminalTableWidth(); const overview = renderTable({ width: tableWidth, diff --git a/src/commands/status.command.ts b/src/commands/status.command.ts index 0d412c9715a..7e68424c5a9 100644 --- a/src/commands/status.command.ts +++ b/src/commands/status.command.ts @@ -16,7 +16,7 @@ import { } from "../memory/status-format.js"; import type { RuntimeEnv } from "../runtime.js"; import { runSecurityAudit } from "../security/audit.js"; -import { renderTable } from "../terminal/table.js"; +import { getTerminalTableWidth, renderTable } from "../terminal/table.js"; import { theme } from "../terminal/theme.js"; import { formatHealthChannelLines, type HealthSummary } from "./health.js"; import { resolveControlUiLinks } from "./onboard-helpers.js"; @@ -229,7 +229,7 @@ export async function statusCommand( runtime.log(""); } - const tableWidth = Math.max(60, (process.stdout.columns ?? 120) - 1); + const tableWidth = getTerminalTableWidth(); if (secretDiagnostics.length > 0) { runtime.log(theme.warn("Secret diagnostics:")); diff --git a/src/commands/status.service-summary.test.ts b/src/commands/status.service-summary.test.ts index fb51d8036e4..f730137a111 100644 --- a/src/commands/status.service-summary.test.ts +++ b/src/commands/status.service-summary.test.ts @@ -1,5 +1,6 @@ import { describe, expect, it, vi } from "vitest"; import type { GatewayService } from "../daemon/service.js"; +import type { GatewayServiceEnvArgs } from "../daemon/service.js"; import { readServiceStatusSummary } from "./status.service-summary.js"; function createService(overrides: Partial): GatewayService { @@ -10,7 +11,7 @@ function createService(overrides: Partial): GatewayService { install: vi.fn(async () => {}), uninstall: vi.fn(async () => {}), stop: vi.fn(async () => {}), - restart: vi.fn(async () => {}), + restart: vi.fn(async () => ({ outcome: "completed" as const })), isLoaded: vi.fn(async () => false), readCommand: vi.fn(async () => null), readRuntime: vi.fn(async 
() => ({ status: "stopped" as const })), @@ -57,4 +58,41 @@ describe("readServiceStatusSummary", () => { expect(summary.externallyManaged).toBe(false); expect(summary.loadedText).toBe("disabled"); }); + + it("passes command environment to runtime and loaded checks", async () => { + const isLoaded = vi.fn(async ({ env }: GatewayServiceEnvArgs) => { + return env?.OPENCLAW_GATEWAY_PORT === "18789"; + }); + const readRuntime = vi.fn(async (env?: NodeJS.ProcessEnv) => ({ + status: env?.OPENCLAW_GATEWAY_PORT === "18789" ? ("running" as const) : ("unknown" as const), + })); + + const summary = await readServiceStatusSummary( + createService({ + isLoaded, + readCommand: vi.fn(async () => ({ + programArguments: ["openclaw", "gateway", "run", "--port", "18789"], + environment: { OPENCLAW_GATEWAY_PORT: "18789" }, + })), + readRuntime, + }), + "Daemon", + ); + + expect(isLoaded).toHaveBeenCalledWith( + expect.objectContaining({ + env: expect.objectContaining({ + OPENCLAW_GATEWAY_PORT: "18789", + }), + }), + ); + expect(readRuntime).toHaveBeenCalledWith( + expect.objectContaining({ + OPENCLAW_GATEWAY_PORT: "18789", + }), + ); + expect(summary.installed).toBe(true); + expect(summary.loaded).toBe(true); + expect(summary.runtime).toMatchObject({ status: "running" }); + }); }); diff --git a/src/commands/status.service-summary.ts b/src/commands/status.service-summary.ts index d750fe7eb02..cc366c2c7ba 100644 --- a/src/commands/status.service-summary.ts +++ b/src/commands/status.service-summary.ts @@ -16,10 +16,16 @@ export async function readServiceStatusSummary( fallbackLabel: string, ): Promise { try { - const [loaded, runtime, command] = await Promise.all([ - service.isLoaded({ env: process.env }).catch(() => false), - service.readRuntime(process.env).catch(() => undefined), - service.readCommand(process.env).catch(() => null), + const command = await service.readCommand(process.env).catch(() => null); + const serviceEnv = command?.environment + ? 
({ + ...process.env, + ...command.environment, + } satisfies NodeJS.ProcessEnv) + : process.env; + const [loaded, runtime] = await Promise.all([ + service.isLoaded({ env: serviceEnv }).catch(() => false), + service.readRuntime(serviceEnv).catch(() => undefined), ]); const managedByOpenClaw = command != null; const externallyManaged = !managedByOpenClaw && runtime?.status === "running"; diff --git a/src/commands/status.summary.redaction.test.ts b/src/commands/status.summary.redaction.test.ts index 02eaecbcb35..26e28887560 100644 --- a/src/commands/status.summary.redaction.test.ts +++ b/src/commands/status.summary.redaction.test.ts @@ -22,6 +22,7 @@ function createRecentSessionRow() { describe("redactSensitiveStatusSummary", () => { it("removes sensitive session and path details while preserving summary structure", () => { const input: StatusSummary = { + runtimeVersion: "2026.3.8", heartbeat: { defaultAgentId: "main", agents: [{ agentId: "main", enabled: true, every: "5m", everyMs: 300_000 }], @@ -50,6 +51,7 @@ describe("redactSensitiveStatusSummary", () => { expect(redacted.sessions.recent).toEqual([]); expect(redacted.sessions.byAgent[0]?.path).toBe("[redacted]"); expect(redacted.sessions.byAgent[0]?.recent).toEqual([]); + expect(redacted.runtimeVersion).toBe("2026.3.8"); expect(redacted.heartbeat).toEqual(input.heartbeat); expect(redacted.channelSummary).toEqual(input.channelSummary); }); diff --git a/src/commands/status.summary.test.ts b/src/commands/status.summary.test.ts new file mode 100644 index 00000000000..addda823a23 --- /dev/null +++ b/src/commands/status.summary.test.ts @@ -0,0 +1,85 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +vi.mock("../agents/context.js", () => ({ + resolveContextTokensForModel: vi.fn(() => 200_000), +})); + +vi.mock("../agents/defaults.js", () => ({ + DEFAULT_CONTEXT_TOKENS: 200_000, + DEFAULT_MODEL: "gpt-5.2", + DEFAULT_PROVIDER: "openai", +})); + +vi.mock("../agents/model-selection.js", () => ({ + 
resolveConfiguredModelRef: vi.fn(() => ({ + provider: "openai", + model: "gpt-5.2", + })), +})); + +vi.mock("../config/config.js", () => ({ + loadConfig: vi.fn(() => ({})), +})); + +vi.mock("../config/sessions.js", () => ({ + loadSessionStore: vi.fn(() => ({})), + resolveFreshSessionTotalTokens: vi.fn(() => undefined), + resolveMainSessionKey: vi.fn(() => "main"), + resolveStorePath: vi.fn(() => "/tmp/sessions.json"), +})); + +vi.mock("../gateway/session-utils.js", () => ({ + classifySessionKey: vi.fn(() => "direct"), + listAgentsForGateway: vi.fn(() => ({ + defaultId: "main", + agents: [{ id: "main" }], + })), + resolveSessionModelRef: vi.fn(() => ({ + provider: "openai", + model: "gpt-5.2", + })), +})); + +vi.mock("../infra/channel-summary.js", () => ({ + buildChannelSummary: vi.fn(async () => ["ok"]), +})); + +vi.mock("../infra/heartbeat-runner.js", () => ({ + resolveHeartbeatSummaryForAgent: vi.fn(() => ({ + enabled: true, + every: "5m", + everyMs: 300_000, + })), +})); + +vi.mock("../infra/system-events.js", () => ({ + peekSystemEvents: vi.fn(() => []), +})); + +vi.mock("../routing/session-key.js", () => ({ + parseAgentSessionKey: vi.fn(() => null), +})); + +vi.mock("../version.js", () => ({ + resolveRuntimeServiceVersion: vi.fn(() => "2026.3.8"), +})); + +vi.mock("./status.link-channel.js", () => ({ + resolveLinkChannelContext: vi.fn(async () => undefined), +})); + +describe("getStatusSummary", () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it("includes runtimeVersion in the status payload", async () => { + const { getStatusSummary } = await import("./status.summary.js"); + + const summary = await getStatusSummary(); + + expect(summary.runtimeVersion).toBe("2026.3.8"); + expect(summary.heartbeat.defaultAgentId).toBe("main"); + expect(summary.channelSummary).toEqual(["ok"]); + }); +}); diff --git a/src/commands/status.summary.ts b/src/commands/status.summary.ts index 3a71464973f..b84bada07ff 100644 --- a/src/commands/status.summary.ts +++ 
b/src/commands/status.summary.ts @@ -19,6 +19,7 @@ import { buildChannelSummary } from "../infra/channel-summary.js"; import { resolveHeartbeatSummaryForAgent } from "../infra/heartbeat-runner.js"; import { peekSystemEvents } from "../infra/system-events.js"; import { parseAgentSessionKey } from "../routing/session-key.js"; +import { resolveRuntimeServiceVersion } from "../version.js"; import { resolveLinkChannelContext } from "./status.link-channel.js"; import type { HeartbeatStatus, SessionStatus, StatusSummary } from "./status.types.js"; @@ -35,6 +36,9 @@ const buildFlags = (entry?: SessionEntry): string[] => { if (typeof verbose === "string" && verbose.length > 0) { flags.push(`verbose:${verbose}`); } + if (typeof entry?.fastMode === "boolean") { + flags.push(entry.fastMode ? "fast" : "fast:off"); + } const reasoning = entry?.reasoningLevel; if (typeof reasoning === "string" && reasoning.length > 0) { flags.push(`reasoning:${reasoning}`); @@ -169,6 +173,7 @@ export async function getStatusSummary( updatedAt, age, thinkingLevel: entry?.thinkingLevel, + fastMode: entry?.fastMode, verboseLevel: entry?.verboseLevel, reasoningLevel: entry?.reasoningLevel, elevatedLevel: entry?.elevatedLevel, @@ -210,6 +215,7 @@ export async function getStatusSummary( const totalSessions = allSessions.length; const summary: StatusSummary = { + runtimeVersion: resolveRuntimeServiceVersion(process.env), linkChannel: linkContext ? 
{ id: linkContext.plugin.id, diff --git a/src/commands/status.types.ts b/src/commands/status.types.ts index a3e0a5ca8e2..de680f1665f 100644 --- a/src/commands/status.types.ts +++ b/src/commands/status.types.ts @@ -8,6 +8,7 @@ export type SessionStatus = { updatedAt: number | null; age: number | null; thinkingLevel?: string; + fastMode?: boolean; verboseLevel?: string; reasoningLevel?: string; elevatedLevel?: string; @@ -34,6 +35,7 @@ export type HeartbeatStatus = { }; export type StatusSummary = { + runtimeVersion?: string | null; linkChannel?: { id: ChannelId; label: string; diff --git a/src/commands/vllm-setup.ts b/src/commands/vllm-setup.ts index f0f3f47356e..4d8657306e6 100644 --- a/src/commands/vllm-setup.ts +++ b/src/commands/vllm-setup.ts @@ -1,78 +1,36 @@ -import { upsertAuthProfileWithLock } from "../agents/auth-profiles.js"; import type { OpenClawConfig } from "../config/config.js"; import type { WizardPrompter } from "../wizard/prompts.js"; +import { + applyProviderDefaultModel, + promptAndConfigureOpenAICompatibleSelfHostedProvider, + SELF_HOSTED_DEFAULT_CONTEXT_WINDOW, + SELF_HOSTED_DEFAULT_COST, + SELF_HOSTED_DEFAULT_MAX_TOKENS, +} from "./self-hosted-provider-setup.js"; export const VLLM_DEFAULT_BASE_URL = "http://127.0.0.1:8000/v1"; -export const VLLM_DEFAULT_CONTEXT_WINDOW = 128000; -export const VLLM_DEFAULT_MAX_TOKENS = 8192; -export const VLLM_DEFAULT_COST = { - input: 0, - output: 0, - cacheRead: 0, - cacheWrite: 0, -}; +export const VLLM_DEFAULT_CONTEXT_WINDOW = SELF_HOSTED_DEFAULT_CONTEXT_WINDOW; +export const VLLM_DEFAULT_MAX_TOKENS = SELF_HOSTED_DEFAULT_MAX_TOKENS; +export const VLLM_DEFAULT_COST = SELF_HOSTED_DEFAULT_COST; export async function promptAndConfigureVllm(params: { cfg: OpenClawConfig; prompter: WizardPrompter; - agentDir?: string; }): Promise<{ config: OpenClawConfig; modelId: string; modelRef: string }> { - const baseUrlRaw = await params.prompter.text({ - message: "vLLM base URL", - initialValue: VLLM_DEFAULT_BASE_URL, - 
placeholder: VLLM_DEFAULT_BASE_URL, - validate: (value) => (value?.trim() ? undefined : "Required"), + const result = await promptAndConfigureOpenAICompatibleSelfHostedProvider({ + cfg: params.cfg, + prompter: params.prompter, + providerId: "vllm", + providerLabel: "vLLM", + defaultBaseUrl: VLLM_DEFAULT_BASE_URL, + defaultApiKeyEnvVar: "VLLM_API_KEY", + modelPlaceholder: "meta-llama/Meta-Llama-3-8B-Instruct", }); - const apiKeyRaw = await params.prompter.text({ - message: "vLLM API key", - placeholder: "sk-... (or any non-empty string)", - validate: (value) => (value?.trim() ? undefined : "Required"), - }); - const modelIdRaw = await params.prompter.text({ - message: "vLLM model", - placeholder: "meta-llama/Meta-Llama-3-8B-Instruct", - validate: (value) => (value?.trim() ? undefined : "Required"), - }); - - const baseUrl = String(baseUrlRaw ?? "") - .trim() - .replace(/\/+$/, ""); - const apiKey = String(apiKeyRaw ?? "").trim(); - const modelId = String(modelIdRaw ?? "").trim(); - const modelRef = `vllm/${modelId}`; - - await upsertAuthProfileWithLock({ - profileId: "vllm:default", - credential: { type: "api_key", provider: "vllm", key: apiKey }, - agentDir: params.agentDir, - }); - - const nextConfig: OpenClawConfig = { - ...params.cfg, - models: { - ...params.cfg.models, - mode: params.cfg.models?.mode ?? 
"merge", - providers: { - ...params.cfg.models?.providers, - vllm: { - baseUrl, - api: "openai-completions", - apiKey: "VLLM_API_KEY", - models: [ - { - id: modelId, - name: modelId, - reasoning: false, - input: ["text"], - cost: VLLM_DEFAULT_COST, - contextWindow: VLLM_DEFAULT_CONTEXT_WINDOW, - maxTokens: VLLM_DEFAULT_MAX_TOKENS, - }, - ], - }, - }, - }, + return { + config: result.config, + modelId: result.modelId, + modelRef: result.modelRef, }; - - return { config: nextConfig, modelId, modelRef }; } + +export { applyProviderDefaultModel as applyVllmDefaultModel }; diff --git a/src/config/channel-capabilities.test.ts b/src/config/channel-capabilities.test.ts index 423cc3e2f74..75083317e82 100644 --- a/src/config/channel-capabilities.test.ts +++ b/src/config/channel-capabilities.test.ts @@ -125,6 +125,23 @@ describe("resolveChannelCapabilities", () => { }), ).toBeUndefined(); }); + + it("handles Slack object-format capabilities gracefully", () => { + const cfg = { + channels: { + slack: { + capabilities: { interactiveReplies: true }, + }, + }, + } as unknown as Partial; + + expect( + resolveChannelCapabilities({ + cfg, + channel: "slack", + }), + ).toBeUndefined(); + }); }); const createStubPlugin = (id: string): ChannelPlugin => ({ diff --git a/src/config/channel-capabilities.ts b/src/config/channel-capabilities.ts index 0e66f755e3b..b7edc354596 100644 --- a/src/config/channel-capabilities.ts +++ b/src/config/channel-capabilities.ts @@ -2,9 +2,10 @@ import { normalizeChannelId } from "../channels/plugins/index.js"; import { resolveAccountEntry } from "../routing/account-lookup.js"; import { normalizeAccountId } from "../routing/session-key.js"; import type { OpenClawConfig } from "./config.js"; +import type { SlackCapabilitiesConfig } from "./types.slack.js"; import type { TelegramCapabilitiesConfig } from "./types.telegram.js"; -type CapabilitiesConfig = TelegramCapabilitiesConfig; +type CapabilitiesConfig = TelegramCapabilitiesConfig | SlackCapabilitiesConfig; 
const isStringArray = (value: unknown): value is string[] => Array.isArray(value) && value.every((entry) => typeof entry === "string"); diff --git a/src/config/config-misc.test.ts b/src/config/config-misc.test.ts index 647986a96e0..bd9a05fea10 100644 --- a/src/config/config-misc.test.ts +++ b/src/config/config-misc.test.ts @@ -315,6 +315,7 @@ describe("model compat config schema", () => { requiresAssistantAfterToolResult: false, requiresThinkingAsText: false, requiresMistralToolIds: false, + requiresOpenAiAnthropicToolPayload: true, }, }, ], @@ -360,6 +361,33 @@ describe("config strict validation", () => { expect(res.ok).toBe(false); }); + it("accepts documented agents.list[].params overrides", () => { + const res = validateConfigObject({ + agents: { + list: [ + { + id: "main", + model: "anthropic/claude-opus-4-6", + params: { + cacheRetention: "none", + temperature: 0.4, + maxTokens: 8192, + }, + }, + ], + }, + }); + + expect(res.ok).toBe(true); + if (res.ok) { + expect(res.config.agents?.list?.[0]?.params).toEqual({ + cacheRetention: "none", + temperature: 0.4, + maxTokens: 8192, + }); + } + }); + it("flags legacy config entries without auto-migrating", async () => { await withTempHome(async (home) => { await writeOpenClawConfig(home, { diff --git a/src/config/config.discord.test.ts b/src/config/config.discord.test.ts index 8afde31b9e3..0bf5484dbe3 100644 --- a/src/config/config.discord.test.ts +++ b/src/config/config.discord.test.ts @@ -36,7 +36,7 @@ describe("config discord", () => { requireMention: false, users: ["steipete"], channels: { - general: { allow: true }, + general: { allow: true, autoThread: true }, }, }, }, @@ -54,6 +54,7 @@ describe("config discord", () => { expect(cfg.channels?.discord?.actions?.channels).toBe(true); expect(cfg.channels?.discord?.guilds?.["123"]?.slug).toBe("friends-of-openclaw"); expect(cfg.channels?.discord?.guilds?.["123"]?.channels?.general?.allow).toBe(true); + 
expect(cfg.channels?.discord?.guilds?.["123"]?.channels?.general?.autoThread).toBe(true); }, ); }); diff --git a/src/config/config.plugin-validation.test.ts b/src/config/config.plugin-validation.test.ts index 02eab6789ea..d7e6ae46aca 100644 --- a/src/config/config.plugin-validation.test.ts +++ b/src/config/config.plugin-validation.test.ts @@ -5,13 +5,25 @@ import { afterAll, beforeAll, describe, expect, it } from "vitest"; import { clearPluginManifestRegistryCache } from "../plugins/manifest-registry.js"; import { validateConfigObjectWithPlugins } from "./config.js"; +async function chmodSafeDir(dir: string) { + if (process.platform === "win32") { + return; + } + await fs.chmod(dir, 0o755); +} + +async function mkdirSafe(dir: string) { + await fs.mkdir(dir, { recursive: true }); + await chmodSafeDir(dir); +} + async function writePluginFixture(params: { dir: string; id: string; schema: Record; channels?: string[]; }) { - await fs.mkdir(params.dir, { recursive: true }); + await mkdirSafe(params.dir); await fs.writeFile( path.join(params.dir, "index.js"), `export default { id: "${params.id}", register() {} };`, @@ -32,23 +44,31 @@ async function writePluginFixture(params: { } describe("config plugin validation", () => { + const previousUmask = process.umask(0o022); let fixtureRoot = ""; let suiteHome = ""; let badPluginDir = ""; let enumPluginDir = ""; let bluebubblesPluginDir = ""; let voiceCallSchemaPluginDir = ""; - const envSnapshot = { - OPENCLAW_STATE_DIR: process.env.OPENCLAW_STATE_DIR, - OPENCLAW_PLUGIN_MANIFEST_CACHE_MS: process.env.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS, - }; + const suiteEnv = () => + ({ + ...process.env, + HOME: suiteHome, + OPENCLAW_HOME: undefined, + OPENCLAW_STATE_DIR: path.join(suiteHome, ".openclaw"), + CLAWDBOT_STATE_DIR: undefined, + OPENCLAW_PLUGIN_MANIFEST_CACHE_MS: "10000", + }) satisfies NodeJS.ProcessEnv; - const validateInSuite = (raw: unknown) => validateConfigObjectWithPlugins(raw); + const validateInSuite = (raw: unknown) => + 
validateConfigObjectWithPlugins(raw, { env: suiteEnv() }); beforeAll(async () => { fixtureRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-config-plugin-validation-")); + await chmodSafeDir(fixtureRoot); suiteHome = path.join(fixtureRoot, "home"); - await fs.mkdir(suiteHome, { recursive: true }); + await mkdirSafe(suiteHome); badPluginDir = path.join(suiteHome, "bad-plugin"); enumPluginDir = path.join(suiteHome, "enum-plugin"); bluebubblesPluginDir = path.join(suiteHome, "bluebubbles-plugin"); @@ -102,8 +122,6 @@ describe("config plugin validation", () => { id: "voice-call-schema-fixture", schema: voiceCallManifest.configSchema, }); - process.env.OPENCLAW_STATE_DIR = path.join(suiteHome, ".openclaw"); - process.env.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS = "10000"; clearPluginManifestRegistryCache(); // Warm the plugin manifest cache once so path-based validations can reuse // parsed manifests across test cases. @@ -118,16 +136,7 @@ describe("config plugin validation", () => { afterAll(async () => { await fs.rm(fixtureRoot, { recursive: true, force: true }); clearPluginManifestRegistryCache(); - if (envSnapshot.OPENCLAW_STATE_DIR === undefined) { - delete process.env.OPENCLAW_STATE_DIR; - } else { - process.env.OPENCLAW_STATE_DIR = envSnapshot.OPENCLAW_STATE_DIR; - } - if (envSnapshot.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS === undefined) { - delete process.env.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS; - } else { - process.env.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS = envSnapshot.OPENCLAW_PLUGIN_MANIFEST_CACHE_MS; - } + process.umask(previousUmask); }); it("reports missing plugin refs across load paths, entries, and allowlist surfaces", async () => { @@ -279,6 +288,31 @@ describe("config plugin validation", () => { expect(res.ok).toBe(true); }); + it("accepts voice-call OpenAI TTS speed, instructions, and baseUrl config fields", async () => { + const res = validateInSuite({ + agents: { list: [{ id: "pi" }] }, + plugins: { + enabled: true, + load: { paths: [voiceCallSchemaPluginDir] 
}, + entries: { + "voice-call-schema-fixture": { + config: { + tts: { + openai: { + baseUrl: "http://localhost:8880/v1", + voice: "alloy", + speed: 1.5, + instructions: "Speak in a cheerful tone", + }, + }, + }, + }, + }, + }, + }); + expect(res.ok).toBe(true); + }); + it("accepts known plugin ids and valid channel/heartbeat enums", async () => { const res = validateInSuite({ agents: { diff --git a/src/config/config.pruning-defaults.test.ts b/src/config/config.pruning-defaults.test.ts index f2f66ce6bac..92d46f4ab75 100644 --- a/src/config/config.pruning-defaults.test.ts +++ b/src/config/config.pruning-defaults.test.ts @@ -15,6 +15,22 @@ async function writeConfigForTest(home: string, config: unknown): Promise ); } +async function loadConfigForHome(config: unknown) { + return await withTempHome(async (home) => { + await writeConfigForTest(home, config); + return loadConfig(); + }); +} + +function expectAnthropicPruningDefaults( + cfg: ReturnType, + heartbeatEvery = "30m", +) { + expect(cfg.agents?.defaults?.contextPruning?.mode).toBe("cache-ttl"); + expect(cfg.agents?.defaults?.contextPruning?.ttl).toBe("1h"); + expect(cfg.agents?.defaults?.heartbeat?.every).toBe(heartbeatEvery); +} + describe("config pruning defaults", () => { it("does not enable contextPruning by default", async () => { await withEnvAsync({ ANTHROPIC_API_KEY: "", ANTHROPIC_OAUTH_TOKEN: "" }, async () => { @@ -29,105 +45,103 @@ describe("config pruning defaults", () => { }); it("enables cache-ttl pruning + 1h heartbeat for Anthropic OAuth", async () => { - await withTempHome(async (home) => { - await writeConfigForTest(home, { - auth: { - profiles: { - "anthropic:me": { provider: "anthropic", mode: "oauth", email: "me@example.com" }, - }, + const cfg = await loadConfigForHome({ + auth: { + profiles: { + "anthropic:me": { provider: "anthropic", mode: "oauth", email: "me@example.com" }, }, - agents: { defaults: {} }, - }); - - const cfg = loadConfig(); - - 
expect(cfg.agents?.defaults?.contextPruning?.mode).toBe("cache-ttl"); - expect(cfg.agents?.defaults?.contextPruning?.ttl).toBe("1h"); - expect(cfg.agents?.defaults?.heartbeat?.every).toBe("1h"); + }, + agents: { defaults: {} }, }); + + expectAnthropicPruningDefaults(cfg, "1h"); }); it("enables cache-ttl pruning + 1h cache TTL for Anthropic API keys", async () => { - await withTempHome(async (home) => { - await writeConfigForTest(home, { - auth: { - profiles: { - "anthropic:api": { provider: "anthropic", mode: "api_key" }, - }, + const cfg = await loadConfigForHome({ + auth: { + profiles: { + "anthropic:api": { provider: "anthropic", mode: "api_key" }, }, - agents: { - defaults: { - model: { primary: "anthropic/claude-opus-4-5" }, - }, + }, + agents: { + defaults: { + model: { primary: "anthropic/claude-opus-4-5" }, }, - }); - - const cfg = loadConfig(); - - expect(cfg.agents?.defaults?.contextPruning?.mode).toBe("cache-ttl"); - expect(cfg.agents?.defaults?.contextPruning?.ttl).toBe("1h"); - expect(cfg.agents?.defaults?.heartbeat?.every).toBe("30m"); - expect( - cfg.agents?.defaults?.models?.["anthropic/claude-opus-4-5"]?.params?.cacheRetention, - ).toBe("short"); + }, }); + + expectAnthropicPruningDefaults(cfg); + expect( + cfg.agents?.defaults?.models?.["anthropic/claude-opus-4-5"]?.params?.cacheRetention, + ).toBe("short"); + }); + + it("adds cacheRetention defaults for dated Anthropic primary model refs", async () => { + const cfg = await loadConfigForHome({ + auth: { + profiles: { + "anthropic:api": { provider: "anthropic", mode: "api_key" }, + }, + }, + agents: { + defaults: { + model: { primary: "anthropic/claude-sonnet-4-20250514" }, + }, + }, + }); + + expectAnthropicPruningDefaults(cfg); + expect( + cfg.agents?.defaults?.models?.["anthropic/claude-sonnet-4-20250514"]?.params?.cacheRetention, + ).toBe("short"); }); it("adds default cacheRetention for Anthropic Claude models on Bedrock", async () => { - await withTempHome(async (home) => { - await 
writeConfigForTest(home, { - auth: { - profiles: { - "anthropic:api": { provider: "anthropic", mode: "api_key" }, - }, + const cfg = await loadConfigForHome({ + auth: { + profiles: { + "anthropic:api": { provider: "anthropic", mode: "api_key" }, }, - agents: { - defaults: { - model: { primary: "amazon-bedrock/us.anthropic.claude-opus-4-6-v1" }, - }, + }, + agents: { + defaults: { + model: { primary: "amazon-bedrock/us.anthropic.claude-opus-4-6-v1" }, }, - }); - - const cfg = loadConfig(); - - expect( - cfg.agents?.defaults?.models?.["amazon-bedrock/us.anthropic.claude-opus-4-6-v1"]?.params - ?.cacheRetention, - ).toBe("short"); + }, }); + + expect( + cfg.agents?.defaults?.models?.["amazon-bedrock/us.anthropic.claude-opus-4-6-v1"]?.params + ?.cacheRetention, + ).toBe("short"); }); it("does not add default cacheRetention for non-Anthropic Bedrock models", async () => { - await withTempHome(async (home) => { - await writeConfigForTest(home, { - auth: { - profiles: { - "anthropic:api": { provider: "anthropic", mode: "api_key" }, - }, + const cfg = await loadConfigForHome({ + auth: { + profiles: { + "anthropic:api": { provider: "anthropic", mode: "api_key" }, }, - agents: { - defaults: { - model: { primary: "amazon-bedrock/amazon.nova-micro-v1:0" }, - }, + }, + agents: { + defaults: { + model: { primary: "amazon-bedrock/amazon.nova-micro-v1:0" }, }, - }); - - const cfg = loadConfig(); - - expect( - cfg.agents?.defaults?.models?.["amazon-bedrock/amazon.nova-micro-v1:0"]?.params - ?.cacheRetention, - ).toBeUndefined(); + }, }); + + expect( + cfg.agents?.defaults?.models?.["amazon-bedrock/amazon.nova-micro-v1:0"]?.params + ?.cacheRetention, + ).toBeUndefined(); }); it("does not override explicit contextPruning mode", async () => { - await withTempHome(async (home) => { - await writeConfigForTest(home, { agents: { defaults: { contextPruning: { mode: "off" } } } }); - - const cfg = loadConfig(); - - expect(cfg.agents?.defaults?.contextPruning?.mode).toBe("off"); + const cfg 
= await loadConfigForHome({ + agents: { defaults: { contextPruning: { mode: "off" } } }, }); + + expect(cfg.agents?.defaults?.contextPruning?.mode).toBe("off"); }); }); diff --git a/src/config/config.schema-regressions.test.ts b/src/config/config.schema-regressions.test.ts index 4125cb1b3d4..7a6053fd01c 100644 --- a/src/config/config.schema-regressions.test.ts +++ b/src/config/config.schema-regressions.test.ts @@ -184,4 +184,44 @@ describe("config schema regressions", () => { expect(res.ok).toBe(false); }); + + it("accepts signal accountUuid for loop protection", () => { + const res = validateConfigObject({ + channels: { + signal: { + accountUuid: "a1b2c3d4-e5f6-7890-abcd-ef1234567890", + }, + }, + }); + + expect(res.ok).toBe(true); + }); + + it("accepts telegram actions editMessage and createForumTopic", () => { + const res = validateConfigObject({ + channels: { + telegram: { + actions: { + editMessage: true, + createForumTopic: false, + }, + }, + }, + }); + + expect(res.ok).toBe(true); + }); + + it("accepts discovery.wideArea.domain for unicast DNS-SD", () => { + const res = validateConfigObject({ + discovery: { + wideArea: { + enabled: true, + domain: "openclaw.internal", + }, + }, + }); + + expect(res.ok).toBe(true); + }); }); diff --git a/src/config/config.secrets-schema.test.ts b/src/config/config.secrets-schema.test.ts index 196bb50ace4..e3c236fb15b 100644 --- a/src/config/config.secrets-schema.test.ts +++ b/src/config/config.secrets-schema.test.ts @@ -1,4 +1,8 @@ import { describe, expect, it } from "vitest"; +import { + INVALID_EXEC_SECRET_REF_IDS, + VALID_EXEC_SECRET_REF_IDS, +} from "../test-utils/secret-ref-test-vectors.js"; import { validateConfigObjectRaw } from "./validation.js"; function validateOpenAiApiKeyRef(apiKey: unknown) { @@ -173,4 +177,31 @@ describe("config secret refs schema", () => { ).toBe(true); } }); + + it("accepts valid exec secret reference ids", () => { + for (const id of VALID_EXEC_SECRET_REF_IDS) { + const result = 
validateOpenAiApiKeyRef({ + source: "exec", + provider: "vault", + id, + }); + expect(result.ok, `expected valid exec ref id: ${id}`).toBe(true); + } + }); + + it("rejects invalid exec secret reference ids", () => { + for (const id of INVALID_EXEC_SECRET_REF_IDS) { + const result = validateOpenAiApiKeyRef({ + source: "exec", + provider: "vault", + id, + }); + expect(result.ok, `expected invalid exec ref id: ${id}`).toBe(false); + if (!result.ok) { + expect( + result.issues.some((issue) => issue.path.includes("models.providers.openai.apiKey")), + ).toBe(true); + } + } + }); }); diff --git a/src/config/config.talk-validation.test.ts b/src/config/config.talk-validation.test.ts index cb948d75c75..d2fb463613c 100644 --- a/src/config/config.talk-validation.test.ts +++ b/src/config/config.talk-validation.test.ts @@ -8,38 +8,42 @@ describe("talk config validation fail-closed behavior", () => { vi.restoreAllMocks(); }); + async function expectInvalidTalkConfig(config: unknown, messagePattern: RegExp) { + await withTempHomeConfig(config, async () => { + const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); + + let thrown: unknown; + try { + loadConfig(); + } catch (error) { + thrown = error; + } + + expect(thrown).toBeInstanceOf(Error); + expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG"); + expect((thrown as Error).message).toMatch(messagePattern); + expect(consoleSpy).toHaveBeenCalled(); + }); + } + it.each([ ["boolean", true], ["string", "1500"], ["float", 1500.5], ])("rejects %s talk.silenceTimeoutMs during config load", async (_label, value) => { - await withTempHomeConfig( + await expectInvalidTalkConfig( { agents: { list: [{ id: "main" }] }, talk: { silenceTimeoutMs: value, }, }, - async () => { - const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); - - let thrown: unknown; - try { - loadConfig(); - } catch (error) { - thrown = error; - } - - expect(thrown).toBeInstanceOf(Error); - expect((thrown 
as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG"); - expect((thrown as Error).message).toMatch(/silenceTimeoutMs|talk/i); - expect(consoleSpy).toHaveBeenCalled(); - }, + /silenceTimeoutMs|talk/i, ); }); it("rejects talk.provider when it does not match talk.providers during config load", async () => { - await withTempHomeConfig( + await expectInvalidTalkConfig( { agents: { list: [{ id: "main" }] }, talk: { @@ -51,26 +55,12 @@ describe("talk config validation fail-closed behavior", () => { }, }, }, - async () => { - const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); - - let thrown: unknown; - try { - loadConfig(); - } catch (error) { - thrown = error; - } - - expect(thrown).toBeInstanceOf(Error); - expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG"); - expect((thrown as Error).message).toMatch(/talk\.provider|talk\.providers|acme/i); - expect(consoleSpy).toHaveBeenCalled(); - }, + /talk\.provider|talk\.providers|acme/i, ); }); it("rejects multi-provider talk config without talk.provider during config load", async () => { - await withTempHomeConfig( + await expectInvalidTalkConfig( { agents: { list: [{ id: "main" }] }, talk: { @@ -84,21 +74,7 @@ describe("talk config validation fail-closed behavior", () => { }, }, }, - async () => { - const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); - - let thrown: unknown; - try { - loadConfig(); - } catch (error) { - thrown = error; - } - - expect(thrown).toBeInstanceOf(Error); - expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG"); - expect((thrown as Error).message).toMatch(/talk\.provider|required/i); - expect(consoleSpy).toHaveBeenCalled(); - }, + /talk\.provider|required/i, ); }); }); diff --git a/src/config/config.ts b/src/config/config.ts index 7caaa15a95f..3bd36d0d709 100644 --- a/src/config/config.ts +++ b/src/config/config.ts @@ -5,6 +5,7 @@ export { createConfigIO, getRuntimeConfigSnapshot, 
getRuntimeConfigSourceSnapshot, + projectConfigOntoRuntimeSourceSnapshot, loadConfig, readBestEffortConfig, parseConfigJson5, diff --git a/src/config/io.runtime-snapshot-write.test.ts b/src/config/io.runtime-snapshot-write.test.ts index 71ddbbb8de3..480897c698c 100644 --- a/src/config/io.runtime-snapshot-write.test.ts +++ b/src/config/io.runtime-snapshot-write.test.ts @@ -7,6 +7,7 @@ import { clearRuntimeConfigSnapshot, getRuntimeConfigSourceSnapshot, loadConfig, + projectConfigOntoRuntimeSourceSnapshot, setRuntimeConfigSnapshotRefreshHandler, setRuntimeConfigSnapshot, writeConfigFile, @@ -61,6 +62,46 @@ describe("runtime config snapshot writes", () => { }); }); + it("skips source projection for non-runtime-derived configs", async () => { + await withTempHome("openclaw-config-runtime-projection-shape-", async () => { + const sourceConfig: OpenClawConfig = { + ...createSourceConfig(), + gateway: { + auth: { + mode: "token", + }, + }, + }; + const runtimeConfig: OpenClawConfig = { + ...createRuntimeConfig(), + gateway: { + auth: { + mode: "token", + }, + }, + }; + const independentConfig: OpenClawConfig = { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "sk-independent-config", // pragma: allowlist secret + models: [], + }, + }, + }, + }; + + try { + setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); + const projected = projectConfigOntoRuntimeSourceSnapshot(independentConfig); + expect(projected).toBe(independentConfig); + } finally { + resetRuntimeConfigState(); + } + }); + }); + it("clears runtime source snapshot when runtime snapshot is cleared", async () => { const sourceConfig = createSourceConfig(); const runtimeConfig = createRuntimeConfig(); diff --git a/src/config/io.ts b/src/config/io.ts index a4ec4cd430c..fba17f253aa 100644 --- a/src/config/io.ts +++ b/src/config/io.ts @@ -68,6 +68,7 @@ const SHELL_ENV_EXPECTED_KEYS = [ "OPENROUTER_API_KEY", "AI_GATEWAY_API_KEY", "MINIMAX_API_KEY", + "MODELSTUDIO_API_KEY", 
"SYNTHETIC_API_KEY", "KILOCODE_API_KEY", "ELEVENLABS_API_KEY", @@ -163,6 +164,32 @@ function hashConfigRaw(raw: string | null): string { .digest("hex"); } +async function tightenStateDirPermissionsIfNeeded(params: { + configPath: string; + env: NodeJS.ProcessEnv; + homedir: () => string; + fsModule: typeof fs; +}): Promise { + if (process.platform === "win32") { + return; + } + const stateDir = resolveStateDir(params.env, params.homedir); + const configDir = path.dirname(params.configPath); + if (path.resolve(configDir) !== path.resolve(stateDir)) { + return; + } + try { + const stat = await params.fsModule.promises.stat(configDir); + const mode = stat.mode & 0o777; + if ((mode & 0o077) === 0) { + return; + } + await params.fsModule.promises.chmod(configDir, 0o700); + } catch { + // Best-effort hardening only; callers still need the config write to proceed. + } +} + function formatConfigValidationFailure(pathLabel: string, issueMessage: string): string { const match = issueMessage.match(OPEN_DM_POLICY_ALLOW_FROM_RE); const policyPath = match?.groups?.policyPath?.trim(); @@ -1135,6 +1162,12 @@ export function createConfigIO(overrides: ConfigIoDeps = {}) { const dir = path.dirname(configPath); await deps.fs.promises.mkdir(dir, { recursive: true, mode: 0o700 }); + await tightenStateDirPermissionsIfNeeded({ + configPath, + env: deps.env, + homedir: deps.homedir, + fsModule: deps.fs, + }); const outputConfigBase = envRefMap && changedPaths ? 
(restoreEnvRefsFromMap(cfgToWrite, "", envRefMap, changedPaths) as OpenClawConfig) @@ -1373,6 +1406,58 @@ export function getRuntimeConfigSourceSnapshot(): OpenClawConfig | null { return runtimeConfigSourceSnapshot; } +function isCompatibleTopLevelRuntimeProjectionShape(params: { + runtimeSnapshot: OpenClawConfig; + candidate: OpenClawConfig; +}): boolean { + const runtime = params.runtimeSnapshot as Record; + const candidate = params.candidate as Record; + for (const key of Object.keys(runtime)) { + if (!Object.hasOwn(candidate, key)) { + return false; + } + const runtimeValue = runtime[key]; + const candidateValue = candidate[key]; + const runtimeType = Array.isArray(runtimeValue) + ? "array" + : runtimeValue === null + ? "null" + : typeof runtimeValue; + const candidateType = Array.isArray(candidateValue) + ? "array" + : candidateValue === null + ? "null" + : typeof candidateValue; + if (runtimeType !== candidateType) { + return false; + } + } + return true; +} + +export function projectConfigOntoRuntimeSourceSnapshot(config: OpenClawConfig): OpenClawConfig { + if (!runtimeConfigSnapshot || !runtimeConfigSourceSnapshot) { + return config; + } + if (config === runtimeConfigSnapshot) { + return runtimeConfigSourceSnapshot; + } + // This projection expects callers to pass config objects derived from the + // active runtime snapshot (for example shallow/deep clones with targeted edits). + // For structurally unrelated configs, skip projection to avoid accidental + // merge-patch deletions or reintroducing resolved values into source refs. 
+ if ( + !isCompatibleTopLevelRuntimeProjectionShape({ + runtimeSnapshot: runtimeConfigSnapshot, + candidate: config, + }) + ) { + return config; + } + const runtimePatch = createMergePatch(runtimeConfigSnapshot, config); + return coerceConfig(applyMergePatch(runtimeConfigSourceSnapshot, runtimePatch)); +} + export function setRuntimeConfigSnapshotRefreshHandler( refreshHandler: RuntimeConfigSnapshotRefreshHandler | null, ): void { diff --git a/src/config/io.write-config.test.ts b/src/config/io.write-config.test.ts index 6b73b9fbd30..68709725d83 100644 --- a/src/config/io.write-config.test.ts +++ b/src/config/io.write-config.test.ts @@ -142,6 +142,28 @@ describe("config io write", () => { }); }); + it.runIf(process.platform !== "win32")( + "tightens world-writable state dir when writing the default config", + async () => { + await withSuiteHome(async (home) => { + const stateDir = path.join(home, ".openclaw"); + await fs.mkdir(stateDir, { recursive: true, mode: 0o777 }); + await fs.chmod(stateDir, 0o777); + + const io = createConfigIO({ + env: {} as NodeJS.ProcessEnv, + homedir: () => home, + logger: silentLogger, + }); + + await io.writeConfigFile({ gateway: { mode: "local" } }); + + const stat = await fs.stat(stateDir); + expect(stat.mode & 0o777).toBe(0o700); + }); + }, + ); + it('shows actionable guidance for dmPolicy="open" without wildcard allowFrom', async () => { await withSuiteHome(async (home) => { const io = createConfigIO({ diff --git a/src/config/markdown-tables.test.ts b/src/config/markdown-tables.test.ts new file mode 100644 index 00000000000..0049ccf9645 --- /dev/null +++ b/src/config/markdown-tables.test.ts @@ -0,0 +1,16 @@ +import { describe, expect, it } from "vitest"; +import { DEFAULT_TABLE_MODES } from "./markdown-tables.js"; + +describe("DEFAULT_TABLE_MODES", () => { + it("mattermost mode is off", () => { + expect(DEFAULT_TABLE_MODES.get("mattermost")).toBe("off"); + }); + + it("signal mode is bullets", () => { + 
expect(DEFAULT_TABLE_MODES.get("signal")).toBe("bullets"); + }); + + it("whatsapp mode is bullets", () => { + expect(DEFAULT_TABLE_MODES.get("whatsapp")).toBe("bullets"); + }); +}); diff --git a/src/config/markdown-tables.ts b/src/config/markdown-tables.ts index 2095cd87b33..def751dce81 100644 --- a/src/config/markdown-tables.ts +++ b/src/config/markdown-tables.ts @@ -14,9 +14,10 @@ type MarkdownConfigSection = MarkdownConfigEntry & { accounts?: Record; }; -const DEFAULT_TABLE_MODES = new Map([ +export const DEFAULT_TABLE_MODES = new Map([ ["signal", "bullets"], ["whatsapp", "bullets"], + ["mattermost", "off"], ]); const isMarkdownTableMode = (value: unknown): value is MarkdownTableMode => diff --git a/src/config/paths.test.ts b/src/config/paths.test.ts index b8afe7674cb..6d2ffcfaf08 100644 --- a/src/config/paths.test.ts +++ b/src/config/paths.test.ts @@ -1,7 +1,7 @@ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import { describe, expect, it } from "vitest"; +import { withTempDir } from "../test-helpers/temp-dir.js"; import { resolveDefaultConfigCandidates, resolveConfigPathCandidate, @@ -37,15 +37,6 @@ describe("oauth paths", () => { }); describe("state + config path candidates", () => { - async function withTempRoot(prefix: string, run: (root: string) => Promise): Promise { - const root = await fs.mkdtemp(path.join(os.tmpdir(), prefix)); - try { - await run(root); - } finally { - await fs.rm(root, { recursive: true, force: true }); - } - } - function expectOpenClawHomeDefaults(env: NodeJS.ProcessEnv): void { const configuredHome = env.OPENCLAW_HOME; if (!configuredHome) { @@ -107,7 +98,7 @@ describe("state + config path candidates", () => { }); it("prefers ~/.openclaw when it exists and legacy dir is missing", async () => { - await withTempRoot("openclaw-state-", async (root) => { + await withTempDir({ prefix: "openclaw-state-" }, async (root) => { const newDir = path.join(root, ".openclaw"); await fs.mkdir(newDir, { 
recursive: true }); const resolved = resolveStateDir({} as NodeJS.ProcessEnv, () => root); @@ -116,7 +107,7 @@ describe("state + config path candidates", () => { }); it("falls back to existing legacy state dir when ~/.openclaw is missing", async () => { - await withTempRoot("openclaw-state-legacy-", async (root) => { + await withTempDir({ prefix: "openclaw-state-legacy-" }, async (root) => { const legacyDir = path.join(root, ".clawdbot"); await fs.mkdir(legacyDir, { recursive: true }); const resolved = resolveStateDir({} as NodeJS.ProcessEnv, () => root); @@ -125,7 +116,7 @@ describe("state + config path candidates", () => { }); it("CONFIG_PATH prefers existing config when present", async () => { - await withTempRoot("openclaw-config-", async (root) => { + await withTempDir({ prefix: "openclaw-config-" }, async (root) => { const legacyDir = path.join(root, ".openclaw"); await fs.mkdir(legacyDir, { recursive: true }); const legacyPath = path.join(legacyDir, "openclaw.json"); @@ -137,7 +128,7 @@ describe("state + config path candidates", () => { }); it("respects state dir overrides when config is missing", async () => { - await withTempRoot("openclaw-config-override-", async (root) => { + await withTempDir({ prefix: "openclaw-config-override-" }, async (root) => { const legacyDir = path.join(root, ".openclaw"); await fs.mkdir(legacyDir, { recursive: true }); const legacyConfig = path.join(legacyDir, "openclaw.json"); diff --git a/src/config/paths.ts b/src/config/paths.ts index 5f9afc85a46..84c27749bcf 100644 --- a/src/config/paths.ts +++ b/src/config/paths.ts @@ -1,7 +1,7 @@ import fs from "node:fs"; import os from "node:os"; import path from "node:path"; -import { expandHomePrefix, resolveRequiredHomeDir } from "../infra/home-dir.js"; +import { resolveHomeRelativePath, resolveRequiredHomeDir } from "../infra/home-dir.js"; import type { OpenClawConfig } from "./types.js"; /** @@ -93,19 +93,7 @@ function resolveUserPath( env: NodeJS.ProcessEnv = process.env, homedir: 
() => string = envHomedir(env), ): string { - const trimmed = input.trim(); - if (!trimmed) { - return trimmed; - } - if (trimmed.startsWith("~")) { - const expanded = expandHomePrefix(trimmed, { - home: resolveRequiredHomeDir(env, homedir), - env, - homedir, - }); - return path.resolve(expanded); - } - return path.resolve(trimmed); + return resolveHomeRelativePath(input, { env, homedir }); } export const STATE_DIR = resolveStateDir(); diff --git a/src/config/plugin-auto-enable.test.ts b/src/config/plugin-auto-enable.test.ts index 52b2c9cc180..c44a600a23f 100644 --- a/src/config/plugin-auto-enable.test.ts +++ b/src/config/plugin-auto-enable.test.ts @@ -1,8 +1,60 @@ -import { describe, expect, it } from "vitest"; -import type { PluginManifestRegistry } from "../plugins/manifest-registry.js"; +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; +import { afterAll, afterEach, describe, expect, it } from "vitest"; +import { clearPluginDiscoveryCache } from "../plugins/discovery.js"; +import { + clearPluginManifestRegistryCache, + type PluginManifestRegistry, +} from "../plugins/manifest-registry.js"; import { validateConfigObject } from "./config.js"; import { applyPluginAutoEnable } from "./plugin-auto-enable.js"; +const tempDirs: string[] = []; +const previousUmask = process.umask(0o022); + +function chmodSafeDir(dir: string) { + if (process.platform === "win32") { + return; + } + fs.chmodSync(dir, 0o755); +} + +function mkdtempSafe(prefix: string) { + const dir = fs.mkdtempSync(prefix); + chmodSafeDir(dir); + return dir; +} + +function mkdirSafe(dir: string) { + fs.mkdirSync(dir, { recursive: true }); + chmodSafeDir(dir); +} + +function makeTempDir() { + const dir = mkdtempSafe(path.join(os.tmpdir(), "openclaw-plugin-auto-enable-")); + tempDirs.push(dir); + return dir; +} + +function writePluginManifestFixture(params: { rootDir: string; id: string; channels: string[] }) { + mkdirSafe(params.rootDir); + fs.writeFileSync( + 
path.join(params.rootDir, "openclaw.plugin.json"), + JSON.stringify( + { + id: params.id, + channels: params.channels, + configSchema: { type: "object" }, + }, + null, + 2, + ), + "utf-8", + ); + fs.writeFileSync(path.join(params.rootDir, "index.ts"), "export default {}", "utf-8"); +} + /** Helper to build a minimal PluginManifestRegistry for testing. */ function makeRegistry(plugins: Array<{ id: string; channels: string[] }>): PluginManifestRegistry { return { @@ -66,6 +118,18 @@ function applyWithBluebubblesImessageConfig(extra?: { }); } +afterEach(() => { + clearPluginDiscoveryCache(); + clearPluginManifestRegistryCache(); + for (const dir of tempDirs.splice(0)) { + fs.rmSync(dir, { recursive: true, force: true }); + } +}); + +afterAll(() => { + process.umask(previousUmask); +}); + describe("applyPluginAutoEnable", () => { it("auto-enables built-in channels and appends to existing allowlist", () => { const result = applyWithSlackConfig({ plugins: { allow: ["telegram"] } }); @@ -158,6 +222,80 @@ describe("applyPluginAutoEnable", () => { expect(result.changes.join("\n")).toContain("IRC configured, enabled automatically."); }); + it("uses the provided env when loading plugin manifests automatically", () => { + const stateDir = makeTempDir(); + const pluginDir = path.join(stateDir, "extensions", "apn-channel"); + writePluginManifestFixture({ + rootDir: pluginDir, + id: "apn-channel", + channels: ["apn"], + }); + + const result = applyPluginAutoEnable({ + config: { + channels: { apn: { someKey: "value" } }, + }, + env: { + ...process.env, + OPENCLAW_HOME: undefined, + OPENCLAW_STATE_DIR: stateDir, + CLAWDBOT_STATE_DIR: undefined, + OPENCLAW_BUNDLED_PLUGINS_DIR: "/nonexistent/bundled/plugins", + }, + }); + + expect(result.config.plugins?.entries?.["apn-channel"]?.enabled).toBe(true); + expect(result.config.plugins?.entries?.apn).toBeUndefined(); + }); + + it("uses env-scoped catalog metadata for preferOver auto-enable decisions", () => { + const stateDir = 
makeTempDir(); + const catalogPath = path.join(stateDir, "plugins", "catalog.json"); + mkdirSafe(path.dirname(catalogPath)); + fs.writeFileSync( + catalogPath, + JSON.stringify({ + entries: [ + { + name: "@openclaw/env-secondary", + openclaw: { + channel: { + id: "env-secondary", + label: "Env Secondary", + selectionLabel: "Env Secondary", + docsPath: "/channels/env-secondary", + blurb: "Env secondary entry", + preferOver: ["env-primary"], + }, + install: { + npmSpec: "@openclaw/env-secondary", + }, + }, + }, + ], + }), + "utf-8", + ); + + const result = applyPluginAutoEnable({ + config: { + channels: { + "env-primary": { enabled: true }, + "env-secondary": { enabled: true }, + }, + }, + env: { + ...process.env, + OPENCLAW_STATE_DIR: stateDir, + CLAWDBOT_STATE_DIR: undefined, + }, + manifestRegistry: makeRegistry([]), + }); + + expect(result.config.plugins?.entries?.["env-secondary"]?.enabled).toBe(true); + expect(result.config.plugins?.entries?.["env-primary"]?.enabled).toBeUndefined(); + }); + it("auto-enables provider auth plugins when profiles exist", () => { const result = applyPluginAutoEnable({ config: { @@ -311,5 +449,29 @@ describe("applyPluginAutoEnable", () => { expect(result.config.channels?.imessage?.enabled).toBe(true); expect(result.changes.join("\n")).toContain("iMessage configured, enabled automatically."); }); + + it("uses the provided env when loading installed plugin manifests", () => { + const stateDir = makeTempDir(); + const pluginDir = path.join(stateDir, "extensions", "apn-channel"); + writePluginManifestFixture({ + rootDir: pluginDir, + id: "apn-channel", + channels: ["apn"], + }); + + const result = applyPluginAutoEnable({ + config: makeApnChannelConfig(), + env: { + ...process.env, + OPENCLAW_HOME: undefined, + OPENCLAW_STATE_DIR: stateDir, + CLAWDBOT_STATE_DIR: undefined, + OPENCLAW_BUNDLED_PLUGINS_DIR: "/nonexistent/bundled/plugins", + }, + }); + + expect(result.config.plugins?.entries?.["apn-channel"]?.enabled).toBe(true); + 
expect(result.config.plugins?.entries?.apn).toBeUndefined(); + }); }); }); diff --git a/src/config/plugin-auto-enable.ts b/src/config/plugin-auto-enable.ts index eccb6f980ed..5c365fb5cc8 100644 --- a/src/config/plugin-auto-enable.ts +++ b/src/config/plugin-auto-enable.ts @@ -27,13 +27,6 @@ export type PluginAutoEnableResult = { changes: string[]; }; -const CHANNEL_PLUGIN_IDS = Array.from( - new Set([ - ...listChatChannels().map((meta) => meta.id), - ...listChannelPluginCatalogEntries().map((entry) => entry.id), - ]), -); - const PROVIDER_PLUGIN_IDS: Array<{ pluginId: string; providerId: string }> = [ { pluginId: "google-gemini-cli-auth", providerId: "google-gemini-cli" }, { pluginId: "qwen-portal-auth", providerId: "qwen-portal" }, @@ -315,8 +308,17 @@ function resolvePluginIdForChannel( return channelToPluginId.get(channelId) ?? channelId; } -function collectCandidateChannelIds(cfg: OpenClawConfig): string[] { - const channelIds = new Set(CHANNEL_PLUGIN_IDS); +function listKnownChannelPluginIds(env: NodeJS.ProcessEnv): string[] { + return Array.from( + new Set([ + ...listChatChannels().map((meta) => meta.id), + ...listChannelPluginCatalogEntries({ env }).map((entry) => entry.id), + ]), + ); +} + +function collectCandidateChannelIds(cfg: OpenClawConfig, env: NodeJS.ProcessEnv): string[] { + const channelIds = new Set(listKnownChannelPluginIds(env)); const configuredChannels = cfg.channels as Record | undefined; if (!configuredChannels || typeof configuredChannels !== "object") { return Array.from(channelIds); @@ -339,7 +341,7 @@ function resolveConfiguredPlugins( const changes: PluginEnableChange[] = []; // Build reverse map: channel ID → plugin ID from installed plugin manifests. 
const channelToPluginId = buildChannelToPluginIdMap(registry); - for (const channelId of collectCandidateChannelIds(cfg)) { + for (const channelId of collectCandidateChannelIds(cfg, env)) { const pluginId = resolvePluginIdForChannel(channelId, channelToPluginId); if (isChannelConfigured(cfg, channelId, env)) { changes.push({ pluginId, reason: `${channelId} configured` }); @@ -390,12 +392,12 @@ function isPluginDenied(cfg: OpenClawConfig, pluginId: string): boolean { return Array.isArray(deny) && deny.includes(pluginId); } -function resolvePreferredOverIds(pluginId: string): string[] { +function resolvePreferredOverIds(pluginId: string, env: NodeJS.ProcessEnv): string[] { const normalized = normalizeChatChannelId(pluginId); if (normalized) { return getChatChannelMeta(normalized).preferOver ?? []; } - const catalogEntry = getChannelPluginCatalogEntry(pluginId); + const catalogEntry = getChannelPluginCatalogEntry(pluginId, { env }); return catalogEntry?.meta.preferOver ?? []; } @@ -403,6 +405,7 @@ function shouldSkipPreferredPluginAutoEnable( cfg: OpenClawConfig, entry: PluginEnableChange, configured: PluginEnableChange[], + env: NodeJS.ProcessEnv, ): boolean { for (const other of configured) { if (other.pluginId === entry.pluginId) { @@ -414,7 +417,7 @@ function shouldSkipPreferredPluginAutoEnable( if (isPluginExplicitlyDisabled(cfg, other.pluginId)) { continue; } - const preferOver = resolvePreferredOverIds(other.pluginId); + const preferOver = resolvePreferredOverIds(other.pluginId, env); if (preferOver.includes(entry.pluginId)) { return true; } @@ -477,7 +480,8 @@ export function applyPluginAutoEnable(params: { manifestRegistry?: PluginManifestRegistry; }): PluginAutoEnableResult { const env = params.env ?? process.env; - const registry = params.manifestRegistry ?? loadPluginManifestRegistry({ config: params.config }); + const registry = + params.manifestRegistry ?? 
loadPluginManifestRegistry({ config: params.config, env }); const configured = resolveConfiguredPlugins(params.config, env, registry); if (configured.length === 0) { return { config: params.config, changes: [] }; @@ -498,7 +502,7 @@ export function applyPluginAutoEnable(params: { if (isPluginExplicitlyDisabled(next, entry.pluginId)) { continue; } - if (shouldSkipPreferredPluginAutoEnable(next, entry, configured)) { + if (shouldSkipPreferredPluginAutoEnable(next, entry, configured, env)) { continue; } const allow = next.plugins?.allow; diff --git a/src/config/schema.help.quality.test.ts b/src/config/schema.help.quality.test.ts index fa9451456bf..f74728e360b 100644 --- a/src/config/schema.help.quality.test.ts +++ b/src/config/schema.help.quality.test.ts @@ -72,6 +72,10 @@ const TARGET_KEYS = [ "agents.defaults.memorySearch.fallback", "agents.defaults.memorySearch.sources", "agents.defaults.memorySearch.extraPaths", + "agents.defaults.memorySearch.multimodal", + "agents.defaults.memorySearch.multimodal.enabled", + "agents.defaults.memorySearch.multimodal.modalities", + "agents.defaults.memorySearch.multimodal.maxFileBytes", "agents.defaults.memorySearch.experimental.sessionMemory", "agents.defaults.memorySearch.remote.baseUrl", "agents.defaults.memorySearch.remote.apiKey", @@ -83,6 +87,7 @@ const TARGET_KEYS = [ "agents.defaults.memorySearch.remote.batch.timeoutMinutes", "agents.defaults.memorySearch.local.modelPath", "agents.defaults.memorySearch.store.path", + "agents.defaults.memorySearch.outputDimensionality", "agents.defaults.memorySearch.store.vector.enabled", "agents.defaults.memorySearch.store.vector.extensionPath", "agents.defaults.memorySearch.query.hybrid.enabled", @@ -291,6 +296,7 @@ const TARGET_KEYS = [ "web.reconnect.jitter", "web.reconnect.maxAttempts", "discovery", + "discovery.wideArea.domain", "discovery.wideArea.enabled", "discovery.mdns", "discovery.mdns.mode", @@ -522,6 +528,12 @@ const CHANNELS_AGENTS_TARGET_KEYS = [ "channels.telegram", 
"channels.telegram.botToken", "channels.telegram.capabilities.inlineButtons", + "channels.telegram.execApprovals", + "channels.telegram.execApprovals.enabled", + "channels.telegram.execApprovals.approvers", + "channels.telegram.execApprovals.agentFilter", + "channels.telegram.execApprovals.sessionFilter", + "channels.telegram.execApprovals.target", "channels.whatsapp", ] as const; diff --git a/src/config/schema.help.ts b/src/config/schema.help.ts index 08c579f89e3..215a17d77d8 100644 --- a/src/config/schema.help.ts +++ b/src/config/schema.help.ts @@ -292,6 +292,8 @@ export const FIELD_HELP: Record = { "Wide-area discovery configuration group for exposing discovery signals beyond local-link scopes. Enable only in deployments that intentionally aggregate gateway presence across sites.", "discovery.wideArea.enabled": "Enables wide-area discovery signaling when your environment needs non-local gateway discovery. Keep disabled unless cross-network discovery is operationally required.", + "discovery.wideArea.domain": + "Optional unicast DNS-SD domain for wide-area discovery, such as openclaw.internal. Use this when you intentionally publish gateway discovery beyond local mDNS scopes.", "discovery.mdns": "mDNS discovery configuration group for local network advertisement and discovery behavior tuning. Keep minimal mode for routine LAN discovery unless extra metadata is required.", tools: @@ -386,6 +388,16 @@ export const FIELD_HELP: Record = { "Loosens strict browser auth checks for Control UI when you must run a non-standard setup. Keep this off unless you trust your network and proxy path, because impersonation risk is higher.", "gateway.controlUi.dangerouslyDisableDeviceAuth": "Disables Control UI device identity checks and relies on token/password only. Use only for short-lived debugging on trusted networks, then turn it off immediately.", + "gateway.push": + "Push-delivery settings used by the gateway when it needs to wake or notify paired devices. 
Configure relay-backed APNs here for official iOS builds; direct APNs auth remains env-based for local/manual builds.", + "gateway.push.apns": + "APNs delivery settings for iOS devices paired to this gateway. Use relay settings for official/TestFlight builds that register through the external push relay.", + "gateway.push.apns.relay": + "External relay settings for relay-backed APNs sends. The gateway uses this relay for push.test, wake nudges, and reconnect wakes after a paired official iOS build publishes a relay-backed registration.", + "gateway.push.apns.relay.baseUrl": + "Base HTTPS URL for the external APNs relay service used by official/TestFlight iOS builds. Keep this aligned with the relay URL baked into the iOS build so registration and send traffic hit the same deployment.", + "gateway.push.apns.relay.timeoutMs": + "Timeout in milliseconds for relay send requests from the gateway to the APNs relay (default: 10000). Increase for slower relays or networks, or lower to fail wake attempts faster.", "gateway.http.endpoints.chatCompletions.enabled": "Enable the OpenAI-compatible `POST /v1/chat/completions` endpoint (default: false).", "gateway.http.endpoints.chatCompletions.maxBodyBytes": @@ -778,13 +790,23 @@ export const FIELD_HELP: Record = { "agents.defaults.memorySearch.sources": 'Chooses which sources are indexed: "memory" reads MEMORY.md + memory files, and "sessions" includes transcript history. Keep ["memory"] unless you need recall from prior chat transcripts.', "agents.defaults.memorySearch.extraPaths": - "Adds extra directories or .md files to the memory index beyond default memory files. Use this when key reference docs live elsewhere in your repo; keep paths small and intentional to avoid noisy recall.", + "Adds extra directories or .md files to the memory index beyond default memory files. 
Use this when key reference docs live elsewhere in your repo; when multimodal memory is enabled, matching image/audio files under these paths are also eligible for indexing.", + "agents.defaults.memorySearch.multimodal": + 'Optional multimodal memory settings for indexing image and audio files from configured extra paths. Keep this off unless your embedding model explicitly supports cross-modal embeddings, and set `memorySearch.fallback` to "none" while it is enabled. Matching files are uploaded to the configured remote embedding provider during indexing.', + "agents.defaults.memorySearch.multimodal.enabled": + "Enables image/audio memory indexing from extraPaths. This currently requires Gemini embedding-2, keeps the default memory roots Markdown-only, disables memory-search fallback providers, and uploads matching binary content to the configured remote embedding provider.", + "agents.defaults.memorySearch.multimodal.modalities": + 'Selects which multimodal file types are indexed from extraPaths: "image", "audio", or "all". Keep this narrow to avoid indexing large binary corpora unintentionally.', + "agents.defaults.memorySearch.multimodal.maxFileBytes": + "Sets the maximum bytes allowed per multimodal file before it is skipped during memory indexing. Use this to cap upload cost and indexing latency, or raise it for short high-quality audio clips.", "agents.defaults.memorySearch.experimental.sessionMemory": "Indexes session transcripts into memory search so responses can reference prior chat turns. Keep this off unless transcript recall is needed, because indexing cost and storage usage both increase.", "agents.defaults.memorySearch.provider": 'Selects the embedding backend used to build/query memory vectors: "openai", "gemini", "voyage", "mistral", "ollama", or "local". 
Keep your most reliable provider here and configure fallback for resilience.', "agents.defaults.memorySearch.model": "Embedding model override used by the selected memory provider when a non-default model is required. Set this only when you need explicit recall quality/cost tuning beyond provider defaults.", + "agents.defaults.memorySearch.outputDimensionality": + "Gemini embedding-2 only: chooses the output vector size for memory embeddings. Use 768, 1536, or 3072 (default), and expect a full reindex when you change it because stored vector dimensions must stay consistent.", "agents.defaults.memorySearch.remote.baseUrl": "Overrides the embedding API endpoint, such as an OpenAI-compatible proxy or custom Gemini base URL. Use this only when routing through your own gateway or vendor endpoint; keep provider defaults otherwise.", "agents.defaults.memorySearch.remote.apiKey": @@ -910,6 +932,8 @@ export const FIELD_HELP: Record = { "Requires at least this many newly appended bytes before session transcript changes trigger reindex (default: 100000). Increase to reduce frequent small reindexes, or lower for faster transcript freshness.", "agents.defaults.memorySearch.sync.sessions.deltaMessages": "Requires at least this many appended transcript messages before reindex is triggered (default: 50). Lower this for near-real-time transcript recall, or raise it to reduce indexing churn.", + "agents.defaults.memorySearch.sync.sessions.postCompactionForce": + "Forces a session memory-search reindex after compaction-triggered transcript updates (default: true). Keep enabled when compacted summaries must be immediately searchable, or disable to reduce write-time indexing pressure.", ui: "UI presentation settings for accenting and assistant identity shown in control surfaces. Use this for branding and readability customization without changing runtime behavior.", "ui.seamColor": "Primary accent/seam color used by UI surfaces for emphasis, badges, and visual identity cues. 
Use high-contrast values that remain readable across light/dark themes.", @@ -1013,6 +1037,8 @@ export const FIELD_HELP: Record = { "Enables summary quality audits and regeneration retries for safeguard compaction. Default: false, so safeguard mode alone does not turn on retry behavior.", "agents.defaults.compaction.qualityGuard.maxRetries": "Maximum number of regeneration retries after a failed safeguard summary quality audit. Use small values to bound extra latency and token cost.", + "agents.defaults.compaction.postIndexSync": + 'Controls post-compaction session memory reindex mode: "off", "async", or "await" (default: "async"). Use "await" for strongest freshness, "async" for lower compaction latency, and "off" only when session-memory sync is handled elsewhere.', "agents.defaults.compaction.postCompactionSections": 'AGENTS.md H2/H3 section names re-injected after compaction so the agent reruns critical startup guidance. Leave unset to use "Session Startup"/"Red Lines" with legacy fallback to "Every Session"/"Safety"; set to [] to disable reinjection entirely.', "agents.defaults.compaction.model": @@ -1383,6 +1409,18 @@ export const FIELD_HELP: Record = { "Telegram bot token used to authenticate Bot API requests for this account/provider config. Use secret/env substitution and rotate tokens if exposure is suspected.", "channels.telegram.capabilities.inlineButtons": "Enable Telegram inline button components for supported command and interaction surfaces. Disable if your deployment needs plain-text-only compatibility behavior.", + "channels.telegram.execApprovals": + "Telegram-native exec approval routing and approver authorization. Enable this only when Telegram should act as an explicit exec-approval client for the selected bot account.", + "channels.telegram.execApprovals.enabled": + "Enable Telegram exec approvals for this account. 
When false or unset, Telegram messages/buttons cannot approve exec requests.", + "channels.telegram.execApprovals.approvers": + "Telegram user IDs allowed to approve exec requests for this bot account. Use numeric Telegram user IDs; prompts are only delivered to these approvers when target includes dm.", + "channels.telegram.execApprovals.agentFilter": + 'Optional allowlist of agent IDs eligible for Telegram exec approvals, for example `["main", "ops-agent"]`. Use this to keep approval prompts scoped to the agents you actually operate from Telegram.', + "channels.telegram.execApprovals.sessionFilter": + "Optional session-key filters matched as substring or regex-style patterns before Telegram approval routing is used. Use narrow patterns so Telegram approvals only appear for intended sessions.", + "channels.telegram.execApprovals.target": + 'Controls where Telegram approval prompts are sent: "dm" sends to approver DMs (default), "channel" sends to the originating Telegram chat/topic, and "both" sends to both. Channel delivery exposes the command text to the chat, so only use it in trusted groups/topics.', "channels.slack.configWrites": "Allow Slack to write config in response to channel events/commands (default: true).", "channels.slack.botToken": @@ -1393,6 +1431,8 @@ export const FIELD_HELP: Record = { "Optional Slack user token for workflows requiring user-context API access beyond bot permissions. Use sparingly and audit scopes because this token can carry broader authority.", "channels.slack.userTokenReadOnly": "When true, treat configured Slack user token usage as read-only helper behavior where possible. Keep enabled if you only need supplemental reads without user-context writes.", + "channels.slack.capabilities.interactiveReplies": + "Enable agent-authored Slack interactive reply directives (`[[slack_buttons: ...]]`, `[[slack_select: ...]]`). 
Default: false.", "channels.mattermost.configWrites": "Allow Mattermost to write config in response to channel events/commands (default: true).", "channels.discord.configWrites": @@ -1445,7 +1485,7 @@ export const FIELD_HELP: Record = { "messages.statusReactions.enabled": "Enable lifecycle status reactions for Telegram. When enabled, the ack reaction becomes the initial 'queued' state and progresses through thinking, tool, done/error automatically. Default: false.", "messages.statusReactions.emojis": - "Override default status reaction emojis. Keys: thinking, tool, coding, web, done, error, stallSoft, stallHard. Must be valid Telegram reaction emojis.", + "Override default status reaction emojis. Keys: thinking, compacting, tool, coding, web, done, error, stallSoft, stallHard. Must be valid Telegram reaction emojis.", "messages.statusReactions.timing": "Override default timing. Keys: debounceMs (700), stallSoftMs (25000), stallHardMs (60000), doneHoldMs (1500), errorHoldMs (2500).", "messages.inbound.debounceMs": diff --git a/src/config/schema.hints.ts b/src/config/schema.hints.ts index 64d1acde778..9d56ff2566c 100644 --- a/src/config/schema.hints.ts +++ b/src/config/schema.hints.ts @@ -75,6 +75,7 @@ const FIELD_PLACEHOLDERS: Record = { "gateway.controlUi.basePath": "/openclaw", "gateway.controlUi.root": "dist/control-ui", "gateway.controlUi.allowedOrigins": "https://control.example.com", + "gateway.push.apns.relay.baseUrl": "https://relay.example.com", "channels.mattermost.baseUrl": "https://chat.example.com", "agents.list[].identity.avatar": "avatars/openclaw.png", }; diff --git a/src/config/schema.labels.ts b/src/config/schema.labels.ts index 16bf21e8daf..9b1fdb73445 100644 --- a/src/config/schema.labels.ts +++ b/src/config/schema.labels.ts @@ -250,6 +250,11 @@ export const FIELD_LABELS: Record = { "Dangerously Allow Host-Header Origin Fallback", "gateway.controlUi.allowInsecureAuth": "Insecure Control UI Auth Toggle", 
"gateway.controlUi.dangerouslyDisableDeviceAuth": "Dangerously Disable Control UI Device Auth", + "gateway.push": "Gateway Push Delivery", + "gateway.push.apns": "Gateway APNs Delivery", + "gateway.push.apns.relay": "Gateway APNs Relay", + "gateway.push.apns.relay.baseUrl": "Gateway APNs Relay Base URL", + "gateway.push.apns.relay.timeoutMs": "Gateway APNs Relay Timeout (ms)", "gateway.http.endpoints.chatCompletions.enabled": "OpenAI Chat Completions Endpoint", "gateway.http.endpoints.chatCompletions.maxBodyBytes": "OpenAI Chat Completions Max Body Bytes", "gateway.http.endpoints.chatCompletions.maxImageParts": "OpenAI Chat Completions Max Image Parts", @@ -319,6 +324,10 @@ export const FIELD_LABELS: Record = { "agents.defaults.memorySearch.enabled": "Enable Memory Search", "agents.defaults.memorySearch.sources": "Memory Search Sources", "agents.defaults.memorySearch.extraPaths": "Extra Memory Paths", + "agents.defaults.memorySearch.multimodal": "Memory Search Multimodal", + "agents.defaults.memorySearch.multimodal.enabled": "Enable Memory Search Multimodal", + "agents.defaults.memorySearch.multimodal.modalities": "Memory Search Multimodal Modalities", + "agents.defaults.memorySearch.multimodal.maxFileBytes": "Memory Search Multimodal Max File Bytes", "agents.defaults.memorySearch.experimental.sessionMemory": "Memory Search Session Index (Experimental)", "agents.defaults.memorySearch.provider": "Memory Search Provider", @@ -331,6 +340,7 @@ export const FIELD_LABELS: Record = { "agents.defaults.memorySearch.remote.batch.pollIntervalMs": "Remote Batch Poll Interval (ms)", "agents.defaults.memorySearch.remote.batch.timeoutMinutes": "Remote Batch Timeout (min)", "agents.defaults.memorySearch.model": "Memory Search Model", + "agents.defaults.memorySearch.outputDimensionality": "Memory Search Output Dimensionality", "agents.defaults.memorySearch.fallback": "Memory Search Fallback", "agents.defaults.memorySearch.local.modelPath": "Local Embedding Model Path", 
"agents.defaults.memorySearch.store.path": "Memory Search Index Path", @@ -344,6 +354,8 @@ export const FIELD_LABELS: Record = { "agents.defaults.memorySearch.sync.watchDebounceMs": "Memory Watch Debounce (ms)", "agents.defaults.memorySearch.sync.sessions.deltaBytes": "Session Delta Bytes", "agents.defaults.memorySearch.sync.sessions.deltaMessages": "Session Delta Messages", + "agents.defaults.memorySearch.sync.sessions.postCompactionForce": + "Force Reindex After Compaction", "agents.defaults.memorySearch.query.maxResults": "Memory Search Max Results", "agents.defaults.memorySearch.query.minScore": "Memory Search Min Score", "agents.defaults.memorySearch.query.hybrid.enabled": "Memory Search Hybrid", @@ -458,6 +470,7 @@ export const FIELD_LABELS: Record = { "agents.defaults.compaction.qualityGuard": "Compaction Quality Guard", "agents.defaults.compaction.qualityGuard.enabled": "Compaction Quality Guard Enabled", "agents.defaults.compaction.qualityGuard.maxRetries": "Compaction Quality Guard Max Retries", + "agents.defaults.compaction.postIndexSync": "Compaction Post-Index Sync", "agents.defaults.compaction.postCompactionSections": "Post-Compaction Context Sections", "agents.defaults.compaction.model": "Compaction Model Override", "agents.defaults.compaction.memoryFlush": "Compaction Memory Flush", @@ -641,6 +654,7 @@ export const FIELD_LABELS: Record = { discovery: "Discovery", "discovery.wideArea": "Wide-area Discovery", "discovery.wideArea.enabled": "Wide-area Discovery Enabled", + "discovery.wideArea.domain": "Wide-area Discovery Domain", "discovery.mdns": "mDNS Discovery", canvasHost: "Canvas Host", "canvasHost.enabled": "Canvas Host Enabled", @@ -719,6 +733,12 @@ export const FIELD_LABELS: Record = { "channels.telegram.network.autoSelectFamily": "Telegram autoSelectFamily", "channels.telegram.timeoutSeconds": "Telegram API Timeout (seconds)", "channels.telegram.capabilities.inlineButtons": "Telegram Inline Buttons", + "channels.telegram.execApprovals": 
"Telegram Exec Approvals", + "channels.telegram.execApprovals.enabled": "Telegram Exec Approvals Enabled", + "channels.telegram.execApprovals.approvers": "Telegram Exec Approval Approvers", + "channels.telegram.execApprovals.agentFilter": "Telegram Exec Approval Agent Filter", + "channels.telegram.execApprovals.sessionFilter": "Telegram Exec Approval Session Filter", + "channels.telegram.execApprovals.target": "Telegram Exec Approval Target", "channels.telegram.threadBindings.enabled": "Telegram Thread Binding Enabled", "channels.telegram.threadBindings.idleHours": "Telegram Thread Binding Idle Timeout (hours)", "channels.telegram.threadBindings.maxAgeHours": "Telegram Thread Binding Max Age (hours)", @@ -793,6 +813,7 @@ export const FIELD_LABELS: Record = { "channels.slack.appToken": "Slack App Token", "channels.slack.userToken": "Slack User Token", "channels.slack.userTokenReadOnly": "Slack User Token Read Only", + "channels.slack.capabilities.interactiveReplies": "Slack Interactive Replies", "channels.slack.streaming": "Slack Streaming Mode", "channels.slack.nativeStreaming": "Slack Native Streaming", "channels.slack.streamMode": "Slack Stream Mode (Legacy)", diff --git a/src/config/schema.tags.ts b/src/config/schema.tags.ts index 82bdc1d87cd..1abfb90d656 100644 --- a/src/config/schema.tags.ts +++ b/src/config/schema.tags.ts @@ -41,6 +41,7 @@ const TAG_PRIORITY: Record = { const TAG_OVERRIDES: Record = { "gateway.auth.token": ["security", "auth", "access", "network"], "gateway.auth.password": ["security", "auth", "access", "network"], + "gateway.push.apns.relay.baseUrl": ["network", "advanced"], "gateway.controlUi.dangerouslyAllowHostHeaderOriginFallback": [ "security", "access", diff --git a/src/config/schema.test.ts b/src/config/schema.test.ts index 54aaa79c846..3d6ecced2ca 100644 --- a/src/config/schema.test.ts +++ b/src/config/schema.test.ts @@ -1,6 +1,7 @@ import { beforeAll, describe, expect, it } from "vitest"; import { buildConfigSchema, 
lookupConfigSchema } from "./schema.js"; import { applyDerivedTags, CONFIG_TAGS, deriveTagsForPath } from "./schema.tags.js"; +import { ToolsSchema } from "./zod-schema.agent-runtime.js"; describe("config schema", () => { type SchemaInput = NonNullable[0]>; @@ -200,6 +201,51 @@ describe("config schema", () => { expect(tags).toContain("performance"); }); + it("accepts web fetch readability and firecrawl config in the runtime zod schema", () => { + const parsed = ToolsSchema.parse({ + web: { + fetch: { + readability: true, + firecrawl: { + enabled: true, + apiKey: "firecrawl-test-key", + baseUrl: "https://api.firecrawl.dev", + onlyMainContent: true, + maxAgeMs: 60_000, + timeoutSeconds: 15, + }, + }, + }, + }); + + expect(parsed?.web?.fetch?.readability).toBe(true); + expect(parsed?.web?.fetch).toMatchObject({ + firecrawl: { + enabled: true, + apiKey: "firecrawl-test-key", + baseUrl: "https://api.firecrawl.dev", + onlyMainContent: true, + maxAgeMs: 60_000, + timeoutSeconds: 15, + }, + }); + }); + + it("rejects unknown keys inside web fetch firecrawl config", () => { + expect(() => + ToolsSchema.parse({ + web: { + fetch: { + firecrawl: { + enabled: true, + nope: true, + }, + }, + }, + }), + ).toThrow(); + }); + it("keeps tags in the allowed taxonomy", () => { const withTags = applyDerivedTags({ "gateway.auth.token": {}, diff --git a/src/config/sessions.ts b/src/config/sessions.ts index 701870ec8a7..1a521836405 100644 --- a/src/config/sessions.ts +++ b/src/config/sessions.ts @@ -11,3 +11,4 @@ export * from "./sessions/transcript.js"; export * from "./sessions/session-file.js"; export * from "./sessions/delivery-info.js"; export * from "./sessions/disk-budget.js"; +export * from "./sessions/targets.js"; diff --git a/src/config/sessions/paths.ts b/src/config/sessions/paths.ts index 6112fd6d31c..1be7aec6299 100644 --- a/src/config/sessions/paths.ts +++ b/src/config/sessions/paths.ts @@ -276,19 +276,24 @@ export function resolveSessionFilePath( return 
resolveSessionTranscriptPathInDir(sessionId, sessionsDir); } -export function resolveStorePath(store?: string, opts?: { agentId?: string }) { +export function resolveStorePath( + store?: string, + opts?: { agentId?: string; env?: NodeJS.ProcessEnv }, +) { const agentId = normalizeAgentId(opts?.agentId ?? DEFAULT_AGENT_ID); + const env = opts?.env ?? process.env; + const homedir = () => resolveRequiredHomeDir(env, os.homedir); if (!store) { - return resolveDefaultSessionStorePath(agentId); + return path.join(resolveAgentSessionsDir(agentId, env, homedir), "sessions.json"); } if (store.includes("{agentId}")) { const expanded = store.replaceAll("{agentId}", agentId); if (expanded.startsWith("~")) { return path.resolve( expandHomePrefix(expanded, { - home: resolveRequiredHomeDir(process.env, os.homedir), - env: process.env, - homedir: os.homedir, + home: resolveRequiredHomeDir(env, homedir), + env, + homedir, }), ); } @@ -297,11 +302,28 @@ export function resolveStorePath(store?: string, opts?: { agentId?: string }) { if (store.startsWith("~")) { return path.resolve( expandHomePrefix(store, { - home: resolveRequiredHomeDir(process.env, os.homedir), - env: process.env, - homedir: os.homedir, + home: resolveRequiredHomeDir(env, homedir), + env, + homedir, }), ); } return path.resolve(store); } + +export function resolveAgentsDirFromSessionStorePath(storePath: string): string | undefined { + const candidateAbsPath = path.resolve(storePath); + if (path.basename(candidateAbsPath) !== "sessions.json") { + return undefined; + } + const sessionsDir = path.dirname(candidateAbsPath); + if (path.basename(sessionsDir) !== "sessions") { + return undefined; + } + const agentDir = path.dirname(sessionsDir); + const agentsDir = path.dirname(agentDir); + if (path.basename(agentsDir) !== "agents") { + return undefined; + } + return agentsDir; +} diff --git a/src/config/sessions/sessions.test.ts b/src/config/sessions/sessions.test.ts index dfe4b74e9b2..2773b6d0fe7 100644 --- 
a/src/config/sessions/sessions.test.ts +++ b/src/config/sessions/sessions.test.ts @@ -283,18 +283,25 @@ describe("session store lock (Promise chain mutex)", () => { describe("appendAssistantMessageToSessionTranscript", () => { const fixture = useTempSessionsFixture("transcript-test-"); + const sessionId = "test-session-id"; + const sessionKey = "test-session"; + + function writeTranscriptStore() { + fs.writeFileSync( + fixture.storePath(), + JSON.stringify({ + [sessionKey]: { + sessionId, + chatType: "direct", + channel: "discord", + }, + }), + "utf-8", + ); + } it("creates transcript file and appends message for valid session", async () => { - const sessionId = "test-session-id"; - const sessionKey = "test-session"; - const store = { - [sessionKey]: { - sessionId, - chatType: "direct", - channel: "discord", - }, - }; - fs.writeFileSync(fixture.storePath(), JSON.stringify(store), "utf-8"); + writeTranscriptStore(); const result = await appendAssistantMessageToSessionTranscript({ sessionKey, @@ -324,6 +331,70 @@ describe("appendAssistantMessageToSessionTranscript", () => { expect(messageLine.message.content[0].text).toBe("Hello from delivery mirror!"); } }); + + it("does not append a duplicate delivery mirror for the same idempotency key", async () => { + writeTranscriptStore(); + + await appendAssistantMessageToSessionTranscript({ + sessionKey, + text: "Hello from delivery mirror!", + idempotencyKey: "mirror:test-source-message", + storePath: fixture.storePath(), + }); + await appendAssistantMessageToSessionTranscript({ + sessionKey, + text: "Hello from delivery mirror!", + idempotencyKey: "mirror:test-source-message", + storePath: fixture.storePath(), + }); + + const sessionFile = resolveSessionTranscriptPathInDir(sessionId, fixture.sessionsDir()); + const lines = fs.readFileSync(sessionFile, "utf-8").trim().split("\n"); + expect(lines.length).toBe(2); + + const messageLine = JSON.parse(lines[1]); + 
expect(messageLine.message.idempotencyKey).toBe("mirror:test-source-message"); + expect(messageLine.message.content[0].text).toBe("Hello from delivery mirror!"); + }); + + it("ignores malformed transcript lines when checking mirror idempotency", async () => { + writeTranscriptStore(); + + const sessionFile = resolveSessionTranscriptPathInDir(sessionId, fixture.sessionsDir()); + fs.writeFileSync( + sessionFile, + [ + JSON.stringify({ + type: "session", + version: 1, + id: sessionId, + timestamp: new Date().toISOString(), + cwd: process.cwd(), + }), + "{not-json", + JSON.stringify({ + type: "message", + message: { + role: "assistant", + idempotencyKey: "mirror:test-source-message", + content: [{ type: "text", text: "Hello from delivery mirror!" }], + }, + }), + ].join("\n") + "\n", + "utf-8", + ); + + const result = await appendAssistantMessageToSessionTranscript({ + sessionKey, + text: "Hello from delivery mirror!", + idempotencyKey: "mirror:test-source-message", + storePath: fixture.storePath(), + }); + + expect(result.ok).toBe(true); + const lines = fs.readFileSync(sessionFile, "utf-8").trim().split("\n"); + expect(lines.length).toBe(3); + }); }); describe("resolveAndPersistSessionFile", () => { diff --git a/src/config/sessions/targets.test.ts b/src/config/sessions/targets.test.ts new file mode 100644 index 00000000000..43674233a3a --- /dev/null +++ b/src/config/sessions/targets.test.ts @@ -0,0 +1,293 @@ +import fsSync from "node:fs"; +import fs from "node:fs/promises"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { withTempHome } from "../../../test/helpers/temp-home.js"; +import type { OpenClawConfig } from "../config.js"; +import { + resolveAllAgentSessionStoreTargets, + resolveAllAgentSessionStoreTargetsSync, + resolveSessionStoreTargets, +} from "./targets.js"; + +async function resolveRealStorePath(sessionsDir: string): Promise { + // Match the native realpath behavior used by both discovery paths. 
+ return fsSync.realpathSync.native(path.join(sessionsDir, "sessions.json")); +} + +async function createAgentSessionStores( + root: string, + agentIds: string[], +): Promise> { + const storePaths: Record = {}; + for (const agentId of agentIds) { + const sessionsDir = path.join(root, "agents", agentId, "sessions"); + await fs.mkdir(sessionsDir, { recursive: true }); + await fs.writeFile(path.join(sessionsDir, "sessions.json"), "{}", "utf8"); + storePaths[agentId] = await resolveRealStorePath(sessionsDir); + } + return storePaths; +} + +function createCustomRootCfg(customRoot: string, defaultAgentId = "ops"): OpenClawConfig { + return { + session: { + store: path.join(customRoot, "agents", "{agentId}", "sessions", "sessions.json"), + }, + agents: { + list: [{ id: defaultAgentId, default: true }], + }, + }; +} + +async function resolveTargetsForCustomRoot(home: string, agentIds: string[]) { + const customRoot = path.join(home, "custom-state"); + const storePaths = await createAgentSessionStores(customRoot, agentIds); + const cfg = createCustomRootCfg(customRoot); + const targets = await resolveAllAgentSessionStoreTargets(cfg, { env: process.env }); + return { storePaths, targets }; +} + +function expectTargetsToContainStores( + targets: Array<{ agentId: string; storePath: string }>, + stores: Record, +): void { + expect(targets).toEqual( + expect.arrayContaining( + Object.entries(stores).map(([agentId, storePath]) => ({ + agentId, + storePath, + })), + ), + ); +} + +const discoveryResolvers = [ + { + label: "async", + resolve: async (cfg: OpenClawConfig, env: NodeJS.ProcessEnv) => + await resolveAllAgentSessionStoreTargets(cfg, { env }), + }, + { + label: "sync", + resolve: async (cfg: OpenClawConfig, env: NodeJS.ProcessEnv) => + resolveAllAgentSessionStoreTargetsSync(cfg, { env }), + }, +] as const; + +describe("resolveSessionStoreTargets", () => { + it("resolves all configured agent stores", () => { + const cfg: OpenClawConfig = { + session: { + store: 
"~/.openclaw/agents/{agentId}/sessions/sessions.json", + }, + agents: { + list: [{ id: "main", default: true }, { id: "work" }], + }, + }; + + const targets = resolveSessionStoreTargets(cfg, { allAgents: true }); + + expect(targets).toEqual([ + { + agentId: "main", + storePath: path.resolve( + path.join(process.env.HOME ?? "", ".openclaw/agents/main/sessions/sessions.json"), + ), + }, + { + agentId: "work", + storePath: path.resolve( + path.join(process.env.HOME ?? "", ".openclaw/agents/work/sessions/sessions.json"), + ), + }, + ]); + }); + + it("dedupes shared store paths for --all-agents", () => { + const cfg: OpenClawConfig = { + session: { + store: "/tmp/shared-sessions.json", + }, + agents: { + list: [{ id: "main", default: true }, { id: "work" }], + }, + }; + + expect(resolveSessionStoreTargets(cfg, { allAgents: true })).toEqual([ + { agentId: "main", storePath: path.resolve("/tmp/shared-sessions.json") }, + ]); + }); + + it("rejects unknown agent ids", () => { + const cfg: OpenClawConfig = { + agents: { + list: [{ id: "main", default: true }, { id: "work" }], + }, + }; + + expect(() => resolveSessionStoreTargets(cfg, { agent: "ghost" })).toThrow(/Unknown agent id/); + }); + + it("rejects conflicting selectors", () => { + expect(() => resolveSessionStoreTargets({}, { agent: "main", allAgents: true })).toThrow( + /cannot be used together/i, + ); + expect(() => + resolveSessionStoreTargets({}, { store: "/tmp/sessions.json", allAgents: true }), + ).toThrow(/cannot be combined/i); + }); +}); + +describe("resolveAllAgentSessionStoreTargets", () => { + it("includes discovered on-disk agent stores alongside configured targets", async () => { + await withTempHome(async (home) => { + const stateDir = path.join(home, ".openclaw"); + const storePaths = await createAgentSessionStores(stateDir, ["ops", "retired"]); + + const cfg: OpenClawConfig = { + agents: { + list: [{ id: "ops", default: true }], + }, + }; + + const targets = await 
resolveAllAgentSessionStoreTargets(cfg, { env: process.env }); + + expectTargetsToContainStores(targets, storePaths); + expect(targets.filter((target) => target.storePath === storePaths.ops)).toHaveLength(1); + }); + }); + + it("discovers retired agent stores under a configured custom session root", async () => { + await withTempHome(async (home) => { + const { storePaths, targets } = await resolveTargetsForCustomRoot(home, ["ops", "retired"]); + + expectTargetsToContainStores(targets, storePaths); + expect(targets.filter((target) => target.storePath === storePaths.ops)).toHaveLength(1); + }); + }); + + it("keeps the actual on-disk store path for discovered retired agents", async () => { + await withTempHome(async (home) => { + const { storePaths, targets } = await resolveTargetsForCustomRoot(home, [ + "ops", + "Retired Agent", + ]); + + expect(targets).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + agentId: "retired-agent", + storePath: storePaths["Retired Agent"], + }), + ]), + ); + }); + }); + + it("respects the caller env when resolving configured and discovered store roots", async () => { + await withTempHome(async (home) => { + const envStateDir = path.join(home, "env-state"); + const mainSessionsDir = path.join(envStateDir, "agents", "main", "sessions"); + const retiredSessionsDir = path.join(envStateDir, "agents", "retired", "sessions"); + await fs.mkdir(mainSessionsDir, { recursive: true }); + await fs.mkdir(retiredSessionsDir, { recursive: true }); + await fs.writeFile(path.join(mainSessionsDir, "sessions.json"), "{}", "utf8"); + await fs.writeFile(path.join(retiredSessionsDir, "sessions.json"), "{}", "utf8"); + + const env = { + ...process.env, + OPENCLAW_STATE_DIR: envStateDir, + }; + const cfg: OpenClawConfig = {}; + const mainStorePath = await resolveRealStorePath(mainSessionsDir); + const retiredStorePath = await resolveRealStorePath(retiredSessionsDir); + + const targets = await resolveAllAgentSessionStoreTargets(cfg, { env }); + 
+ expect(targets).toEqual( + expect.arrayContaining([ + { + agentId: "main", + storePath: mainStorePath, + }, + { + agentId: "retired", + storePath: retiredStorePath, + }, + ]), + ); + }); + }); + + for (const resolver of discoveryResolvers) { + it(`skips unreadable or invalid discovery roots when other roots are still readable (${resolver.label})`, async () => { + await withTempHome(async (home) => { + const customRoot = path.join(home, "custom-state"); + await fs.mkdir(customRoot, { recursive: true }); + await fs.writeFile(path.join(customRoot, "agents"), "not-a-directory", "utf8"); + + const envStateDir = path.join(home, "env-state"); + const storePaths = await createAgentSessionStores(envStateDir, ["main", "retired"]); + const cfg = createCustomRootCfg(customRoot, "main"); + const env = { + ...process.env, + OPENCLAW_STATE_DIR: envStateDir, + }; + + await expect(resolver.resolve(cfg, env)).resolves.toEqual( + expect.arrayContaining([ + { + agentId: "retired", + storePath: storePaths.retired, + }, + ]), + ); + }); + }); + + it(`skips symlinked discovered stores under templated agents roots (${resolver.label})`, async () => { + await withTempHome(async (home) => { + if (process.platform === "win32") { + return; + } + const customRoot = path.join(home, "custom-state"); + const opsSessionsDir = path.join(customRoot, "agents", "ops", "sessions"); + const leakedFile = path.join(home, "outside.json"); + await fs.mkdir(opsSessionsDir, { recursive: true }); + await fs.writeFile(leakedFile, JSON.stringify({ leak: { secret: "x" } }), "utf8"); + await fs.symlink(leakedFile, path.join(opsSessionsDir, "sessions.json")); + + const targets = await resolver.resolve(createCustomRootCfg(customRoot), process.env); + expect(targets).not.toContainEqual({ + agentId: "ops", + storePath: expect.stringContaining(path.join("ops", "sessions", "sessions.json")), + }); + }); + }); + } + + it("skips discovered directories that only normalize into the default main agent", async () => { + 
await withTempHome(async (home) => { + const stateDir = path.join(home, ".openclaw"); + const mainSessionsDir = path.join(stateDir, "agents", "main", "sessions"); + const junkSessionsDir = path.join(stateDir, "agents", "###", "sessions"); + await fs.mkdir(mainSessionsDir, { recursive: true }); + await fs.mkdir(junkSessionsDir, { recursive: true }); + await fs.writeFile(path.join(mainSessionsDir, "sessions.json"), "{}", "utf8"); + await fs.writeFile(path.join(junkSessionsDir, "sessions.json"), "{}", "utf8"); + + const cfg: OpenClawConfig = {}; + const mainStorePath = await resolveRealStorePath(mainSessionsDir); + const targets = await resolveAllAgentSessionStoreTargets(cfg, { env: process.env }); + + expect(targets).toContainEqual({ + agentId: "main", + storePath: mainStorePath, + }); + expect( + targets.some((target) => target.storePath === path.join(junkSessionsDir, "sessions.json")), + ).toBe(false); + }); + }); +}); diff --git a/src/config/sessions/targets.ts b/src/config/sessions/targets.ts new file mode 100644 index 00000000000..c647a17e41f --- /dev/null +++ b/src/config/sessions/targets.ts @@ -0,0 +1,344 @@ +import fsSync from "node:fs"; +import fs from "node:fs/promises"; +import path from "node:path"; +import { listAgentIds, resolveDefaultAgentId } from "../../agents/agent-scope.js"; +import { + resolveAgentSessionDirsFromAgentsDir, + resolveAgentSessionDirsFromAgentsDirSync, +} from "../../agents/session-dirs.js"; +import { DEFAULT_AGENT_ID, normalizeAgentId } from "../../routing/session-key.js"; +import { resolveStateDir } from "../paths.js"; +import type { OpenClawConfig } from "../types.openclaw.js"; +import { resolveAgentsDirFromSessionStorePath, resolveStorePath } from "./paths.js"; + +export type SessionStoreSelectionOptions = { + store?: string; + agent?: string; + allAgents?: boolean; +}; + +export type SessionStoreTarget = { + agentId: string; + storePath: string; +}; + +const NON_FATAL_DISCOVERY_ERROR_CODES = new Set([ + "EACCES", + "ELOOP", + 
"ENOENT", + "ENOTDIR", + "EPERM", + "ESTALE", +]); + +function dedupeTargetsByStorePath(targets: SessionStoreTarget[]): SessionStoreTarget[] { + const deduped = new Map(); + for (const target of targets) { + if (!deduped.has(target.storePath)) { + deduped.set(target.storePath, target); + } + } + return [...deduped.values()]; +} + +function shouldSkipDiscoveryError(err: unknown): boolean { + const code = (err as NodeJS.ErrnoException | undefined)?.code; + return typeof code === "string" && NON_FATAL_DISCOVERY_ERROR_CODES.has(code); +} + +function isWithinRoot(realPath: string, realRoot: string): boolean { + return realPath === realRoot || realPath.startsWith(`${realRoot}${path.sep}`); +} + +function shouldSkipDiscoveredAgentDirName(dirName: string, agentId: string): boolean { + // Avoid collapsing arbitrary directory names like "###" into the default main agent. + // Human-friendly names like "Retired Agent" are still allowed because they normalize to + // a non-default stable id and preserve the intended retired-store discovery behavior. + return agentId === DEFAULT_AGENT_ID && dirName.trim().toLowerCase() !== DEFAULT_AGENT_ID; +} + +function resolveValidatedDiscoveredStorePathSync(params: { + sessionsDir: string; + agentsRoot: string; + realAgentsRoot?: string; +}): string | undefined { + const storePath = path.join(params.sessionsDir, "sessions.json"); + try { + const stat = fsSync.lstatSync(storePath); + if (stat.isSymbolicLink() || !stat.isFile()) { + return undefined; + } + const realStorePath = fsSync.realpathSync.native(storePath); + const realAgentsRoot = params.realAgentsRoot ?? fsSync.realpathSync.native(params.agentsRoot); + return isWithinRoot(realStorePath, realAgentsRoot) ? 
realStorePath : undefined; + } catch (err) { + if (shouldSkipDiscoveryError(err)) { + return undefined; + } + throw err; + } +} + +async function resolveValidatedDiscoveredStorePath(params: { + sessionsDir: string; + agentsRoot: string; + realAgentsRoot?: string; +}): Promise { + const storePath = path.join(params.sessionsDir, "sessions.json"); + try { + const stat = await fs.lstat(storePath); + if (stat.isSymbolicLink() || !stat.isFile()) { + return undefined; + } + const realStorePath = await fs.realpath(storePath); + const realAgentsRoot = params.realAgentsRoot ?? (await fs.realpath(params.agentsRoot)); + return isWithinRoot(realStorePath, realAgentsRoot) ? realStorePath : undefined; + } catch (err) { + if (shouldSkipDiscoveryError(err)) { + return undefined; + } + throw err; + } +} + +function resolveSessionStoreDiscoveryState( + cfg: OpenClawConfig, + env: NodeJS.ProcessEnv, +): { + configuredTargets: SessionStoreTarget[]; + agentsRoots: string[]; +} { + const configuredTargets = resolveSessionStoreTargets(cfg, { allAgents: true }, { env }); + const agentsRoots = new Set(); + for (const target of configuredTargets) { + const agentsDir = resolveAgentsDirFromSessionStorePath(target.storePath); + if (agentsDir) { + agentsRoots.add(agentsDir); + } + } + agentsRoots.add(path.join(resolveStateDir(env), "agents")); + return { + configuredTargets, + agentsRoots: [...agentsRoots], + }; +} + +function toDiscoveredSessionStoreTarget( + sessionsDir: string, + storePath: string, +): SessionStoreTarget | undefined { + const dirName = path.basename(path.dirname(sessionsDir)); + const agentId = normalizeAgentId(dirName); + if (shouldSkipDiscoveredAgentDirName(dirName, agentId)) { + return undefined; + } + return { + agentId, + // Keep the actual on-disk store path so retired/manual agent dirs remain discoverable + // even if their directory name no longer round-trips through normalizeAgentId(). 
+ storePath, + }; +} + +export function resolveAllAgentSessionStoreTargetsSync( + cfg: OpenClawConfig, + params: { env?: NodeJS.ProcessEnv } = {}, +): SessionStoreTarget[] { + const env = params.env ?? process.env; + const { configuredTargets, agentsRoots } = resolveSessionStoreDiscoveryState(cfg, env); + const realAgentsRoots = new Map(); + const getRealAgentsRoot = (agentsRoot: string): string | undefined => { + const cached = realAgentsRoots.get(agentsRoot); + if (cached !== undefined) { + return cached; + } + try { + const realAgentsRoot = fsSync.realpathSync.native(agentsRoot); + realAgentsRoots.set(agentsRoot, realAgentsRoot); + return realAgentsRoot; + } catch (err) { + if (shouldSkipDiscoveryError(err)) { + return undefined; + } + throw err; + } + }; + const validatedConfiguredTargets = configuredTargets.flatMap((target) => { + const agentsRoot = resolveAgentsDirFromSessionStorePath(target.storePath); + if (!agentsRoot) { + return [target]; + } + const realAgentsRoot = getRealAgentsRoot(agentsRoot); + if (!realAgentsRoot) { + return []; + } + const validatedStorePath = resolveValidatedDiscoveredStorePathSync({ + sessionsDir: path.dirname(target.storePath), + agentsRoot, + realAgentsRoot, + }); + return validatedStorePath ? [{ ...target, storePath: validatedStorePath }] : []; + }); + const discoveredTargets = agentsRoots.flatMap((agentsDir) => { + try { + const realAgentsRoot = getRealAgentsRoot(agentsDir); + if (!realAgentsRoot) { + return []; + } + return resolveAgentSessionDirsFromAgentsDirSync(agentsDir).flatMap((sessionsDir) => { + const validatedStorePath = resolveValidatedDiscoveredStorePathSync({ + sessionsDir, + agentsRoot: agentsDir, + realAgentsRoot, + }); + const target = validatedStorePath + ? toDiscoveredSessionStoreTarget(sessionsDir, validatedStorePath) + : undefined; + return target ? 
[target] : []; + }); + } catch (err) { + if (shouldSkipDiscoveryError(err)) { + return []; + } + throw err; + } + }); + return dedupeTargetsByStorePath([...validatedConfiguredTargets, ...discoveredTargets]); +} + +export async function resolveAllAgentSessionStoreTargets( + cfg: OpenClawConfig, + params: { env?: NodeJS.ProcessEnv } = {}, +): Promise { + const env = params.env ?? process.env; + const { configuredTargets, agentsRoots } = resolveSessionStoreDiscoveryState(cfg, env); + const realAgentsRoots = new Map(); + const getRealAgentsRoot = async (agentsRoot: string): Promise => { + const cached = realAgentsRoots.get(agentsRoot); + if (cached !== undefined) { + return cached; + } + try { + const realAgentsRoot = await fs.realpath(agentsRoot); + realAgentsRoots.set(agentsRoot, realAgentsRoot); + return realAgentsRoot; + } catch (err) { + if (shouldSkipDiscoveryError(err)) { + return undefined; + } + throw err; + } + }; + const validatedConfiguredTargets = ( + await Promise.all( + configuredTargets.map(async (target) => { + const agentsRoot = resolveAgentsDirFromSessionStorePath(target.storePath); + if (!agentsRoot) { + return target; + } + const realAgentsRoot = await getRealAgentsRoot(agentsRoot); + if (!realAgentsRoot) { + return undefined; + } + const validatedStorePath = await resolveValidatedDiscoveredStorePath({ + sessionsDir: path.dirname(target.storePath), + agentsRoot, + realAgentsRoot, + }); + return validatedStorePath ? 
{ ...target, storePath: validatedStorePath } : undefined; + }), + ) + ).filter((target): target is SessionStoreTarget => Boolean(target)); + + const discoveredTargets = ( + await Promise.all( + agentsRoots.map(async (agentsDir) => { + try { + const realAgentsRoot = await getRealAgentsRoot(agentsDir); + if (!realAgentsRoot) { + return []; + } + const sessionsDirs = await resolveAgentSessionDirsFromAgentsDir(agentsDir); + return ( + await Promise.all( + sessionsDirs.map(async (sessionsDir) => { + const validatedStorePath = await resolveValidatedDiscoveredStorePath({ + sessionsDir, + agentsRoot: agentsDir, + realAgentsRoot, + }); + return validatedStorePath + ? toDiscoveredSessionStoreTarget(sessionsDir, validatedStorePath) + : undefined; + }), + ) + ).filter((target): target is SessionStoreTarget => Boolean(target)); + } catch (err) { + if (shouldSkipDiscoveryError(err)) { + return []; + } + throw err; + } + }), + ) + ).flat(); + + return dedupeTargetsByStorePath([...validatedConfiguredTargets, ...discoveredTargets]); +} + +export function resolveSessionStoreTargets( + cfg: OpenClawConfig, + opts: SessionStoreSelectionOptions, + params: { env?: NodeJS.ProcessEnv } = {}, +): SessionStoreTarget[] { + const env = params.env ?? 
process.env; + const defaultAgentId = resolveDefaultAgentId(cfg); + const hasAgent = Boolean(opts.agent?.trim()); + const allAgents = opts.allAgents === true; + if (hasAgent && allAgents) { + throw new Error("--agent and --all-agents cannot be used together"); + } + if (opts.store && (hasAgent || allAgents)) { + throw new Error("--store cannot be combined with --agent or --all-agents"); + } + + if (opts.store) { + return [ + { + agentId: defaultAgentId, + storePath: resolveStorePath(opts.store, { agentId: defaultAgentId, env }), + }, + ]; + } + + if (allAgents) { + const targets = listAgentIds(cfg).map((agentId) => ({ + agentId, + storePath: resolveStorePath(cfg.session?.store, { agentId, env }), + })); + return dedupeTargetsByStorePath(targets); + } + + if (hasAgent) { + const knownAgents = listAgentIds(cfg); + const requested = normalizeAgentId(opts.agent ?? ""); + if (!knownAgents.includes(requested)) { + throw new Error( + `Unknown agent id "${opts.agent}". Use "openclaw agents list" to see configured agents.`, + ); + } + return [ + { + agentId: requested, + storePath: resolveStorePath(cfg.session?.store, { agentId: requested, env }), + }, + ]; + } + + return [ + { + agentId: defaultAgentId, + storePath: resolveStorePath(cfg.session?.store, { agentId: defaultAgentId, env }), + }, + ]; +} diff --git a/src/config/sessions/transcript.ts b/src/config/sessions/transcript.ts index e6a8044f5c6..aa1890de953 100644 --- a/src/config/sessions/transcript.ts +++ b/src/config/sessions/transcript.ts @@ -135,6 +135,7 @@ export async function appendAssistantMessageToSessionTranscript(params: { sessionKey: string; text?: string; mediaUrls?: string[]; + idempotencyKey?: string; /** Optional override for store path (mostly for tests). 
*/ storePath?: string; }): Promise<{ ok: true; sessionFile: string } | { ok: false; reason: string }> { @@ -179,6 +180,13 @@ export async function appendAssistantMessageToSessionTranscript(params: { await ensureSessionHeader({ sessionFile, sessionId: entry.sessionId }); + if ( + params.idempotencyKey && + (await transcriptHasIdempotencyKey(sessionFile, params.idempotencyKey)) + ) { + return { ok: true, sessionFile }; + } + const sessionManager = SessionManager.open(sessionFile); sessionManager.appendMessage({ role: "assistant", @@ -202,8 +210,34 @@ export async function appendAssistantMessageToSessionTranscript(params: { }, stopReason: "stop", timestamp: Date.now(), + ...(params.idempotencyKey ? { idempotencyKey: params.idempotencyKey } : {}), }); emitSessionTranscriptUpdate(sessionFile); return { ok: true, sessionFile }; } + +async function transcriptHasIdempotencyKey( + transcriptPath: string, + idempotencyKey: string, +): Promise { + try { + const raw = await fs.promises.readFile(transcriptPath, "utf-8"); + for (const line of raw.split(/\r?\n/)) { + if (!line.trim()) { + continue; + } + try { + const parsed = JSON.parse(line) as { message?: { idempotencyKey?: unknown } }; + if (parsed.message?.idempotencyKey === idempotencyKey) { + return true; + } + } catch { + continue; + } + } + } catch { + return false; + } + return false; +} diff --git a/src/config/sessions/types.ts b/src/config/sessions/types.ts index 81d67d13011..4ba9b336127 100644 --- a/src/config/sessions/types.ts +++ b/src/config/sessions/types.ts @@ -78,10 +78,16 @@ export type SessionEntry = { sessionFile?: string; /** Parent session key that spawned this session (used for sandbox session-tool scoping). */ spawnedBy?: string; + /** Workspace inherited by spawned sessions and reused on later turns for the same child session. */ + spawnedWorkspaceDir?: string; /** True after a thread/topic session has been forked from its parent transcript once. 
*/ forkedFromParent?: boolean; /** Subagent spawn depth (0 = main, 1 = sub-agent, 2 = sub-sub-agent). */ spawnDepth?: number; + /** Explicit role assigned at spawn time for subagent tool policy/control decisions. */ + subagentRole?: "orchestrator" | "leaf"; + /** Explicit control scope assigned at spawn time for subagent control decisions. */ + subagentControlScope?: "children" | "none"; systemSent?: boolean; abortedLastRun?: boolean; /** @@ -94,6 +100,7 @@ export type SessionEntry = { abortCutoffTimestamp?: number; chatType?: SessionChatType; thinkingLevel?: string; + fastMode?: boolean; verboseLevel?: string; reasoningLevel?: string; elevatedLevel?: string; diff --git a/src/config/types.agent-defaults.ts b/src/config/types.agent-defaults.ts index 9124e4084d8..c81cf0edbed 100644 --- a/src/config/types.agent-defaults.ts +++ b/src/config/types.agent-defaults.ts @@ -279,7 +279,7 @@ export type AgentDefaultsConfig = { thinking?: string; /** Default run timeout in seconds for spawned sub-agents (0 = no timeout). */ runTimeoutSeconds?: number; - /** Gateway timeout in ms for sub-agent announce delivery calls (default: 60000). */ + /** Gateway timeout in ms for sub-agent announce delivery calls (default: 90000). */ announceTimeoutMs?: number; }; /** Optional sandbox settings for non-main sessions. */ @@ -287,6 +287,7 @@ export type AgentDefaultsConfig = { }; export type AgentCompactionMode = "default" | "safeguard"; +export type AgentCompactionPostIndexSyncMode = "off" | "async" | "await"; export type AgentCompactionIdentifierPolicy = "strict" | "off" | "custom"; export type AgentCompactionQualityGuardConfig = { /** Enable compaction summary quality audits and regeneration retries. Default: false. */ @@ -306,6 +307,8 @@ export type AgentCompactionConfig = { reserveTokensFloor?: number; /** Max share of context window for history during safeguard pruning (0.1–0.9, default 0.5). 
*/ maxHistoryShare?: number; + /** Additional compaction-summary instructions that can preserve language or persona continuity. */ + customInstructions?: string; /** Preserve this many most-recent user/assistant turns verbatim in compaction summary context. */ recentTurnsPreserve?: number; /** Identifier-preservation instruction policy for compaction summaries. */ @@ -314,6 +317,8 @@ export type AgentCompactionConfig = { identifierInstructions?: string; /** Optional quality-audit retries for safeguard compaction summaries. */ qualityGuard?: AgentCompactionQualityGuardConfig; + /** Post-compaction session memory index sync mode. */ + postIndexSync?: AgentCompactionPostIndexSyncMode; /** Pre-compaction memory flush (agentic turn). Default: enabled. */ memoryFlush?: AgentCompactionMemoryFlushConfig; /** diff --git a/src/config/types.browser.ts b/src/config/types.browser.ts index 57d036bd88c..5f8e28a0ebe 100644 --- a/src/config/types.browser.ts +++ b/src/config/types.browser.ts @@ -4,7 +4,7 @@ export type BrowserProfileConfig = { /** CDP URL for this profile (use for remote Chrome). */ cdpUrl?: string; /** Profile driver (default: openclaw). */ - driver?: "openclaw" | "clawd" | "extension"; + driver?: "openclaw" | "clawd" | "extension" | "existing-session"; /** If true, never launch a browser for this profile; only attach. Falls back to browser.attachOnly. */ attachOnly?: boolean; /** Profile color (hex). Auto-assigned at creation. */ diff --git a/src/config/types.discord.ts b/src/config/types.discord.ts index 2d2e674f6b6..2d005dd7d7a 100644 --- a/src/config/types.discord.ts +++ b/src/config/types.discord.ts @@ -52,6 +52,8 @@ export type DiscordGuildChannelConfig = { systemPrompt?: string; /** If false, omit thread starter context for this channel (default: true). */ includeThreadStarter?: boolean; + /** If true, automatically create a thread for each new message in this channel. 
*/ + autoThread?: boolean; }; export type DiscordReactionNotificationMode = "off" | "own" | "all" | "allowlist"; diff --git a/src/config/types.gateway.ts b/src/config/types.gateway.ts index 58b061682a1..ea17a1d9d05 100644 --- a/src/config/types.gateway.ts +++ b/src/config/types.gateway.ts @@ -186,6 +186,8 @@ export type GatewayTailscaleConfig = { }; export type GatewayRemoteConfig = { + /** Whether remote gateway surfaces are enabled. Default: true when absent. */ + enabled?: boolean; /** Remote Gateway WebSocket URL (ws:// or wss://). */ url?: string; /** Transport for macOS remote connections (ssh tunnel or direct WS). */ @@ -345,6 +347,21 @@ export type GatewayHttpConfig = { securityHeaders?: GatewayHttpSecurityHeadersConfig; }; +export type GatewayPushApnsRelayConfig = { + /** Base HTTPS URL for the external iOS APNs relay service. */ + baseUrl?: string; + /** Timeout in milliseconds for relay send requests (default: 10000). */ + timeoutMs?: number; +}; + +export type GatewayPushApnsConfig = { + relay?: GatewayPushApnsRelayConfig; +}; + +export type GatewayPushConfig = { + apns?: GatewayPushApnsConfig; +}; + export type GatewayNodesConfig = { /** Browser routing policy for node-hosted browser proxies. */ browser?: { @@ -393,6 +410,7 @@ export type GatewayConfig = { reload?: GatewayReloadConfig; tls?: GatewayTlsConfig; http?: GatewayHttpConfig; + push?: GatewayPushConfig; nodes?: GatewayNodesConfig; /** * IPs of trusted reverse proxies (e.g. Traefik, nginx). 
When a connection diff --git a/src/config/types.messages.ts b/src/config/types.messages.ts index 39a5ca7da69..002a1200b8b 100644 --- a/src/config/types.messages.ts +++ b/src/config/types.messages.ts @@ -58,6 +58,7 @@ export type StatusReactionsEmojiConfig = { error?: string; stallSoft?: string; stallHard?: string; + compacting?: string; }; export type StatusReactionsTimingConfig = { diff --git a/src/config/types.signal.ts b/src/config/types.signal.ts index 1f3d5180b92..bd33a64cf51 100644 --- a/src/config/types.signal.ts +++ b/src/config/types.signal.ts @@ -1,8 +1,15 @@ import type { CommonChannelMessagingConfig } from "./types.channel-messaging-common.js"; +import type { GroupToolPolicyBySenderConfig, GroupToolPolicyConfig } from "./types.tools.js"; export type SignalReactionNotificationMode = "off" | "own" | "all" | "allowlist"; export type SignalReactionLevel = "off" | "ack" | "minimal" | "extensive"; +export type SignalGroupConfig = { + requireMention?: boolean; + tools?: GroupToolPolicyConfig; + toolsBySender?: GroupToolPolicyBySenderConfig; +}; + export type SignalAccountConfig = CommonChannelMessagingConfig & { /** Optional explicit E.164 account for signal-cli. */ account?: string; @@ -24,6 +31,8 @@ export type SignalAccountConfig = CommonChannelMessagingConfig & { ignoreAttachments?: boolean; ignoreStories?: boolean; sendReadReceipts?: boolean; + /** Per-group overrides keyed by Signal group id (or "*"). */ + groups?: Record; /** Outbound text chunk size (chars). Default: 4000. */ textChunkLimit?: number; /** Reaction notification mode (off|own|all|allowlist). Default: own. 
*/ diff --git a/src/config/types.slack.ts b/src/config/types.slack.ts index 96abe2641d6..a90f1ed5020 100644 --- a/src/config/types.slack.ts +++ b/src/config/types.slack.ts @@ -47,6 +47,11 @@ export type SlackChannelConfig = { export type SlackReactionNotificationMode = "off" | "own" | "all" | "allowlist"; export type SlackStreamingMode = "off" | "partial" | "block" | "progress"; export type SlackLegacyStreamMode = "replace" | "status_final" | "append"; +export type SlackCapabilitiesConfig = + | string[] + | { + interactiveReplies?: boolean; + }; export type SlackActionConfig = { reactions?: boolean; @@ -89,7 +94,7 @@ export type SlackAccountConfig = { /** Slack Events API webhook path (default: /slack/events). */ webhookPath?: string; /** Optional provider capability tags used for agent/runtime guidance. */ - capabilities?: string[]; + capabilities?: SlackCapabilitiesConfig; /** Markdown formatting overrides (tables). */ markdown?: MarkdownConfig; /** Override native command registration for Slack (bool or "auto"). */ diff --git a/src/config/types.telegram.ts b/src/config/types.telegram.ts index ce8ad105b06..45eac2fb310 100644 --- a/src/config/types.telegram.ts +++ b/src/config/types.telegram.ts @@ -38,6 +38,20 @@ export type TelegramNetworkConfig = { export type TelegramInlineButtonsScope = "off" | "dm" | "group" | "all" | "allowlist"; export type TelegramStreamingMode = "off" | "partial" | "block" | "progress"; +export type TelegramExecApprovalTarget = "dm" | "channel" | "both"; + +export type TelegramExecApprovalConfig = { + /** Enable Telegram exec approvals for this account. Default: false. */ + enabled?: boolean; + /** Telegram user IDs allowed to approve exec requests. Required if enabled. */ + approvers?: Array; + /** Only forward approvals for these agent IDs. Omit = all agents. */ + agentFilter?: string[]; + /** Only forward approvals matching these session key patterns (substring or regex). 
*/ + sessionFilter?: string[]; + /** Where to send approval prompts. Default: "dm". */ + target?: TelegramExecApprovalTarget; +}; export type TelegramCapabilitiesConfig = | string[] @@ -58,6 +72,8 @@ export type TelegramAccountConfig = { name?: string; /** Optional provider capability tags used for agent/runtime guidance. */ capabilities?: TelegramCapabilitiesConfig; + /** Telegram-native exec approval delivery + approver authorization. */ + execApprovals?: TelegramExecApprovalConfig; /** Markdown formatting overrides (tables). */ markdown?: MarkdownConfig; /** Override native command registration for Telegram (bool or "auto"). */ @@ -77,7 +93,7 @@ export type TelegramAccountConfig = { /** If false, do not start this Telegram account. Default: true. */ enabled?: boolean; botToken?: string; - /** Path to file containing bot token (for secret managers like agenix). */ + /** Path to a regular file containing the bot token; symlinks are rejected. */ tokenFile?: string; /** Control reply threading when reply tags are present (off|first|all). */ replyToMode?: ReplyToMode; diff --git a/src/config/types.tools.ts b/src/config/types.tools.ts index 89775758411..43d39285b57 100644 --- a/src/config/types.tools.ts +++ b/src/config/types.tools.ts @@ -319,6 +319,15 @@ export type MemorySearchConfig = { sources?: Array<"memory" | "sessions">; /** Extra paths to include in memory search (directories or .md files). */ extraPaths?: string[]; + /** Optional multimodal file indexing for selected extra paths. */ + multimodal?: { + /** Enable image/audio embeddings from extraPaths. */ + enabled?: boolean; + /** Which non-text file types to index. */ + modalities?: Array<"image" | "audio" | "all">; + /** Max bytes allowed per multimodal file before it is skipped. */ + maxFileBytes?: number; + }; /** Experimental memory search settings. */ experimental?: { /** Enable session transcript indexing (experimental, default: false). 
*/ @@ -347,6 +356,11 @@ export type MemorySearchConfig = { fallback?: "openai" | "gemini" | "local" | "voyage" | "mistral" | "ollama" | "none"; /** Embedding model id (remote) or alias (local). */ model?: string; + /** + * Gemini embedding-2 models only: output vector dimensions. + * Supported values today are 768, 1536, and 3072. + */ + outputDimensionality?: number; /** Local embedding settings (node-llama-cpp). */ local?: { /** GGUF model path or hf: URI. */ @@ -388,6 +402,8 @@ export type MemorySearchConfig = { deltaBytes?: number; /** Minimum appended JSONL lines before session transcripts are reindexed. */ deltaMessages?: number; + /** Force session reindex after compaction-triggered transcript updates (default: true). */ + postCompactionForce?: boolean; }; }; /** Query behavior. */ @@ -512,7 +528,7 @@ export type ToolsConfig = { /** Enable Firecrawl fallback (default: true when apiKey is set). */ enabled?: boolean; /** Firecrawl API key (optional; defaults to FIRECRAWL_API_KEY env var). */ - apiKey?: string; + apiKey?: SecretInput; /** Firecrawl base URL (default: https://api.firecrawl.dev). */ baseUrl?: string; /** Whether to keep only main content (default: true). */ diff --git a/src/config/types.tts.ts b/src/config/types.tts.ts index 3d898ff9c57..a6232f9de5a 100644 --- a/src/config/types.tts.ts +++ b/src/config/types.tts.ts @@ -61,6 +61,10 @@ export type TtsConfig = { baseUrl?: string; model?: string; voice?: string; + /** Playback speed (0.25–4.0, default 1.0). */ + speed?: number; + /** System-level instructions for the TTS model (gpt-4o-mini-tts only). */ + instructions?: string; }; /** Microsoft Edge (node-edge-tts) configuration. 
*/ edge?: { diff --git a/src/config/validation.ts b/src/config/validation.ts index 90d733e0818..686dbb0ed43 100644 --- a/src/config/validation.ts +++ b/src/config/validation.ts @@ -297,17 +297,23 @@ type ValidateConfigWithPluginsResult = warnings: ConfigValidationIssue[]; }; -export function validateConfigObjectWithPlugins(raw: unknown): ValidateConfigWithPluginsResult { - return validateConfigObjectWithPluginsBase(raw, { applyDefaults: true }); +export function validateConfigObjectWithPlugins( + raw: unknown, + params?: { env?: NodeJS.ProcessEnv }, +): ValidateConfigWithPluginsResult { + return validateConfigObjectWithPluginsBase(raw, { applyDefaults: true, env: params?.env }); } -export function validateConfigObjectRawWithPlugins(raw: unknown): ValidateConfigWithPluginsResult { - return validateConfigObjectWithPluginsBase(raw, { applyDefaults: false }); +export function validateConfigObjectRawWithPlugins( + raw: unknown, + params?: { env?: NodeJS.ProcessEnv }, +): ValidateConfigWithPluginsResult { + return validateConfigObjectWithPluginsBase(raw, { applyDefaults: false, env: params?.env }); } function validateConfigObjectWithPluginsBase( raw: unknown, - opts: { applyDefaults: boolean }, + opts: { applyDefaults: boolean; env?: NodeJS.ProcessEnv }, ): ValidateConfigWithPluginsResult { const base = opts.applyDefaults ? validateConfigObject(raw) : validateConfigObjectRaw(raw); if (!base.ok) { @@ -345,6 +351,7 @@ function validateConfigObjectWithPluginsBase( const registry = loadPluginManifestRegistry({ config, workspaceDir: workspaceDir ?? 
undefined, + env: opts.env, }); for (const diag of registry.diagnostics) { diff --git a/src/config/zod-schema.agent-defaults.ts b/src/config/zod-schema.agent-defaults.ts index 242d6959729..dfa7e23e1c1 100644 --- a/src/config/zod-schema.agent-defaults.ts +++ b/src/config/zod-schema.agent-defaults.ts @@ -91,6 +91,7 @@ export const AgentDefaultsSchema = z keepRecentTokens: z.number().int().positive().optional(), reserveTokensFloor: z.number().int().nonnegative().optional(), maxHistoryShare: z.number().min(0.1).max(0.9).optional(), + customInstructions: z.string().optional(), identifierPolicy: z .union([z.literal("strict"), z.literal("off"), z.literal("custom")]) .optional(), @@ -103,6 +104,7 @@ export const AgentDefaultsSchema = z }) .strict() .optional(), + postIndexSync: z.enum(["off", "async", "await"]).optional(), postCompactionSections: z.array(z.string()).optional(), model: z.string().optional(), memoryFlush: z diff --git a/src/config/zod-schema.agent-runtime.ts b/src/config/zod-schema.agent-runtime.ts index 3ede7218b80..7a87440a768 100644 --- a/src/config/zod-schema.agent-runtime.ts +++ b/src/config/zod-schema.agent-runtime.ts @@ -327,6 +327,18 @@ export const ToolsWebFetchSchema = z cacheTtlMinutes: z.number().nonnegative().optional(), maxRedirects: z.number().int().nonnegative().optional(), userAgent: z.string().optional(), + readability: z.boolean().optional(), + firecrawl: z + .object({ + enabled: z.boolean().optional(), + apiKey: SecretInputSchema.optional().register(sensitive), + baseUrl: z.string().optional(), + onlyMainContent: z.boolean().optional(), + maxAgeMs: z.number().int().nonnegative().optional(), + timeoutSeconds: z.number().int().positive().optional(), + }) + .strict() + .optional(), }) .strict() .optional(); @@ -553,6 +565,16 @@ export const MemorySearchSchema = z enabled: z.boolean().optional(), sources: z.array(z.union([z.literal("memory"), z.literal("sessions")])).optional(), extraPaths: z.array(z.string()).optional(), + multimodal: z + 
.object({ + enabled: z.boolean().optional(), + modalities: z + .array(z.union([z.literal("image"), z.literal("audio"), z.literal("all")])) + .optional(), + maxFileBytes: z.number().int().positive().optional(), + }) + .strict() + .optional(), experimental: z .object({ sessionMemory: z.boolean().optional(), @@ -599,6 +621,7 @@ export const MemorySearchSchema = z ]) .optional(), model: z.string().optional(), + outputDimensionality: z.number().int().positive().optional(), local: z .object({ modelPath: z.string().optional(), @@ -638,6 +661,7 @@ export const MemorySearchSchema = z .object({ deltaBytes: z.number().int().nonnegative().optional(), deltaMessages: z.number().int().nonnegative().optional(), + postCompactionForce: z.boolean().optional(), }) .strict() .optional(), @@ -745,6 +769,7 @@ export const AgentEntrySchema = z .strict() .optional(), sandbox: AgentSandboxSchema, + params: z.record(z.string(), z.unknown()).optional(), tools: AgentToolsSchema, runtime: AgentRuntimeSchema, }) diff --git a/src/config/zod-schema.core.ts b/src/config/zod-schema.core.ts index 23accd81637..305efab4b26 100644 --- a/src/config/zod-schema.core.ts +++ b/src/config/zod-schema.core.ts @@ -1,14 +1,17 @@ import path from "node:path"; import { z } from "zod"; import { isSafeExecutableValue } from "../infra/exec-safety.js"; -import { isValidFileSecretRefId } from "../secrets/ref-contract.js"; +import { + formatExecSecretRefIdValidationMessage, + isValidExecSecretRefId, + isValidFileSecretRefId, +} from "../secrets/ref-contract.js"; import { MODEL_APIS } from "./types.models.js"; import { createAllowDenyChannelRulesSchema } from "./zod-schema.allowdeny.js"; import { sensitive } from "./zod-schema.sensitive.js"; const ENV_SECRET_REF_ID_PATTERN = /^[A-Z][A-Z0-9_]{0,127}$/; const SECRET_PROVIDER_ALIAS_PATTERN = /^[a-z][a-z0-9_-]{0,63}$/; -const EXEC_SECRET_REF_ID_PATTERN = /^[A-Za-z0-9][A-Za-z0-9._:/-]{0,255}$/; const WINDOWS_ABS_PATH_PATTERN = /^[A-Za-z]:[\\/]/; const WINDOWS_UNC_PATH_PATTERN 
= /^\\\\[^\\]+\\[^\\]+/; @@ -65,12 +68,7 @@ const ExecSecretRefSchema = z SECRET_PROVIDER_ALIAS_PATTERN, 'Secret reference provider must match /^[a-z][a-z0-9_-]{0,63}$/ (example: "default").', ), - id: z - .string() - .regex( - EXEC_SECRET_REF_ID_PATTERN, - 'Exec secret reference id must match /^[A-Za-z0-9][A-Za-z0-9._:/-]{0,255}$/ (example: "vault/openai/api-key").', - ), + id: z.string().refine(isValidExecSecretRefId, formatExecSecretRefIdValidationMessage()), }) .strict(); @@ -406,6 +404,8 @@ export const TtsConfigSchema = z baseUrl: z.string().optional(), model: z.string().optional(), voice: z.string().optional(), + speed: z.number().min(0.25).max(4).optional(), + instructions: z.string().optional(), }) .strict() .optional(), diff --git a/src/config/zod-schema.providers-core.ts b/src/config/zod-schema.providers-core.ts index ac1287460bd..ced89bd8512 100644 --- a/src/config/zod-schema.providers-core.ts +++ b/src/config/zod-schema.providers-core.ts @@ -49,6 +49,7 @@ const DiscordIdSchema = z const DiscordIdListSchema = z.array(DiscordIdSchema); const TelegramInlineButtonsScopeSchema = z.enum(["off", "dm", "group", "all", "allowlist"]); +const TelegramIdListSchema = z.array(z.union([z.string(), z.number()])); const TelegramCapabilitiesSchema = z.union([ z.array(z.string()), @@ -58,6 +59,14 @@ const TelegramCapabilitiesSchema = z.union([ }) .strict(), ]); +const SlackCapabilitiesSchema = z.union([ + z.array(z.string()), + z + .object({ + interactiveReplies: z.boolean().optional(), + }) + .strict(), +]); export const TelegramTopicSchema = z .object({ @@ -103,8 +112,8 @@ export const TelegramDirectSchema = z const TelegramCustomCommandSchema = z .object({ - command: z.string().transform(normalizeTelegramCommandName), - description: z.string().transform(normalizeTelegramCommandDescription), + command: z.string().overwrite(normalizeTelegramCommandName), + description: z.string().overwrite(normalizeTelegramCommandDescription), }) .strict(); @@ -153,6 +162,16 @@ export 
const TelegramAccountSchemaBase = z .object({ name: z.string().optional(), capabilities: TelegramCapabilitiesSchema.optional(), + execApprovals: z + .object({ + enabled: z.boolean().optional(), + approvers: TelegramIdListSchema.optional(), + agentFilter: z.array(z.string()).optional(), + sessionFilter: z.array(z.string()).optional(), + target: z.enum(["dm", "channel", "both"]).optional(), + }) + .strict() + .optional(), markdown: MarkdownConfigSchema, enabled: z.boolean().optional(), commands: ProviderCommandsSchema, @@ -233,7 +252,9 @@ export const TelegramAccountSchemaBase = z sendMessage: z.boolean().optional(), poll: z.boolean().optional(), deleteMessage: z.boolean().optional(), + editMessage: z.boolean().optional(), sticker: z.boolean().optional(), + createForumTopic: z.boolean().optional(), }) .strict() .optional(), @@ -373,6 +394,16 @@ export const DiscordGuildChannelSchema = z systemPrompt: z.string().optional(), includeThreadStarter: z.boolean().optional(), autoThread: z.boolean().optional(), + /** Archive duration for auto-created threads in minutes. Discord supports 60, 1440 (1 day), 4320 (3 days), 10080 (1 week). Default: 60. 
*/ + autoArchiveDuration: z + .union([ + z.enum(["60", "1440", "4320", "10080"]), + z.literal(60), + z.literal(1440), + z.literal(4320), + z.literal(10080), + ]) + .optional(), }) .strict(); @@ -808,7 +839,7 @@ export const SlackAccountSchema = z mode: z.enum(["socket", "http"]).optional(), signingSecret: SecretInputSchema.optional().register(sensitive), webhookPath: z.string().optional(), - capabilities: z.array(z.string()).optional(), + capabilities: SlackCapabilitiesSchema.optional(), markdown: MarkdownConfigSchema, enabled: z.boolean().optional(), commands: ProviderCommandsSchema, @@ -948,6 +979,16 @@ export const SlackConfigSchema = SlackAccountSchema.safeExtend({ validateSlackSigningSecretRequirements(value, ctx); }); +const SignalGroupEntrySchema = z + .object({ + requireMention: z.boolean().optional(), + tools: ToolPolicySchema, + toolsBySender: ToolPolicyBySenderSchema, + }) + .strict(); + +const SignalGroupsSchema = z.record(z.string(), SignalGroupEntrySchema.optional()).optional(); + export const SignalAccountSchemaBase = z .object({ name: z.string().optional(), @@ -956,6 +997,7 @@ export const SignalAccountSchemaBase = z enabled: z.boolean().optional(), configWrites: z.boolean().optional(), account: z.string().optional(), + accountUuid: z.string().optional(), httpUrl: z.string().optional(), httpHost: z.string().optional(), httpPort: z.number().int().positive().optional(), @@ -971,6 +1013,7 @@ export const SignalAccountSchemaBase = z defaultTo: z.string().optional(), groupAllowFrom: z.array(z.union([z.string(), z.number()])).optional(), groupPolicy: GroupPolicySchema.optional().default("allowlist"), + groups: SignalGroupsSchema, historyLimit: z.number().int().min(0).optional(), dmHistoryLimit: z.number().int().min(0).optional(), dms: z.record(z.string(), DmConfigSchema.optional()).optional(), diff --git a/src/config/zod-schema.session.ts b/src/config/zod-schema.session.ts index 648caa60f5b..b8bb99b1b14 100644 --- a/src/config/zod-schema.session.ts +++ 
b/src/config/zod-schema.session.ts @@ -169,6 +169,7 @@ export const MessagesSchema = z error: z.string().optional(), stallSoft: z.string().optional(), stallHard: z.string().optional(), + compacting: z.string().optional(), }) .strict() .optional(), diff --git a/src/config/zod-schema.signal-groups.test.ts b/src/config/zod-schema.signal-groups.test.ts new file mode 100644 index 00000000000..2dcd1ac0676 --- /dev/null +++ b/src/config/zod-schema.signal-groups.test.ts @@ -0,0 +1,65 @@ +import { describe, expect, it } from "vitest"; +import { validateConfigObject } from "./config.js"; + +describe("signal groups schema", () => { + it("accepts top-level Signal groups overrides", () => { + const res = validateConfigObject({ + channels: { + signal: { + groups: { + "*": { + requireMention: false, + }, + "+1234567890": { + requireMention: true, + }, + }, + }, + }, + }); + + expect(res.ok).toBe(true); + }); + + it("accepts per-account Signal groups overrides", () => { + const res = validateConfigObject({ + channels: { + signal: { + accounts: { + primary: { + groups: { + "*": { + requireMention: false, + }, + }, + }, + }, + }, + }, + }); + + expect(res.ok).toBe(true); + }); + + it("rejects unknown keys in Signal groups entries", () => { + const res = validateConfigObject({ + channels: { + signal: { + groups: { + "*": { + requireMention: false, + nope: true, + }, + }, + }, + }, + }); + + expect(res.ok).toBe(false); + if (!res.ok) { + expect(res.issues.some((issue) => issue.path.startsWith("channels.signal.groups"))).toBe( + true, + ); + } + }); +}); diff --git a/src/config/zod-schema.ts b/src/config/zod-schema.ts index c35d1191b6f..8c78d049d0e 100644 --- a/src/config/zod-schema.ts +++ b/src/config/zod-schema.ts @@ -360,15 +360,23 @@ export const OpenClawSchema = z cdpPort: z.number().int().min(1).max(65535).optional(), cdpUrl: z.string().optional(), driver: z - .union([z.literal("openclaw"), z.literal("clawd"), z.literal("extension")]) + .union([ + z.literal("openclaw"), + 
z.literal("clawd"), + z.literal("extension"), + z.literal("existing-session"), + ]) .optional(), attachOnly: z.boolean().optional(), color: HexColorSchema, }) .strict() - .refine((value) => value.cdpPort || value.cdpUrl, { - message: "Profile must set cdpPort or cdpUrl", - }), + .refine( + (value) => value.driver === "existing-session" || value.cdpPort || value.cdpUrl, + { + message: "Profile must set cdpPort or cdpUrl", + }, + ), ) .optional(), extraArgs: z.array(z.string()).optional(), @@ -596,6 +604,7 @@ export const OpenClawSchema = z wideArea: z .object({ enabled: z.boolean().optional(), + domain: z.string().optional(), }) .strict() .optional(), @@ -789,6 +798,23 @@ export const OpenClawSchema = z }) .strict() .optional(), + push: z + .object({ + apns: z + .object({ + relay: z + .object({ + baseUrl: z.string().optional(), + timeoutMs: z.number().int().positive().optional(), + }) + .strict() + .optional(), + }) + .strict() + .optional(), + }) + .strict() + .optional(), nodes: z .object({ browser: z diff --git a/src/config/zod-schema.tts.test.ts b/src/config/zod-schema.tts.test.ts new file mode 100644 index 00000000000..70398e81054 --- /dev/null +++ b/src/config/zod-schema.tts.test.ts @@ -0,0 +1,36 @@ +import { describe, expect, it } from "vitest"; +import { TtsConfigSchema } from "./zod-schema.core.js"; + +describe("TtsConfigSchema openai speed and instructions", () => { + it("accepts speed and instructions in openai section", () => { + expect(() => + TtsConfigSchema.parse({ + openai: { + voice: "alloy", + speed: 1.5, + instructions: "Speak in a cheerful tone", + }, + }), + ).not.toThrow(); + }); + + it("rejects out-of-range openai speed", () => { + expect(() => + TtsConfigSchema.parse({ + openai: { + speed: 5.0, + }, + }), + ).toThrow(); + }); + + it("rejects openai speed below minimum", () => { + expect(() => + TtsConfigSchema.parse({ + openai: { + speed: 0.1, + }, + }), + ).toThrow(); + }); +}); diff --git a/src/context-engine/context-engine.test.ts 
b/src/context-engine/context-engine.test.ts index 9b40008f1a0..cd0f2f50439 100644 --- a/src/context-engine/context-engine.test.ts +++ b/src/context-engine/context-engine.test.ts @@ -1,5 +1,6 @@ import type { AgentMessage } from "@mariozechner/pi-agent-core"; -import { describe, expect, it, beforeEach } from "vitest"; +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { compactEmbeddedPiSessionDirect } from "../agents/pi-embedded-runner/compact.runtime.js"; // --------------------------------------------------------------------------- // We dynamically import the registry so we can get a fresh module per test // group when needed. For most groups we use the shared singleton directly. @@ -19,6 +20,23 @@ import type { IngestResult, } from "./types.js"; +vi.mock("../agents/pi-embedded-runner/compact.runtime.js", () => ({ + compactEmbeddedPiSessionDirect: vi.fn(async () => ({ + ok: true, + compacted: false, + reason: "mock compaction", + result: { + summary: "", + firstKeptEntryId: "", + tokensBefore: 0, + tokensAfter: 0, + details: undefined, + }, + })), +})); + +const mockedCompactEmbeddedPiSessionDirect = vi.mocked(compactEmbeddedPiSessionDirect); + // --------------------------------------------------------------------------- // Helpers // --------------------------------------------------------------------------- @@ -43,6 +61,7 @@ class MockContextEngine implements ContextEngine { async ingest(_params: { sessionId: string; + sessionKey?: string; message: AgentMessage; isHeartbeat?: boolean; }): Promise { @@ -51,6 +70,7 @@ class MockContextEngine implements ContextEngine { async assemble(params: { sessionId: string; + sessionKey?: string; messages: AgentMessage[]; tokenBudget?: number; }): Promise { @@ -63,6 +83,7 @@ class MockContextEngine implements ContextEngine { async compact(_params: { sessionId: string; + sessionKey?: string; sessionFile: string; tokenBudget?: number; compactionTarget?: "budget" | "threshold"; @@ -91,6 +112,10 @@ class 
MockContextEngine implements ContextEngine { // ═══════════════════════════════════════════════════════════════════════════ describe("Engine contract tests", () => { + beforeEach(() => { + mockedCompactEmbeddedPiSessionDirect.mockClear(); + }); + it("a mock engine implementing ContextEngine can be registered and resolved", async () => { const factory = () => new MockContextEngine(); registerContextEngine("mock", factory); @@ -153,6 +178,25 @@ describe("Engine contract tests", () => { // Should complete without error await expect(engine.dispose()).resolves.toBeUndefined(); }); + + it("legacy compact preserves runtimeContext currentTokenCount when top-level value is absent", async () => { + const engine = new LegacyContextEngine(); + + await engine.compact({ + sessionId: "s1", + sessionFile: "/tmp/session.json", + runtimeContext: { + workspaceDir: "/tmp/workspace", + currentTokenCount: 277403, + }, + }); + + expect(mockedCompactEmbeddedPiSessionDirect).toHaveBeenCalledWith( + expect.objectContaining({ + currentTokenCount: 277403, + }), + ); + }); }); // ═══════════════════════════════════════════════════════════════════════════ diff --git a/src/context-engine/legacy.ts b/src/context-engine/legacy.ts index 011022ae26a..0485a4feae4 100644 --- a/src/context-engine/legacy.ts +++ b/src/context-engine/legacy.ts @@ -26,6 +26,7 @@ export class LegacyContextEngine implements ContextEngine { async ingest(_params: { sessionId: string; + sessionKey?: string; message: AgentMessage; isHeartbeat?: boolean; }): Promise { @@ -35,6 +36,7 @@ export class LegacyContextEngine implements ContextEngine { async assemble(params: { sessionId: string; + sessionKey?: string; messages: AgentMessage[]; tokenBudget?: number; }): Promise { @@ -49,6 +51,7 @@ export class LegacyContextEngine implements ContextEngine { async afterTurn(_params: { sessionId: string; + sessionKey?: string; sessionFile: string; messages: AgentMessage[]; prePromptMessageCount: number; @@ -62,6 +65,7 @@ export class 
LegacyContextEngine implements ContextEngine { async compact(params: { sessionId: string; + sessionKey?: string; sessionFile: string; tokenBudget?: number; force?: boolean; @@ -78,6 +82,13 @@ export class LegacyContextEngine implements ContextEngine { // set by the caller in run.ts. We spread them and override the fields // that come from the ContextEngine compact() signature directly. const runtimeContext = params.runtimeContext ?? {}; + const currentTokenCount = + params.currentTokenCount ?? + (typeof runtimeContext.currentTokenCount === "number" && + Number.isFinite(runtimeContext.currentTokenCount) && + runtimeContext.currentTokenCount > 0 + ? Math.floor(runtimeContext.currentTokenCount) + : undefined); // eslint-disable-next-line @typescript-eslint/no-explicit-any -- bridge runtimeContext matches CompactEmbeddedPiSessionParams const result = await compactEmbeddedPiSessionDirect({ @@ -85,6 +96,7 @@ export class LegacyContextEngine implements ContextEngine { sessionId: params.sessionId, sessionFile: params.sessionFile, tokenBudget: params.tokenBudget, + ...(currentTokenCount !== undefined ? { currentTokenCount } : {}), force: params.force, customInstructions: params.customInstructions, workspaceDir: (runtimeContext.workspaceDir as string) ?? process.cwd(), diff --git a/src/context-engine/types.ts b/src/context-engine/types.ts index b886190a1e0..7ddd695b5b6 100644 --- a/src/context-engine/types.ts +++ b/src/context-engine/types.ts @@ -72,13 +72,18 @@ export interface ContextEngine { /** * Initialize engine state for a session, optionally importing historical context. */ - bootstrap?(params: { sessionId: string; sessionFile: string }): Promise; + bootstrap?(params: { + sessionId: string; + sessionKey?: string; + sessionFile: string; + }): Promise; /** * Ingest a single message into the engine's store. */ ingest(params: { sessionId: string; + sessionKey?: string; message: AgentMessage; /** True when the message belongs to a heartbeat run. 
*/ isHeartbeat?: boolean; @@ -89,6 +94,7 @@ export interface ContextEngine { */ ingestBatch?(params: { sessionId: string; + sessionKey?: string; messages: AgentMessage[]; /** True when the batch belongs to a heartbeat run. */ isHeartbeat?: boolean; @@ -101,6 +107,7 @@ export interface ContextEngine { */ afterTurn?(params: { sessionId: string; + sessionKey?: string; sessionFile: string; messages: AgentMessage[]; /** Number of messages that existed before the prompt was sent. */ @@ -121,6 +128,7 @@ export interface ContextEngine { */ assemble(params: { sessionId: string; + sessionKey?: string; messages: AgentMessage[]; tokenBudget?: number; }): Promise; @@ -131,6 +139,7 @@ export interface ContextEngine { */ compact(params: { sessionId: string; + sessionKey?: string; sessionFile: string; tokenBudget?: number; /** Force compaction even below the default trigger threshold. */ diff --git a/src/cron/cron-protocol-conformance.test.ts b/src/cron/cron-protocol-conformance.test.ts index 51fe8f4767c..698f5e0038d 100644 --- a/src/cron/cron-protocol-conformance.test.ts +++ b/src/cron/cron-protocol-conformance.test.ts @@ -2,7 +2,7 @@ import fs from "node:fs/promises"; import path from "node:path"; import { describe, expect, it } from "vitest"; import { MACOS_APP_SOURCES_DIR } from "../compat/legacy-names.js"; -import { CronDeliverySchema } from "../gateway/protocol/schema.js"; +import { CronDeliverySchema, CronJobStateSchema } from "../gateway/protocol/schema.js"; type SchemaLike = { anyOf?: Array; @@ -29,6 +29,16 @@ function extractDeliveryModes(schema: SchemaLike): string[] { return Array.from(new Set(unionModes)); } +function extractConstUnionValues(schema: SchemaLike): string[] { + return Array.from( + new Set( + (schema.anyOf ?? 
[]) + .map((entry) => entry?.const) + .filter((value): value is string => typeof value === "string"), + ), + ); +} + const UI_FILES = ["ui/src/ui/types.ts", "ui/src/ui/ui-types.ts", "ui/src/ui/views/cron.ts"]; const SWIFT_MODEL_CANDIDATES = [`${MACOS_APP_SOURCES_DIR}/CronModels.swift`]; @@ -88,4 +98,19 @@ describe("cron protocol conformance", () => { expect(swift.includes("struct CronSchedulerStatus")).toBe(true); expect(swift.includes("let jobs:")).toBe(true); }); + + it("cron job state schema keeps the full failover reason set", () => { + const properties = (CronJobStateSchema as SchemaLike).properties ?? {}; + const lastErrorReason = properties.lastErrorReason as SchemaLike | undefined; + expect(lastErrorReason).toBeDefined(); + expect(extractConstUnionValues(lastErrorReason ?? {})).toEqual([ + "auth", + "format", + "rate_limit", + "billing", + "timeout", + "model_not_found", + "unknown", + ]); + }); }); diff --git a/src/cron/delivery.failure-notify.test.ts b/src/cron/delivery.failure-notify.test.ts new file mode 100644 index 00000000000..98cb437c961 --- /dev/null +++ b/src/cron/delivery.failure-notify.test.ts @@ -0,0 +1,143 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; + +const mocks = vi.hoisted(() => ({ + resolveDeliveryTarget: vi.fn(), + deliverOutboundPayloads: vi.fn(), + resolveAgentOutboundIdentity: vi.fn().mockReturnValue({ kind: "identity" }), + buildOutboundSessionContext: vi.fn().mockReturnValue({ kind: "session" }), + createOutboundSendDeps: vi.fn().mockReturnValue({ kind: "deps" }), + warn: vi.fn(), +})); + +vi.mock("./isolated-agent/delivery-target.js", () => ({ + resolveDeliveryTarget: mocks.resolveDeliveryTarget, +})); + +vi.mock("../infra/outbound/deliver.js", () => ({ + deliverOutboundPayloads: mocks.deliverOutboundPayloads, +})); + +vi.mock("../infra/outbound/identity.js", () => ({ + resolveAgentOutboundIdentity: mocks.resolveAgentOutboundIdentity, +})); + +vi.mock("../infra/outbound/session-context.js", () => 
({ + buildOutboundSessionContext: mocks.buildOutboundSessionContext, +})); + +vi.mock("../cli/outbound-send-deps.js", () => ({ + createOutboundSendDeps: mocks.createOutboundSendDeps, +})); + +vi.mock("../logging.js", () => ({ + getChildLogger: vi.fn(() => ({ + warn: mocks.warn, + })), +})); + +const { sendFailureNotificationAnnounce } = await import("./delivery.js"); + +describe("sendFailureNotificationAnnounce", () => { + beforeEach(() => { + vi.clearAllMocks(); + mocks.resolveDeliveryTarget.mockResolvedValue({ + ok: true, + channel: "telegram", + to: "123", + accountId: "bot-a", + threadId: 42, + mode: "explicit", + }); + mocks.deliverOutboundPayloads.mockResolvedValue([{ ok: true }]); + }); + + afterEach(() => { + vi.useRealTimers(); + }); + + it("delivers failure alerts to the resolved explicit target with strict send settings", async () => { + const deps = {} as never; + const cfg = {} as never; + + await sendFailureNotificationAnnounce( + deps, + cfg, + "main", + "job-1", + { channel: "telegram", to: "123", accountId: "bot-a" }, + "Cron failed", + ); + + expect(mocks.resolveDeliveryTarget).toHaveBeenCalledWith(cfg, "main", { + channel: "telegram", + to: "123", + accountId: "bot-a", + }); + expect(mocks.buildOutboundSessionContext).toHaveBeenCalledWith({ + cfg, + agentId: "main", + sessionKey: "cron:job-1:failure", + }); + expect(mocks.deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ + cfg, + channel: "telegram", + to: "123", + accountId: "bot-a", + threadId: 42, + payloads: [{ text: "Cron failed" }], + session: { kind: "session" }, + identity: { kind: "identity" }, + bestEffort: false, + deps: { kind: "deps" }, + abortSignal: expect.any(AbortSignal), + }), + ); + }); + + it("does not send when target resolution fails", async () => { + mocks.resolveDeliveryTarget.mockResolvedValue({ + ok: false, + error: new Error("target missing"), + }); + + await sendFailureNotificationAnnounce( + {} as never, + {} as never, + "main", + "job-1", + { 
channel: "telegram", to: "123" }, + "Cron failed", + ); + + expect(mocks.deliverOutboundPayloads).not.toHaveBeenCalled(); + expect(mocks.warn).toHaveBeenCalledWith( + { error: "target missing" }, + "cron: failed to resolve failure destination target", + ); + }); + + it("swallows outbound delivery errors after logging", async () => { + mocks.deliverOutboundPayloads.mockRejectedValue(new Error("send failed")); + + await expect( + sendFailureNotificationAnnounce( + {} as never, + {} as never, + "main", + "job-1", + { channel: "telegram", to: "123" }, + "Cron failed", + ), + ).resolves.toBeUndefined(); + + expect(mocks.warn).toHaveBeenCalledWith( + expect.objectContaining({ + err: "send failed", + channel: "telegram", + to: "123", + }), + "cron: failure destination announce failed", + ); + }); +}); diff --git a/src/cron/delivery.test.ts b/src/cron/delivery.test.ts index 81ab672af57..43eaa215114 100644 --- a/src/cron/delivery.test.ts +++ b/src/cron/delivery.test.ts @@ -148,6 +148,46 @@ describe("resolveFailureDestination", () => { expect(plan).toBeNull(); }); + it("returns null when webhook failure destination matches the primary webhook target", () => { + const plan = resolveFailureDestination( + makeJob({ + sessionTarget: "main", + payload: { kind: "systemEvent", text: "tick" }, + delivery: { + mode: "webhook", + to: "https://example.invalid/cron", + failureDestination: { + mode: "webhook", + to: "https://example.invalid/cron", + }, + }, + }), + undefined, + ); + expect(plan).toBeNull(); + }); + + it("does not reuse inherited announce recipient when switching failure destination to webhook", () => { + const plan = resolveFailureDestination( + makeJob({ + delivery: { + mode: "announce", + channel: "telegram", + to: "111", + failureDestination: { + mode: "webhook", + }, + }, + }), + { + channel: "signal", + to: "group-abc", + mode: "announce", + }, + ); + expect(plan).toBeNull(); + }); + it("allows job-level failure destination fields to clear inherited global values", 
() => { const plan = resolveFailureDestination( makeJob({ diff --git a/src/cron/isolated-agent.delivers-response-has-heartbeat-ok-but-includes.test.ts b/src/cron/isolated-agent.delivers-response-has-heartbeat-ok-but-includes.test.ts index 023c1e9eedc..8ea21bffefe 100644 --- a/src/cron/isolated-agent.delivers-response-has-heartbeat-ok-but-includes.test.ts +++ b/src/cron/isolated-agent.delivers-response-has-heartbeat-ok-but-includes.test.ts @@ -138,11 +138,10 @@ describe("runCronIsolatedAgentTurn", () => { }); }); - it("handles media heartbeat delivery and last-target text delivery", async () => { + it("delivers media payloads even when heartbeat text is suppressed", async () => { await withTempHome(async (home) => { const { storePath, deps } = await createTelegramDeliveryFixture(home); - // Media should still be delivered even if text is just HEARTBEAT_OK. mockEmbeddedAgentPayloads([ { text: "HEARTBEAT_OK", mediaUrl: "https://example.com/img.png" }, ]); @@ -156,9 +155,13 @@ describe("runCronIsolatedAgentTurn", () => { expect(mediaRes.status).toBe("ok"); expect(deps.sendMessageTelegram).toHaveBeenCalled(); expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); + }); + }); + + it("keeps non-empty heartbeat text when last-target ack suppression is disabled", async () => { + await withTempHome(async (home) => { + const { storePath, deps } = await createTelegramDeliveryFixture(home); - vi.mocked(runSubagentAnnounceFlow).mockClear(); - vi.mocked(deps.sendMessageTelegram).mockClear(); mockEmbeddedAgentPayloads([{ text: "HEARTBEAT_OK 🦞" }]); const cfg = makeCfg(home, storePath); @@ -194,10 +197,23 @@ describe("runCronIsolatedAgentTurn", () => { "HEARTBEAT_OK 🦞", expect.objectContaining({ accountId: undefined }), ); + }); + }); - vi.mocked(deps.sendMessageTelegram).mockClear(); - vi.mocked(runSubagentAnnounceFlow).mockClear(); - vi.mocked(callGateway).mockClear(); + it("deletes the direct cron session after last-target text delivery", async () => { + await 
withTempHome(async (home) => { + const { storePath, deps } = await createTelegramDeliveryFixture(home); + + mockEmbeddedAgentPayloads([{ text: "HEARTBEAT_OK 🦞" }]); + + const cfg = makeCfg(home, storePath); + cfg.agents = { + ...cfg.agents, + defaults: { + ...cfg.agents?.defaults, + heartbeat: { ackMaxChars: 0 }, + }, + }; const deleteRes = await runCronIsolatedAgentTurn({ cfg, diff --git a/src/cron/isolated-agent.delivery.test-helpers.ts b/src/cron/isolated-agent.delivery.test-helpers.ts index fe6dad727f4..041f5750a95 100644 --- a/src/cron/isolated-agent.delivery.test-helpers.ts +++ b/src/cron/isolated-agent.delivery.test-helpers.ts @@ -6,12 +6,14 @@ import { makeCfg, makeJob } from "./isolated-agent.test-harness.js"; export function createCliDeps(overrides: Partial = {}): CliDeps { return { - sendMessageSlack: vi.fn(), - sendMessageWhatsApp: vi.fn(), - sendMessageTelegram: vi.fn(), - sendMessageDiscord: vi.fn(), - sendMessageSignal: vi.fn(), - sendMessageIMessage: vi.fn(), + sendMessageSlack: vi.fn().mockResolvedValue({ messageTs: "slack-1", channel: "C1" }), + sendMessageWhatsApp: vi + .fn() + .mockResolvedValue({ messageId: "wa-1", toJid: "123@s.whatsapp.net" }), + sendMessageTelegram: vi.fn().mockResolvedValue({ messageId: "tg-1", chatId: "123" }), + sendMessageDiscord: vi.fn().mockResolvedValue({ messageId: "discord-1", channelId: "123" }), + sendMessageSignal: vi.fn().mockResolvedValue({ messageId: "signal-1", conversationId: "123" }), + sendMessageIMessage: vi.fn().mockResolvedValue({ messageId: "imessage-1", chatId: "123" }), ...overrides, }; } @@ -54,6 +56,7 @@ export async function runTelegramAnnounceTurn(params: { to?: string; bestEffort?: boolean; }; + deliveryContract?: "cron-owned" | "shared"; }): Promise>> { return runCronIsolatedAgentTurn({ cfg: makeCfg(params.home, params.storePath, { @@ -67,5 +70,6 @@ export async function runTelegramAnnounceTurn(params: { message: "do it", sessionKey: "cron:job-1", lane: "cron", + deliveryContract: 
params.deliveryContract, }); } diff --git a/src/cron/isolated-agent.direct-delivery-forum-topics.test.ts b/src/cron/isolated-agent.direct-delivery-forum-topics.test.ts index 836369fedb6..0ee64e789fc 100644 --- a/src/cron/isolated-agent.direct-delivery-forum-topics.test.ts +++ b/src/cron/isolated-agent.direct-delivery-forum-topics.test.ts @@ -1,5 +1,5 @@ import "./isolated-agent.mocks.js"; -import { beforeEach, describe, expect, it, vi } from "vitest"; +import { beforeEach, describe, expect, it } from "vitest"; import { runSubagentAnnounceFlow } from "../agents/subagent-announce.js"; import { createCliDeps, @@ -15,7 +15,7 @@ describe("runCronIsolatedAgentTurn forum topic delivery", () => { setupIsolatedAgentTurnMocks(); }); - it("routes forum-topic and plain telegram targets through the correct delivery path", async () => { + it("routes forum-topic telegram targets through the correct delivery path", async () => { await withTempCronHome(async (home) => { const storePath = await writeSessionStore(home, { lastProvider: "webchat", lastTo: "" }); const deps = createCliDeps(); @@ -36,8 +36,13 @@ describe("runCronIsolatedAgentTurn forum topic delivery", () => { text: "forum message", messageThreadId: 42, }); + }); + }); - vi.clearAllMocks(); + it("routes plain telegram targets through the correct delivery path", async () => { + await withTempCronHome(async (home) => { + const storePath = await writeSessionStore(home, { lastProvider: "webchat", lastTo: "" }); + const deps = createCliDeps(); mockAgentPayloads([{ text: "plain message" }]); const plainRes = await runTelegramAnnounceTurn({ diff --git a/src/cron/isolated-agent.lane.test.ts b/src/cron/isolated-agent.lane.test.ts new file mode 100644 index 00000000000..3790c5e511a --- /dev/null +++ b/src/cron/isolated-agent.lane.test.ts @@ -0,0 +1,64 @@ +import "./isolated-agent.mocks.js"; +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { runEmbeddedPiAgent } from "../agents/pi-embedded.js"; +import { 
createCliDeps, mockAgentPayloads } from "./isolated-agent.delivery.test-helpers.js"; +import { runCronIsolatedAgentTurn } from "./isolated-agent.js"; +import { + makeCfg, + makeJob, + withTempCronHome, + writeSessionStoreEntries, +} from "./isolated-agent.test-harness.js"; + +function lastEmbeddedLane(): string | undefined { + const calls = vi.mocked(runEmbeddedPiAgent).mock.calls; + expect(calls.length).toBeGreaterThan(0); + return (calls.at(-1)?.[0] as { lane?: string } | undefined)?.lane; +} + +async function runLaneCase(home: string, lane?: string) { + const storePath = await writeSessionStoreEntries(home, { + "agent:main:main": { + sessionId: "main-session", + updatedAt: Date.now(), + lastProvider: "webchat", + lastTo: "", + }, + }); + mockAgentPayloads([{ text: "ok" }]); + + await runCronIsolatedAgentTurn({ + cfg: makeCfg(home, storePath), + deps: createCliDeps(), + job: makeJob({ kind: "agentTurn", message: "do it", deliver: false }), + message: "do it", + sessionKey: "cron:job-1", + ...(lane === undefined ? 
{} : { lane }), + }); + + return lastEmbeddedLane(); +} + +describe("runCronIsolatedAgentTurn lane selection", () => { + beforeEach(() => { + vi.mocked(runEmbeddedPiAgent).mockClear(); + }); + + it("moves the cron lane to nested for embedded runs", async () => { + await withTempCronHome(async (home) => { + expect(await runLaneCase(home, "cron")).toBe("nested"); + }); + }); + + it("defaults missing lanes to nested for embedded runs", async () => { + await withTempCronHome(async (home) => { + expect(await runLaneCase(home)).toBe("nested"); + }); + }); + + it("preserves non-cron lanes for embedded runs", async () => { + await withTempCronHome(async (home) => { + expect(await runLaneCase(home, "subagent")).toBe("subagent"); + }); + }); +}); diff --git a/src/cron/isolated-agent.model-formatting.test.ts b/src/cron/isolated-agent.model-formatting.test.ts index e78f251dc8b..b09a9db5ea1 100644 --- a/src/cron/isolated-agent.model-formatting.test.ts +++ b/src/cron/isolated-agent.model-formatting.test.ts @@ -2,6 +2,7 @@ import "./isolated-agent.mocks.js"; import { beforeEach, describe, expect, it, vi } from "vitest"; import { loadModelCatalog } from "../agents/model-catalog.js"; import { runEmbeddedPiAgent } from "../agents/pi-embedded.js"; +import { createCliDeps, mockAgentPayloads } from "./isolated-agent.delivery.test-helpers.js"; import { runCronIsolatedAgentTurn } from "./isolated-agent.js"; import { makeCfg, @@ -13,27 +14,6 @@ import type { CronJob } from "./types.js"; const withTempHome = withTempCronHome; -function makeDeps() { - return { - sendMessageSlack: vi.fn(), - sendMessageWhatsApp: vi.fn(), - sendMessageTelegram: vi.fn(), - sendMessageDiscord: vi.fn(), - sendMessageSignal: vi.fn(), - sendMessageIMessage: vi.fn(), - }; -} - -function mockEmbeddedOk() { - vi.mocked(runEmbeddedPiAgent).mockResolvedValue({ - payloads: [{ text: "ok" }], - meta: { - durationMs: 5, - agentMeta: { sessionId: "s", provider: "p", model: "m" }, - }, - }); -} - /** * Extract the provider 
and model from the last runEmbeddedPiAgent call. */ @@ -44,6 +24,8 @@ function lastEmbeddedCall(): { provider?: string; model?: string } { } const DEFAULT_MESSAGE = "do it"; +const DEFAULT_PROVIDER = "anthropic"; +const DEFAULT_MODEL = "claude-opus-4-5"; type TurnOptions = { cfgOverrides?: Parameters[2]; @@ -62,7 +44,7 @@ async function runTurnCore(home: string, options: TurnOptions = {}) { }, ...options.storeEntries, }); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); const jobPayload = options.jobPayload ?? { kind: "agentTurn" as const, @@ -72,7 +54,7 @@ async function runTurnCore(home: string, options: TurnOptions = {}) { const res = await runCronIsolatedAgentTurn({ cfg: makeCfg(home, storePath, options.cfgOverrides), - deps: makeDeps(), + deps: createCliDeps(), job: makeJob(jobPayload), message: DEFAULT_MESSAGE, sessionKey: options.sessionKey ?? "cron:job-1", @@ -93,6 +75,50 @@ async function runTurn(home: string, options: TurnOptions = {}) { return { res, call: lastEmbeddedCall() }; } +function expectSelectedModel( + call: { provider?: string; model?: string }, + params: { provider: string; model: string }, +) { + expect(call.provider).toBe(params.provider); + expect(call.model).toBe(params.model); +} + +function expectDefaultSelectedModel(call: { provider?: string; model?: string }) { + expectSelectedModel(call, { provider: DEFAULT_PROVIDER, model: DEFAULT_MODEL }); +} + +function createCronSessionOverrideStore( + overrides: Record, + sessionId = "existing-session", +) { + return { + "agent:main:cron:job-1": { + sessionId, + updatedAt: Date.now(), + ...overrides, + }, + }; +} + +async function expectTurnModel( + home: string, + options: TurnOptions, + expected: { provider: string; model: string }, +) { + const { res, call } = await runTurn(home, options); + expect(res.status).toBe("ok"); + expectSelectedModel(call, expected); +} + +async function expectInvalidModel(home: string, model: string) { + const { res } = await runErrorTurn(home, { + 
jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, model }, + }); + expect(res.status).toBe("error"); + expect(res.error).toMatch(/invalid model/i); + expect(vi.mocked(runEmbeddedPiAgent)).not.toHaveBeenCalled(); +} + // --------------------------------------------------------------------------- // Tests // --------------------------------------------------------------------------- @@ -119,16 +145,17 @@ describe("cron model formatting and precedence edge cases", () => { it("handles leading/trailing whitespace in model string", async () => { await withTempHome(async (home) => { - const { res, call } = await runTurn(home, { - jobPayload: { - kind: "agentTurn", - message: DEFAULT_MESSAGE, - model: " openai/gpt-4.1-mini ", + await expectTurnModel( + home, + { + jobPayload: { + kind: "agentTurn", + message: DEFAULT_MESSAGE, + model: " openai/gpt-4.1-mini ", + }, }, - }); - expect(res.status).toBe("ok"); - expect(call.provider).toBe("openai"); - expect(call.model).toBe("gpt-4.1-mini"); + { provider: "openai", model: "gpt-4.1-mini" }, + ); }); }); @@ -149,38 +176,29 @@ describe("cron model formatting and precedence edge cases", () => { it("rejects model with trailing slash (empty model name)", async () => { await withTempHome(async (home) => { - const { res } = await runErrorTurn(home, { - jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, model: "openai/" }, - }); - expect(res.status).toBe("error"); - expect(res.error).toMatch(/invalid model/i); - expect(vi.mocked(runEmbeddedPiAgent)).not.toHaveBeenCalled(); + await expectInvalidModel(home, "openai/"); }); }); it("rejects model with leading slash (empty provider)", async () => { await withTempHome(async (home) => { - const { res } = await runErrorTurn(home, { - jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, model: "/gpt-4.1-mini" }, - }); - expect(res.status).toBe("error"); - expect(res.error).toMatch(/invalid model/i); - expect(vi.mocked(runEmbeddedPiAgent)).not.toHaveBeenCalled(); + await 
expectInvalidModel(home, "/gpt-4.1-mini"); }); }); it("normalizes provider casing", async () => { await withTempHome(async (home) => { - const { res, call } = await runTurn(home, { - jobPayload: { - kind: "agentTurn", - message: DEFAULT_MESSAGE, - model: "OpenAI/gpt-4.1-mini", + await expectTurnModel( + home, + { + jobPayload: { + kind: "agentTurn", + message: DEFAULT_MESSAGE, + model: "OpenAI/gpt-4.1-mini", + }, }, - }); - expect(res.status).toBe("ok"); - expect(call.provider).toBe("openai"); - expect(call.model).toBe("gpt-4.1-mini"); + { provider: "openai", model: "gpt-4.1-mini" }, + ); }); }); @@ -237,43 +255,39 @@ describe("cron model formatting and precedence edge cases", () => { // No model in job payload. Session store has openai override. // Provider must be openai, not the default anthropic. await withTempHome(async (home) => { - const { call } = await runTurn(home, { - jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, - storeEntries: { - "agent:main:cron:job-1": { - sessionId: "existing-session", - updatedAt: Date.now(), + await expectTurnModel( + home, + { + jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, + storeEntries: createCronSessionOverrideStore({ providerOverride: "openai", modelOverride: "gpt-4.1-mini", - }, + }), }, - }); - expect(call.provider).toBe("openai"); - expect(call.model).toBe("gpt-4.1-mini"); + { provider: "openai", model: "gpt-4.1-mini" }, + ); }); }); it("job payload model wins over conflicting session override", async () => { // Job payload says anthropic. Session says openai. Job must win. 
await withTempHome(async (home) => { - const { call } = await runTurn(home, { - jobPayload: { - kind: "agentTurn", - message: DEFAULT_MESSAGE, - model: "anthropic/claude-sonnet-4-5", - deliver: false, - }, - storeEntries: { - "agent:main:cron:job-1": { - sessionId: "existing-session", - updatedAt: Date.now(), + await expectTurnModel( + home, + { + jobPayload: { + kind: "agentTurn", + message: DEFAULT_MESSAGE, + model: "anthropic/claude-sonnet-4-5", + deliver: false, + }, + storeEntries: createCronSessionOverrideStore({ providerOverride: "openai", modelOverride: "gpt-4.1-mini", - }, + }), }, - }); - expect(call.provider).toBe("anthropic"); - expect(call.model).toBe("claude-sonnet-4-5"); + { provider: "anthropic", model: "claude-sonnet-4-5" }, + ); }); }); @@ -282,9 +296,7 @@ describe("cron model formatting and precedence edge cases", () => { const { call } = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, }); - // makeCfg default is anthropic/claude-opus-4-5 - expect(call.provider).toBe("anthropic"); - expect(call.model).toBe("claude-opus-4-5"); + expectDefaultSelectedModel(call); }); }); }); @@ -310,24 +322,19 @@ describe("cron model formatting and precedence edge cases", () => { // Step 2: No job model, session store says openai vi.mocked(runEmbeddedPiAgent).mockClear(); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); const step2 = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, - storeEntries: { - "agent:main:cron:job-1": { - sessionId: "existing-session", - updatedAt: Date.now(), - providerOverride: "openai", - modelOverride: "gpt-4.1-mini", - }, - }, + storeEntries: createCronSessionOverrideStore({ + providerOverride: "openai", + modelOverride: "gpt-4.1-mini", + }), }); - expect(step2.call.provider).toBe("openai"); - expect(step2.call.model).toBe("gpt-4.1-mini"); + expectSelectedModel(step2.call, { provider: "openai", model: "gpt-4.1-mini" }); // 
Step 3: Job payload says anthropic, session store still says openai vi.mocked(runEmbeddedPiAgent).mockClear(); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); const step3 = await runTurn(home, { jobPayload: { kind: "agentTurn", @@ -335,17 +342,12 @@ describe("cron model formatting and precedence edge cases", () => { model: "anthropic/claude-opus-4-5", deliver: false, }, - storeEntries: { - "agent:main:cron:job-1": { - sessionId: "existing-session", - updatedAt: Date.now(), - providerOverride: "openai", - modelOverride: "gpt-4.1-mini", - }, - }, + storeEntries: createCronSessionOverrideStore({ + providerOverride: "openai", + modelOverride: "gpt-4.1-mini", + }), }); - expect(step3.call.provider).toBe("anthropic"); - expect(step3.call.model).toBe("claude-opus-4-5"); + expectSelectedModel(step3.call, { provider: "anthropic", model: "claude-opus-4-5" }); }); }); @@ -365,12 +367,11 @@ describe("cron model formatting and precedence edge cases", () => { // Run 2: no override — must revert to default anthropic vi.mocked(runEmbeddedPiAgent).mockClear(); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); const r2 = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, }); - expect(r2.call.provider).toBe("anthropic"); - expect(r2.call.model).toBe("claude-opus-4-5"); + expectDefaultSelectedModel(r2.call); }); }); }); @@ -383,19 +384,20 @@ describe("cron model formatting and precedence edge cases", () => { // The stored modelOverride/providerOverride must still be read and applied // (resolveCronSession spreads ...entry before overriding core fields). 
await withTempHome(async (home) => { - const { call } = await runTurn(home, { - jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, - storeEntries: { - "agent:main:cron:job-1": { - sessionId: "old-session-id", - updatedAt: Date.now(), - providerOverride: "openai", - modelOverride: "gpt-4.1-mini", - }, + await expectTurnModel( + home, + { + jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, + storeEntries: createCronSessionOverrideStore( + { + providerOverride: "openai", + modelOverride: "gpt-4.1-mini", + }, + "old-session-id", + ), }, - }); - expect(call.provider).toBe("openai"); - expect(call.model).toBe("gpt-4.1-mini"); + { provider: "openai", model: "gpt-4.1-mini" }, + ); }); }); @@ -403,16 +405,9 @@ describe("cron model formatting and precedence edge cases", () => { await withTempHome(async (home) => { const { call } = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, - storeEntries: { - "agent:main:cron:job-1": { - sessionId: "old-session-id", - updatedAt: Date.now(), - // No providerOverride or modelOverride - }, - }, + storeEntries: createCronSessionOverrideStore({}, "old-session-id"), }); - expect(call.provider).toBe("anthropic"); - expect(call.model).toBe("claude-opus-4-5"); + expectDefaultSelectedModel(call); }); }); }); @@ -425,8 +420,7 @@ describe("cron model formatting and precedence edge cases", () => { const { call } = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, model: " " }, }); - expect(call.provider).toBe("anthropic"); - expect(call.model).toBe("claude-opus-4-5"); + expectDefaultSelectedModel(call); }); }); @@ -435,8 +429,7 @@ describe("cron model formatting and precedence edge cases", () => { const { call } = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, model: "" }, }); - expect(call.provider).toBe("anthropic"); - expect(call.model).toBe("claude-opus-4-5"); + 
expectDefaultSelectedModel(call); }); }); @@ -444,18 +437,13 @@ describe("cron model formatting and precedence edge cases", () => { await withTempHome(async (home) => { const { call } = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, - storeEntries: { - "agent:main:cron:job-1": { - sessionId: "old", - updatedAt: Date.now(), - providerOverride: "openai", - modelOverride: " ", - }, - }, + storeEntries: createCronSessionOverrideStore( + { providerOverride: "openai", modelOverride: " " }, + "old", + ), }); // Whitespace modelOverride should be ignored → default - expect(call.provider).toBe("anthropic"); - expect(call.model).toBe("claude-opus-4-5"); + expectDefaultSelectedModel(call); }); }); }); @@ -465,35 +453,39 @@ describe("cron model formatting and precedence edge cases", () => { describe("config model format variations", () => { it("default model as string 'provider/model'", async () => { await withTempHome(async (home) => { - const { call } = await runTurn(home, { - cfgOverrides: { - agents: { - defaults: { - model: "openai/gpt-4.1", + await expectTurnModel( + home, + { + cfgOverrides: { + agents: { + defaults: { + model: "openai/gpt-4.1", + }, }, }, + jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, }, - jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, - }); - expect(call.provider).toBe("openai"); - expect(call.model).toBe("gpt-4.1"); + { provider: "openai", model: "gpt-4.1" }, + ); }); }); it("default model as object with primary field", async () => { await withTempHome(async (home) => { - const { call } = await runTurn(home, { - cfgOverrides: { - agents: { - defaults: { - model: { primary: "openai/gpt-4.1" }, + await expectTurnModel( + home, + { + cfgOverrides: { + agents: { + defaults: { + model: { primary: "openai/gpt-4.1" }, + }, }, }, + jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, }, - jobPayload: { kind: "agentTurn", 
message: DEFAULT_MESSAGE, deliver: false }, - }); - expect(call.provider).toBe("openai"); - expect(call.model).toBe("gpt-4.1"); + { provider: "openai", model: "gpt-4.1" }, + ); }); }); diff --git a/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts b/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts index 6b2ab85739a..5abbb453f35 100644 --- a/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts +++ b/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts @@ -23,6 +23,7 @@ async function runExplicitTelegramAnnounceTurn(params: { home: string; storePath: string; deps: CliDeps; + deliveryContract?: "cron-owned" | "shared"; }): Promise>> { return runTelegramAnnounceTurn({ ...params, @@ -83,18 +84,13 @@ async function expectStructuredTelegramFailure(params: { }, }); - expect(res.status).toBe(params.expectedStatus); - if (params.expectedStatus === "ok") { - expect(res.delivered).toBe(false); - } - if (params.expectDeliveryAttempted !== undefined) { - expect(res.deliveryAttempted).toBe(params.expectDeliveryAttempted); - } - if (params.expectedErrorFragment) { - expect(res.error).toContain(params.expectedErrorFragment); - } - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); - expect(deps.sendMessageTelegram).toHaveBeenCalledTimes(1); + expectFailedTelegramDeliveryResult({ + res, + deps, + expectedStatus: params.expectedStatus, + expectedErrorFragment: params.expectedErrorFragment, + expectDeliveryAttempted: params.expectDeliveryAttempted, + }); }, { deps: { @@ -104,6 +100,29 @@ async function expectStructuredTelegramFailure(params: { ); } +function expectFailedTelegramDeliveryResult(params: { + res: Awaited>; + deps: CliDeps; + expectedStatus: "ok" | "error"; + expectedErrorFragment?: string; + expectDeliveryAttempted?: boolean; +}) { + expect(params.res.status).toBe(params.expectedStatus); 
+ if (params.expectedStatus === "ok") { + expect(params.res.delivered).toBe(false); + } else { + expect(params.res.delivered).toBeUndefined(); + } + if (params.expectDeliveryAttempted !== undefined) { + expect(params.res.deliveryAttempted).toBe(params.expectDeliveryAttempted); + } + if (params.expectedErrorFragment) { + expect(params.res.error).toContain(params.expectedErrorFragment); + } + expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); + expect(params.deps.sendMessageTelegram).toHaveBeenCalledTimes(1); +} + async function runTelegramDeliveryResult(bestEffort: boolean) { let outcome: | { @@ -111,6 +130,35 @@ async function runTelegramDeliveryResult(bestEffort: boolean) { deps: CliDeps; } | undefined; + await withTelegramTextDelivery({ bestEffort }, async ({ res, deps }) => { + outcome = { res, deps }; + }); + if (!outcome) { + throw new Error("telegram delivery did not produce an outcome"); + } + return outcome; +} + +function expectSuccessfulTelegramTextDelivery(params: { + res: Awaited>; + deps: CliDeps; +}): void { + expect(params.res.status).toBe("ok"); + expect(params.res.delivered).toBe(true); + expect(params.res.deliveryAttempted).toBe(true); + expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); +} + +async function withTelegramTextDelivery( + params: { bestEffort: boolean }, + run: (params: { + home: string; + storePath: string; + deps: CliDeps; + res: Awaited>; + }) => Promise, + fixtureParams?: Parameters[1], +) { await withTelegramAnnounceFixture(async ({ home, storePath, deps }) => { mockAgentPayloads([{ text: "hello from cron" }]); const res = await runTelegramAnnounceTurn({ @@ -121,15 +169,35 @@ async function runTelegramDeliveryResult(bestEffort: boolean) { mode: "announce", channel: "telegram", to: "123", - bestEffort, + bestEffort: params.bestEffort, }, }); - outcome = { res, deps }; - }); - if (!outcome) { - throw new Error("telegram delivery did not produce an outcome"); - } - return outcome; + await run({ home, storePath, deps, res 
}); + }, fixtureParams); +} + +async function expectTelegramTextDeliveryFailure(params: { + bestEffort: boolean; + expectedStatus: "ok" | "error"; + expectedErrorFragment?: string; +}) { + await withTelegramTextDelivery( + { bestEffort: params.bestEffort }, + async ({ deps, res }) => { + expectFailedTelegramDeliveryResult({ + res, + deps, + expectedStatus: params.expectedStatus, + expectedErrorFragment: params.expectedErrorFragment, + expectDeliveryAttempted: true, + }); + }, + { + deps: { + sendMessageTelegram: vi.fn().mockRejectedValue(new Error("boom")), + }, + }, + ); } async function runSignalDeliveryResult(bestEffort: boolean) { @@ -196,7 +264,7 @@ describe("runCronIsolatedAgentTurn", () => { setupIsolatedAgentTurnMocks(); }); - it("delivers explicit targets with direct and final-payload text", async () => { + it("delivers explicit targets with direct text", async () => { await withTelegramAnnounceFixture(async ({ home, storePath, deps }) => { await assertExplicitTelegramTargetDelivery({ home, @@ -205,7 +273,11 @@ describe("runCronIsolatedAgentTurn", () => { payloads: [{ text: "hello from cron" }], expectedText: "hello from cron", }); - vi.clearAllMocks(); + }); + }); + + it("delivers explicit targets with final-payload text", async () => { + await withTelegramAnnounceFixture(async ({ home, storePath, deps }) => { await assertExplicitTelegramTargetDelivery({ home, storePath, @@ -301,6 +373,7 @@ describe("runCronIsolatedAgentTurn", () => { home, storePath, deps, + deliveryContract: "shared", }); expectDeliveredOk(res); @@ -342,121 +415,37 @@ describe("runCronIsolatedAgentTurn", () => { }); it("reports not-delivered when text direct delivery fails and best-effort is enabled", async () => { - await withTelegramAnnounceFixture( - async ({ home, storePath, deps }) => { - mockAgentPayloads([{ text: "hello from cron" }]); - - const res = await runTelegramAnnounceTurn({ - home, - storePath, - deps, - delivery: { - mode: "announce", - channel: "telegram", - to: "123", 
- bestEffort: true, - }, - }); - - expect(res.status).toBe("ok"); - expect(res.delivered).toBe(false); - expect(res.deliveryAttempted).toBe(true); - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); - expect(deps.sendMessageTelegram).toHaveBeenCalledTimes(1); - }, - { - deps: { - sendMessageTelegram: vi.fn().mockRejectedValue(new Error("boom")), - }, - }, - ); + await expectTelegramTextDeliveryFailure({ + bestEffort: true, + expectedStatus: "ok", + }); }); it("delivers text directly when best-effort is disabled", async () => { - await withTempHome(async (home) => { - const storePath = await writeSessionStore(home, { lastProvider: "webchat", lastTo: "" }); - const deps = createCliDeps(); - mockAgentPayloads([{ text: "hello from cron" }]); - - const res = await runTelegramAnnounceTurn({ - home, - storePath, - deps, - delivery: { - mode: "announce", - channel: "telegram", - to: "123", - bestEffort: false, - }, - }); - - expect(res.status).toBe("ok"); - expect(res.delivered).toBe(true); - expect(res.deliveryAttempted).toBe(true); - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); - expectDirectTelegramDelivery(deps, { - chatId: "123", - text: "hello from cron", - }); + const { res, deps } = await runTelegramDeliveryResult(false); + expectSuccessfulTelegramTextDelivery({ res, deps }); + expectDirectTelegramDelivery(deps, { + chatId: "123", + text: "hello from cron", }); }); it("returns error when text direct delivery fails and best-effort is disabled", async () => { - await withTelegramAnnounceFixture( - async ({ home, storePath, deps }) => { - mockAgentPayloads([{ text: "hello from cron" }]); - - const res = await runTelegramAnnounceTurn({ - home, - storePath, - deps, - delivery: { - mode: "announce", - channel: "telegram", - to: "123", - bestEffort: false, - }, - }); - - expect(res.status).toBe("error"); - expect(res.delivered).toBeUndefined(); - expect(res.deliveryAttempted).toBe(true); - expect(res.error).toContain("boom"); - 
expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); - expect(deps.sendMessageTelegram).toHaveBeenCalledTimes(1); - }, - { - deps: { - sendMessageTelegram: vi.fn().mockRejectedValue(new Error("boom")), - }, - }, - ); + await expectTelegramTextDeliveryFailure({ + bestEffort: false, + expectedStatus: "error", + expectedErrorFragment: "boom", + }); }); it("retries transient text direct delivery failures before succeeding", async () => { const previousFastMode = process.env.OPENCLAW_TEST_FAST; process.env.OPENCLAW_TEST_FAST = "1"; try { - await withTelegramAnnounceFixture( - async ({ home, storePath, deps }) => { - mockAgentPayloads([{ text: "hello from cron" }]); - - const res = await runTelegramAnnounceTurn({ - home, - storePath, - deps, - delivery: { - mode: "announce", - channel: "telegram", - to: "123", - bestEffort: false, - }, - }); - - expect(res.status).toBe("ok"); - expect(res.delivered).toBe(true); - expect(res.deliveryAttempted).toBe(true); - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); + await withTelegramTextDelivery( + { bestEffort: false }, + async ({ deps, res }) => { + expectSuccessfulTelegramTextDelivery({ res, deps }); expect(deps.sendMessageTelegram).toHaveBeenCalledTimes(2); expect(deps.sendMessageTelegram).toHaveBeenLastCalledWith( "123", @@ -484,10 +473,7 @@ describe("runCronIsolatedAgentTurn", () => { it("delivers text directly when best-effort is enabled", async () => { const { res, deps } = await runTelegramDeliveryResult(true); - expect(res.status).toBe("ok"); - expect(res.delivered).toBe(true); - expect(res.deliveryAttempted).toBe(true); - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); + expectSuccessfulTelegramTextDelivery({ res, deps }); expectDirectTelegramDelivery(deps, { chatId: "123", text: "hello from cron", diff --git a/src/cron/isolated-agent/delivery-dispatch.double-announce.test.ts b/src/cron/isolated-agent/delivery-dispatch.double-announce.test.ts index f9a7d90a276..b245b4b9c94 100644 --- 
a/src/cron/isolated-agent/delivery-dispatch.double-announce.test.ts +++ b/src/cron/isolated-agent/delivery-dispatch.double-announce.test.ts @@ -10,7 +10,7 @@ * returning so the timer correctly skips the system-event fallback. */ -import { beforeEach, describe, expect, it, vi } from "vitest"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; // --- Module mocks (must be hoisted before imports) --- @@ -49,7 +49,11 @@ vi.mock("./subagent-followup.js", () => ({ import { countActiveDescendantRuns } from "../../agents/subagent-registry.js"; import { deliverOutboundPayloads } from "../../infra/outbound/deliver.js"; import { shouldEnqueueCronMainSummary } from "../heartbeat-policy.js"; -import { dispatchCronDelivery } from "./delivery-dispatch.js"; +import { + dispatchCronDelivery, + getCompletedDirectCronDeliveriesCountForTests, + resetCompletedDirectCronDeliveriesForTests, +} from "./delivery-dispatch.js"; import type { DeliveryTargetResolution } from "./delivery-target.js"; import type { RunCronAgentTurnResult } from "./run.js"; import { @@ -84,7 +88,11 @@ function makeWithRunSession() { }); } -function makeBaseParams(overrides: { synthesizedText?: string; deliveryRequested?: boolean }) { +function makeBaseParams(overrides: { + synthesizedText?: string; + deliveryRequested?: boolean; + runSessionId?: string; +}) { const resolvedDelivery = makeResolvedDelivery(); return { cfg: {} as never, @@ -98,14 +106,13 @@ function makeBaseParams(overrides: { synthesizedText?: string; deliveryRequested } as never, agentId: "main", agentSessionKey: "agent:main", - runSessionId: "run-123", + runSessionId: overrides.runSessionId ?? "run-123", runStartedAt: Date.now(), runEndedAt: Date.now(), timeoutMs: 30_000, resolvedDelivery, deliveryRequested: overrides.deliveryRequested ?? true, skipHeartbeatDelivery: false, - skipMessagingToolDelivery: false, deliveryBestEffort: false, deliveryPayloadHasStructuredContent: false, deliveryPayloads: overrides.synthesizedText ? 
[{ text: overrides.synthesizedText }] : [], @@ -127,6 +134,7 @@ function makeBaseParams(overrides: { synthesizedText?: string; deliveryRequested describe("dispatchCronDelivery — double-announce guard", () => { beforeEach(() => { vi.clearAllMocks(); + resetCompletedDirectCronDeliveriesForTests(); vi.mocked(countActiveDescendantRuns).mockReturnValue(0); vi.mocked(expectsSubagentFollowup).mockReturnValue(false); vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false); @@ -134,6 +142,10 @@ describe("dispatchCronDelivery — double-announce guard", () => { vi.mocked(waitForDescendantSubagentSummary).mockResolvedValue(undefined); }); + afterEach(() => { + vi.unstubAllEnvs(); + }); + it("early return (active subagent) sets deliveryAttempted=true so timer skips enqueueSystemEvent", async () => { // countActiveDescendantRuns returns >0 → enters wait block; still >0 after wait → early return vi.mocked(countActiveDescendantRuns).mockReturnValue(2); @@ -214,6 +226,9 @@ describe("dispatchCronDelivery — double-announce guard", () => { payloads: [{ text: "Detailed child result, everything finished successfully." }], }), ); + expect(deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ skipQueue: true }), + ); }); it("normal text delivery sends exactly once and sets deliveryAttempted=true", async () => { @@ -255,6 +270,74 @@ describe("dispatchCronDelivery — double-announce guard", () => { expect(deliverOutboundPayloads).toHaveBeenCalledTimes(1); }); + it("retries transient direct announce failures before succeeding", async () => { + vi.stubEnv("OPENCLAW_TEST_FAST", "1"); + vi.mocked(countActiveDescendantRuns).mockReturnValue(0); + vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false); + vi.mocked(deliverOutboundPayloads) + .mockRejectedValueOnce(new Error("ECONNRESET while sending")) + .mockResolvedValueOnce([{ ok: true } as never]); + + const params = makeBaseParams({ synthesizedText: "Retry me once." 
}); + const state = await dispatchCronDelivery(params); + + expect(state.result).toBeUndefined(); + expect(state.deliveryAttempted).toBe(true); + expect(state.delivered).toBe(true); + expect(deliverOutboundPayloads).toHaveBeenCalledTimes(2); + }); + + it("keeps direct announce delivery idempotent across replay for the same run session", async () => { + vi.mocked(countActiveDescendantRuns).mockReturnValue(0); + vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false); + vi.mocked(deliverOutboundPayloads).mockResolvedValue([{ ok: true } as never]); + + const params = makeBaseParams({ synthesizedText: "Replay-safe cron update." }); + const first = await dispatchCronDelivery(params); + const second = await dispatchCronDelivery(params); + + expect(first.delivered).toBe(true); + expect(second.delivered).toBe(true); + expect(second.deliveryAttempted).toBe(true); + expect(deliverOutboundPayloads).toHaveBeenCalledTimes(1); + }); + + it("prunes the completed-delivery cache back to the entry cap", async () => { + vi.mocked(countActiveDescendantRuns).mockReturnValue(0); + vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false); + vi.mocked(deliverOutboundPayloads).mockResolvedValue([{ ok: true } as never]); + + for (let i = 0; i < 2003; i += 1) { + const params = makeBaseParams({ + synthesizedText: `Replay-safe cron update ${i}.`, + runSessionId: `run-${i}`, + }); + const state = await dispatchCronDelivery(params); + expect(state.delivered).toBe(true); + } + + expect(getCompletedDirectCronDeliveriesCountForTests()).toBe(2000); + }); + + it("does not retry permanent direct announce failures", async () => { + vi.stubEnv("OPENCLAW_TEST_FAST", "1"); + vi.mocked(countActiveDescendantRuns).mockReturnValue(0); + vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false); + vi.mocked(deliverOutboundPayloads).mockRejectedValue(new Error("chat not found")); + + const params = makeBaseParams({ synthesizedText: "This should fail once." 
}); + const state = await dispatchCronDelivery(params); + + expect(deliverOutboundPayloads).toHaveBeenCalledTimes(1); + expect(state.result).toEqual( + expect.objectContaining({ + status: "error", + error: "Error: chat not found", + deliveryAttempted: true, + }), + ); + }); + it("no delivery requested means deliveryAttempted stays false and no delivery is sent", async () => { const params = makeBaseParams({ synthesizedText: "Task done.", @@ -265,4 +348,69 @@ describe("dispatchCronDelivery — double-announce guard", () => { expect(deliverOutboundPayloads).not.toHaveBeenCalled(); expect(state.deliveryAttempted).toBe(false); }); + + it("text delivery always bypasses the write-ahead queue", async () => { + vi.mocked(countActiveDescendantRuns).mockReturnValue(0); + vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false); + vi.mocked(deliverOutboundPayloads).mockResolvedValue([{ ok: true } as never]); + + const params = makeBaseParams({ synthesizedText: "Daily digest ready." }); + const state = await dispatchCronDelivery(params); + + expect(state.delivered).toBe(true); + expect(state.deliveryAttempted).toBe(true); + expect(deliverOutboundPayloads).toHaveBeenCalledTimes(1); + + expect(deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ + channel: "telegram", + to: "123456", + payloads: [{ text: "Daily digest ready." }], + skipQueue: true, + }), + ); + }); + + it("structured/thread delivery also bypasses the write-ahead queue", async () => { + vi.mocked(countActiveDescendantRuns).mockReturnValue(0); + vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false); + vi.mocked(deliverOutboundPayloads).mockResolvedValue([{ ok: true } as never]); + + const params = makeBaseParams({ synthesizedText: "Report attached." 
}); + // Simulate structured content so useDirectDelivery path is taken (no retryTransient) + (params as Record).deliveryPayloadHasStructuredContent = true; + await dispatchCronDelivery(params); + + expect(deliverOutboundPayloads).toHaveBeenCalledTimes(1); + expect(deliverOutboundPayloads).toHaveBeenCalledWith( + expect.objectContaining({ skipQueue: true }), + ); + }); + + it("transient retry delivers exactly once with skipQueue on both attempts", async () => { + vi.mocked(countActiveDescendantRuns).mockReturnValue(0); + vi.mocked(isLikelyInterimCronMessage).mockReturnValue(false); + + // First call throws a transient error, second call succeeds. + vi.mocked(deliverOutboundPayloads) + .mockRejectedValueOnce(new Error("gateway timeout")) + .mockResolvedValueOnce([{ ok: true } as never]); + + vi.stubEnv("OPENCLAW_TEST_FAST", "1"); + try { + const params = makeBaseParams({ synthesizedText: "Retry test." }); + const state = await dispatchCronDelivery(params); + + expect(state.delivered).toBe(true); + expect(state.deliveryAttempted).toBe(true); + // Two calls total: first failed transiently, second succeeded. 
+ expect(deliverOutboundPayloads).toHaveBeenCalledTimes(2); + + const calls = vi.mocked(deliverOutboundPayloads).mock.calls; + expect(calls[0][0]).toEqual(expect.objectContaining({ skipQueue: true })); + expect(calls[1][0]).toEqual(expect.objectContaining({ skipQueue: true })); + } finally { + vi.unstubAllEnvs(); + } + }); }); diff --git a/src/cron/isolated-agent/delivery-dispatch.named-agent.test.ts b/src/cron/isolated-agent/delivery-dispatch.named-agent.test.ts index 6de82039241..c5d7ec9b41c 100644 --- a/src/cron/isolated-agent/delivery-dispatch.named-agent.test.ts +++ b/src/cron/isolated-agent/delivery-dispatch.named-agent.test.ts @@ -96,4 +96,13 @@ describe("resolveCronDeliveryBestEffort", () => { } as never; expect(resolveCronDeliveryBestEffort(job)).toBe(true); }); + + it("lets explicit delivery.bestEffort=false override legacy payload bestEffortDeliver=true", async () => { + const { resolveCronDeliveryBestEffort } = await import("./delivery-dispatch.js"); + const job = { + delivery: { bestEffort: false }, + payload: { kind: "agentTurn", bestEffortDeliver: true }, + } as never; + expect(resolveCronDeliveryBestEffort(job)).toBe(false); + }); }); diff --git a/src/cron/isolated-agent/delivery-dispatch.ts b/src/cron/isolated-agent/delivery-dispatch.ts index a3a98b245d0..6ddddf20669 100644 --- a/src/cron/isolated-agent/delivery-dispatch.ts +++ b/src/cron/isolated-agent/delivery-dispatch.ts @@ -5,7 +5,10 @@ import { createOutboundSendDeps, type CliDeps } from "../../cli/outbound-send-de import type { OpenClawConfig } from "../../config/config.js"; import { callGateway } from "../../gateway/call.js"; import { sleepWithAbort } from "../../infra/backoff.js"; -import { deliverOutboundPayloads } from "../../infra/outbound/deliver.js"; +import { + deliverOutboundPayloads, + type OutboundDeliveryResult, +} from "../../infra/outbound/deliver.js"; import { resolveAgentOutboundIdentity } from "../../infra/outbound/identity.js"; import { buildOutboundSessionContext } from 
"../../infra/outbound/session-context.js"; import { logWarn } from "../../logger.js"; @@ -83,7 +86,7 @@ type DispatchCronDeliveryParams = { resolvedDelivery: DeliveryTargetResolution; deliveryRequested: boolean; skipHeartbeatDelivery: boolean; - skipMessagingToolDelivery: boolean; + skipMessagingToolDelivery?: boolean; deliveryBestEffort: boolean; deliveryPayloadHasStructuredContent: boolean; deliveryPayloads: ReplyPayload[]; @@ -131,6 +134,91 @@ const PERMANENT_DIRECT_CRON_DELIVERY_ERROR_PATTERNS: readonly RegExp[] = [ /outbound not configured for channel/i, ]; +type CompletedDirectCronDelivery = { + ts: number; + results: OutboundDeliveryResult[]; +}; + +const COMPLETED_DIRECT_CRON_DELIVERIES = new Map(); + +function cloneDeliveryResults( + results: readonly OutboundDeliveryResult[], +): OutboundDeliveryResult[] { + return results.map((result) => ({ + ...result, + ...(result.meta ? { meta: { ...result.meta } } : {}), + })); +} + +function pruneCompletedDirectCronDeliveries(now: number) { + const ttlMs = process.env.OPENCLAW_TEST_FAST === "1" ? 
60_000 : 24 * 60 * 60 * 1000; + for (const [key, entry] of COMPLETED_DIRECT_CRON_DELIVERIES) { + if (now - entry.ts >= ttlMs) { + COMPLETED_DIRECT_CRON_DELIVERIES.delete(key); + } + } + const maxEntries = 2000; + if (COMPLETED_DIRECT_CRON_DELIVERIES.size <= maxEntries) { + return; + } + const entries = [...COMPLETED_DIRECT_CRON_DELIVERIES.entries()].toSorted( + (a, b) => a[1].ts - b[1].ts, + ); + const toDelete = COMPLETED_DIRECT_CRON_DELIVERIES.size - maxEntries; + for (let i = 0; i < toDelete; i += 1) { + const oldest = entries[i]; + if (!oldest) { + break; + } + COMPLETED_DIRECT_CRON_DELIVERIES.delete(oldest[0]); + } +} + +function rememberCompletedDirectCronDelivery( + idempotencyKey: string, + results: readonly OutboundDeliveryResult[], +) { + const now = Date.now(); + COMPLETED_DIRECT_CRON_DELIVERIES.set(idempotencyKey, { + ts: now, + results: cloneDeliveryResults(results), + }); + pruneCompletedDirectCronDeliveries(now); +} + +function getCompletedDirectCronDelivery( + idempotencyKey: string, +): OutboundDeliveryResult[] | undefined { + const now = Date.now(); + pruneCompletedDirectCronDeliveries(now); + const cached = COMPLETED_DIRECT_CRON_DELIVERIES.get(idempotencyKey); + if (!cached) { + return undefined; + } + return cloneDeliveryResults(cached.results); +} + +function buildDirectCronDeliveryIdempotencyKey(params: { + runSessionId: string; + delivery: SuccessfulDeliveryTarget; +}): string { + const threadId = + params.delivery.threadId == null || params.delivery.threadId === "" + ? "" + : String(params.delivery.threadId); + const accountId = params.delivery.accountId?.trim() ?? 
""; + const normalizedTo = normalizeDeliveryTarget(params.delivery.channel, params.delivery.to); + return `cron-direct-delivery:v1:${params.runSessionId}:${params.delivery.channel}:${accountId}:${normalizedTo}:${threadId}`; +} + +export function resetCompletedDirectCronDeliveriesForTests() { + COMPLETED_DIRECT_CRON_DELIVERIES.clear(); +} + +export function getCompletedDirectCronDeliveriesCountForTests(): number { + return COMPLETED_DIRECT_CRON_DELIVERIES.size; +} + function summarizeDirectCronDeliveryError(error: unknown): string { if (error instanceof Error) { return error.message || "error"; @@ -157,7 +245,9 @@ function isTransientDirectCronDeliveryError(error: unknown): boolean { } function resolveDirectCronRetryDelaysMs(): readonly number[] { - return process.env.OPENCLAW_TEST_FAST === "1" ? [8, 16, 32] : [5_000, 10_000, 20_000]; + return process.env.NODE_ENV === "test" && process.env.OPENCLAW_TEST_FAST === "1" + ? [8, 16, 32] + : [5_000, 10_000, 20_000]; } async function retryTransientDirectCronDelivery(params: { @@ -192,15 +282,17 @@ async function retryTransientDirectCronDelivery(params: { export async function dispatchCronDelivery( params: DispatchCronDeliveryParams, ): Promise { + const skipMessagingToolDelivery = params.skipMessagingToolDelivery === true; let summary = params.summary; let outputText = params.outputText; let synthesizedText = params.synthesizedText; let deliveryPayloads = params.deliveryPayloads; - // `true` means we confirmed at least one outbound send reached the target. - // Keep this strict so timer fallback can safely decide whether to wake main. - let delivered = params.skipMessagingToolDelivery; - let deliveryAttempted = params.skipMessagingToolDelivery; + // Shared callers can treat a matching message-tool send as the completed + // delivery path. Cron-owned callers keep this false so direct cron delivery + // remains the only source of delivered state. 
+ let delivered = skipMessagingToolDelivery; + let deliveryAttempted = skipMessagingToolDelivery; const failDeliveryTarget = (error: string) => params.withRunSession({ status: "error", @@ -217,6 +309,10 @@ export async function dispatchCronDelivery( options?: { retryTransient?: boolean }, ): Promise => { const identity = resolveAgentOutboundIdentity(params.cfgWithAgentDefaults, params.agentId); + const deliveryIdempotencyKey = buildDirectCronDeliveryIdempotencyKey({ + runSessionId: params.runSessionId, + delivery, + }); try { const payloadsForDelivery = deliveryPayloads.length > 0 @@ -236,6 +332,12 @@ export async function dispatchCronDelivery( }); } deliveryAttempted = true; + const cachedResults = getCompletedDirectCronDelivery(deliveryIdempotencyKey); + if (cachedResults) { + // Cached entries are only recorded after a successful non-empty delivery. + delivered = true; + return null; + } const deliverySession = buildOutboundSessionContext({ cfg: params.cfgWithAgentDefaults, agentId: params.agentId, @@ -254,6 +356,12 @@ export async function dispatchCronDelivery( bestEffort: params.deliveryBestEffort, deps: createOutboundSendDeps(params.deps), abortSignal: params.abortSignal, + // Isolated cron direct delivery uses its own transient retry loop. + // Keep all attempts out of the write-ahead delivery queue so a + // late-successful first send cannot leave behind a failed queue + // entry that replays on the next restart. + // See: https://github.com/openclaw/openclaw/issues/40545 + skipQueue: true, }); const deliveryResults = options?.retryTransient ? 
await retryTransientDirectCronDelivery({ @@ -263,6 +371,9 @@ export async function dispatchCronDelivery( }) : await runDelivery(); delivered = deliveryResults.length > 0; + if (delivered) { + rememberCompletedDirectCronDelivery(deliveryIdempotencyKey, deliveryResults); + } return null; } catch (err) { if (!params.deliveryBestEffort) { @@ -404,11 +515,7 @@ export async function dispatchCronDelivery( } }; - if ( - params.deliveryRequested && - !params.skipHeartbeatDelivery && - !params.skipMessagingToolDelivery - ) { + if (params.deliveryRequested && !params.skipHeartbeatDelivery && !skipMessagingToolDelivery) { if (!params.resolvedDelivery.ok) { if (!params.deliveryBestEffort) { return { diff --git a/src/cron/isolated-agent/delivery-target.test.ts b/src/cron/isolated-agent/delivery-target.test.ts index 0965c54d6b9..df7d29d419f 100644 --- a/src/cron/isolated-agent/delivery-target.test.ts +++ b/src/cron/isolated-agent/delivery-target.test.ts @@ -13,6 +13,10 @@ vi.mock("../../infra/outbound/channel-selection.js", () => ({ .mockResolvedValue({ channel: "telegram", configured: ["telegram"] }), })); +vi.mock("../../infra/outbound/target-resolver.js", () => ({ + maybeResolveIdLikeTarget: vi.fn(), +})); + vi.mock("../../pairing/pairing-store.js", () => ({ readChannelAllowFromStoreSync: vi.fn(() => []), })); @@ -23,6 +27,7 @@ vi.mock("../../web/accounts.js", () => ({ import { loadSessionStore } from "../../config/sessions.js"; import { resolveMessageChannelSelection } from "../../infra/outbound/channel-selection.js"; +import { maybeResolveIdLikeTarget } from "../../infra/outbound/target-resolver.js"; import { readChannelAllowFromStoreSync } from "../../pairing/pairing-store.js"; import { resolveWhatsAppAccount } from "../../web/accounts.js"; import { resolveDeliveryTarget } from "./delivery-target.js"; @@ -59,6 +64,23 @@ function setMainSessionEntry(entry?: SessionStore[string]) { vi.mocked(loadSessionStore).mockReturnValue(store); } +function setLastSessionEntry(params: { + 
sessionId: string; + lastChannel: string; + lastTo: string; + lastThreadId?: string; + lastAccountId?: string; +}) { + setMainSessionEntry({ + sessionId: params.sessionId, + updatedAt: 1000, + lastChannel: params.lastChannel, + lastTo: params.lastTo, + ...(params.lastThreadId ? { lastThreadId: params.lastThreadId } : {}), + ...(params.lastAccountId ? { lastAccountId: params.lastAccountId } : {}), + }); +} + function setWhatsAppAllowFrom(allowFrom: string[]) { vi.mocked(resolveWhatsAppAccount).mockReturnValue({ allowFrom, @@ -81,11 +103,17 @@ async function resolveForAgent(params: { }); } +async function resolveLastTarget(cfg: OpenClawConfig) { + return resolveForAgent({ + cfg, + target: { channel: "last", to: undefined }, + }); +} + describe("resolveDeliveryTarget", () => { it("reroutes implicit whatsapp delivery to authorized allowFrom recipient", async () => { - setMainSessionEntry({ + setLastSessionEntry({ sessionId: "sess-w1", - updatedAt: 1000, lastChannel: "whatsapp", lastTo: "+15550000099", }); @@ -93,16 +121,15 @@ describe("resolveDeliveryTarget", () => { setStoredWhatsAppAllowFrom(["+15550000001"]); const cfg = makeCfg({ bindings: [] }); - const result = await resolveDeliveryTarget(cfg, AGENT_ID, { channel: "last", to: undefined }); + const result = await resolveLastTarget(cfg); expect(result.channel).toBe("whatsapp"); expect(result.to).toBe("+15550000001"); }); it("keeps explicit whatsapp target unchanged", async () => { - setMainSessionEntry({ + setLastSessionEntry({ sessionId: "sess-w2", - updatedAt: 1000, lastChannel: "whatsapp", lastTo: "+15550000099", }); @@ -152,6 +179,30 @@ describe("resolveDeliveryTarget", () => { expect(result.accountId).toBeUndefined(); }); + it("applies id-like target normalization before returning delivery targets", async () => { + setMainSessionEntry(undefined); + vi.mocked(maybeResolveIdLikeTarget).mockClear(); + vi.mocked(maybeResolveIdLikeTarget).mockResolvedValueOnce({ + to: "user:123456789", + kind: "user", + source: 
"directory", + }); + + const result = await resolveDeliveryTarget(makeCfg({ bindings: [] }), AGENT_ID, { + channel: "telegram", + to: "123456789", + }); + + expect(result.ok).toBe(true); + expect(result.to).toBe("user:123456789"); + expect(maybeResolveIdLikeTarget).toHaveBeenCalledWith( + expect.objectContaining({ + channel: "telegram", + input: "123456789", + }), + ); + }); + it("selects correct binding when multiple agents have bindings", async () => { setMainSessionEntry(undefined); @@ -191,9 +242,8 @@ describe("resolveDeliveryTarget", () => { }); it("drops session threadId when destination does not match the previous recipient", async () => { - setMainSessionEntry({ + setLastSessionEntry({ sessionId: "sess-2", - updatedAt: 1000, lastChannel: "telegram", lastTo: "999999", lastThreadId: "thread-1", @@ -204,9 +254,8 @@ describe("resolveDeliveryTarget", () => { }); it("keeps session threadId when destination matches the previous recipient", async () => { - setMainSessionEntry({ + setLastSessionEntry({ sessionId: "sess-3", - updatedAt: 1000, lastChannel: "telegram", lastTo: "123456", lastThreadId: "thread-2", @@ -219,10 +268,7 @@ describe("resolveDeliveryTarget", () => { it("uses single configured channel when neither explicit nor session channel exists", async () => { setMainSessionEntry(undefined); - const result = await resolveForAgent({ - cfg: makeCfg({ bindings: [] }), - target: { channel: "last", to: undefined }, - }); + const result = await resolveLastTarget(makeCfg({ bindings: [] })); expect(result.channel).toBe("telegram"); expect(result.ok).toBe(false); if (result.ok) { @@ -239,10 +285,7 @@ describe("resolveDeliveryTarget", () => { new Error("Channel is required when multiple channels are configured: telegram, slack"), ); - const result = await resolveForAgent({ - cfg: makeCfg({ bindings: [] }), - target: { channel: "last", to: undefined }, - }); + const result = await resolveLastTarget(makeCfg({ bindings: [] })); expect(result.channel).toBeUndefined(); 
expect(result.to).toBeUndefined(); expect(result.ok).toBe(false); @@ -279,17 +322,13 @@ describe("resolveDeliveryTarget", () => { }); it("uses main session channel when channel=last and session route exists", async () => { - setMainSessionEntry({ + setLastSessionEntry({ sessionId: "sess-4", - updatedAt: 1000, lastChannel: "telegram", lastTo: "987654", }); - const result = await resolveForAgent({ - cfg: makeCfg({ bindings: [] }), - target: { channel: "last", to: undefined }, - }); + const result = await resolveLastTarget(makeCfg({ bindings: [] })); expect(result.channel).toBe("telegram"); expect(result.to).toBe("987654"); @@ -297,9 +336,8 @@ describe("resolveDeliveryTarget", () => { }); it("explicit delivery.accountId overrides session-derived accountId", async () => { - setMainSessionEntry({ + setLastSessionEntry({ sessionId: "sess-5", - updatedAt: 1000, lastChannel: "telegram", lastTo: "chat-999", lastAccountId: "default", diff --git a/src/cron/isolated-agent/delivery-target.ts b/src/cron/isolated-agent/delivery-target.ts index 1c27ed08b55..33bd80d4118 100644 --- a/src/cron/isolated-agent/delivery-target.ts +++ b/src/cron/isolated-agent/delivery-target.ts @@ -6,6 +6,7 @@ import { resolveStorePath, } from "../../config/sessions.js"; import { resolveMessageChannelSelection } from "../../infra/outbound/channel-selection.js"; +import { maybeResolveIdLikeTarget } from "../../infra/outbound/target-resolver.js"; import type { OutboundChannel } from "../../infra/outbound/targets.js"; import { resolveOutboundTarget, @@ -190,10 +191,16 @@ export async function resolveDeliveryTarget( error: docked.error, }; } + const idLikeTarget = await maybeResolveIdLikeTarget({ + cfg, + channel, + input: docked.to, + accountId, + }); return { ok: true, channel, - to: docked.to, + to: idLikeTarget?.to ?? 
docked.to, accountId, threadId, mode, diff --git a/src/cron/isolated-agent/run.fast-mode.test.ts b/src/cron/isolated-agent/run.fast-mode.test.ts new file mode 100644 index 00000000000..abe50ea5554 --- /dev/null +++ b/src/cron/isolated-agent/run.fast-mode.test.ts @@ -0,0 +1,115 @@ +import { describe, expect, it } from "vitest"; +import { + makeIsolatedAgentTurnJob, + makeIsolatedAgentTurnParams, + setupRunCronIsolatedAgentTurnSuite, +} from "./run.suite-helpers.js"; +import { + loadRunCronIsolatedAgentTurn, + makeCronSession, + resolveCronSessionMock, + runEmbeddedPiAgentMock, + runWithModelFallbackMock, +} from "./run.test-harness.js"; + +const runCronIsolatedAgentTurn = await loadRunCronIsolatedAgentTurn(); + +const OPENAI_GPT4_MODEL = "openai/gpt-4"; + +function mockSuccessfulModelFallback() { + runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => { + await run(provider, model); + return { + result: { + payloads: [{ text: "ok" }], + meta: { agentMeta: { usage: { input: 10, output: 20 } } }, + }, + provider, + model, + attempts: [], + }; + }); +} + +async function runFastModeCase(params: { + configFastMode: boolean; + expectedFastMode: boolean; + message: string; + sessionFastMode?: boolean; +}) { + const baseSession = makeCronSession(); + resolveCronSessionMock.mockReturnValue( + params.sessionFastMode === undefined + ? 
baseSession + : makeCronSession({ + sessionEntry: { + ...baseSession.sessionEntry, + fastMode: params.sessionFastMode, + }, + }), + ); + mockSuccessfulModelFallback(); + + const result = await runCronIsolatedAgentTurn( + makeIsolatedAgentTurnParams({ + cfg: { + agents: { + defaults: { + models: { + [OPENAI_GPT4_MODEL]: { + params: { + fastMode: params.configFastMode, + }, + }, + }, + }, + }, + }, + job: makeIsolatedAgentTurnJob({ + payload: { + kind: "agentTurn", + message: params.message, + model: OPENAI_GPT4_MODEL, + }, + }), + }), + ); + + expect(result.status).toBe("ok"); + expect(runEmbeddedPiAgentMock).toHaveBeenCalledOnce(); + expect(runEmbeddedPiAgentMock.mock.calls[0][0]).toMatchObject({ + provider: "openai", + model: "gpt-4", + fastMode: params.expectedFastMode, + }); +} + +describe("runCronIsolatedAgentTurn — fast mode", () => { + setupRunCronIsolatedAgentTurnSuite(); + + it("passes config-driven fast mode into embedded cron runs", async () => { + await runFastModeCase({ + configFastMode: true, + expectedFastMode: true, + message: "test fast mode", + }); + }); + + it("honors session fastMode=false over config fastMode=true", async () => { + await runFastModeCase({ + configFastMode: true, + expectedFastMode: false, + message: "test fast mode override", + sessionFastMode: false, + }); + }); + + it("honors session fastMode=true over config fastMode=false", async () => { + await runFastModeCase({ + configFastMode: false, + expectedFastMode: true, + message: "test fast mode session override", + sessionFastMode: true, + }); + }); +}); diff --git a/src/cron/isolated-agent/run.interim-retry.test.ts b/src/cron/isolated-agent/run.interim-retry.test.ts index 90d663ed020..6f01a2e9232 100644 --- a/src/cron/isolated-agent/run.interim-retry.test.ts +++ b/src/cron/isolated-agent/run.interim-retry.test.ts @@ -7,6 +7,7 @@ import { countActiveDescendantRunsMock, listDescendantRunsForRequesterMock, loadRunCronIsolatedAgentTurn, + mockRunCronFallbackPassthrough, 
pickLastNonEmptyTextFromPayloadsMock, runEmbeddedPiAgentMock, runWithModelFallbackMock, @@ -17,13 +18,6 @@ const runCronIsolatedAgentTurn = await loadRunCronIsolatedAgentTurn(); describe("runCronIsolatedAgentTurn — interim ack retry", () => { setupRunCronIsolatedAgentTurnSuite(); - const mockFallbackPassthrough = () => { - runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => { - const result = await run(provider, model); - return { result, provider, model, attempts: [] }; - }); - }; - const runTurnAndExpectOk = async (expectedFallbackCalls: number, expectedAgentCalls: number) => { const result = await runCronIsolatedAgentTurn(makeIsolatedAgentTurnParams()); expect(result.status).toBe("ok"); @@ -62,7 +56,7 @@ describe("runCronIsolatedAgentTurn — interim ack retry", () => { meta: { agentMeta: { usage: { input: 10, output: 20 } } }, }); - mockFallbackPassthrough(); + mockRunCronFallbackPassthrough(); await runTurnAndExpectOk(2, 2); expect(runEmbeddedPiAgentMock.mock.calls[1]?.[0]?.prompt).toContain( "previous response was only an acknowledgement", @@ -76,7 +70,7 @@ describe("runCronIsolatedAgentTurn — interim ack retry", () => { meta: { agentMeta: { usage: { input: 10, output: 20 } } }, }); - mockFallbackPassthrough(); + mockRunCronFallbackPassthrough(); await runTurnAndExpectOk(1, 1); }); @@ -93,7 +87,7 @@ describe("runCronIsolatedAgentTurn — interim ack retry", () => { ]); countActiveDescendantRunsMock.mockReturnValue(0); - mockFallbackPassthrough(); + mockRunCronFallbackPassthrough(); await runTurnAndExpectOk(1, 1); }); }); diff --git a/src/cron/isolated-agent/run.message-tool-policy.test.ts b/src/cron/isolated-agent/run.message-tool-policy.test.ts index 360f0794616..a92b19f5337 100644 --- a/src/cron/isolated-agent/run.message-tool-policy.test.ts +++ b/src/cron/isolated-agent/run.message-tool-policy.test.ts @@ -2,12 +2,12 @@ import { afterEach, beforeEach, describe, expect, it } from "vitest"; import { clearFastTestEnv, 
loadRunCronIsolatedAgentTurn, + mockRunCronFallbackPassthrough, resetRunCronIsolatedAgentTurnHarness, resolveCronDeliveryPlanMock, resolveDeliveryTargetMock, restoreFastTestEnv, runEmbeddedPiAgentMock, - runWithModelFallbackMock, } from "./run.test-harness.js"; const runCronIsolatedAgentTurn = await loadRunCronIsolatedAgentTurn(); @@ -32,12 +32,18 @@ function makeParams() { describe("runCronIsolatedAgentTurn message tool policy", () => { let previousFastTestEnv: string | undefined; - const mockFallbackPassthrough = () => { - runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => { - const result = await run(provider, model); - return { result, provider, model, attempts: [] }; - }); - }; + async function expectMessageToolDisabledForPlan(plan: { + requested: boolean; + mode: "none" | "announce"; + channel?: string; + to?: string; + }) { + mockRunCronFallbackPassthrough(); + resolveCronDeliveryPlanMock.mockReturnValue(plan); + await runCronIsolatedAgentTurn(makeParams()); + expect(runEmbeddedPiAgentMock).toHaveBeenCalledTimes(1); + expect(runEmbeddedPiAgentMock.mock.calls[0]?.[0]?.disableMessageTool).toBe(true); + } beforeEach(() => { previousFastTestEnv = clearFastTestEnv(); @@ -55,31 +61,35 @@ describe("runCronIsolatedAgentTurn message tool policy", () => { restoreFastTestEnv(previousFastTestEnv); }); - it('keeps the message tool enabled when delivery.mode is "none"', async () => { - mockFallbackPassthrough(); - resolveCronDeliveryPlanMock.mockReturnValue({ + it('disables the message tool when delivery.mode is "none"', async () => { + await expectMessageToolDisabledForPlan({ requested: false, mode: "none", }); - - await runCronIsolatedAgentTurn(makeParams()); - - expect(runEmbeddedPiAgentMock).toHaveBeenCalledTimes(1); - expect(runEmbeddedPiAgentMock.mock.calls[0]?.[0]?.disableMessageTool).toBe(false); }); it("disables the message tool when cron delivery is active", async () => { - mockFallbackPassthrough(); - 
resolveCronDeliveryPlanMock.mockReturnValue({ + await expectMessageToolDisabledForPlan({ requested: true, mode: "announce", channel: "telegram", to: "123", }); + }); - await runCronIsolatedAgentTurn(makeParams()); + it("keeps the message tool enabled for shared callers when delivery is not requested", async () => { + mockRunCronFallbackPassthrough(); + resolveCronDeliveryPlanMock.mockReturnValue({ + requested: false, + mode: "none", + }); + + await runCronIsolatedAgentTurn({ + ...makeParams(), + deliveryContract: "shared", + }); expect(runEmbeddedPiAgentMock).toHaveBeenCalledTimes(1); - expect(runEmbeddedPiAgentMock.mock.calls[0]?.[0]?.disableMessageTool).toBe(true); + expect(runEmbeddedPiAgentMock.mock.calls[0]?.[0]?.disableMessageTool).toBe(false); }); }); diff --git a/src/cron/isolated-agent/run.sandbox-config-preserved.test.ts b/src/cron/isolated-agent/run.sandbox-config-preserved.test.ts index 28f3d87cb09..edaee62daa6 100644 --- a/src/cron/isolated-agent/run.sandbox-config-preserved.test.ts +++ b/src/cron/isolated-agent/run.sandbox-config-preserved.test.ts @@ -54,6 +54,31 @@ function makeParams(overrides?: Record) { }; } +function expectDefaultSandboxPreserved( + runCfg: + | { + agents?: { defaults?: { sandbox?: unknown } }; + } + | undefined, +) { + expect(runCfg?.agents?.defaults?.sandbox).toEqual({ + mode: "all", + workspaceAccess: "rw", + docker: { + network: "none", + dangerouslyAllowContainerNamespaceJoin: true, + dangerouslyAllowExternalBindSources: true, + }, + browser: { + enabled: true, + autoStart: false, + }, + prune: { + maxAgeDays: 7, + }, + }); +} + describe("runCronIsolatedAgentTurn sandbox config preserved", () => { let previousFastTestEnv: string | undefined; @@ -79,22 +104,7 @@ describe("runCronIsolatedAgentTurn sandbox config preserved", () => { expect(runWithModelFallbackMock).toHaveBeenCalledTimes(1); const runCfg = runWithModelFallbackMock.mock.calls[0]?.[0]?.cfg; - expect(runCfg?.agents?.defaults?.sandbox).toEqual({ - mode: "all", - 
workspaceAccess: "rw", - docker: { - network: "none", - dangerouslyAllowContainerNamespaceJoin: true, - dangerouslyAllowExternalBindSources: true, - }, - browser: { - enabled: true, - autoStart: false, - }, - prune: { - maxAgeDays: 7, - }, - }); + expectDefaultSandboxPreserved(runCfg); }); it("keeps global sandbox defaults when agent override is partial", async () => { @@ -118,22 +128,7 @@ describe("runCronIsolatedAgentTurn sandbox config preserved", () => { const runCfg = runWithModelFallbackMock.mock.calls[0]?.[0]?.cfg; const resolvedSandbox = resolveSandboxConfigForAgent(runCfg, "specialist"); - expect(runCfg?.agents?.defaults?.sandbox).toEqual({ - mode: "all", - workspaceAccess: "rw", - docker: { - network: "none", - dangerouslyAllowContainerNamespaceJoin: true, - dangerouslyAllowExternalBindSources: true, - }, - browser: { - enabled: true, - autoStart: false, - }, - prune: { - maxAgeDays: 7, - }, - }); + expectDefaultSandboxPreserved(runCfg); expect(resolvedSandbox.mode).toBe("all"); expect(resolvedSandbox.workspaceAccess).toBe("rw"); expect(resolvedSandbox.docker).toMatchObject({ diff --git a/src/cron/isolated-agent/run.test-harness.ts b/src/cron/isolated-agent/run.test-harness.ts index 6a1fa1c3dff..81e4c8b902b 100644 --- a/src/cron/isolated-agent/run.test-harness.ts +++ b/src/cron/isolated-agent/run.test-harness.ts @@ -46,31 +46,51 @@ export const pickLastNonEmptyTextFromPayloadsMock = createMock(); export const resolveCronDeliveryPlanMock = createMock(); export const resolveDeliveryTargetMock = createMock(); -vi.mock("../../agents/agent-scope.js", () => ({ - resolveAgentConfig: resolveAgentConfigMock, - resolveAgentDir: vi.fn().mockReturnValue("/tmp/agent-dir"), - resolveAgentModelFallbacksOverride: resolveAgentModelFallbacksOverrideMock, - resolveAgentWorkspaceDir: vi.fn().mockReturnValue("/tmp/workspace"), - resolveDefaultAgentId: vi.fn().mockReturnValue("default"), - resolveAgentSkillsFilter: resolveAgentSkillsFilterMock, -})); 
+vi.mock("../../agents/agent-scope.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveAgentConfig: resolveAgentConfigMock, + resolveAgentDir: vi.fn().mockReturnValue("/tmp/agent-dir"), + resolveAgentModelFallbacksOverride: resolveAgentModelFallbacksOverrideMock, + resolveAgentWorkspaceDir: vi.fn().mockReturnValue("/tmp/workspace"), + resolveDefaultAgentId: vi.fn().mockReturnValue("default"), + resolveAgentSkillsFilter: resolveAgentSkillsFilterMock, + }; +}); -vi.mock("../../agents/skills.js", () => ({ - buildWorkspaceSkillSnapshot: buildWorkspaceSkillSnapshotMock, -})); +vi.mock("../../agents/skills.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + buildWorkspaceSkillSnapshot: buildWorkspaceSkillSnapshotMock, + }; +}); -vi.mock("../../agents/skills/refresh.js", () => ({ - getSkillsSnapshotVersion: vi.fn().mockReturnValue(42), -})); +vi.mock("../../agents/skills/refresh.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + getSkillsSnapshotVersion: vi.fn().mockReturnValue(42), + }; +}); -vi.mock("../../agents/workspace.js", () => ({ - DEFAULT_IDENTITY_FILENAME: "IDENTITY.md", - ensureAgentWorkspace: vi.fn().mockResolvedValue({ dir: "/tmp/workspace" }), -})); +vi.mock("../../agents/workspace.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + DEFAULT_IDENTITY_FILENAME: "IDENTITY.md", + ensureAgentWorkspace: vi.fn().mockResolvedValue({ dir: "/tmp/workspace" }), + }; +}); -vi.mock("../../agents/model-catalog.js", () => ({ - loadModelCatalog: vi.fn().mockResolvedValue({ models: [] }), -})); +vi.mock("../../agents/model-catalog.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + loadModelCatalog: vi.fn().mockResolvedValue({ models: [] }), + }; +}); vi.mock("../../agents/model-selection.js", async (importOriginal) => { const 
actual = await importOriginal(); @@ -85,67 +105,119 @@ vi.mock("../../agents/model-selection.js", async (importOriginal) => { }; }); -vi.mock("../../agents/model-fallback.js", () => ({ - runWithModelFallback: runWithModelFallbackMock, -})); +vi.mock("../../agents/model-fallback.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + runWithModelFallback: runWithModelFallbackMock, + }; +}); -vi.mock("../../agents/pi-embedded.js", () => ({ - runEmbeddedPiAgent: runEmbeddedPiAgentMock, -})); +vi.mock("../../agents/pi-embedded.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + runEmbeddedPiAgent: runEmbeddedPiAgentMock, + }; +}); -vi.mock("../../agents/context.js", () => ({ - lookupContextTokens: vi.fn().mockReturnValue(128000), -})); +vi.mock("../../agents/context.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + lookupContextTokens: vi.fn().mockReturnValue(128000), + }; +}); -vi.mock("../../agents/date-time.js", () => ({ - formatUserTime: vi.fn().mockReturnValue("2026-02-10 12:00"), - resolveUserTimeFormat: vi.fn().mockReturnValue("24h"), - resolveUserTimezone: vi.fn().mockReturnValue("UTC"), -})); +vi.mock("../../agents/date-time.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + formatUserTime: vi.fn().mockReturnValue("2026-02-10 12:00"), + resolveUserTimeFormat: vi.fn().mockReturnValue("24h"), + resolveUserTimezone: vi.fn().mockReturnValue("UTC"), + }; +}); -vi.mock("../../agents/timeout.js", () => ({ - resolveAgentTimeoutMs: vi.fn().mockReturnValue(60_000), -})); +vi.mock("../../agents/timeout.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveAgentTimeoutMs: vi.fn().mockReturnValue(60_000), + }; +}); -vi.mock("../../agents/usage.js", () => ({ - deriveSessionTotalTokens: vi.fn().mockReturnValue(30), - hasNonzeroUsage: 
vi.fn().mockReturnValue(false), -})); +vi.mock("../../agents/usage.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + deriveSessionTotalTokens: vi.fn().mockReturnValue(30), + hasNonzeroUsage: vi.fn().mockReturnValue(false), + }; +}); -vi.mock("../../agents/subagent-announce.js", () => ({ - runSubagentAnnounceFlow: vi.fn().mockResolvedValue(true), -})); +vi.mock("../../agents/subagent-announce.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + runSubagentAnnounceFlow: vi.fn().mockResolvedValue(true), + }; +}); -vi.mock("../../agents/subagent-registry.js", () => ({ - countActiveDescendantRuns: countActiveDescendantRunsMock, - listDescendantRunsForRequester: listDescendantRunsForRequesterMock, -})); +vi.mock("../../agents/subagent-registry.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + countActiveDescendantRuns: countActiveDescendantRunsMock, + listDescendantRunsForRequester: listDescendantRunsForRequesterMock, + }; +}); -vi.mock("../../agents/cli-runner.js", () => ({ - runCliAgent: runCliAgentMock, -})); +vi.mock("../../agents/cli-runner.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + runCliAgent: runCliAgentMock, + }; +}); -vi.mock("../../agents/cli-session.js", () => ({ - getCliSessionId: getCliSessionIdMock, - setCliSessionId: vi.fn(), -})); +vi.mock("../../agents/cli-session.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + getCliSessionId: getCliSessionIdMock, + setCliSessionId: vi.fn(), + }; +}); -vi.mock("../../auto-reply/thinking.js", () => ({ - normalizeThinkLevel: vi.fn().mockReturnValue(undefined), - normalizeVerboseLevel: vi.fn().mockReturnValue("off"), - supportsXHighThinking: vi.fn().mockReturnValue(false), -})); +vi.mock("../../auto-reply/thinking.js", async (importOriginal) => { + const actual = await 
importOriginal(); + return { + ...actual, + normalizeThinkLevel: vi.fn().mockReturnValue(undefined), + normalizeVerboseLevel: vi.fn().mockReturnValue("off"), + supportsXHighThinking: vi.fn().mockReturnValue(false), + }; +}); -vi.mock("../../cli/outbound-send-deps.js", () => ({ - createOutboundSendDeps: vi.fn().mockReturnValue({}), -})); +vi.mock("../../cli/outbound-send-deps.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + createOutboundSendDeps: vi.fn().mockReturnValue({}), + }; +}); -vi.mock("../../config/sessions.js", () => ({ - resolveAgentMainSessionKey: vi.fn().mockReturnValue("main:default"), - resolveSessionTranscriptPath: vi.fn().mockReturnValue("/tmp/transcript.jsonl"), - setSessionRuntimeModel: vi.fn(), - updateSessionStore: updateSessionStoreMock, -})); +vi.mock("../../config/sessions.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + resolveAgentMainSessionKey: vi.fn().mockReturnValue("main:default"), + resolveSessionTranscriptPath: vi.fn().mockReturnValue("/tmp/transcript.jsonl"), + setSessionRuntimeModel: vi.fn(), + updateSessionStore: updateSessionStoreMock, + }; +}); vi.mock("../../routing/session-key.js", async (importOriginal) => { const actual = await importOriginal(); @@ -156,28 +228,48 @@ vi.mock("../../routing/session-key.js", async (importOriginal) => { }; }); -vi.mock("../../infra/agent-events.js", () => ({ - registerAgentRunContext: vi.fn(), -})); +vi.mock("../../infra/agent-events.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + registerAgentRunContext: vi.fn(), + }; +}); -vi.mock("../../infra/outbound/deliver.js", () => ({ - deliverOutboundPayloads: vi.fn().mockResolvedValue(undefined), -})); +vi.mock("../../infra/outbound/deliver.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + deliverOutboundPayloads: 
vi.fn().mockResolvedValue(undefined), + }; +}); -vi.mock("../../infra/skills-remote.js", () => ({ - getRemoteSkillEligibility: vi.fn().mockReturnValue({}), -})); +vi.mock("../../infra/skills-remote.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + getRemoteSkillEligibility: vi.fn().mockReturnValue({}), + }; +}); -vi.mock("../../logger.js", () => ({ - logWarn: (...args: unknown[]) => logWarnMock(...args), -})); +vi.mock("../../logger.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + logWarn: (...args: unknown[]) => logWarnMock(...args), + }; +}); -vi.mock("../../security/external-content.js", () => ({ - buildSafeExternalPrompt: vi.fn().mockReturnValue("safe prompt"), - detectSuspiciousPatterns: vi.fn().mockReturnValue([]), - getHookType: vi.fn().mockReturnValue("unknown"), - isExternalHookSession: vi.fn().mockReturnValue(false), -})); +vi.mock("../../security/external-content.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + buildSafeExternalPrompt: vi.fn().mockReturnValue("safe prompt"), + detectSuspiciousPatterns: vi.fn().mockReturnValue([]), + getHookType: vi.fn().mockReturnValue("unknown"), + isExternalHookSession: vi.fn().mockReturnValue(false), + }; +}); vi.mock("../delivery.js", () => ({ resolveCronDeliveryPlan: resolveCronDeliveryPlanMock, @@ -200,11 +292,15 @@ vi.mock("./session.js", () => ({ resolveCronSession: resolveCronSessionMock, })); -vi.mock("../../agents/defaults.js", () => ({ - DEFAULT_CONTEXT_TOKENS: 128000, - DEFAULT_MODEL: "gpt-4", - DEFAULT_PROVIDER: "openai", -})); +vi.mock("../../agents/defaults.js", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + DEFAULT_CONTEXT_TOKENS: 128000, + DEFAULT_MODEL: "gpt-4", + DEFAULT_PROVIDER: "openai", + }; +}); export function makeCronSessionEntry(overrides?: Record): CronSessionEntry { return { @@ -245,6 +341,13 @@ 
function makeDefaultEmbeddedResult() { }; } +export function mockRunCronFallbackPassthrough(): void { + runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => { + const result = await run(provider, model); + return { result, provider, model, attempts: [] }; + }); +} + export function resetRunCronIsolatedAgentTurnHarness(): void { vi.clearAllMocks(); diff --git a/src/cron/isolated-agent/run.ts b/src/cron/isolated-agent/run.ts index 813b99c0553..8a074338da7 100644 --- a/src/cron/isolated-agent/run.ts +++ b/src/cron/isolated-agent/run.ts @@ -12,6 +12,8 @@ import { getCliSessionId, setCliSessionId } from "../../agents/cli-session.js"; import { lookupContextTokens } from "../../agents/context.js"; import { resolveCronStyleNow } from "../../agents/current-time.js"; import { DEFAULT_CONTEXT_TOKENS, DEFAULT_MODEL, DEFAULT_PROVIDER } from "../../agents/defaults.js"; +import { resolveFastModeState } from "../../agents/fast-mode.js"; +import { resolveNestedAgentLane } from "../../agents/lanes.js"; import { loadModelCatalog } from "../../agents/model-catalog.js"; import { runWithModelFallback } from "../../agents/model-fallback.js"; import { @@ -78,11 +80,10 @@ export type RunCronAgentTurnResult = { /** Last non-empty agent text output (not truncated). */ outputText?: string; /** - * `true` when the isolated run already delivered its output to the target - * channel (via outbound payloads, the subagent announce flow, or a matching - * messaging-tool send). Callers should skip posting a summary to the main - * session to avoid duplicate - * messages. See: https://github.com/openclaw/openclaw/issues/15692 + * `true` when the isolated runner already handled the run's user-visible + * delivery outcome. Cron-owned callers use this for cron delivery or + * explicit suppression; shared callers may also use it for a matching + * message-tool send that already reached the target. 
*/ delivered?: boolean; /** @@ -144,16 +145,22 @@ function buildCronAgentDefaultsConfig(params: { type ResolvedCronDeliveryTarget = Awaited>; +type IsolatedDeliveryContract = "cron-owned" | "shared"; + function resolveCronToolPolicy(params: { deliveryRequested: boolean; resolvedDelivery: ResolvedCronDeliveryTarget; + deliveryContract: IsolatedDeliveryContract; }) { return { // Only enforce an explicit message target when the cron delivery target // was successfully resolved. When resolution fails the agent should not // be blocked by a target it cannot satisfy (#27898). requireExplicitMessageTarget: params.deliveryRequested && params.resolvedDelivery.ok, - disableMessageTool: params.deliveryRequested, + // Cron-owned runs always route user-facing delivery through the runner + // itself. Shared callers keep the previous behavior so non-cron paths do + // not silently lose the message tool when no explicit delivery is active. + disableMessageTool: params.deliveryContract === "cron-owned" ? true : params.deliveryRequested, }; } @@ -161,6 +168,7 @@ async function resolveCronDeliveryContext(params: { cfg: OpenClawConfig; job: CronJob; agentId: string; + deliveryContract: IsolatedDeliveryContract; }) { const deliveryPlan = resolveCronDeliveryPlan(params.job); const resolvedDelivery = await resolveDeliveryTarget(params.cfg, params.agentId, { @@ -176,6 +184,7 @@ async function resolveCronDeliveryContext(params: { toolPolicy: resolveCronToolPolicy({ deliveryRequested: deliveryPlan.requested, resolvedDelivery, + deliveryContract: params.deliveryContract, }), }; } @@ -200,6 +209,7 @@ export async function runCronIsolatedAgentTurn(params: { sessionKey: string; agentId?: string; lane?: string; + deliveryContract?: IsolatedDeliveryContract; }): Promise { const abortSignal = params.abortSignal ?? 
params.signal; const isAborted = () => abortSignal?.aborted === true; @@ -210,6 +220,7 @@ export async function runCronIsolatedAgentTurn(params: { : "cron: job execution timed out"; }; const isFastTestEnv = process.env.OPENCLAW_TEST_FAST === "1"; + const deliveryContract = params.deliveryContract ?? "cron-owned"; const defaultAgentId = resolveDefaultAgentId(params.cfg); const requestedAgentId = typeof params.agentId === "string" && params.agentId.trim() @@ -425,6 +436,7 @@ export async function runCronIsolatedAgentTurn(params: { cfg: cfgWithAgentDefaults, job: params.job, agentId, + deliveryContract, }); const { formattedTime, timeLine } = resolveCronStyleNow(params.cfg, now); @@ -543,6 +555,7 @@ export async function runCronIsolatedAgentTurn(params: { cfg: cfgWithAgentDefaults, provider, model, + runId: cronSession.sessionEntry.sessionId, agentDir, fallbacksOverride: payloadFallbacks ?? resolveAgentModelFallbacksOverride(params.cfg, agentId), @@ -599,12 +612,18 @@ export async function runCronIsolatedAgentTurn(params: { config: cfgWithAgentDefaults, skillsSnapshot, prompt: promptText, - lane: params.lane ?? "cron", + lane: resolveNestedAgentLane(params.lane), provider: providerOverride, model: modelOverride, authProfileId, authProfileIdSource, thinkLevel, + fastMode: resolveFastModeState({ + cfg: cfgWithAgentDefaults, + provider: providerOverride, + model: modelOverride, + sessionEntry: cronSession.sessionEntry, + }).enabled, verboseLevel: resolvedVerboseLevel, timeoutMs, bootstrapContextMode: agentPayload?.lightContext ? "lightweight" : undefined, @@ -807,6 +826,7 @@ export async function runCronIsolatedAgentTurn(params: { const ackMaxChars = resolveHeartbeatAckMaxChars(agentCfg); const skipHeartbeatDelivery = deliveryRequested && isHeartbeatOnlyResponse(payloads, ackMaxChars); const skipMessagingToolDelivery = + deliveryContract === "shared" && deliveryRequested && finalRunResult.didSendViaMessagingTool === true && (finalRunResult.messagingToolSentTargets ?? 
[]).some((target) => @@ -816,7 +836,6 @@ export async function runCronIsolatedAgentTurn(params: { accountId: resolvedDelivery.accountId, }), ); - const deliveryResult = await dispatchCronDelivery({ cfg: params.cfg, cfgWithAgentDefaults, diff --git a/src/cron/isolated-agent/subagent-followup.test.ts b/src/cron/isolated-agent/subagent-followup.test.ts index 093da010026..7861c75ff35 100644 --- a/src/cron/isolated-agent/subagent-followup.test.ts +++ b/src/cron/isolated-agent/subagent-followup.test.ts @@ -33,6 +33,29 @@ async function resolveAfterAdvancingTimers(promise: Promise, advanceMs = 1 return promise; } +function createDescendantRun(params?: { + runId?: string; + childSessionKey?: string; + task?: string; + cleanup?: "keep" | "delete"; + endedAt?: number; + frozenResultText?: string | null; +}) { + return { + runId: params?.runId ?? "run-1", + childSessionKey: params?.childSessionKey ?? "child-1", + requesterSessionKey: "test-session", + requesterDisplayKey: "test-session", + task: params?.task ?? "task-1", + cleanup: params?.cleanup ?? "keep", + createdAt: 1000, + endedAt: params?.endedAt ?? 2000, + ...(params?.frozenResultText === undefined + ? 
{} + : { frozenResultText: params.frozenResultText }), + }; +} + describe("isLikelyInterimCronMessage", () => { it("detects 'on it' as interim", () => { expect(isLikelyInterimCronMessage("on it")).toBe(true); @@ -47,8 +70,12 @@ describe("isLikelyInterimCronMessage", () => { false, ); }); - it("treats empty as interim", () => { - expect(isLikelyInterimCronMessage("")).toBe(true); + it("does not treat empty as interim (empty = NO_REPLY was stripped)", () => { + expect(isLikelyInterimCronMessage("")).toBe(false); + }); + + it("does not treat whitespace-only as interim", () => { + expect(isLikelyInterimCronMessage(" ")).toBe(false); }); }); @@ -81,18 +108,7 @@ describe("readDescendantSubagentFallbackReply", () => { }); it("reads reply from child session transcript", async () => { - vi.mocked(listDescendantRunsForRequester).mockReturnValue([ - { - runId: "run-1", - childSessionKey: "child-1", - requesterSessionKey: "test-session", - requesterDisplayKey: "test-session", - task: "task-1", - cleanup: "keep", - createdAt: 1000, - endedAt: 2000, - }, - ]); + vi.mocked(listDescendantRunsForRequester).mockReturnValue([createDescendantRun()]); vi.mocked(readLatestAssistantReply).mockResolvedValue("child output text"); const result = await readDescendantSubagentFallbackReply({ sessionKey: "test-session", @@ -103,17 +119,10 @@ describe("readDescendantSubagentFallbackReply", () => { it("falls back to frozenResultText when session transcript unavailable", async () => { vi.mocked(listDescendantRunsForRequester).mockReturnValue([ - { - runId: "run-1", - childSessionKey: "child-1", - requesterSessionKey: "test-session", - requesterDisplayKey: "test-session", - task: "task-1", + createDescendantRun({ cleanup: "delete", - createdAt: 1000, - endedAt: 2000, frozenResultText: "frozen child output", - }, + }), ]); vi.mocked(readLatestAssistantReply).mockResolvedValue(undefined); const result = await readDescendantSubagentFallbackReply({ @@ -125,17 +134,7 @@ 
describe("readDescendantSubagentFallbackReply", () => { it("prefers session transcript over frozenResultText", async () => { vi.mocked(listDescendantRunsForRequester).mockReturnValue([ - { - runId: "run-1", - childSessionKey: "child-1", - requesterSessionKey: "test-session", - requesterDisplayKey: "test-session", - task: "task-1", - cleanup: "keep", - createdAt: 1000, - endedAt: 2000, - frozenResultText: "frozen text", - }, + createDescendantRun({ frozenResultText: "frozen text" }), ]); vi.mocked(readLatestAssistantReply).mockResolvedValue("live transcript text"); const result = await readDescendantSubagentFallbackReply({ @@ -147,28 +146,14 @@ describe("readDescendantSubagentFallbackReply", () => { it("joins replies from multiple descendants", async () => { vi.mocked(listDescendantRunsForRequester).mockReturnValue([ - { - runId: "run-1", - childSessionKey: "child-1", - requesterSessionKey: "test-session", - requesterDisplayKey: "test-session", - task: "task-1", - cleanup: "keep", - createdAt: 1000, - endedAt: 2000, - frozenResultText: "first child output", - }, - { + createDescendantRun({ frozenResultText: "first child output" }), + createDescendantRun({ runId: "run-2", childSessionKey: "child-2", - requesterSessionKey: "test-session", - requesterDisplayKey: "test-session", task: "task-2", - cleanup: "keep", - createdAt: 1000, endedAt: 3000, frozenResultText: "second child output", - }, + }), ]); vi.mocked(readLatestAssistantReply).mockResolvedValue(undefined); const result = await readDescendantSubagentFallbackReply({ @@ -180,27 +165,14 @@ describe("readDescendantSubagentFallbackReply", () => { it("skips SILENT_REPLY_TOKEN descendants", async () => { vi.mocked(listDescendantRunsForRequester).mockReturnValue([ - { - runId: "run-1", - childSessionKey: "child-1", - requesterSessionKey: "test-session", - requesterDisplayKey: "test-session", - task: "task-1", - cleanup: "keep", - createdAt: 1000, - endedAt: 2000, - }, - { + createDescendantRun(), + 
createDescendantRun({ runId: "run-2", childSessionKey: "child-2", - requesterSessionKey: "test-session", - requesterDisplayKey: "test-session", task: "task-2", - cleanup: "keep", - createdAt: 1000, endedAt: 3000, frozenResultText: "useful output", - }, + }), ]); vi.mocked(readLatestAssistantReply).mockImplementation(async (params) => { if (params.sessionKey === "child-1") { @@ -217,17 +189,10 @@ describe("readDescendantSubagentFallbackReply", () => { it("returns undefined when frozenResultText is null", async () => { vi.mocked(listDescendantRunsForRequester).mockReturnValue([ - { - runId: "run-1", - childSessionKey: "child-1", - requesterSessionKey: "test-session", - requesterDisplayKey: "test-session", - task: "task-1", + createDescendantRun({ cleanup: "delete", - createdAt: 1000, - endedAt: 2000, frozenResultText: null, - }, + }), ]); vi.mocked(readLatestAssistantReply).mockResolvedValue(undefined); const result = await readDescendantSubagentFallbackReply({ diff --git a/src/cron/isolated-agent/subagent-followup.ts b/src/cron/isolated-agent/subagent-followup.ts index 6d5f9d4c502..a337fe528b7 100644 --- a/src/cron/isolated-agent/subagent-followup.ts +++ b/src/cron/isolated-agent/subagent-followup.ts @@ -42,7 +42,10 @@ function normalizeHintText(value: string): string { export function isLikelyInterimCronMessage(value: string): boolean { const normalized = normalizeHintText(value); if (!normalized) { - return true; + // Empty text after payload filtering means the agent either returned + // NO_REPLY (deliberately silent) or produced no deliverable content. + // Do not treat this as an interim acknowledgement that needs a rerun. + return false; } const words = normalized.split(" ").filter(Boolean).length; return words <= 45 && INTERIM_CRON_HINTS.some((hint) => normalized.includes(hint)); @@ -166,7 +169,7 @@ export async function waitForDescendantSubagentSummary(params: { // CRON_SUBAGENT_FINAL_REPLY_GRACE_MS) to capture that synthesis. 
const gracePeriodDeadline = Math.min(Date.now() + CRON_SUBAGENT_FINAL_REPLY_GRACE_MS, deadline); - while (Date.now() < gracePeriodDeadline) { + const resolveUsableLatestReply = async () => { const latest = (await readLatestAssistantReply({ sessionKey: params.sessionKey }))?.trim(); if ( latest && @@ -175,16 +178,20 @@ export async function waitForDescendantSubagentSummary(params: { ) { return latest; } + return undefined; + }; + + while (Date.now() < gracePeriodDeadline) { + const latest = await resolveUsableLatestReply(); + if (latest) { + return latest; + } await new Promise((resolve) => setTimeout(resolve, CRON_SUBAGENT_GRACE_POLL_MS)); } // Final read after grace period expires. - const latest = (await readLatestAssistantReply({ sessionKey: params.sessionKey }))?.trim(); - if ( - latest && - latest.toUpperCase() !== SILENT_REPLY_TOKEN.toUpperCase() && - (latest !== initialReply || !isLikelyInterimCronMessage(latest)) - ) { + const latest = await resolveUsableLatestReply(); + if (latest) { return latest; } diff --git a/src/cron/normalize.test.ts b/src/cron/normalize.test.ts index 6f34c85ebed..969faa6bb6f 100644 --- a/src/cron/normalize.test.ts +++ b/src/cron/normalize.test.ts @@ -414,6 +414,42 @@ describe("normalizeCronJobCreate", () => { expect(delivery.mode).toBeUndefined(); expect(delivery.to).toBe("123"); }); + + it("resolves current sessionTarget to a persistent session when context is available", () => { + const normalized = normalizeCronJobCreate( + { + name: "current-session", + schedule: { kind: "cron", expr: "* * * * *" }, + sessionTarget: "current", + payload: { kind: "agentTurn", message: "hello" }, + }, + { sessionContext: { sessionKey: "agent:main:discord:group:ops" } }, + ) as unknown as Record; + + expect(normalized.sessionTarget).toBe("session:agent:main:discord:group:ops"); + }); + + it("falls back current sessionTarget to isolated without context", () => { + const normalized = normalizeCronJobCreate({ + name: "current-without-context", + 
schedule: { kind: "cron", expr: "* * * * *" }, + sessionTarget: "current", + payload: { kind: "agentTurn", message: "hello" }, + }) as unknown as Record; + + expect(normalized.sessionTarget).toBe("isolated"); + }); + + it("preserves custom session ids with a session: prefix", () => { + const normalized = normalizeCronJobCreate({ + name: "custom-session", + schedule: { kind: "cron", expr: "* * * * *" }, + sessionTarget: "session:MySessionID", + payload: { kind: "agentTurn", message: "hello" }, + }) as unknown as Record; + + expect(normalized.sessionTarget).toBe("session:MySessionID"); + }); }); describe("normalizeCronJobPatch", () => { diff --git a/src/cron/normalize.ts b/src/cron/normalize.ts index 5a6c66ff356..b1afdfaaa12 100644 --- a/src/cron/normalize.ts +++ b/src/cron/normalize.ts @@ -11,6 +11,8 @@ type UnknownRecord = Record; type NormalizeOptions = { applyDefaults?: boolean; + /** Session context for resolving "current" sessionTarget or auto-binding when not specified */ + sessionContext?: { sessionKey?: string }; }; const DEFAULT_OPTIONS: NormalizeOptions = { @@ -218,9 +220,17 @@ function normalizeSessionTarget(raw: unknown) { if (typeof raw !== "string") { return undefined; } - const trimmed = raw.trim().toLowerCase(); - if (trimmed === "main" || trimmed === "isolated") { - return trimmed; + const trimmed = raw.trim(); + const lower = trimmed.toLowerCase(); + if (lower === "main" || lower === "isolated" || lower === "current") { + return lower; + } + // Support custom session IDs with "session:" prefix + if (lower.startsWith("session:")) { + const sessionId = trimmed.slice(8).trim(); + if (sessionId) { + return `session:${sessionId}`; + } } return undefined; } @@ -431,10 +441,37 @@ export function normalizeCronJobInput( } if (!next.sessionTarget && isRecord(next.payload)) { const kind = typeof next.payload.kind === "string" ? 
next.payload.kind : ""; + // Keep default behavior unchanged for backward compatibility: + // - systemEvent defaults to "main" + // - agentTurn defaults to "isolated" (NOT "current", to avoid token accumulation) + // Users must explicitly specify "current" or "session:xxx" for custom session binding if (kind === "systemEvent") { next.sessionTarget = "main"; + } else if (kind === "agentTurn") { + next.sessionTarget = "isolated"; } - if (kind === "agentTurn") { + } + + // Resolve "current" sessionTarget to the actual sessionKey from context + if (next.sessionTarget === "current") { + if (options.sessionContext?.sessionKey) { + const sessionKey = options.sessionContext.sessionKey.trim(); + if (sessionKey) { + // Store as session:customId format for persistence + next.sessionTarget = `session:${sessionKey}`; + } + } + // If "current" wasn't resolved, fall back to "isolated" behavior + // This handles CLI/headless usage where no session context exists + if (next.sessionTarget === "current") { + next.sessionTarget = "isolated"; + } + } + if (next.sessionTarget === "current") { + const sessionKey = options.sessionContext?.sessionKey?.trim(); + if (sessionKey) { + next.sessionTarget = `session:${sessionKey}`; + } else { next.sessionTarget = "isolated"; } } @@ -462,8 +499,12 @@ export function normalizeCronJobInput( const payload = isRecord(next.payload) ? next.payload : null; const payloadKind = payload && typeof payload.kind === "string" ? payload.kind : ""; const sessionTarget = typeof next.sessionTarget === "string" ? 
next.sessionTarget : ""; + // Support "isolated", custom session IDs (session:xxx), and resolved "current" as isolated-like targets const isIsolatedAgentTurn = - sessionTarget === "isolated" || (sessionTarget === "" && payloadKind === "agentTurn"); + sessionTarget === "isolated" || + sessionTarget === "current" || + sessionTarget.startsWith("session:") || + (sessionTarget === "" && payloadKind === "agentTurn"); const hasDelivery = "delivery" in next && next.delivery !== undefined; const normalizedLegacy = normalizeLegacyDeliveryInput({ delivery: isRecord(next.delivery) ? next.delivery : null, @@ -487,7 +528,7 @@ export function normalizeCronJobInput( export function normalizeCronJobCreate( raw: unknown, - options?: NormalizeOptions, + options?: Omit, ): CronJobCreate | null { return normalizeCronJobInput(raw, { applyDefaults: true, diff --git a/src/cron/service.delivery-plan.test.ts b/src/cron/service.delivery-plan.test.ts index 46c240e6c0f..5168d8bebc9 100644 --- a/src/cron/service.delivery-plan.test.ts +++ b/src/cron/service.delivery-plan.test.ts @@ -86,7 +86,7 @@ describe("CronService delivery plan consistency", () => { }); }); - it("treats delivery object without mode as announce", async () => { + it("treats delivery object without mode as announce without reviving legacy relay fallback", async () => { await withCronService({}, async ({ cron, enqueueSystemEvent }) => { const job = await addIsolatedAgentTurnJob(cron, { name: "partial-delivery", @@ -96,10 +96,8 @@ describe("CronService delivery plan consistency", () => { const result = await cron.run(job.id, "force"); expect(result).toEqual({ ok: true, ran: true }); - expect(enqueueSystemEvent).toHaveBeenCalledWith( - "Cron: done", - expect.objectContaining({ agentId: undefined }), - ); + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(cron.getJob(job.id)?.state.lastDeliveryStatus).toBe("unknown"); }); }); diff --git a/src/cron/service.heartbeat-ok-summary-suppressed.test.ts 
b/src/cron/service.heartbeat-ok-summary-suppressed.test.ts index 3ae9fc7c758..d2a620e1439 100644 --- a/src/cron/service.heartbeat-ok-summary-suppressed.test.ts +++ b/src/cron/service.heartbeat-ok-summary-suppressed.test.ts @@ -86,7 +86,7 @@ describe("cron isolated job HEARTBEAT_OK summary suppression (#32013)", () => { expect(requestHeartbeatNow).not.toHaveBeenCalled(); }); - it("still enqueues real cron summaries as system events", async () => { + it("does not revive legacy main-session relay for real cron summaries", async () => { const { storePath } = await makeStorePath(); const now = Date.now(); @@ -109,10 +109,7 @@ describe("cron isolated job HEARTBEAT_OK summary suppression (#32013)", () => { await runScheduledCron(cron); - // Real summaries SHOULD be enqueued. - expect(enqueueSystemEvent).toHaveBeenCalledWith( - expect.stringContaining("Weather update"), - expect.objectContaining({ agentId: undefined }), - ); + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(requestHeartbeatNow).not.toHaveBeenCalled(); }); }); diff --git a/src/cron/service.jobs.test.ts b/src/cron/service.jobs.test.ts index 053ea8764de..c514f7528ba 100644 --- a/src/cron/service.jobs.test.ts +++ b/src/cron/service.jobs.test.ts @@ -103,6 +103,29 @@ describe("applyJobPatch", () => { }); }); + it("maps legacy payload delivery updates for custom session targets", () => { + const job = createIsolatedAgentTurnJob( + "job-custom-session", + { + mode: "announce", + channel: "telegram", + to: "123", + }, + { sessionTarget: "session:project-alpha" }, + ); + + applyJobPatch(job, { + payload: { kind: "agentTurn", to: "555" }, + }); + + expect(job.delivery).toEqual({ + mode: "announce", + channel: "telegram", + to: "555", + bestEffort: undefined, + }); + }); + it("treats legacy payload targets as announce requests", () => { const job = createIsolatedAgentTurnJob("job-3", { mode: "none", diff --git a/src/cron/service.restart-catchup.test.ts b/src/cron/service.restart-catchup.test.ts index 
f0c9c3e4dc9..70da886b9a0 100644 --- a/src/cron/service.restart-catchup.test.ts +++ b/src/cron/service.restart-catchup.test.ts @@ -47,326 +47,274 @@ describe("CronService restart catch-up", () => { }; } - it("executes an overdue recurring job immediately on start", async () => { + async function withRestartedCron( + jobs: unknown[], + run: (params: { + cron: CronService; + enqueueSystemEvent: ReturnType; + requestHeartbeatNow: ReturnType; + }) => Promise, + ) { const store = await makeStorePath(); const enqueueSystemEvent = vi.fn(); const requestHeartbeatNow = vi.fn(); + await writeStoreJobs(store.storePath, jobs); + + const cron = createRestartCronService({ + storePath: store.storePath, + enqueueSystemEvent, + requestHeartbeatNow, + }); + + try { + await cron.start(); + await run({ cron, enqueueSystemEvent, requestHeartbeatNow }); + } finally { + cron.stop(); + await store.cleanup(); + } + } + + it("executes an overdue recurring job immediately on start", async () => { const dueAt = Date.parse("2025-12-13T15:00:00.000Z"); const lastRunAt = Date.parse("2025-12-12T15:00:00.000Z"); - await writeStoreJobs(store.storePath, [ - { - id: "restart-overdue-job", - name: "daily digest", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-12T15:00:00.000Z"), - schedule: { kind: "cron", expr: "0 15 * * *", tz: "UTC" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "digest now" }, - state: { - nextRunAtMs: dueAt, - lastRunAtMs: lastRunAt, - lastStatus: "ok", + await withRestartedCron( + [ + { + id: "restart-overdue-job", + name: "daily digest", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-12T15:00:00.000Z"), + schedule: { kind: "cron", expr: "0 15 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "digest now" }, + state: { + nextRunAtMs: dueAt, + 
lastRunAtMs: lastRunAt, + lastStatus: "ok", + }, }, + ], + async ({ cron, enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).toHaveBeenCalledWith( + "digest now", + expect.objectContaining({ agentId: undefined }), + ); + expect(requestHeartbeatNow).toHaveBeenCalled(); + + const listedJobs = await cron.list({ includeDisabled: true }); + const updated = listedJobs.find((job) => job.id === "restart-overdue-job"); + expect(updated?.state.lastStatus).toBe("ok"); + expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T17:00:00.000Z")); + expect(updated?.state.nextRunAtMs).toBeGreaterThan(Date.parse("2025-12-13T17:00:00.000Z")); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).toHaveBeenCalledWith( - "digest now", - expect.objectContaining({ agentId: undefined }), ); - expect(requestHeartbeatNow).toHaveBeenCalled(); - - const jobs = await cron.list({ includeDisabled: true }); - const updated = jobs.find((job) => job.id === "restart-overdue-job"); - expect(updated?.state.lastStatus).toBe("ok"); - expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T17:00:00.000Z")); - expect(updated?.state.nextRunAtMs).toBeGreaterThan(Date.parse("2025-12-13T17:00:00.000Z")); - - cron.stop(); - await store.cleanup(); }); it("clears stale running markers without replaying interrupted startup jobs", async () => { - const store = await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - const dueAt = Date.parse("2025-12-13T16:00:00.000Z"); const staleRunningAt = Date.parse("2025-12-13T16:30:00.000Z"); - await writeStoreJobs(store.storePath, [ - { - id: "restart-stale-running", - name: "daily stale marker", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"), - schedule: { kind: "cron", expr: "0 
16 * * *", tz: "UTC" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "resume stale marker" }, - state: { - nextRunAtMs: dueAt, - runningAtMs: staleRunningAt, + await withRestartedCron( + [ + { + id: "restart-stale-running", + name: "daily stale marker", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"), + schedule: { kind: "cron", expr: "0 16 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "resume stale marker" }, + state: { + nextRunAtMs: dueAt, + runningAtMs: staleRunningAt, + }, }, + ], + async ({ cron, enqueueSystemEvent }) => { + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(noopLogger.warn).toHaveBeenCalledWith( + expect.objectContaining({ jobId: "restart-stale-running" }), + "cron: clearing stale running marker on startup", + ); + + const listedJobs = await cron.list({ includeDisabled: true }); + const updated = listedJobs.find((job) => job.id === "restart-stale-running"); + expect(updated?.state.runningAtMs).toBeUndefined(); + expect(updated?.state.lastStatus).toBeUndefined(); + expect(updated?.state.lastRunAtMs).toBeUndefined(); + expect((updated?.state.nextRunAtMs ?? 
0) > Date.parse("2025-12-13T17:00:00.000Z")).toBe( + true, + ); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).not.toHaveBeenCalled(); - expect(noopLogger.warn).toHaveBeenCalledWith( - expect.objectContaining({ jobId: "restart-stale-running" }), - "cron: clearing stale running marker on startup", ); - - const jobs = await cron.list({ includeDisabled: true }); - const updated = jobs.find((job) => job.id === "restart-stale-running"); - expect(updated?.state.runningAtMs).toBeUndefined(); - expect(updated?.state.lastStatus).toBeUndefined(); - expect(updated?.state.lastRunAtMs).toBeUndefined(); - expect((updated?.state.nextRunAtMs ?? 0) > Date.parse("2025-12-13T17:00:00.000Z")).toBe(true); - - cron.stop(); - await store.cleanup(); }); it("replays the most recent missed cron slot after restart when nextRunAtMs already advanced", async () => { vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z")); - const store = await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - - await writeStoreJobs(store.storePath, [ - { - id: "restart-missed-slot", - name: "every ten minutes +1", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"), - schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "catch missed slot" }, - state: { - // Persisted state may already be recomputed from restart time and - // point to the future slot, even though 04:01 was missed. 
- nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), - lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"), - lastStatus: "ok", + await withRestartedCron( + [ + { + id: "restart-missed-slot", + name: "every ten minutes +1", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"), + schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "catch missed slot" }, + state: { + // Persisted state may already be recomputed from restart time and + // point to the future slot, even though 04:01 was missed. + nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), + lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"), + lastStatus: "ok", + }, }, + ], + async ({ cron, enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).toHaveBeenCalledWith( + "catch missed slot", + expect.objectContaining({ agentId: undefined }), + ); + expect(requestHeartbeatNow).toHaveBeenCalled(); + + const listedJobs = await cron.list({ includeDisabled: true }); + const updated = listedJobs.find((job) => job.id === "restart-missed-slot"); + expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T04:02:00.000Z")); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).toHaveBeenCalledWith( - "catch missed slot", - expect.objectContaining({ agentId: undefined }), ); - expect(requestHeartbeatNow).toHaveBeenCalled(); - - const jobs = await cron.list({ includeDisabled: true }); - const updated = jobs.find((job) => job.id === "restart-missed-slot"); - expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T04:02:00.000Z")); - - cron.stop(); - await store.cleanup(); }); it("does not replay interrupted one-shot jobs on startup", async () => { - const store = 
await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - const dueAt = Date.parse("2025-12-13T16:00:00.000Z"); const staleRunningAt = Date.parse("2025-12-13T16:30:00.000Z"); - await writeStoreJobs(store.storePath, [ - { - id: "restart-stale-one-shot", - name: "one shot stale marker", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"), - schedule: { kind: "at", at: "2025-12-13T16:00:00.000Z" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "one-shot stale marker" }, - state: { - nextRunAtMs: dueAt, - runningAtMs: staleRunningAt, + await withRestartedCron( + [ + { + id: "restart-stale-one-shot", + name: "one shot stale marker", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"), + schedule: { kind: "at", at: "2025-12-13T16:00:00.000Z" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "one-shot stale marker" }, + state: { + nextRunAtMs: dueAt, + runningAtMs: staleRunningAt, + }, }, + ], + async ({ cron, enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(requestHeartbeatNow).not.toHaveBeenCalled(); + + const listedJobs = await cron.list({ includeDisabled: true }); + const updated = listedJobs.find((job) => job.id === "restart-stale-one-shot"); + expect(updated?.state.runningAtMs).toBeUndefined(); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).not.toHaveBeenCalled(); - expect(requestHeartbeatNow).not.toHaveBeenCalled(); - - const jobs = await cron.list({ includeDisabled: true }); - const updated = jobs.find((job) => job.id === "restart-stale-one-shot"); - 
expect(updated?.state.runningAtMs).toBeUndefined(); - - cron.stop(); - await store.cleanup(); + ); }); it("does not replay cron slot when the latest slot already ran before restart", async () => { vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z")); - const store = await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - - await writeStoreJobs(store.storePath, [ - { - id: "restart-no-duplicate-slot", - name: "every ten minutes +1 no duplicate", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"), - schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "already ran" }, - state: { - nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), - lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"), - lastStatus: "ok", + await withRestartedCron( + [ + { + id: "restart-no-duplicate-slot", + name: "every ten minutes +1 no duplicate", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"), + schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "already ran" }, + state: { + nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), + lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"), + lastStatus: "ok", + }, }, + ], + async ({ enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(requestHeartbeatNow).not.toHaveBeenCalled(); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).not.toHaveBeenCalled(); - expect(requestHeartbeatNow).not.toHaveBeenCalled(); - cron.stop(); - await 
store.cleanup(); + ); }); it("does not replay missed cron slots while error backoff is pending after restart", async () => { vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z")); - const store = await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - - await writeStoreJobs(store.storePath, [ - { - id: "restart-backoff-pending", - name: "backoff pending", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"), - schedule: { kind: "cron", expr: "* * * * *", tz: "UTC" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "do not run during backoff" }, - state: { - // Next retry is intentionally delayed by backoff despite a newer cron slot. - nextRunAtMs: Date.parse("2025-12-13T04:10:00.000Z"), - lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"), - lastStatus: "error", - consecutiveErrors: 4, + await withRestartedCron( + [ + { + id: "restart-backoff-pending", + name: "backoff pending", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"), + schedule: { kind: "cron", expr: "* * * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "do not run during backoff" }, + state: { + // Next retry is intentionally delayed by backoff despite a newer cron slot. 
+ nextRunAtMs: Date.parse("2025-12-13T04:10:00.000Z"), + lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"), + lastStatus: "error", + consecutiveErrors: 4, + }, }, + ], + async ({ enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(requestHeartbeatNow).not.toHaveBeenCalled(); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).not.toHaveBeenCalled(); - expect(requestHeartbeatNow).not.toHaveBeenCalled(); - - cron.stop(); - await store.cleanup(); + ); }); it("replays missed cron slot after restart when error backoff has already elapsed", async () => { vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z")); - const store = await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - - await writeStoreJobs(store.storePath, [ - { - id: "restart-backoff-elapsed-replay", - name: "backoff elapsed replay", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"), - schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "replay after backoff elapsed" }, - state: { - // Startup maintenance may already point to a future slot (04:11) even - // though 04:01 was missed and the 30s error backoff has elapsed. 
- nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), - lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"), - lastStatus: "error", - consecutiveErrors: 1, + await withRestartedCron( + [ + { + id: "restart-backoff-elapsed-replay", + name: "backoff elapsed replay", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"), + schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "replay after backoff elapsed" }, + state: { + // Startup maintenance may already point to a future slot (04:11) even + // though 04:01 was missed and the 30s error backoff has elapsed. + nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), + lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"), + lastStatus: "error", + consecutiveErrors: 1, + }, }, + ], + async ({ enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).toHaveBeenCalledWith( + "replay after backoff elapsed", + expect.objectContaining({ agentId: undefined }), + ); + expect(requestHeartbeatNow).toHaveBeenCalled(); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).toHaveBeenCalledWith( - "replay after backoff elapsed", - expect.objectContaining({ agentId: undefined }), ); - expect(requestHeartbeatNow).toHaveBeenCalled(); - - cron.stop(); - await store.cleanup(); }); it("reschedules deferred missed jobs from the post-catchup clock so they stay in the future", async () => { diff --git a/src/cron/service.runs-one-shot-main-job-disables-it.test.ts b/src/cron/service.runs-one-shot-main-job-disables-it.test.ts index deac4a5b668..75ffb262d4d 100644 --- a/src/cron/service.runs-one-shot-main-job-disables-it.test.ts +++ b/src/cron/service.runs-one-shot-main-job-disables-it.test.ts @@ -620,14 +620,14 
@@ describe("CronService", () => { await stopCronAndCleanup(cron, store); }); - it("runs an isolated job and posts summary to main", async () => { + it("runs an isolated job without posting a fallback summary to main", async () => { const runIsolatedAgentJob = vi.fn(async () => ({ status: "ok" as const, summary: "done" })); const { store, cron, enqueueSystemEvent, requestHeartbeatNow, events } = await createIsolatedAnnounceHarness(runIsolatedAgentJob); await runIsolatedAnnounceScenario({ cron, events, name: "weekly" }); expect(runIsolatedAgentJob).toHaveBeenCalledTimes(1); - expectMainSystemEventPosted(enqueueSystemEvent, "Cron: done"); - expect(requestHeartbeatNow).toHaveBeenCalled(); + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(requestHeartbeatNow).not.toHaveBeenCalled(); await stopCronAndCleanup(cron, store); }); @@ -685,7 +685,7 @@ describe("CronService", () => { await stopCronAndCleanup(cron, store); }); - it("posts last output to main even when isolated job errors", async () => { + it("does not post a fallback main summary when an isolated job errors", async () => { const runIsolatedAgentJob = vi.fn(async () => ({ status: "error" as const, summary: "last output", @@ -700,8 +700,8 @@ describe("CronService", () => { status: "error", }); - expectMainSystemEventPosted(enqueueSystemEvent, "Cron (error): last output"); - expect(requestHeartbeatNow).toHaveBeenCalled(); + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(requestHeartbeatNow).not.toHaveBeenCalled(); await stopCronAndCleanup(cron, store); }); @@ -759,7 +759,7 @@ describe("CronService", () => { wakeMode: "next-heartbeat", payload: { kind: "systemEvent", text: "nope" }, }), - ).rejects.toThrow(/isolated cron jobs require/); + ).rejects.toThrow(/isolated.*cron jobs require/); cron.stop(); await store.cleanup(); diff --git a/src/cron/service.store-migration.test.ts b/src/cron/service.store-migration.test.ts index 52c9f571b08..216154fa503 100644 --- 
a/src/cron/service.store-migration.test.ts +++ b/src/cron/service.store-migration.test.ts @@ -72,6 +72,39 @@ function createLegacyIsolatedAgentTurnJob( } describe("CronService store migrations", () => { + it("treats stored current session targets as isolated-like for default delivery migration", async () => { + const { store, cron } = await startCronWithStoredJobs([ + createLegacyIsolatedAgentTurnJob({ + id: "stored-current-job", + name: "stored current", + sessionTarget: "current", + }), + ]); + + const job = await listJobById(cron, "stored-current-job"); + expect(job).toBeDefined(); + expect(job?.sessionTarget).toBe("isolated"); + expect(job?.delivery).toEqual({ mode: "announce" }); + + await stopCronAndCleanup(cron, store); + }); + + it("preserves stored custom session targets", async () => { + const { store, cron } = await startCronWithStoredJobs([ + createLegacyIsolatedAgentTurnJob({ + id: "custom-session-job", + name: "custom session", + sessionTarget: "session:ProjectAlpha", + }), + ]); + + const job = await listJobById(cron, "custom-session-job"); + expect(job?.sessionTarget).toBe("session:ProjectAlpha"); + expect(job?.delivery).toEqual({ mode: "announce" }); + + await stopCronAndCleanup(cron, store); + }); + it("migrates legacy top-level agentTurn fields and initializes missing state", async () => { const { store, cron } = await startCronWithStoredJobs([ createLegacyIsolatedAgentTurnJob({ diff --git a/src/cron/service.store.migration.test.ts b/src/cron/service.store.migration.test.ts index 8daa0b39e9a..973efca67a6 100644 --- a/src/cron/service.store.migration.test.ts +++ b/src/cron/service.store.migration.test.ts @@ -133,6 +133,24 @@ describe("cron store migration", () => { expect(schedule.at).toBe(new Date(atMs).toISOString()); }); + it("preserves stored custom session targets", async () => { + const migrated = await migrateLegacyJob( + makeLegacyJob({ + id: "job-custom-session", + name: "Custom session", + schedule: { kind: "cron", expr: "0 23 * * *", 
tz: "UTC" }, + sessionTarget: "session:ProjectAlpha", + payload: { + kind: "agentTurn", + message: "hello", + }, + }), + ); + + expect(migrated.sessionTarget).toBe("session:ProjectAlpha"); + expect(migrated.delivery).toEqual({ mode: "announce" }); + }); + it("adds anchorMs to legacy every schedules", async () => { const createdAtMs = 1_700_000_000_000; const migrated = await migrateLegacyJob( diff --git a/src/cron/service/jobs.ts b/src/cron/service/jobs.ts index 5579e5430f0..542ba81053d 100644 --- a/src/cron/service/jobs.ts +++ b/src/cron/service/jobs.ts @@ -132,11 +132,15 @@ function resolveEveryAnchorMs(params: { } export function assertSupportedJobSpec(job: Pick) { + const isIsolatedLike = + job.sessionTarget === "isolated" || + job.sessionTarget === "current" || + job.sessionTarget.startsWith("session:"); if (job.sessionTarget === "main" && job.payload.kind !== "systemEvent") { throw new Error('main cron jobs require payload.kind="systemEvent"'); } - if (job.sessionTarget === "isolated" && job.payload.kind !== "agentTurn") { - throw new Error('isolated cron jobs require payload.kind="agentTurn"'); + if (isIsolatedLike && job.payload.kind !== "agentTurn") { + throw new Error('isolated/current/session cron jobs require payload.kind="agentTurn"'); } } @@ -181,6 +185,7 @@ function assertDeliverySupport(job: Pick) if (!job.delivery || job.delivery.mode === "none") { return; } + // Webhook delivery is allowed for any session target if (job.delivery.mode === "webhook") { const target = normalizeHttpWebhookUrl(job.delivery.to); if (!target) { @@ -189,7 +194,11 @@ function assertDeliverySupport(job: Pick) job.delivery.to = target; return; } - if (job.sessionTarget !== "isolated") { + const isIsolatedLike = + job.sessionTarget === "isolated" || + job.sessionTarget === "current" || + job.sessionTarget.startsWith("session:"); + if (!isIsolatedLike) { throw new Error('cron channel delivery config is only supported for sessionTarget="isolated"'); } if (job.delivery.channel 
=== "telegram") { @@ -606,11 +615,11 @@ export function applyJobPatch( if (!patch.delivery && patch.payload?.kind === "agentTurn") { // Back-compat: legacy clients still update delivery via payload fields. const legacyDeliveryPatch = buildLegacyDeliveryPatch(patch.payload); - if ( - legacyDeliveryPatch && - job.sessionTarget === "isolated" && - job.payload.kind === "agentTurn" - ) { + const isIsolatedLike = + job.sessionTarget === "isolated" || + job.sessionTarget === "current" || + job.sessionTarget.startsWith("session:"); + if (legacyDeliveryPatch && isIsolatedLike && job.payload.kind === "agentTurn") { job.delivery = mergeCronDelivery(job.delivery, legacyDeliveryPatch); } } diff --git a/src/cron/service/ops.ts b/src/cron/service/ops.ts index c027c8d553f..69751e4dfdb 100644 --- a/src/cron/service/ops.ts +++ b/src/cron/service/ops.ts @@ -360,13 +360,23 @@ type ManualRunDisposition = | Extract | { ok: true; runnable: true }; +type ManualRunPreflightResult = + | { ok: false } + | Extract + | { + ok: true; + runnable: true; + job: CronJob; + now: number; + }; + let nextManualRunId = 1; -async function inspectManualRunDisposition( +async function inspectManualRunPreflight( state: CronServiceState, id: string, mode?: "due" | "force", -): Promise { +): Promise { return await locked(state, async () => { warnIfDisabled(state, "run"); await ensureLoaded(state, { skipRecompute: true }); @@ -383,46 +393,60 @@ async function inspectManualRunDisposition( if (!due) { return { ok: true, ran: false, reason: "not-due" as const }; } - return { ok: true, runnable: true } as const; + return { ok: true, runnable: true, job, now } as const; }); } +async function inspectManualRunDisposition( + state: CronServiceState, + id: string, + mode?: "due" | "force", +): Promise { + const result = await inspectManualRunPreflight(state, id, mode); + if (!result.ok) { + return result; + } + if ("reason" in result) { + return result; + } + return { ok: true, runnable: true } as const; +} + async 
function prepareManualRun( state: CronServiceState, id: string, mode?: "due" | "force", ): Promise { + const preflight = await inspectManualRunPreflight(state, id, mode); + if (!preflight.ok) { + return preflight; + } + if ("reason" in preflight) { + return { + ok: true, + ran: false, + reason: preflight.reason, + } as const; + } return await locked(state, async () => { - warnIfDisabled(state, "run"); - await ensureLoaded(state, { skipRecompute: true }); - // Normalize job tick state (clears stale runningAtMs markers) before - // checking if already running, so a stale marker from a crashed Phase-1 - // persist does not block manual triggers for up to STUCK_RUN_MS (#17554). - recomputeNextRunsForMaintenance(state); + // Reserve this run under lock, then execute outside lock so read ops + // (`list`, `status`) stay responsive while the run is in progress. const job = findJobOrThrow(state, id); if (typeof job.state.runningAtMs === "number") { return { ok: true, ran: false, reason: "already-running" as const }; } - const now = state.deps.nowMs(); - const due = isJobDue(job, now, { forced: mode === "force" }); - if (!due) { - return { ok: true, ran: false, reason: "not-due" as const }; - } - - // Reserve this run under lock, then execute outside lock so read ops - // (`list`, `status`) stay responsive while the run is in progress. - job.state.runningAtMs = now; + job.state.runningAtMs = preflight.now; job.state.lastError = undefined; // Persist the running marker before releasing lock so timer ticks that // force-reload from disk cannot start the same job concurrently. 
await persist(state); - emit(state, { jobId: job.id, action: "started", runAtMs: now }); + emit(state, { jobId: job.id, action: "started", runAtMs: preflight.now }); const executionJob = JSON.parse(JSON.stringify(job)) as CronJob; return { ok: true, ran: true, jobId: job.id, - startedAt: now, + startedAt: preflight.now, executionJob, } as const; }); diff --git a/src/cron/service/store.ts b/src/cron/service/store.ts index 2c40ac50643..d1d36e48e08 100644 --- a/src/cron/service/store.ts +++ b/src/cron/service/store.ts @@ -1,161 +1,10 @@ import fs from "node:fs"; -import { normalizeLegacyDeliveryInput } from "../legacy-delivery.js"; -import { parseAbsoluteTimeMs } from "../parse.js"; -import { migrateLegacyCronPayload } from "../payload-migration.js"; -import { coerceFiniteScheduleNumber } from "../schedule.js"; -import { normalizeCronStaggerMs, resolveDefaultCronStaggerMs } from "../stagger.js"; +import { normalizeStoredCronJobs } from "../store-migration.js"; import { loadCronStore, saveCronStore } from "../store.js"; import type { CronJob } from "../types.js"; import { recomputeNextRuns } from "./jobs.js"; -import { inferLegacyName, normalizeOptionalText } from "./normalize.js"; import type { CronServiceState } from "./state.js"; -function normalizePayloadKind(payload: Record) { - const raw = typeof payload.kind === "string" ? payload.kind.trim().toLowerCase() : ""; - if (raw === "agentturn") { - payload.kind = "agentTurn"; - return true; - } - if (raw === "systemevent") { - payload.kind = "systemEvent"; - return true; - } - return false; -} - -function inferPayloadIfMissing(raw: Record) { - const message = typeof raw.message === "string" ? raw.message.trim() : ""; - const text = typeof raw.text === "string" ? raw.text.trim() : ""; - const command = typeof raw.command === "string" ? 
raw.command.trim() : ""; - if (message) { - raw.payload = { kind: "agentTurn", message }; - return true; - } - if (text) { - raw.payload = { kind: "systemEvent", text }; - return true; - } - if (command) { - raw.payload = { kind: "systemEvent", text: command }; - return true; - } - return false; -} - -function copyTopLevelAgentTurnFields( - raw: Record, - payload: Record, -) { - let mutated = false; - - const copyTrimmedString = (field: "model" | "thinking") => { - const existing = payload[field]; - if (typeof existing === "string" && existing.trim()) { - return; - } - const value = raw[field]; - if (typeof value === "string" && value.trim()) { - payload[field] = value.trim(); - mutated = true; - } - }; - copyTrimmedString("model"); - copyTrimmedString("thinking"); - - if ( - typeof payload.timeoutSeconds !== "number" && - typeof raw.timeoutSeconds === "number" && - Number.isFinite(raw.timeoutSeconds) - ) { - payload.timeoutSeconds = Math.max(0, Math.floor(raw.timeoutSeconds)); - mutated = true; - } - - if ( - typeof payload.allowUnsafeExternalContent !== "boolean" && - typeof raw.allowUnsafeExternalContent === "boolean" - ) { - payload.allowUnsafeExternalContent = raw.allowUnsafeExternalContent; - mutated = true; - } - - if (typeof payload.deliver !== "boolean" && typeof raw.deliver === "boolean") { - payload.deliver = raw.deliver; - mutated = true; - } - if ( - typeof payload.channel !== "string" && - typeof raw.channel === "string" && - raw.channel.trim() - ) { - payload.channel = raw.channel.trim(); - mutated = true; - } - if (typeof payload.to !== "string" && typeof raw.to === "string" && raw.to.trim()) { - payload.to = raw.to.trim(); - mutated = true; - } - if ( - typeof payload.bestEffortDeliver !== "boolean" && - typeof raw.bestEffortDeliver === "boolean" - ) { - payload.bestEffortDeliver = raw.bestEffortDeliver; - mutated = true; - } - if ( - typeof payload.provider !== "string" && - typeof raw.provider === "string" && - raw.provider.trim() - ) { - 
payload.provider = raw.provider.trim(); - mutated = true; - } - - return mutated; -} - -function stripLegacyTopLevelFields(raw: Record) { - if ("model" in raw) { - delete raw.model; - } - if ("thinking" in raw) { - delete raw.thinking; - } - if ("timeoutSeconds" in raw) { - delete raw.timeoutSeconds; - } - if ("allowUnsafeExternalContent" in raw) { - delete raw.allowUnsafeExternalContent; - } - if ("message" in raw) { - delete raw.message; - } - if ("text" in raw) { - delete raw.text; - } - if ("deliver" in raw) { - delete raw.deliver; - } - if ("channel" in raw) { - delete raw.channel; - } - if ("to" in raw) { - delete raw.to; - } - if ("bestEffortDeliver" in raw) { - delete raw.bestEffortDeliver; - } - if ("provider" in raw) { - delete raw.provider; - } - if ("command" in raw) { - delete raw.command; - } - if ("timeout" in raw) { - delete raw.timeout; - } -} - async function getFileMtimeMs(path: string): Promise { try { const stats = await fs.promises.stat(path); @@ -185,287 +34,7 @@ export async function ensureLoaded( const fileMtimeMs = await getFileMtimeMs(state.deps.storePath); const loaded = await loadCronStore(state.deps.storePath); const jobs = (loaded.jobs ?? []) as unknown as Array>; - let mutated = false; - for (const raw of jobs) { - const state = raw.state; - if (!state || typeof state !== "object" || Array.isArray(state)) { - raw.state = {}; - mutated = true; - } - - const rawId = typeof raw.id === "string" ? raw.id.trim() : ""; - const legacyJobId = typeof raw.jobId === "string" ? 
raw.jobId.trim() : ""; - if (!rawId && legacyJobId) { - raw.id = legacyJobId; - mutated = true; - } else if (rawId && raw.id !== rawId) { - raw.id = rawId; - mutated = true; - } - if ("jobId" in raw) { - delete raw.jobId; - mutated = true; - } - - if (typeof raw.schedule === "string") { - const expr = raw.schedule.trim(); - raw.schedule = { kind: "cron", expr }; - mutated = true; - } - - const nameRaw = raw.name; - if (typeof nameRaw !== "string" || nameRaw.trim().length === 0) { - raw.name = inferLegacyName({ - schedule: raw.schedule as never, - payload: raw.payload as never, - }); - mutated = true; - } else { - raw.name = nameRaw.trim(); - } - - const desc = normalizeOptionalText(raw.description); - if (raw.description !== desc) { - raw.description = desc; - mutated = true; - } - - if ("sessionKey" in raw) { - const sessionKey = - typeof raw.sessionKey === "string" ? normalizeOptionalText(raw.sessionKey) : undefined; - if (raw.sessionKey !== sessionKey) { - raw.sessionKey = sessionKey; - mutated = true; - } - } - - if (typeof raw.enabled !== "boolean") { - raw.enabled = true; - mutated = true; - } - - const wakeModeRaw = typeof raw.wakeMode === "string" ? raw.wakeMode.trim().toLowerCase() : ""; - if (wakeModeRaw === "next-heartbeat") { - if (raw.wakeMode !== "next-heartbeat") { - raw.wakeMode = "next-heartbeat"; - mutated = true; - } - } else if (wakeModeRaw === "now") { - if (raw.wakeMode !== "now") { - raw.wakeMode = "now"; - mutated = true; - } - } else { - raw.wakeMode = "now"; - mutated = true; - } - - const payload = raw.payload; - if ( - (!payload || typeof payload !== "object" || Array.isArray(payload)) && - inferPayloadIfMissing(raw) - ) { - mutated = true; - } - - const payloadRecord = - raw.payload && typeof raw.payload === "object" && !Array.isArray(raw.payload) - ? 
(raw.payload as Record) - : null; - - if (payloadRecord) { - if (normalizePayloadKind(payloadRecord)) { - mutated = true; - } - if (!payloadRecord.kind) { - if (typeof payloadRecord.message === "string" && payloadRecord.message.trim()) { - payloadRecord.kind = "agentTurn"; - mutated = true; - } else if (typeof payloadRecord.text === "string" && payloadRecord.text.trim()) { - payloadRecord.kind = "systemEvent"; - mutated = true; - } - } - if (payloadRecord.kind === "agentTurn") { - if (copyTopLevelAgentTurnFields(raw, payloadRecord)) { - mutated = true; - } - } - } - - const hadLegacyTopLevelFields = - "model" in raw || - "thinking" in raw || - "timeoutSeconds" in raw || - "allowUnsafeExternalContent" in raw || - "message" in raw || - "text" in raw || - "deliver" in raw || - "channel" in raw || - "to" in raw || - "bestEffortDeliver" in raw || - "provider" in raw || - "command" in raw || - "timeout" in raw; - if (hadLegacyTopLevelFields) { - stripLegacyTopLevelFields(raw); - mutated = true; - } - - if (payloadRecord) { - if (migrateLegacyCronPayload(payloadRecord)) { - mutated = true; - } - } - - const schedule = raw.schedule; - if (schedule && typeof schedule === "object" && !Array.isArray(schedule)) { - const sched = schedule as Record; - const kind = typeof sched.kind === "string" ? sched.kind.trim().toLowerCase() : ""; - if (!kind && ("at" in sched || "atMs" in sched)) { - sched.kind = "at"; - mutated = true; - } - const atRaw = typeof sched.at === "string" ? sched.at.trim() : ""; - const atMsRaw = sched.atMs; - const parsedAtMs = - typeof atMsRaw === "number" - ? atMsRaw - : typeof atMsRaw === "string" - ? parseAbsoluteTimeMs(atMsRaw) - : atRaw - ? 
parseAbsoluteTimeMs(atRaw) - : null; - if (parsedAtMs !== null) { - sched.at = new Date(parsedAtMs).toISOString(); - if ("atMs" in sched) { - delete sched.atMs; - } - mutated = true; - } - - const everyMsRaw = sched.everyMs; - const everyMsCoerced = coerceFiniteScheduleNumber(everyMsRaw); - const everyMs = everyMsCoerced !== undefined ? Math.floor(everyMsCoerced) : null; - if (everyMs !== null && everyMsRaw !== everyMs) { - sched.everyMs = everyMs; - mutated = true; - } - if ((kind === "every" || sched.kind === "every") && everyMs !== null) { - const anchorRaw = sched.anchorMs; - const anchorCoerced = coerceFiniteScheduleNumber(anchorRaw); - const normalizedAnchor = - anchorCoerced !== undefined - ? Math.max(0, Math.floor(anchorCoerced)) - : typeof raw.createdAtMs === "number" && Number.isFinite(raw.createdAtMs) - ? Math.max(0, Math.floor(raw.createdAtMs)) - : typeof raw.updatedAtMs === "number" && Number.isFinite(raw.updatedAtMs) - ? Math.max(0, Math.floor(raw.updatedAtMs)) - : null; - if (normalizedAnchor !== null && anchorRaw !== normalizedAnchor) { - sched.anchorMs = normalizedAnchor; - mutated = true; - } - } - - const exprRaw = typeof sched.expr === "string" ? sched.expr.trim() : ""; - const legacyCronRaw = typeof sched.cron === "string" ? sched.cron.trim() : ""; - let normalizedExpr = exprRaw; - if (!normalizedExpr && legacyCronRaw) { - normalizedExpr = legacyCronRaw; - sched.expr = normalizedExpr; - mutated = true; - } - if (typeof sched.expr === "string" && sched.expr !== normalizedExpr) { - sched.expr = normalizedExpr; - mutated = true; - } - if ("cron" in sched) { - delete sched.cron; - mutated = true; - } - if ((kind === "cron" || sched.kind === "cron") && normalizedExpr) { - const explicitStaggerMs = normalizeCronStaggerMs(sched.staggerMs); - const defaultStaggerMs = resolveDefaultCronStaggerMs(normalizedExpr); - const targetStaggerMs = explicitStaggerMs ?? 
defaultStaggerMs; - if (targetStaggerMs === undefined) { - if ("staggerMs" in sched) { - delete sched.staggerMs; - mutated = true; - } - } else if (sched.staggerMs !== targetStaggerMs) { - sched.staggerMs = targetStaggerMs; - mutated = true; - } - } - } - - const delivery = raw.delivery; - if (delivery && typeof delivery === "object" && !Array.isArray(delivery)) { - const modeRaw = (delivery as { mode?: unknown }).mode; - if (typeof modeRaw === "string") { - const lowered = modeRaw.trim().toLowerCase(); - if (lowered === "deliver") { - (delivery as { mode?: unknown }).mode = "announce"; - mutated = true; - } - } else if (modeRaw === undefined || modeRaw === null) { - // Explicitly persist the default so existing jobs don't silently - // change behaviour when the runtime default shifts. - (delivery as { mode?: unknown }).mode = "announce"; - mutated = true; - } - } - - const isolation = raw.isolation; - if (isolation && typeof isolation === "object" && !Array.isArray(isolation)) { - delete raw.isolation; - mutated = true; - } - - const payloadKind = - payloadRecord && typeof payloadRecord.kind === "string" ? payloadRecord.kind : ""; - const normalizedSessionTarget = - typeof raw.sessionTarget === "string" ? raw.sessionTarget.trim().toLowerCase() : ""; - if (normalizedSessionTarget === "main" || normalizedSessionTarget === "isolated") { - if (raw.sessionTarget !== normalizedSessionTarget) { - raw.sessionTarget = normalizedSessionTarget; - mutated = true; - } - } else { - const inferredSessionTarget = payloadKind === "agentTurn" ? "isolated" : "main"; - if (raw.sessionTarget !== inferredSessionTarget) { - raw.sessionTarget = inferredSessionTarget; - mutated = true; - } - } - - const sessionTarget = - typeof raw.sessionTarget === "string" ? 
raw.sessionTarget.trim().toLowerCase() : ""; - const isIsolatedAgentTurn = - sessionTarget === "isolated" || (sessionTarget === "" && payloadKind === "agentTurn"); - const hasDelivery = delivery && typeof delivery === "object" && !Array.isArray(delivery); - const normalizedLegacy = normalizeLegacyDeliveryInput({ - delivery: hasDelivery ? (delivery as Record) : null, - payload: payloadRecord, - }); - - if (isIsolatedAgentTurn && payloadKind === "agentTurn") { - if (!hasDelivery && normalizedLegacy.delivery) { - raw.delivery = normalizedLegacy.delivery; - mutated = true; - } else if (!hasDelivery) { - raw.delivery = { mode: "announce" }; - mutated = true; - } else if (normalizedLegacy.mutated && normalizedLegacy.delivery) { - raw.delivery = normalizedLegacy.delivery; - mutated = true; - } - } else if (normalizedLegacy.mutated && normalizedLegacy.delivery) { - raw.delivery = normalizedLegacy.delivery; - mutated = true; - } - } + const { mutated } = normalizeStoredCronJobs(jobs); state.store = { version: 1, jobs: jobs as unknown as CronJob[] }; state.storeLoadedAtMs = state.deps.nowMs(); state.storeFileMtimeMs = fileMtimeMs; diff --git a/src/cron/service/timer.ts b/src/cron/service/timer.ts index f82290006b4..e12c4ae38e7 100644 --- a/src/cron/service/timer.ts +++ b/src/cron/service/timer.ts @@ -1,9 +1,8 @@ +import { resolveFailoverReasonFromError } from "../../agents/failover-error.js"; import type { CronConfig, CronRetryOn } from "../../config/types.cron.js"; -import { isCronSystemEvent } from "../../infra/heartbeat-events-filter.js"; import type { HeartbeatRunResult } from "../../infra/heartbeat-wake.js"; import { DEFAULT_AGENT_ID } from "../../routing/session-key.js"; import { resolveCronDeliveryPlan } from "../delivery.js"; -import { shouldEnqueueCronMainSummary } from "../heartbeat-policy.js"; import { sweepCronRunSessions } from "../session-reaper.js"; import type { CronDeliveryStatus, @@ -324,6 +323,10 @@ export function applyJobResult( job.state.lastStatus = 
result.status; job.state.lastDurationMs = Math.max(0, result.endedAt - result.startedAt); job.state.lastError = result.error; + job.state.lastErrorReason = + result.status === "error" && typeof result.error === "string" + ? (resolveFailoverReasonFromError(result.error) ?? undefined) + : undefined; job.state.lastDelivered = result.delivered; const deliveryStatus = resolveDeliveryStatus({ job, delivered: result.delivered }); job.state.lastDeliveryStatus = deliveryStatus; @@ -672,7 +675,6 @@ export async function onTimer(state: CronServiceState) { if (completedResults.length > 0) { await locked(state, async () => { await ensureLoaded(state, { forceReload: true, skipRecompute: true }); - for (const result of completedResults) { applyOutcomeToStoredJob(state, result); } @@ -1138,46 +1140,6 @@ export async function executeJobCore( return { status: "error", error: timeoutErrorMessage() }; } - // Post a short summary back to the main session only when announce - // delivery was requested and we are confident no outbound delivery path - // ran. If delivery was attempted but final ack is uncertain, suppress the - // main summary to avoid duplicate user-facing sends. - // See: https://github.com/openclaw/openclaw/issues/15692 - // - // Also suppress heartbeat-only summaries (e.g. "HEARTBEAT_OK") — these - // are internal ack tokens that should never leak into user conversations. - // See: https://github.com/openclaw/openclaw/issues/32013 - const summaryText = res.summary?.trim(); - const deliveryPlan = resolveCronDeliveryPlan(job); - const suppressMainSummary = - res.status === "error" && res.errorKind === "delivery-target" && deliveryPlan.requested; - if ( - shouldEnqueueCronMainSummary({ - summaryText, - deliveryRequested: deliveryPlan.requested, - delivered: res.delivered, - deliveryAttempted: res.deliveryAttempted, - suppressMainSummary, - isCronSystemEvent, - }) - ) { - const prefix = "Cron"; - const label = - res.status === "error" ? 
`${prefix} (error): ${summaryText}` : `${prefix}: ${summaryText}`; - state.deps.enqueueSystemEvent(label, { - agentId: job.agentId, - sessionKey: job.sessionKey, - contextKey: `cron:${job.id}`, - }); - if (job.wakeMode === "now") { - state.deps.requestHeartbeatNow({ - reason: `cron:${job.id}`, - agentId: job.agentId, - sessionKey: job.sessionKey, - }); - } - } - return { status: res.status, error: res.error, diff --git a/src/cron/store-migration.test.ts b/src/cron/store-migration.test.ts new file mode 100644 index 00000000000..9d82c55c472 --- /dev/null +++ b/src/cron/store-migration.test.ts @@ -0,0 +1,133 @@ +import { describe, expect, it } from "vitest"; +import { normalizeStoredCronJobs } from "./store-migration.js"; + +describe("normalizeStoredCronJobs", () => { + it("normalizes legacy cron fields and reports migration issues", () => { + const jobs = [ + { + jobId: "legacy-job", + schedule: { kind: "cron", cron: "*/5 * * * *", tz: "UTC" }, + message: "say hi", + model: "openai/gpt-4.1", + deliver: true, + provider: " TeLeGrAm ", + to: "12345", + }, + ] as Array>; + + const result = normalizeStoredCronJobs(jobs); + + expect(result.mutated).toBe(true); + expect(result.issues).toMatchObject({ + jobId: 1, + legacyScheduleCron: 1, + legacyTopLevelPayloadFields: 1, + legacyTopLevelDeliveryFields: 1, + }); + + const [job] = jobs; + expect(job?.jobId).toBeUndefined(); + expect(job?.id).toBe("legacy-job"); + expect(job?.schedule).toMatchObject({ + kind: "cron", + expr: "*/5 * * * *", + tz: "UTC", + }); + expect(job?.message).toBeUndefined(); + expect(job?.provider).toBeUndefined(); + expect(job?.delivery).toMatchObject({ + mode: "announce", + channel: "telegram", + to: "12345", + }); + expect(job?.payload).toMatchObject({ + kind: "agentTurn", + message: "say hi", + model: "openai/gpt-4.1", + }); + }); + + it("normalizes payload provider alias into channel", () => { + const jobs = [ + { + id: "legacy-provider", + schedule: { kind: "every", everyMs: 60_000 }, + payload: { 
+ kind: "agentTurn", + message: "ping", + provider: " Slack ", + }, + }, + ] as Array>; + + const result = normalizeStoredCronJobs(jobs); + + expect(result.mutated).toBe(true); + expect(result.issues.legacyPayloadProvider).toBe(1); + expect(jobs[0]?.payload).toMatchObject({ + kind: "agentTurn", + message: "ping", + }); + const payload = jobs[0]?.payload as Record | undefined; + expect(payload?.provider).toBeUndefined(); + expect(jobs[0]?.delivery).toMatchObject({ + mode: "announce", + channel: "slack", + }); + }); + + it("does not report legacyPayloadKind for already-normalized payload kinds", () => { + const jobs = [ + { + id: "normalized-agent-turn", + name: "normalized", + enabled: true, + wakeMode: "now", + schedule: { kind: "every", everyMs: 60_000, anchorMs: 1 }, + payload: { kind: "agentTurn", message: "ping" }, + sessionTarget: "isolated", + delivery: { mode: "announce" }, + state: {}, + }, + ] as Array>; + + const result = normalizeStoredCronJobs(jobs); + + expect(result.mutated).toBe(false); + expect(result.issues.legacyPayloadKind).toBeUndefined(); + }); + + it("normalizes whitespace-padded and non-canonical payload kinds", () => { + const jobs = [ + { + id: "spaced-agent-turn", + name: "normalized", + enabled: true, + wakeMode: "now", + schedule: { kind: "every", everyMs: 60_000, anchorMs: 1 }, + payload: { kind: " agentTurn ", message: "ping" }, + sessionTarget: "isolated", + delivery: { mode: "announce" }, + state: {}, + }, + { + id: "upper-system-event", + name: "normalized", + enabled: true, + wakeMode: "now", + schedule: { kind: "every", everyMs: 60_000, anchorMs: 1 }, + payload: { kind: "SYSTEMEVENT", text: "pong" }, + sessionTarget: "main", + delivery: { mode: "announce" }, + state: {}, + }, + ] as Array>; + + const result = normalizeStoredCronJobs(jobs); + + expect(result.mutated).toBe(true); + expect(result.issues.legacyPayloadKind).toBe(2); + expect(jobs[0]?.payload).toMatchObject({ kind: "agentTurn", message: "ping" }); + 
expect(jobs[1]?.payload).toMatchObject({ kind: "systemEvent", text: "pong" }); + }); +}); diff --git a/src/cron/store-migration.ts b/src/cron/store-migration.ts new file mode 100644 index 00000000000..0a460174bd2 --- /dev/null +++ b/src/cron/store-migration.ts @@ -0,0 +1,514 @@ +import { normalizeLegacyDeliveryInput } from "./legacy-delivery.js"; +import { parseAbsoluteTimeMs } from "./parse.js"; +import { migrateLegacyCronPayload } from "./payload-migration.js"; +import { coerceFiniteScheduleNumber } from "./schedule.js"; +import { inferLegacyName, normalizeOptionalText } from "./service/normalize.js"; +import { normalizeCronStaggerMs, resolveDefaultCronStaggerMs } from "./stagger.js"; + +type CronStoreIssueKey = + | "jobId" + | "legacyScheduleString" + | "legacyScheduleCron" + | "legacyPayloadKind" + | "legacyPayloadProvider" + | "legacyTopLevelPayloadFields" + | "legacyTopLevelDeliveryFields" + | "legacyDeliveryMode"; + +type CronStoreIssues = Partial>; + +type NormalizeCronStoreJobsResult = { + issues: CronStoreIssues; + jobs: Array>; + mutated: boolean; +}; + +function incrementIssue(issues: CronStoreIssues, key: CronStoreIssueKey) { + issues[key] = (issues[key] ?? 0) + 1; +} + +function normalizePayloadKind(payload: Record) { + const raw = typeof payload.kind === "string" ? payload.kind.trim().toLowerCase() : ""; + if (raw === "agentturn") { + if (payload.kind !== "agentTurn") { + payload.kind = "agentTurn"; + return true; + } + return false; + } + if (raw === "systemevent") { + if (payload.kind !== "systemEvent") { + payload.kind = "systemEvent"; + return true; + } + return false; + } + return false; +} + +function inferPayloadIfMissing(raw: Record) { + const message = typeof raw.message === "string" ? raw.message.trim() : ""; + const text = typeof raw.text === "string" ? raw.text.trim() : ""; + const command = typeof raw.command === "string" ? 
raw.command.trim() : ""; + if (message) { + raw.payload = { kind: "agentTurn", message }; + return true; + } + if (text) { + raw.payload = { kind: "systemEvent", text }; + return true; + } + if (command) { + raw.payload = { kind: "systemEvent", text: command }; + return true; + } + return false; +} + +function copyTopLevelAgentTurnFields( + raw: Record, + payload: Record, +) { + let mutated = false; + + const copyTrimmedString = (field: "model" | "thinking") => { + const existing = payload[field]; + if (typeof existing === "string" && existing.trim()) { + return; + } + const value = raw[field]; + if (typeof value === "string" && value.trim()) { + payload[field] = value.trim(); + mutated = true; + } + }; + copyTrimmedString("model"); + copyTrimmedString("thinking"); + + if ( + typeof payload.timeoutSeconds !== "number" && + typeof raw.timeoutSeconds === "number" && + Number.isFinite(raw.timeoutSeconds) + ) { + payload.timeoutSeconds = Math.max(0, Math.floor(raw.timeoutSeconds)); + mutated = true; + } + + if ( + typeof payload.allowUnsafeExternalContent !== "boolean" && + typeof raw.allowUnsafeExternalContent === "boolean" + ) { + payload.allowUnsafeExternalContent = raw.allowUnsafeExternalContent; + mutated = true; + } + + if (typeof payload.deliver !== "boolean" && typeof raw.deliver === "boolean") { + payload.deliver = raw.deliver; + mutated = true; + } + if ( + typeof payload.channel !== "string" && + typeof raw.channel === "string" && + raw.channel.trim() + ) { + payload.channel = raw.channel.trim(); + mutated = true; + } + if (typeof payload.to !== "string" && typeof raw.to === "string" && raw.to.trim()) { + payload.to = raw.to.trim(); + mutated = true; + } + if ( + typeof payload.bestEffortDeliver !== "boolean" && + typeof raw.bestEffortDeliver === "boolean" + ) { + payload.bestEffortDeliver = raw.bestEffortDeliver; + mutated = true; + } + if ( + typeof payload.provider !== "string" && + typeof raw.provider === "string" && + raw.provider.trim() + ) { + 
payload.provider = raw.provider.trim(); + mutated = true; + } + + return mutated; +} + +function stripLegacyTopLevelFields(raw: Record) { + if ("model" in raw) { + delete raw.model; + } + if ("thinking" in raw) { + delete raw.thinking; + } + if ("timeoutSeconds" in raw) { + delete raw.timeoutSeconds; + } + if ("allowUnsafeExternalContent" in raw) { + delete raw.allowUnsafeExternalContent; + } + if ("message" in raw) { + delete raw.message; + } + if ("text" in raw) { + delete raw.text; + } + if ("deliver" in raw) { + delete raw.deliver; + } + if ("channel" in raw) { + delete raw.channel; + } + if ("to" in raw) { + delete raw.to; + } + if ("bestEffortDeliver" in raw) { + delete raw.bestEffortDeliver; + } + if ("provider" in raw) { + delete raw.provider; + } + if ("command" in raw) { + delete raw.command; + } + if ("timeout" in raw) { + delete raw.timeout; + } +} + +export function normalizeStoredCronJobs( + jobs: Array>, +): NormalizeCronStoreJobsResult { + const issues: CronStoreIssues = {}; + let mutated = false; + + for (const raw of jobs) { + const jobIssues = new Set(); + const trackIssue = (key: CronStoreIssueKey) => { + if (jobIssues.has(key)) { + return; + } + jobIssues.add(key); + incrementIssue(issues, key); + }; + + const state = raw.state; + if (!state || typeof state !== "object" || Array.isArray(state)) { + raw.state = {}; + mutated = true; + } + + const rawId = typeof raw.id === "string" ? raw.id.trim() : ""; + const legacyJobId = typeof raw.jobId === "string" ? 
raw.jobId.trim() : ""; + if (!rawId && legacyJobId) { + raw.id = legacyJobId; + mutated = true; + trackIssue("jobId"); + } else if (rawId && raw.id !== rawId) { + raw.id = rawId; + mutated = true; + } + if ("jobId" in raw) { + delete raw.jobId; + mutated = true; + trackIssue("jobId"); + } + + if (typeof raw.schedule === "string") { + const expr = raw.schedule.trim(); + raw.schedule = { kind: "cron", expr }; + mutated = true; + trackIssue("legacyScheduleString"); + } + + const nameRaw = raw.name; + if (typeof nameRaw !== "string" || nameRaw.trim().length === 0) { + raw.name = inferLegacyName({ + schedule: raw.schedule as never, + payload: raw.payload as never, + }); + mutated = true; + } else { + raw.name = nameRaw.trim(); + } + + const desc = normalizeOptionalText(raw.description); + if (raw.description !== desc) { + raw.description = desc; + mutated = true; + } + + if ("sessionKey" in raw) { + const sessionKey = + typeof raw.sessionKey === "string" ? normalizeOptionalText(raw.sessionKey) : undefined; + if (raw.sessionKey !== sessionKey) { + raw.sessionKey = sessionKey; + mutated = true; + } + } + + if (typeof raw.enabled !== "boolean") { + raw.enabled = true; + mutated = true; + } + + const wakeModeRaw = typeof raw.wakeMode === "string" ? raw.wakeMode.trim().toLowerCase() : ""; + if (wakeModeRaw === "next-heartbeat") { + if (raw.wakeMode !== "next-heartbeat") { + raw.wakeMode = "next-heartbeat"; + mutated = true; + } + } else if (wakeModeRaw === "now") { + if (raw.wakeMode !== "now") { + raw.wakeMode = "now"; + mutated = true; + } + } else { + raw.wakeMode = "now"; + mutated = true; + } + + const payload = raw.payload; + if ( + (!payload || typeof payload !== "object" || Array.isArray(payload)) && + inferPayloadIfMissing(raw) + ) { + mutated = true; + trackIssue("legacyTopLevelPayloadFields"); + } + + const payloadRecord = + raw.payload && typeof raw.payload === "object" && !Array.isArray(raw.payload) + ? 
(raw.payload as Record) + : null; + + if (payloadRecord) { + if (normalizePayloadKind(payloadRecord)) { + mutated = true; + trackIssue("legacyPayloadKind"); + } + if (!payloadRecord.kind) { + if (typeof payloadRecord.message === "string" && payloadRecord.message.trim()) { + payloadRecord.kind = "agentTurn"; + mutated = true; + trackIssue("legacyPayloadKind"); + } else if (typeof payloadRecord.text === "string" && payloadRecord.text.trim()) { + payloadRecord.kind = "systemEvent"; + mutated = true; + trackIssue("legacyPayloadKind"); + } + } + if (payloadRecord.kind === "agentTurn" && copyTopLevelAgentTurnFields(raw, payloadRecord)) { + mutated = true; + } + } + + const hadLegacyTopLevelPayloadFields = + "model" in raw || + "thinking" in raw || + "timeoutSeconds" in raw || + "allowUnsafeExternalContent" in raw || + "message" in raw || + "text" in raw || + "command" in raw || + "timeout" in raw; + const hadLegacyTopLevelDeliveryFields = + "deliver" in raw || + "channel" in raw || + "to" in raw || + "bestEffortDeliver" in raw || + "provider" in raw; + if (hadLegacyTopLevelPayloadFields || hadLegacyTopLevelDeliveryFields) { + stripLegacyTopLevelFields(raw); + mutated = true; + if (hadLegacyTopLevelPayloadFields) { + trackIssue("legacyTopLevelPayloadFields"); + } + if (hadLegacyTopLevelDeliveryFields) { + trackIssue("legacyTopLevelDeliveryFields"); + } + } + + if (payloadRecord) { + const hadLegacyPayloadProvider = + typeof payloadRecord.provider === "string" && payloadRecord.provider.trim().length > 0; + if (migrateLegacyCronPayload(payloadRecord)) { + mutated = true; + if (hadLegacyPayloadProvider) { + trackIssue("legacyPayloadProvider"); + } + } + } + + const schedule = raw.schedule; + if (schedule && typeof schedule === "object" && !Array.isArray(schedule)) { + const sched = schedule as Record; + const kind = typeof sched.kind === "string" ? 
sched.kind.trim().toLowerCase() : ""; + if (!kind && ("at" in sched || "atMs" in sched)) { + sched.kind = "at"; + mutated = true; + } + const atRaw = typeof sched.at === "string" ? sched.at.trim() : ""; + const atMsRaw = sched.atMs; + const parsedAtMs = + typeof atMsRaw === "number" + ? atMsRaw + : typeof atMsRaw === "string" + ? parseAbsoluteTimeMs(atMsRaw) + : atRaw + ? parseAbsoluteTimeMs(atRaw) + : null; + if (parsedAtMs !== null) { + sched.at = new Date(parsedAtMs).toISOString(); + if ("atMs" in sched) { + delete sched.atMs; + } + mutated = true; + } + + const everyMsRaw = sched.everyMs; + const everyMsCoerced = coerceFiniteScheduleNumber(everyMsRaw); + const everyMs = everyMsCoerced !== undefined ? Math.floor(everyMsCoerced) : null; + if (everyMs !== null && everyMsRaw !== everyMs) { + sched.everyMs = everyMs; + mutated = true; + } + if ((kind === "every" || sched.kind === "every") && everyMs !== null) { + const anchorRaw = sched.anchorMs; + const anchorCoerced = coerceFiniteScheduleNumber(anchorRaw); + const normalizedAnchor = + anchorCoerced !== undefined + ? Math.max(0, Math.floor(anchorCoerced)) + : typeof raw.createdAtMs === "number" && Number.isFinite(raw.createdAtMs) + ? Math.max(0, Math.floor(raw.createdAtMs)) + : typeof raw.updatedAtMs === "number" && Number.isFinite(raw.updatedAtMs) + ? Math.max(0, Math.floor(raw.updatedAtMs)) + : null; + if (normalizedAnchor !== null && anchorRaw !== normalizedAnchor) { + sched.anchorMs = normalizedAnchor; + mutated = true; + } + } + + const exprRaw = typeof sched.expr === "string" ? sched.expr.trim() : ""; + const legacyCronRaw = typeof sched.cron === "string" ? 
sched.cron.trim() : ""; + let normalizedExpr = exprRaw; + if (!normalizedExpr && legacyCronRaw) { + normalizedExpr = legacyCronRaw; + sched.expr = normalizedExpr; + mutated = true; + trackIssue("legacyScheduleCron"); + } + if (typeof sched.expr === "string" && sched.expr !== normalizedExpr) { + sched.expr = normalizedExpr; + mutated = true; + } + if ("cron" in sched) { + delete sched.cron; + mutated = true; + trackIssue("legacyScheduleCron"); + } + if ((kind === "cron" || sched.kind === "cron") && normalizedExpr) { + const explicitStaggerMs = normalizeCronStaggerMs(sched.staggerMs); + const defaultStaggerMs = resolveDefaultCronStaggerMs(normalizedExpr); + const targetStaggerMs = explicitStaggerMs ?? defaultStaggerMs; + if (targetStaggerMs === undefined) { + if ("staggerMs" in sched) { + delete sched.staggerMs; + mutated = true; + } + } else if (sched.staggerMs !== targetStaggerMs) { + sched.staggerMs = targetStaggerMs; + mutated = true; + } + } + } + + const delivery = raw.delivery; + if (delivery && typeof delivery === "object" && !Array.isArray(delivery)) { + const modeRaw = (delivery as { mode?: unknown }).mode; + if (typeof modeRaw === "string") { + const lowered = modeRaw.trim().toLowerCase(); + if (lowered === "deliver") { + (delivery as { mode?: unknown }).mode = "announce"; + mutated = true; + trackIssue("legacyDeliveryMode"); + } + } else if (modeRaw === undefined || modeRaw === null) { + (delivery as { mode?: unknown }).mode = "announce"; + mutated = true; + } + } + + const isolation = raw.isolation; + if (isolation && typeof isolation === "object" && !Array.isArray(isolation)) { + delete raw.isolation; + mutated = true; + } + + const payloadKind = + payloadRecord && typeof payloadRecord.kind === "string" ? payloadRecord.kind : ""; + const rawSessionTarget = typeof raw.sessionTarget === "string" ? 
raw.sessionTarget.trim() : ""; + const loweredSessionTarget = rawSessionTarget.toLowerCase(); + if (loweredSessionTarget === "main" || loweredSessionTarget === "isolated") { + if (raw.sessionTarget !== loweredSessionTarget) { + raw.sessionTarget = loweredSessionTarget; + mutated = true; + } + } else if (loweredSessionTarget.startsWith("session:")) { + const customSessionId = rawSessionTarget.slice(8).trim(); + if (customSessionId) { + const normalizedSessionTarget = `session:${customSessionId}`; + if (raw.sessionTarget !== normalizedSessionTarget) { + raw.sessionTarget = normalizedSessionTarget; + mutated = true; + } + } + } else if (loweredSessionTarget === "current") { + if (raw.sessionTarget !== "isolated") { + raw.sessionTarget = "isolated"; + mutated = true; + } + } else { + const inferredSessionTarget = payloadKind === "agentTurn" ? "isolated" : "main"; + if (raw.sessionTarget !== inferredSessionTarget) { + raw.sessionTarget = inferredSessionTarget; + mutated = true; + } + } + + const sessionTarget = + typeof raw.sessionTarget === "string" ? raw.sessionTarget.trim().toLowerCase() : ""; + const isIsolatedAgentTurn = + sessionTarget === "isolated" || + sessionTarget === "current" || + sessionTarget.startsWith("session:") || + (sessionTarget === "" && payloadKind === "agentTurn"); + const hasDelivery = delivery && typeof delivery === "object" && !Array.isArray(delivery); + const normalizedLegacy = normalizeLegacyDeliveryInput({ + delivery: hasDelivery ? 
(delivery as Record) : null, + payload: payloadRecord, + }); + + if (isIsolatedAgentTurn && payloadKind === "agentTurn") { + if (!hasDelivery && normalizedLegacy.delivery) { + raw.delivery = normalizedLegacy.delivery; + mutated = true; + } else if (!hasDelivery) { + raw.delivery = { mode: "announce" }; + mutated = true; + } else if (normalizedLegacy.mutated && normalizedLegacy.delivery) { + raw.delivery = normalizedLegacy.delivery; + mutated = true; + } + } else if (normalizedLegacy.mutated && normalizedLegacy.delivery) { + raw.delivery = normalizedLegacy.delivery; + mutated = true; + } + } + + return { issues, jobs, mutated }; +} diff --git a/src/cron/types.ts b/src/cron/types.ts index ef5de924b02..02078d15424 100644 --- a/src/cron/types.ts +++ b/src/cron/types.ts @@ -1,3 +1,4 @@ +import type { FailoverReason } from "../agents/pi-embedded-helpers.js"; import type { ChannelId } from "../channels/plugins/types.js"; import type { CronJobBase } from "./types-shared.js"; @@ -12,7 +13,7 @@ export type CronSchedule = staggerMs?: number; }; -export type CronSessionTarget = "main" | "isolated"; +export type CronSessionTarget = "main" | "isolated" | "current" | `session:${string}`; export type CronWakeMode = "next-heartbeat" | "now"; export type CronMessageChannel = ChannelId | "last"; @@ -105,7 +106,6 @@ type CronAgentTurnPayload = { type CronAgentTurnPayloadPatch = { kind: "agentTurn"; } & Partial; - export type CronJobState = { nextRunAtMs?: number; runningAtMs?: number; @@ -115,6 +115,8 @@ export type CronJobState = { /** Back-compat alias for lastRunStatus. */ lastStatus?: "ok" | "error" | "skipped"; lastError?: string; + /** Classified reason for the last error (when available). */ + lastErrorReason?: FailoverReason; lastDurationMs?: number; /** Number of consecutive execution errors (reset on success). Used for backoff. 
*/ consecutiveErrors?: number; diff --git a/src/daemon/inspect.ts b/src/daemon/inspect.ts index 29ac8094ceb..c3025ae8b8a 100644 --- a/src/daemon/inspect.ts +++ b/src/daemon/inspect.ts @@ -7,6 +7,7 @@ import { resolveGatewaySystemdServiceName, resolveGatewayWindowsTaskName, } from "./constants.js"; +import { resolveHomeDir } from "./paths.js"; import { execSchtasks } from "./schtasks-exec.js"; export type ExtraGatewayService = { @@ -49,14 +50,6 @@ export function renderGatewayServiceCleanupHints( } } -function resolveHomeDir(env: Record): string { - const home = env.HOME?.trim() || env.USERPROFILE?.trim(); - if (!home) { - throw new Error("Missing HOME"); - } - return home; -} - type Marker = (typeof EXTRA_MARKERS)[number]; function detectMarker(content: string): Marker | null { diff --git a/src/daemon/launchd-restart-handoff.test.ts b/src/daemon/launchd-restart-handoff.test.ts new file mode 100644 index 00000000000..d685e64d851 --- /dev/null +++ b/src/daemon/launchd-restart-handoff.test.ts @@ -0,0 +1,43 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; + +const spawnMock = vi.hoisted(() => vi.fn()); +const unrefMock = vi.hoisted(() => vi.fn()); + +vi.mock("node:child_process", () => ({ + spawn: (...args: unknown[]) => spawnMock(...args), +})); + +import { scheduleDetachedLaunchdRestartHandoff } from "./launchd-restart-handoff.js"; + +afterEach(() => { + spawnMock.mockReset(); + unrefMock.mockReset(); + spawnMock.mockReturnValue({ pid: 4242, unref: unrefMock }); +}); + +describe("scheduleDetachedLaunchdRestartHandoff", () => { + it("waits for the caller pid before kickstarting launchd", () => { + const env = { + HOME: "/Users/test", + OPENCLAW_PROFILE: "default", + }; + spawnMock.mockReturnValue({ pid: 4242, unref: unrefMock }); + + const result = scheduleDetachedLaunchdRestartHandoff({ + env, + mode: "kickstart", + waitForPid: 9876, + }); + + expect(result).toEqual({ ok: true, pid: 4242 }); + expect(spawnMock).toHaveBeenCalledTimes(1); + const [, 
args] = spawnMock.mock.calls[0] as [string, string[]]; + expect(args[0]).toBe("-c"); + expect(args[2]).toBe("openclaw-launchd-restart-handoff"); + expect(args[6]).toBe("9876"); + expect(args[1]).toContain('while kill -0 "$wait_pid" >/dev/null 2>&1; do'); + expect(args[1]).toContain('launchctl kickstart -k "$service_target" >/dev/null 2>&1'); + expect(args[1]).not.toContain("sleep 1"); + expect(unrefMock).toHaveBeenCalledTimes(1); + }); +}); diff --git a/src/daemon/launchd-restart-handoff.ts b/src/daemon/launchd-restart-handoff.ts new file mode 100644 index 00000000000..ff2fa9dc612 --- /dev/null +++ b/src/daemon/launchd-restart-handoff.ts @@ -0,0 +1,138 @@ +import { spawn } from "node:child_process"; +import os from "node:os"; +import path from "node:path"; +import { resolveGatewayLaunchAgentLabel } from "./constants.js"; + +export type LaunchdRestartHandoffMode = "kickstart" | "start-after-exit"; + +export type LaunchdRestartHandoffResult = { + ok: boolean; + pid?: number; + detail?: string; +}; + +export type LaunchdRestartTarget = { + domain: string; + label: string; + plistPath: string; + serviceTarget: string; +}; + +function resolveGuiDomain(): string { + if (typeof process.getuid !== "function") { + return "gui/501"; + } + return `gui/${process.getuid()}`; +} + +function resolveLaunchAgentLabel(env?: Record): string { + const envLabel = env?.OPENCLAW_LAUNCHD_LABEL?.trim(); + if (envLabel) { + return envLabel; + } + return resolveGatewayLaunchAgentLabel(env?.OPENCLAW_PROFILE); +} + +export function resolveLaunchdRestartTarget( + env: Record = process.env, +): LaunchdRestartTarget { + const domain = resolveGuiDomain(); + const label = resolveLaunchAgentLabel(env); + const home = env.HOME?.trim() || os.homedir(); + const plistPath = path.join(home, "Library", "LaunchAgents", `${label}.plist`); + return { + domain, + label, + plistPath, + serviceTarget: `${domain}/${label}`, + }; +} + +export function isCurrentProcessLaunchdServiceLabel( + label: string, + env: 
NodeJS.ProcessEnv = process.env, +): boolean { + const launchdLabel = + env.LAUNCH_JOB_LABEL?.trim() || env.LAUNCH_JOB_NAME?.trim() || env.XPC_SERVICE_NAME?.trim(); + if (launchdLabel) { + return launchdLabel === label; + } + const configuredLabel = env.OPENCLAW_LAUNCHD_LABEL?.trim(); + return Boolean(configuredLabel && configuredLabel === label); +} + +function buildLaunchdRestartScript(mode: LaunchdRestartHandoffMode): string { + const waitForCallerPid = `wait_pid="$4" +if [ -n "$wait_pid" ] && [ "$wait_pid" -gt 1 ] 2>/dev/null; then + while kill -0 "$wait_pid" >/dev/null 2>&1; do + sleep 0.1 + done +fi +`; + + if (mode === "kickstart") { + return `service_target="$1" +domain="$2" +plist_path="$3" +${waitForCallerPid} +if ! launchctl kickstart -k "$service_target" >/dev/null 2>&1; then + launchctl enable "$service_target" >/dev/null 2>&1 + if launchctl bootstrap "$domain" "$plist_path" >/dev/null 2>&1; then + launchctl kickstart -k "$service_target" >/dev/null 2>&1 || true + fi +fi +`; + } + + return `service_target="$1" +domain="$2" +plist_path="$3" +${waitForCallerPid} +if ! launchctl start "$service_target" >/dev/null 2>&1; then + launchctl enable "$service_target" >/dev/null 2>&1 + if launchctl bootstrap "$domain" "$plist_path" >/dev/null 2>&1; then + launchctl start "$service_target" >/dev/null 2>&1 || launchctl kickstart -k "$service_target" >/dev/null 2>&1 || true + else + launchctl kickstart -k "$service_target" >/dev/null 2>&1 || true + fi +fi +`; +} + +export function scheduleDetachedLaunchdRestartHandoff(params: { + env?: Record; + mode: LaunchdRestartHandoffMode; + waitForPid?: number; +}): LaunchdRestartHandoffResult { + const target = resolveLaunchdRestartTarget(params.env); + const waitForPid = + typeof params.waitForPid === "number" && Number.isFinite(params.waitForPid) + ? 
Math.floor(params.waitForPid) + : 0; + try { + const child = spawn( + "/bin/sh", + [ + "-c", + buildLaunchdRestartScript(params.mode), + "openclaw-launchd-restart-handoff", + target.serviceTarget, + target.domain, + target.plistPath, + String(waitForPid), + ], + { + detached: true, + stdio: "ignore", + env: { ...process.env, ...params.env }, + }, + ); + child.unref(); + return { ok: true, pid: child.pid ?? undefined }; + } catch (err) { + return { + ok: false, + detail: err instanceof Error ? err.message : String(err), + }; + } +} diff --git a/src/daemon/launchd.test.ts b/src/daemon/launchd.test.ts index 99e5e1f933e..4c624cfeec1 100644 --- a/src/daemon/launchd.test.ts +++ b/src/daemon/launchd.test.ts @@ -18,13 +18,38 @@ const state = vi.hoisted(() => ({ listOutput: "", printOutput: "", bootstrapError: "", + kickstartError: "", + kickstartFailuresRemaining: 0, dirs: new Set(), dirModes: new Map(), files: new Map(), fileModes: new Map(), })); +const launchdRestartHandoffState = vi.hoisted(() => ({ + isCurrentProcessLaunchdServiceLabel: vi.fn<(label: string) => boolean>(() => false), + scheduleDetachedLaunchdRestartHandoff: vi.fn((_params: unknown) => ({ ok: true, pid: 7331 })), +})); const defaultProgramArguments = ["node", "-e", "process.exit(0)"]; +function expectLaunchctlEnableBootstrapOrder(env: Record) { + const domain = typeof process.getuid === "function" ? 
`gui/${process.getuid()}` : "gui/501"; + const label = "ai.openclaw.gateway"; + const plistPath = resolveLaunchAgentPlistPath(env); + const serviceId = `${domain}/${label}`; + const enableIndex = state.launchctlCalls.findIndex( + (c) => c[0] === "enable" && c[1] === serviceId, + ); + const bootstrapIndex = state.launchctlCalls.findIndex( + (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, + ); + + expect(enableIndex).toBeGreaterThanOrEqual(0); + expect(bootstrapIndex).toBeGreaterThanOrEqual(0); + expect(enableIndex).toBeLessThan(bootstrapIndex); + + return { domain, label, serviceId, bootstrapIndex }; +} + function normalizeLaunchctlArgs(file: string, args: string[]): string[] { if (file === "launchctl") { return args; @@ -49,10 +74,21 @@ vi.mock("./exec-file.js", () => ({ if (call[0] === "bootstrap" && state.bootstrapError) { return { stdout: "", stderr: state.bootstrapError, code: 1 }; } + if (call[0] === "kickstart" && state.kickstartError && state.kickstartFailuresRemaining > 0) { + state.kickstartFailuresRemaining -= 1; + return { stdout: "", stderr: state.kickstartError, code: 1 }; + } return { stdout: "", stderr: "", code: 0 }; }), })); +vi.mock("./launchd-restart-handoff.js", () => ({ + isCurrentProcessLaunchdServiceLabel: (label: string) => + launchdRestartHandoffState.isCurrentProcessLaunchdServiceLabel(label), + scheduleDetachedLaunchdRestartHandoff: (params: unknown) => + launchdRestartHandoffState.scheduleDetachedLaunchdRestartHandoff(params), +})); + vi.mock("node:fs/promises", async (importOriginal) => { const actual = await importOriginal(); const wrapped = { @@ -109,10 +145,19 @@ beforeEach(() => { state.listOutput = ""; state.printOutput = ""; state.bootstrapError = ""; + state.kickstartError = ""; + state.kickstartFailuresRemaining = 0; state.dirs.clear(); state.dirModes.clear(); state.files.clear(); state.fileModes.clear(); + launchdRestartHandoffState.isCurrentProcessLaunchdServiceLabel.mockReset(); + 
launchdRestartHandoffState.isCurrentProcessLaunchdServiceLabel.mockReturnValue(false); + launchdRestartHandoffState.scheduleDetachedLaunchdRestartHandoff.mockReset(); + launchdRestartHandoffState.scheduleDetachedLaunchdRestartHandoff.mockReturnValue({ + ok: true, + pid: 7331, + }); vi.clearAllMocks(); }); @@ -193,25 +238,12 @@ describe("launchd bootstrap repair", () => { const repair = await repairLaunchAgentBootstrap({ env }); expect(repair.ok).toBe(true); - const domain = typeof process.getuid === "function" ? `gui/${process.getuid()}` : "gui/501"; - const label = "ai.openclaw.gateway"; - const plistPath = resolveLaunchAgentPlistPath(env); - const serviceId = `${domain}/${label}`; - - const enableIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "enable" && c[1] === serviceId, - ); - const bootstrapIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, - ); + const { serviceId, bootstrapIndex } = expectLaunchctlEnableBootstrapOrder(env); const kickstartIndex = state.launchctlCalls.findIndex( (c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === serviceId, ); - expect(enableIndex).toBeGreaterThanOrEqual(0); - expect(bootstrapIndex).toBeGreaterThanOrEqual(0); expect(kickstartIndex).toBeGreaterThanOrEqual(0); - expect(enableIndex).toBeLessThan(bootstrapIndex); expect(bootstrapIndex).toBeLessThan(kickstartIndex); }); }); @@ -224,7 +256,7 @@ describe("launchd install", () => { }; } - it("enables service before bootstrap (clears persisted disabled state)", async () => { + it("enables service before bootstrap without self-restarting the fresh agent", async () => { const env = createDefaultLaunchdEnv(); await installLaunchAgent({ env, @@ -232,20 +264,11 @@ describe("launchd install", () => { programArguments: defaultProgramArguments, }); - const domain = typeof process.getuid === "function" ? 
`gui/${process.getuid()}` : "gui/501"; - const label = "ai.openclaw.gateway"; - const plistPath = resolveLaunchAgentPlistPath(env); - const serviceId = `${domain}/${label}`; - - const enableIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "enable" && c[1] === serviceId, + const { serviceId } = expectLaunchctlEnableBootstrapOrder(env); + const installKickstartIndex = state.launchctlCalls.findIndex( + (c) => c[0] === "kickstart" && c[2] === serviceId, ); - const bootstrapIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, - ); - expect(enableIndex).toBeGreaterThanOrEqual(0); - expect(bootstrapIndex).toBeGreaterThanOrEqual(0); - expect(enableIndex).toBeLessThan(bootstrapIndex); + expect(installKickstartIndex).toBe(-1); }); it("writes TMPDIR to LaunchAgent environment when provided", async () => { @@ -304,73 +327,74 @@ describe("launchd install", () => { expect(state.fileModes.get(plistPath)).toBe(0o644); }); - it("restarts LaunchAgent with bootout-enable-bootstrap-kickstart order", async () => { + it("restarts LaunchAgent with kickstart and no bootout", async () => { const env = createDefaultLaunchdEnv(); - await restartLaunchAgent({ + const result = await restartLaunchAgent({ env, stdout: new PassThrough(), }); const domain = typeof process.getuid === "function" ? 
`gui/${process.getuid()}` : "gui/501"; const label = "ai.openclaw.gateway"; - const plistPath = resolveLaunchAgentPlistPath(env); const serviceId = `${domain}/${label}`; - const bootoutIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootout" && c[1] === serviceId, - ); - const enableIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "enable" && c[1] === serviceId, - ); - const bootstrapIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, - ); - const kickstartIndex = state.launchctlCalls.findIndex( + expect(result).toEqual({ outcome: "completed" }); + expect(state.launchctlCalls).toContainEqual(["kickstart", "-k", serviceId]); + expect(state.launchctlCalls.some((call) => call[0] === "bootout")).toBe(false); + expect(state.launchctlCalls.some((call) => call[0] === "bootstrap")).toBe(false); + }); + + it("falls back to bootstrap when kickstart cannot find the service", async () => { + const env = createDefaultLaunchdEnv(); + state.kickstartError = "Could not find service"; + state.kickstartFailuresRemaining = 1; + + const result = await restartLaunchAgent({ + env, + stdout: new PassThrough(), + }); + + const { serviceId } = expectLaunchctlEnableBootstrapOrder(env); + const kickstartCalls = state.launchctlCalls.filter( (c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === serviceId, ); - expect(bootoutIndex).toBeGreaterThanOrEqual(0); - expect(enableIndex).toBeGreaterThanOrEqual(0); - expect(bootstrapIndex).toBeGreaterThanOrEqual(0); - expect(kickstartIndex).toBeGreaterThanOrEqual(0); - expect(bootoutIndex).toBeLessThan(enableIndex); - expect(enableIndex).toBeLessThan(bootstrapIndex); - expect(bootstrapIndex).toBeLessThan(kickstartIndex); + expect(result).toEqual({ outcome: "completed" }); + expect(kickstartCalls).toHaveLength(2); + expect(state.launchctlCalls.some((call) => call[0] === "bootout")).toBe(false); }); - it("waits for previous launchd pid to exit before bootstrapping", async 
() => { + it("surfaces the original kickstart failure when the service is still loaded", async () => { const env = createDefaultLaunchdEnv(); - state.printOutput = ["state = running", "pid = 4242"].join("\n"); - const killSpy = vi.spyOn(process, "kill"); - killSpy - .mockImplementationOnce(() => true) - .mockImplementationOnce(() => { - const err = new Error("no such process") as NodeJS.ErrnoException; - err.code = "ESRCH"; - throw err; - }); + state.kickstartError = "Input/output error"; + state.kickstartFailuresRemaining = 1; - vi.useFakeTimers(); - try { - const restartPromise = restartLaunchAgent({ + await expect( + restartLaunchAgent({ env, stdout: new PassThrough(), - }); - await vi.advanceTimersByTimeAsync(250); - await restartPromise; - expect(killSpy).toHaveBeenCalledWith(4242, 0); - const domain = typeof process.getuid === "function" ? `gui/${process.getuid()}` : "gui/501"; - const label = "ai.openclaw.gateway"; - const bootoutIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootout" && c[1] === `${domain}/${label}`, - ); - const bootstrapIndex = state.launchctlCalls.findIndex((c) => c[0] === "bootstrap"); - expect(bootoutIndex).toBeGreaterThanOrEqual(0); - expect(bootstrapIndex).toBeGreaterThanOrEqual(0); - expect(bootoutIndex).toBeLessThan(bootstrapIndex); - } finally { - vi.useRealTimers(); - killSpy.mockRestore(); - } + }), + ).rejects.toThrow("launchctl kickstart failed: Input/output error"); + + expect(state.launchctlCalls.some((call) => call[0] === "enable")).toBe(false); + expect(state.launchctlCalls.some((call) => call[0] === "bootstrap")).toBe(false); + }); + + it("hands restart off to a detached helper when invoked from the current LaunchAgent", async () => { + const env = createDefaultLaunchdEnv(); + launchdRestartHandoffState.isCurrentProcessLaunchdServiceLabel.mockReturnValue(true); + + const result = await restartLaunchAgent({ + env, + stdout: new PassThrough(), + }); + + expect(result).toEqual({ outcome: "scheduled" }); + 
expect(launchdRestartHandoffState.scheduleDetachedLaunchdRestartHandoff).toHaveBeenCalledWith({ + env, + mode: "kickstart", + waitForPid: process.pid, + }); + expect(state.launchctlCalls).toEqual([]); }); it("shows actionable guidance when launchctl gui domain does not support bootstrap", async () => { diff --git a/src/daemon/launchd.ts b/src/daemon/launchd.ts index 492eb2e4d6e..29d0933558c 100644 --- a/src/daemon/launchd.ts +++ b/src/daemon/launchd.ts @@ -12,6 +12,10 @@ import { buildLaunchAgentPlist as buildLaunchAgentPlistImpl, readLaunchAgentProgramArgumentsFromFile, } from "./launchd-plist.js"; +import { + isCurrentProcessLaunchdServiceLabel, + scheduleDetachedLaunchdRestartHandoff, +} from "./launchd-restart-handoff.js"; import { formatLine, toPosixPath, writeFormattedLines } from "./output.js"; import { resolveGatewayStateDir, resolveHomeDir } from "./paths.js"; import { parseKeyValueOutput } from "./runtime-parse.js"; @@ -23,6 +27,7 @@ import type { GatewayServiceEnvArgs, GatewayServiceInstallArgs, GatewayServiceManageArgs, + GatewayServiceRestartResult, } from "./service-types.js"; const LAUNCH_AGENT_DIR_MODE = 0o755; @@ -115,6 +120,58 @@ function resolveGuiDomain(): string { return `gui/${process.getuid()}`; } +function throwBootstrapGuiSessionError(params: { + detail: string; + domain: string; + actionHint: string; +}) { + throw new Error( + [ + `launchctl bootstrap failed: ${params.detail}`, + `LaunchAgent ${params.actionHint} requires a logged-in macOS GUI session for this user (${params.domain}).`, + "This usually means you are running from SSH/headless context or as the wrong user (including sudo).", + `Fix: sign in to the macOS desktop as the target user and rerun \`${params.actionHint}\`.`, + "Headless deployments should use a dedicated logged-in user session or a custom LaunchDaemon (not shipped): https://docs.openclaw.ai/gateway", + ].join("\n"), + ); +} + +function writeLaunchAgentActionLine( + stdout: NodeJS.WritableStream, + label: string, + 
value: string, +): void { + try { + stdout.write(`${formatLine(label, value)}\n`); + } catch (err: unknown) { + if ((err as NodeJS.ErrnoException)?.code !== "EPIPE") { + throw err; + } + } +} + +async function bootstrapLaunchAgentOrThrow(params: { + domain: string; + serviceTarget: string; + plistPath: string; + actionHint: string; +}) { + await execLaunchctl(["enable", params.serviceTarget]); + const boot = await execLaunchctl(["bootstrap", params.domain, params.plistPath]); + if (boot.code === 0) { + return; + } + const detail = (boot.stderr || boot.stdout).trim(); + if (isUnsupportedGuiDomain(detail)) { + throwBootstrapGuiSessionError({ + detail, + domain: params.domain, + actionHint: params.actionHint, + }); + } + throw new Error(`launchctl bootstrap failed: ${detail}`); +} + async function ensureSecureDirectory(targetPath: string): Promise { await fs.mkdir(targetPath, { recursive: true, mode: LAUNCH_AGENT_DIR_MODE }); try { @@ -352,34 +409,6 @@ function isUnsupportedGuiDomain(detail: string): boolean { ); } -const RESTART_PID_WAIT_TIMEOUT_MS = 10_000; -const RESTART_PID_WAIT_INTERVAL_MS = 200; - -async function sleepMs(ms: number): Promise { - await new Promise((resolve) => { - setTimeout(resolve, ms); - }); -} - -async function waitForPidExit(pid: number): Promise { - if (!Number.isFinite(pid) || pid <= 1) { - return; - } - const deadline = Date.now() + RESTART_PID_WAIT_TIMEOUT_MS; - while (Date.now() < deadline) { - try { - process.kill(pid, 0); - } catch (err) { - const code = (err as NodeJS.ErrnoException).code; - if (code === "ESRCH" || code === "EPERM") { - return; - } - return; - } - await sleepMs(RESTART_PID_WAIT_INTERVAL_MS); - } -} - export async function stopLaunchAgent({ stdout, env }: GatewayServiceControlArgs): Promise { const domain = resolveGuiDomain(); const label = resolveLaunchAgentLabel({ env }); @@ -437,24 +466,15 @@ export async function installLaunchAgent({ await execLaunchctl(["bootout", domain, plistPath]); await 
execLaunchctl(["unload", plistPath]); // launchd can persist "disabled" state even after bootout + plist removal; clear it before bootstrap. - await execLaunchctl(["enable", `${domain}/${label}`]); - const boot = await execLaunchctl(["bootstrap", domain, plistPath]); - if (boot.code !== 0) { - const detail = (boot.stderr || boot.stdout).trim(); - if (isUnsupportedGuiDomain(detail)) { - throw new Error( - [ - `launchctl bootstrap failed: ${detail}`, - `LaunchAgent install requires a logged-in macOS GUI session for this user (${domain}).`, - "This usually means you are running from SSH/headless context or as the wrong user (including sudo).", - "Fix: sign in to the macOS desktop as the target user and rerun `openclaw gateway install --force`.", - "Headless deployments should use a dedicated logged-in user session or a custom LaunchDaemon (not shipped): https://docs.openclaw.ai/gateway", - ].join("\n"), - ); - } - throw new Error(`launchctl bootstrap failed: ${detail}`); - } - await execLaunchctl(["kickstart", "-k", `${domain}/${label}`]); + await bootstrapLaunchAgentOrThrow({ + domain, + serviceTarget: `${domain}/${label}`, + plistPath, + actionHint: "openclaw gateway install --force", + }); + // `bootstrap` already loads RunAtLoad agents. Avoid `kickstart -k` here: + // on slow macOS guests it SIGTERMs the freshly booted gateway and pushes the + // real listener startup past onboarding's health deadline. // Ensure we don't end up writing to a clack spinner line (wizards show progress without a newline). writeFormattedLines( @@ -471,55 +491,51 @@ export async function installLaunchAgent({ export async function restartLaunchAgent({ stdout, env, -}: GatewayServiceControlArgs): Promise { +}: GatewayServiceControlArgs): Promise { const serviceEnv = env ?? 
(process.env as GatewayServiceEnv); const domain = resolveGuiDomain(); const label = resolveLaunchAgentLabel({ env: serviceEnv }); const plistPath = resolveLaunchAgentPlistPath(serviceEnv); + const serviceTarget = `${domain}/${label}`; - const runtime = await execLaunchctl(["print", `${domain}/${label}`]); - const previousPid = - runtime.code === 0 - ? parseLaunchctlPrint(runtime.stdout || runtime.stderr || "").pid - : undefined; - - const stop = await execLaunchctl(["bootout", `${domain}/${label}`]); - if (stop.code !== 0 && !isLaunchctlNotLoaded(stop)) { - throw new Error(`launchctl bootout failed: ${stop.stderr || stop.stdout}`.trim()); - } - if (typeof previousPid === "number") { - await waitForPidExit(previousPid); - } - - // launchd can persist "disabled" state after bootout; clear it before bootstrap - // (matches the same guard in installLaunchAgent). - await execLaunchctl(["enable", `${domain}/${label}`]); - const boot = await execLaunchctl(["bootstrap", domain, plistPath]); - if (boot.code !== 0) { - const detail = (boot.stderr || boot.stdout).trim(); - if (isUnsupportedGuiDomain(detail)) { - throw new Error( - [ - `launchctl bootstrap failed: ${detail}`, - `LaunchAgent restart requires a logged-in macOS GUI session for this user (${domain}).`, - "This usually means you are running from SSH/headless context or as the wrong user (including sudo).", - "Fix: sign in to the macOS desktop as the target user and rerun `openclaw gateway restart`.", - "Headless deployments should use a dedicated logged-in user session or a custom LaunchDaemon (not shipped): https://docs.openclaw.ai/gateway", - ].join("\n"), - ); + // Restart requests issued from inside the managed gateway process tree need a + // detached handoff. A direct `kickstart -k` would terminate the caller before + // it can finish the restart command. 
+ if (isCurrentProcessLaunchdServiceLabel(label)) { + const handoff = scheduleDetachedLaunchdRestartHandoff({ + env: serviceEnv, + mode: "kickstart", + waitForPid: process.pid, + }); + if (!handoff.ok) { + throw new Error(`launchd restart handoff failed: ${handoff.detail ?? "unknown error"}`); } - throw new Error(`launchctl bootstrap failed: ${detail}`); + writeLaunchAgentActionLine(stdout, "Scheduled LaunchAgent restart", serviceTarget); + return { outcome: "scheduled" }; } - const start = await execLaunchctl(["kickstart", "-k", `${domain}/${label}`]); - if (start.code !== 0) { + const start = await execLaunchctl(["kickstart", "-k", serviceTarget]); + if (start.code === 0) { + writeLaunchAgentActionLine(stdout, "Restarted LaunchAgent", serviceTarget); + return { outcome: "completed" }; + } + + if (!isLaunchctlNotLoaded(start)) { throw new Error(`launchctl kickstart failed: ${start.stderr || start.stdout}`.trim()); } - try { - stdout.write(`${formatLine("Restarted LaunchAgent", `${domain}/${label}`)}\n`); - } catch (err: unknown) { - if ((err as NodeJS.ErrnoException)?.code !== "EPIPE") { - throw err; - } + + // If the service was previously booted out, re-register the plist and retry. + await bootstrapLaunchAgentOrThrow({ + domain, + serviceTarget, + plistPath, + actionHint: "openclaw gateway restart", + }); + + const retry = await execLaunchctl(["kickstart", "-k", serviceTarget]); + if (retry.code !== 0) { + throw new Error(`launchctl kickstart failed: ${retry.stderr || retry.stdout}`.trim()); } + writeLaunchAgentActionLine(stdout, "Restarted LaunchAgent", serviceTarget); + return { outcome: "completed" }; } diff --git a/src/daemon/program-args.ts b/src/daemon/program-args.ts index c92065b584e..76bad8fc1ce 100644 --- a/src/daemon/program-args.ts +++ b/src/daemon/program-args.ts @@ -153,7 +153,9 @@ async function resolveBinaryPath(binary: string): Promise { if (binary === "bun") { throw new Error("Bun not found in PATH. 
Install bun: https://bun.sh"); } - throw new Error("Node not found in PATH. Install Node 22+."); + throw new Error( + "Node not found in PATH. Install Node 24 (recommended) or Node 22 LTS (22.16+).", + ); } } diff --git a/src/daemon/runtime-paths.test.ts b/src/daemon/runtime-paths.test.ts index 3b502193a33..8130aa7d4d5 100644 --- a/src/daemon/runtime-paths.test.ts +++ b/src/daemon/runtime-paths.test.ts @@ -56,7 +56,7 @@ describe("resolvePreferredNodePath", () => { const execFile = vi .fn() .mockResolvedValueOnce({ stdout: "18.0.0\n", stderr: "" }) // execPath too old - .mockResolvedValueOnce({ stdout: "22.12.0\n", stderr: "" }); // system node ok + .mockResolvedValueOnce({ stdout: "22.16.0\n", stderr: "" }); // system node ok const result = await resolvePreferredNodePath({ env: {}, @@ -73,7 +73,7 @@ describe("resolvePreferredNodePath", () => { it("ignores execPath when it is not node", async () => { mockNodePathPresent(darwinNode); - const execFile = vi.fn().mockResolvedValue({ stdout: "22.12.0\n", stderr: "" }); + const execFile = vi.fn().mockResolvedValue({ stdout: "22.16.0\n", stderr: "" }); const result = await resolvePreferredNodePath({ env: {}, @@ -93,8 +93,8 @@ describe("resolvePreferredNodePath", () => { it("uses system node when it meets the minimum version", async () => { mockNodePathPresent(darwinNode); - // Node 22.12.0+ is the minimum required version - const execFile = vi.fn().mockResolvedValue({ stdout: "22.12.0\n", stderr: "" }); + // Node 22.16.0+ is the minimum required version + const execFile = vi.fn().mockResolvedValue({ stdout: "22.16.0\n", stderr: "" }); const result = await resolvePreferredNodePath({ env: {}, @@ -111,8 +111,8 @@ describe("resolvePreferredNodePath", () => { it("skips system node when it is too old", async () => { mockNodePathPresent(darwinNode); - // Node 22.11.x is below minimum 22.12.0 - const execFile = vi.fn().mockResolvedValue({ stdout: "22.11.0\n", stderr: "" }); + // Node 22.15.x is below minimum 22.16.0 + const 
execFile = vi.fn().mockResolvedValue({ stdout: "22.15.0\n", stderr: "" }); const result = await resolvePreferredNodePath({ env: {}, @@ -168,7 +168,7 @@ describe("resolveStableNodePath", () => { it("resolves versioned node@22 formula to opt symlink", async () => { mockNodePathPresent("/opt/homebrew/opt/node@22/bin/node"); - const result = await resolveStableNodePath("/opt/homebrew/Cellar/node@22/22.12.0/bin/node"); + const result = await resolveStableNodePath("/opt/homebrew/Cellar/node@22/22.16.0/bin/node"); expect(result).toBe("/opt/homebrew/opt/node@22/bin/node"); }); @@ -218,8 +218,8 @@ describe("resolveSystemNodeInfo", () => { it("returns supported info when version is new enough", async () => { mockNodePathPresent(darwinNode); - // Node 22.12.0+ is the minimum required version - const execFile = vi.fn().mockResolvedValue({ stdout: "22.12.0\n", stderr: "" }); + // Node 22.16.0+ is the minimum required version + const execFile = vi.fn().mockResolvedValue({ stdout: "22.16.0\n", stderr: "" }); const result = await resolveSystemNodeInfo({ env: {}, @@ -229,7 +229,7 @@ describe("resolveSystemNodeInfo", () => { expect(result).toEqual({ path: darwinNode, - version: "22.12.0", + version: "22.16.0", supported: true, }); }); @@ -251,7 +251,7 @@ describe("resolveSystemNodeInfo", () => { "/Users/me/.fnm/node-22/bin/node", ); - expect(warning).toContain("below the required Node 22+"); + expect(warning).toContain("below the required Node 22.16+"); expect(warning).toContain(darwinNode); }); }); diff --git a/src/daemon/runtime-paths.ts b/src/daemon/runtime-paths.ts index a3b737d15bf..486ff5959ad 100644 --- a/src/daemon/runtime-paths.ts +++ b/src/daemon/runtime-paths.ts @@ -151,7 +151,7 @@ export function renderSystemNodeWarning( } const versionLabel = systemNode.version ?? "unknown"; const selectedLabel = selectedNodePath ? 
` Using ${selectedNodePath} for the daemon.` : ""; - return `System Node ${versionLabel} at ${systemNode.path} is below the required Node 22+.${selectedLabel} Install Node 22+ from nodejs.org or Homebrew.`; + return `System Node ${versionLabel} at ${systemNode.path} is below the required Node 22.16+.${selectedLabel} Install Node 24 (recommended) or Node 22 LTS from nodejs.org or Homebrew.`; } export { resolveStableNodePath }; diff --git a/src/daemon/schtasks-exec.test.ts b/src/daemon/schtasks-exec.test.ts new file mode 100644 index 00000000000..52edb573ea7 --- /dev/null +++ b/src/daemon/schtasks-exec.test.ts @@ -0,0 +1,53 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const runCommandWithTimeout = vi.hoisted(() => vi.fn()); + +vi.mock("../process/exec.js", () => ({ + runCommandWithTimeout: (...args: unknown[]) => runCommandWithTimeout(...args), +})); + +const { execSchtasks } = await import("./schtasks-exec.js"); + +beforeEach(() => { + runCommandWithTimeout.mockReset(); +}); + +describe("execSchtasks", () => { + it("runs schtasks with bounded timeouts", async () => { + runCommandWithTimeout.mockResolvedValue({ + stdout: "ok", + stderr: "", + code: 0, + signal: null, + killed: false, + termination: "exit", + }); + + await expect(execSchtasks(["/Query"])).resolves.toEqual({ + stdout: "ok", + stderr: "", + code: 0, + }); + expect(runCommandWithTimeout).toHaveBeenCalledWith(["schtasks", "/Query"], { + timeoutMs: 15_000, + noOutputTimeoutMs: 5_000, + }); + }); + + it("maps a timeout into a non-zero schtasks result", async () => { + runCommandWithTimeout.mockResolvedValue({ + stdout: "", + stderr: "", + code: null, + signal: "SIGTERM", + killed: true, + termination: "timeout", + }); + + await expect(execSchtasks(["/Create"])).resolves.toEqual({ + stdout: "", + stderr: "schtasks timed out after 15000ms", + code: 124, + }); + }); +}); diff --git a/src/daemon/schtasks-exec.ts b/src/daemon/schtasks-exec.ts index e4344d3cd5d..cf27d927341 100644 --- 
a/src/daemon/schtasks-exec.ts +++ b/src/daemon/schtasks-exec.ts @@ -1,7 +1,24 @@ -import { execFileUtf8 } from "./exec-file.js"; +import { runCommandWithTimeout } from "../process/exec.js"; + +const SCHTASKS_TIMEOUT_MS = 15_000; +const SCHTASKS_NO_OUTPUT_TIMEOUT_MS = 5_000; export async function execSchtasks( args: string[], ): Promise<{ stdout: string; stderr: string; code: number }> { - return await execFileUtf8("schtasks", args, { windowsHide: true }); + const result = await runCommandWithTimeout(["schtasks", ...args], { + timeoutMs: SCHTASKS_TIMEOUT_MS, + noOutputTimeoutMs: SCHTASKS_NO_OUTPUT_TIMEOUT_MS, + }); + const timeoutDetail = + result.termination === "timeout" + ? `schtasks timed out after ${SCHTASKS_TIMEOUT_MS}ms` + : result.termination === "no-output-timeout" + ? `schtasks produced no output for ${SCHTASKS_NO_OUTPUT_TIMEOUT_MS}ms` + : ""; + return { + stdout: result.stdout, + stderr: result.stderr || timeoutDetail, + code: typeof result.code === "number" ? result.code : result.killed ? 
124 : 1, + }; } diff --git a/src/daemon/schtasks.startup-fallback.test.ts b/src/daemon/schtasks.startup-fallback.test.ts new file mode 100644 index 00000000000..55e678052f3 --- /dev/null +++ b/src/daemon/schtasks.startup-fallback.test.ts @@ -0,0 +1,228 @@ +import fs from "node:fs/promises"; +import path from "node:path"; +import { PassThrough } from "node:stream"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { quoteCmdScriptArg } from "./cmd-argv.js"; +import "./test-helpers/schtasks-base-mocks.js"; +import { + inspectPortUsage, + killProcessTree, + resetSchtasksBaseMocks, + schtasksResponses, + withWindowsEnv, + writeGatewayScript, +} from "./test-helpers/schtasks-fixtures.js"; +const childUnref = vi.hoisted(() => vi.fn()); +const spawn = vi.hoisted(() => vi.fn(() => ({ unref: childUnref }))); + +vi.mock("node:child_process", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + spawn, + }; +}); + +const { + installScheduledTask, + isScheduledTaskInstalled, + readScheduledTaskRuntime, + restartScheduledTask, + resolveTaskScriptPath, + stopScheduledTask, +} = await import("./schtasks.js"); + +function resolveStartupEntryPath(env: Record) { + return path.join( + env.APPDATA, + "Microsoft", + "Windows", + "Start Menu", + "Programs", + "Startup", + "OpenClaw Gateway.cmd", + ); +} + +async function writeStartupFallbackEntry(env: Record) { + const startupEntryPath = resolveStartupEntryPath(env); + await fs.mkdir(path.dirname(startupEntryPath), { recursive: true }); + await fs.writeFile(startupEntryPath, "@echo off\r\n", "utf8"); + return startupEntryPath; +} + +function expectStartupFallbackSpawn(env: Record) { + expect(spawn).toHaveBeenCalledWith( + "cmd.exe", + ["/d", "/s", "/c", quoteCmdScriptArg(resolveTaskScriptPath(env))], + expect.objectContaining({ detached: true, stdio: "ignore", windowsHide: true }), + ); +} + +function expectGatewayTermination(pid: number) { + if (process.platform 
=== "win32") { + expect(killProcessTree).not.toHaveBeenCalled(); + return; + } + expect(killProcessTree).toHaveBeenCalledWith(pid, { graceMs: 300 }); +} + +function addStartupFallbackMissingResponses( + extraResponses: Array<{ code: number; stdout: string; stderr: string }> = [], +) { + schtasksResponses.push( + { code: 0, stdout: "", stderr: "" }, + { code: 1, stdout: "", stderr: "not found" }, + ...extraResponses, + ); +} +beforeEach(() => { + resetSchtasksBaseMocks(); + spawn.mockClear(); + childUnref.mockClear(); +}); + +afterEach(() => { + vi.restoreAllMocks(); +}); + +describe("Windows startup fallback", () => { + it("falls back to a Startup-folder launcher when schtasks create is denied", async () => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { + schtasksResponses.push( + { code: 0, stdout: "", stderr: "" }, + { code: 5, stdout: "", stderr: "ERROR: Access is denied." }, + ); + + const stdout = new PassThrough(); + let printed = ""; + stdout.on("data", (chunk) => { + printed += String(chunk); + }); + + const result = await installScheduledTask({ + env, + stdout, + programArguments: ["node", "gateway.js", "--port", "18789"], + environment: { OPENCLAW_GATEWAY_PORT: "18789" }, + }); + + const startupEntryPath = resolveStartupEntryPath(env); + const startupScript = await fs.readFile(startupEntryPath, "utf8"); + expect(result.scriptPath).toBe(resolveTaskScriptPath(env)); + expect(startupScript).toContain('start "" /min cmd.exe /d /c'); + expect(startupScript).toContain("gateway.cmd"); + expect(spawn).toHaveBeenCalledWith( + "cmd.exe", + ["/d", "/s", "/c", quoteCmdScriptArg(resolveTaskScriptPath(env))], + expect.objectContaining({ detached: true, stdio: "ignore", windowsHide: true }), + ); + expect(childUnref).toHaveBeenCalled(); + expect(printed).toContain("Installed Windows login item"); + }); + }); + + it("falls back to a Startup-folder launcher when schtasks create hangs", async () => { + await withWindowsEnv("openclaw-win-startup-", 
async ({ env }) => { + schtasksResponses.push( + { code: 0, stdout: "", stderr: "" }, + { code: 124, stdout: "", stderr: "schtasks timed out after 15000ms" }, + ); + + const stdout = new PassThrough(); + await installScheduledTask({ + env, + stdout, + programArguments: ["node", "gateway.js", "--port", "18789"], + environment: { OPENCLAW_GATEWAY_PORT: "18789" }, + }); + + await expect(fs.access(resolveStartupEntryPath(env))).resolves.toBeUndefined(); + expectStartupFallbackSpawn(env); + }); + }); + + it("treats an installed Startup-folder launcher as loaded", async () => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { + addStartupFallbackMissingResponses(); + await writeStartupFallbackEntry(env); + + await expect(isScheduledTaskInstalled({ env })).resolves.toBe(true); + }); + }); + + it("reports runtime from the gateway listener when using the Startup fallback", async () => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { + addStartupFallbackMissingResponses(); + await writeStartupFallbackEntry(env); + inspectPortUsage.mockResolvedValue({ + port: 18789, + status: "busy", + listeners: [{ pid: 4242, command: "node.exe" }], + hints: [], + }); + + await expect(readScheduledTaskRuntime(env)).resolves.toMatchObject({ + status: "running", + pid: 4242, + }); + }); + }); + + it("restarts the Startup fallback by killing the current pid and relaunching the entry", async () => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { + addStartupFallbackMissingResponses([ + { code: 0, stdout: "", stderr: "" }, + { code: 1, stdout: "", stderr: "not found" }, + ]); + await writeStartupFallbackEntry(env); + inspectPortUsage.mockResolvedValue({ + port: 18789, + status: "busy", + listeners: [{ pid: 5151, command: "node.exe" }], + hints: [], + }); + + const stdout = new PassThrough(); + await expect(restartScheduledTask({ env, stdout })).resolves.toEqual({ + outcome: "completed", + }); + expectGatewayTermination(5151); + 
expectStartupFallbackSpawn(env); + }); + }); + + it("kills the Startup fallback runtime even when the CLI env omits the gateway port", async () => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { + schtasksResponses.push({ code: 0, stdout: "", stderr: "" }); + await writeGatewayScript(env); + await writeStartupFallbackEntry(env); + inspectPortUsage + .mockResolvedValueOnce({ + port: 18789, + status: "busy", + listeners: [{ pid: 5151, command: "node.exe" }], + hints: [], + }) + .mockResolvedValueOnce({ + port: 18789, + status: "busy", + listeners: [{ pid: 5151, command: "node.exe" }], + hints: [], + }) + .mockResolvedValueOnce({ + port: 18789, + status: "free", + listeners: [], + hints: [], + }); + + const stdout = new PassThrough(); + const envWithoutPort = { ...env }; + delete envWithoutPort.OPENCLAW_GATEWAY_PORT; + await stopScheduledTask({ env: envWithoutPort, stdout }); + + expectGatewayTermination(5151); + }); + }); +}); diff --git a/src/daemon/schtasks.stop.test.ts b/src/daemon/schtasks.stop.test.ts new file mode 100644 index 00000000000..04e5f1fced1 --- /dev/null +++ b/src/daemon/schtasks.stop.test.ts @@ -0,0 +1,170 @@ +import { PassThrough } from "node:stream"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import "./test-helpers/schtasks-base-mocks.js"; +import { + inspectPortUsage, + killProcessTree, + resetSchtasksBaseMocks, + schtasksCalls, + schtasksResponses, + withWindowsEnv, + writeGatewayScript, +} from "./test-helpers/schtasks-fixtures.js"; +const findVerifiedGatewayListenerPidsOnPortSync = vi.hoisted(() => + vi.fn<(port: number) => number[]>(() => []), +); + +vi.mock("../infra/gateway-processes.js", () => ({ + findVerifiedGatewayListenerPidsOnPortSync: (port: number) => + findVerifiedGatewayListenerPidsOnPortSync(port), +})); + +const { restartScheduledTask, stopScheduledTask } = await import("./schtasks.js"); +const GATEWAY_PORT = 18789; +const SUCCESS_RESPONSE = { code: 0, stdout: "", stderr: 
"" } as const; + +function pushSuccessfulSchtasksResponses(count: number) { + for (let i = 0; i < count; i += 1) { + schtasksResponses.push({ ...SUCCESS_RESPONSE }); + } +} + +function freePortUsage() { + return { + port: GATEWAY_PORT, + status: "free" as const, + listeners: [], + hints: [], + }; +} + +function busyPortUsage( + pid: number, + options: { + command?: string; + commandLine?: string; + } = {}, +) { + return { + port: GATEWAY_PORT, + status: "busy" as const, + listeners: [ + { + pid, + command: options.command ?? "node.exe", + ...(options.commandLine ? { commandLine: options.commandLine } : {}), + }, + ], + hints: [], + }; +} + +function expectGatewayTermination(pid: number) { + if (process.platform === "win32") { + expect(killProcessTree).not.toHaveBeenCalled(); + return; + } + expect(killProcessTree).toHaveBeenCalledWith(pid, { graceMs: 300 }); +} + +async function withPreparedGatewayTask( + run: (context: { env: Record; stdout: PassThrough }) => Promise, +) { + await withWindowsEnv("openclaw-win-stop-", async ({ env }) => { + await writeGatewayScript(env, GATEWAY_PORT); + const stdout = new PassThrough(); + await run({ env, stdout }); + }); +} + +beforeEach(() => { + resetSchtasksBaseMocks(); + findVerifiedGatewayListenerPidsOnPortSync.mockReset(); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([]); + inspectPortUsage.mockResolvedValue(freePortUsage()); +}); + +afterEach(() => { + vi.restoreAllMocks(); +}); + +describe("Scheduled Task stop/restart cleanup", () => { + it("kills lingering verified gateway listeners after schtasks stop", async () => { + await withPreparedGatewayTask(async ({ env, stdout }) => { + pushSuccessfulSchtasksResponses(3); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4242]); + inspectPortUsage + .mockResolvedValueOnce(busyPortUsage(4242)) + .mockResolvedValueOnce(freePortUsage()); + + await stopScheduledTask({ env, stdout }); + + 
expect(findVerifiedGatewayListenerPidsOnPortSync).toHaveBeenCalledWith(GATEWAY_PORT); + expectGatewayTermination(4242); + expect(inspectPortUsage).toHaveBeenCalledTimes(2); + }); + }); + + it("force-kills remaining busy port listeners when the first stop pass does not free the port", async () => { + await withPreparedGatewayTask(async ({ env, stdout }) => { + pushSuccessfulSchtasksResponses(3); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4242]); + inspectPortUsage.mockResolvedValueOnce(busyPortUsage(4242)); + for (let i = 0; i < 20; i += 1) { + inspectPortUsage.mockResolvedValueOnce(busyPortUsage(4242)); + } + inspectPortUsage + .mockResolvedValueOnce(busyPortUsage(5252)) + .mockResolvedValueOnce(freePortUsage()); + + await stopScheduledTask({ env, stdout }); + + if (process.platform !== "win32") { + expect(killProcessTree).toHaveBeenNthCalledWith(1, 4242, { graceMs: 300 }); + expect(killProcessTree).toHaveBeenNthCalledWith(2, expect.any(Number), { graceMs: 300 }); + } else { + expect(killProcessTree).not.toHaveBeenCalled(); + } + expect(inspectPortUsage.mock.calls.length).toBeGreaterThanOrEqual(22); + }); + }); + + it("falls back to inspected gateway listeners when sync verification misses on Windows", async () => { + await withPreparedGatewayTask(async ({ env, stdout }) => { + pushSuccessfulSchtasksResponses(3); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([]); + inspectPortUsage + .mockResolvedValueOnce( + busyPortUsage(6262, { + commandLine: + '"C:\\Program Files\\nodejs\\node.exe" "C:\\Users\\steipete\\AppData\\Roaming\\npm\\node_modules\\openclaw\\dist\\index.js" gateway --port 18789', + }), + ) + .mockResolvedValueOnce(freePortUsage()); + + await stopScheduledTask({ env, stdout }); + + expectGatewayTermination(6262); + expect(inspectPortUsage).toHaveBeenCalledTimes(2); + }); + }); + + it("kills lingering verified gateway listeners and waits for port release before restart", async () => { + await withPreparedGatewayTask(async 
({ env, stdout }) => { + pushSuccessfulSchtasksResponses(4); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([5151]); + inspectPortUsage + .mockResolvedValueOnce(busyPortUsage(5151)) + .mockResolvedValueOnce(freePortUsage()); + + await expect(restartScheduledTask({ env, stdout })).resolves.toEqual({ + outcome: "completed", + }); + + expect(findVerifiedGatewayListenerPidsOnPortSync).toHaveBeenCalledWith(GATEWAY_PORT); + expectGatewayTermination(5151); + expect(inspectPortUsage).toHaveBeenCalledTimes(2); + expect(schtasksCalls.at(-1)).toEqual(["/Run", "/TN", "OpenClaw Gateway"]); + }); + }); +}); diff --git a/src/daemon/schtasks.test.ts b/src/daemon/schtasks.test.ts index 4b45445f727..633df0fee7e 100644 --- a/src/daemon/schtasks.test.ts +++ b/src/daemon/schtasks.test.ts @@ -179,6 +179,7 @@ describe("readScheduledTaskCommand", () => { const result = await readScheduledTaskCommand(env); expect(result).toEqual({ programArguments: ["C:/Program Files/Node/node.exe", "gateway.js"], + sourcePath: resolveTaskScriptPath(env), }); }, ); @@ -222,6 +223,7 @@ describe("readScheduledTaskCommand", () => { NODE_ENV: "production", OPENCLAW_PORT: "18789", }, + sourcePath: resolveTaskScriptPath(env), }); }, ); @@ -245,6 +247,7 @@ describe("readScheduledTaskCommand", () => { "--port", "18789", ], + sourcePath: resolveTaskScriptPath(env), }); }, ); @@ -268,6 +271,7 @@ describe("readScheduledTaskCommand", () => { "--port", "18789", ], + sourcePath: resolveTaskScriptPath(env), }); }, ); @@ -283,6 +287,7 @@ describe("readScheduledTaskCommand", () => { const result = await readScheduledTaskCommand(env); expect(result).toEqual({ programArguments: ["node", "gateway.js", "--from-state-dir"], + sourcePath: resolveTaskScriptPath(env), }); }, ); diff --git a/src/daemon/schtasks.ts b/src/daemon/schtasks.ts index af09d2ca564..2216e93bfd9 100644 --- a/src/daemon/schtasks.ts +++ b/src/daemon/schtasks.ts @@ -1,5 +1,11 @@ +import { spawn, spawnSync } from "node:child_process"; import fs from 
"node:fs/promises"; import path from "node:path"; +import { isGatewayArgv } from "../infra/gateway-process-argv.js"; +import { findVerifiedGatewayListenerPidsOnPortSync } from "../infra/gateway-processes.js"; +import { inspectPortUsage } from "../infra/ports.js"; +import { killProcessTree } from "../process/kill-tree.js"; +import { sleep } from "../utils.js"; import { parseCmdScriptCommandLine, quoteCmdScriptArg } from "./cmd-argv.js"; import { assertNoCmdLineBreak, parseCmdSetAssignment, renderCmdSetAssignment } from "./cmd-set.js"; import { resolveGatewayServiceDescription, resolveGatewayWindowsTaskName } from "./constants.js"; @@ -16,6 +22,7 @@ import type { GatewayServiceInstallArgs, GatewayServiceManageArgs, GatewayServiceRenderArgs, + GatewayServiceRestartResult, } from "./service-types.js"; function resolveTaskName(env: GatewayServiceEnv): string { @@ -26,6 +33,15 @@ function resolveTaskName(env: GatewayServiceEnv): string { return resolveGatewayWindowsTaskName(env.OPENCLAW_PROFILE); } +function shouldFallbackToStartupEntry(params: { code: number; detail: string }): boolean { + return ( + /access is denied/i.test(params.detail) || + params.code === 124 || + /schtasks timed out/i.test(params.detail) || + /schtasks produced no output/i.test(params.detail) + ); +} + export function resolveTaskScriptPath(env: GatewayServiceEnv): string { const override = env.OPENCLAW_TASK_SCRIPT?.trim(); if (override) { @@ -36,6 +52,36 @@ export function resolveTaskScriptPath(env: GatewayServiceEnv): string { return path.join(stateDir, scriptName); } +function resolveWindowsStartupDir(env: GatewayServiceEnv): string { + const appData = env.APPDATA?.trim(); + if (appData) { + return path.join(appData, "Microsoft", "Windows", "Start Menu", "Programs", "Startup"); + } + const home = env.USERPROFILE?.trim() || env.HOME?.trim(); + if (!home) { + throw new Error("Windows startup folder unavailable: APPDATA/USERPROFILE not set"); + } + return path.join( + home, + "AppData", + 
"Roaming", + "Microsoft", + "Windows", + "Start Menu", + "Programs", + "Startup", + ); +} + +function sanitizeWindowsFilename(value: string): string { + return value.replace(/[<>:"/\\|?*]/g, "_").replace(/\p{Cc}/gu, "_"); +} + +function resolveStartupEntryPath(env: GatewayServiceEnv): string { + const taskName = resolveTaskName(env); + return path.join(resolveWindowsStartupDir(env), `${sanitizeWindowsFilename(taskName)}.cmd`); +} + // `/TR` is parsed by schtasks itself, while the generated `gateway.cmd` line is parsed by cmd.exe. // Keep their quoting strategies separate so each parser gets the encoding it expects. function quoteSchtasksArg(value: string): string { @@ -102,6 +148,7 @@ export async function readScheduledTaskCommand( programArguments: parseCmdScriptCommandLine(commandLine), ...(workingDirectory ? { workingDirectory } : {}), ...(Object.keys(environment).length > 0 ? { environment } : {}), + sourcePath: scriptPath, }; } catch { return null; @@ -114,6 +161,12 @@ export type ScheduledTaskInfo = { lastRunResult?: string; }; +function hasListenerPid( + listener: T, +): listener is T & { pid: number } { + return typeof listener.pid === "number"; +} + export function parseSchtasksQuery(output: string): ScheduledTaskInfo { const entries = parseKeyValueOutput(output, ":"); const info: ScheduledTaskInfo = {}; @@ -210,6 +263,17 @@ function buildTaskScript({ return `${lines.join("\r\n")}\r\n`; } +function buildStartupLauncherScript(params: { description?: string; scriptPath: string }): string { + const lines = ["@echo off"]; + const trimmedDescription = params.description?.trim(); + if (trimmedDescription) { + assertNoCmdLineBreak(trimmedDescription, "Startup launcher description"); + lines.push(`rem ${trimmedDescription}`); + } + lines.push(`start "" /min cmd.exe /d /c ${quoteCmdScriptArg(params.scriptPath)}`); + return `${lines.join("\r\n")}\r\n`; +} + async function assertSchtasksAvailable() { const res = await execSchtasks(["/Query"]); if (res.code === 0) { 
@@ -219,6 +283,270 @@ async function assertSchtasksAvailable() { throw new Error(`schtasks unavailable: ${detail || "unknown error"}`.trim()); } +async function isStartupEntryInstalled(env: GatewayServiceEnv): Promise { + try { + await fs.access(resolveStartupEntryPath(env)); + return true; + } catch { + return false; + } +} + +async function isRegisteredScheduledTask(env: GatewayServiceEnv): Promise { + const taskName = resolveTaskName(env); + const res = await execSchtasks(["/Query", "/TN", taskName]).catch(() => ({ + code: 1, + stdout: "", + stderr: "", + })); + return res.code === 0; +} + +function launchFallbackTaskScript(scriptPath: string): void { + const child = spawn("cmd.exe", ["/d", "/s", "/c", quoteCmdScriptArg(scriptPath)], { + detached: true, + stdio: "ignore", + windowsHide: true, + }); + child.unref(); +} + +function resolveConfiguredGatewayPort(env: GatewayServiceEnv): number | null { + const raw = env.OPENCLAW_GATEWAY_PORT?.trim(); + if (!raw) { + return null; + } + const parsed = Number.parseInt(raw, 10); + return Number.isFinite(parsed) && parsed > 0 ? parsed : null; +} + +function parsePositivePort(raw: string | undefined): number | null { + const value = raw?.trim(); + if (!value) { + return null; + } + if (!/^\d+$/.test(value)) { + return null; + } + const parsed = Number.parseInt(value, 10); + return Number.isFinite(parsed) && parsed > 0 && parsed <= 65535 ? 
parsed : null; +} + +function parsePortFromProgramArguments(programArguments?: string[]): number | null { + if (!programArguments?.length) { + return null; + } + for (let i = 0; i < programArguments.length; i += 1) { + const arg = programArguments[i]; + if (!arg) { + continue; + } + const inlineMatch = arg.match(/^--port=(\d+)$/); + if (inlineMatch) { + return parsePositivePort(inlineMatch[1]); + } + if (arg === "--port") { + return parsePositivePort(programArguments[i + 1]); + } + } + return null; +} + +async function resolveScheduledTaskPort(env: GatewayServiceEnv): Promise { + const command = await readScheduledTaskCommand(env).catch(() => null); + return ( + parsePortFromProgramArguments(command?.programArguments) ?? + parsePositivePort(command?.environment?.OPENCLAW_GATEWAY_PORT) ?? + resolveConfiguredGatewayPort(env) + ); +} + +async function resolveScheduledTaskGatewayListenerPids(port: number): Promise { + const verified = findVerifiedGatewayListenerPidsOnPortSync(port); + if (verified.length > 0) { + return verified; + } + + const diagnostics = await inspectPortUsage(port).catch(() => null); + if (diagnostics?.status !== "busy") { + return []; + } + + const matchedGatewayPids = Array.from( + new Set( + diagnostics.listeners + .filter( + (listener) => + typeof listener.pid === "number" && + listener.commandLine && + isGatewayArgv(parseCmdScriptCommandLine(listener.commandLine), { + allowGatewayBinary: true, + }), + ) + .map((listener) => listener.pid as number), + ), + ); + if (matchedGatewayPids.length > 0) { + return matchedGatewayPids; + } + + return Array.from( + new Set( + diagnostics.listeners + .map((listener) => listener.pid) + .filter((pid): pid is number => typeof pid === "number" && Number.isFinite(pid) && pid > 0), + ), + ); +} + +async function terminateScheduledTaskGatewayListeners(env: GatewayServiceEnv): Promise { + const port = await resolveScheduledTaskPort(env); + if (!port) { + return []; + } + const pids = await 
resolveScheduledTaskGatewayListenerPids(port); + for (const pid of pids) { + await terminateGatewayProcessTree(pid, 300); + } + return pids; +} + +function isProcessAlive(pid: number): boolean { + try { + process.kill(pid, 0); + return true; + } catch { + return false; + } +} + +async function waitForProcessExit(pid: number, timeoutMs: number): Promise { + const deadline = Date.now() + timeoutMs; + while (Date.now() < deadline) { + if (!isProcessAlive(pid)) { + return true; + } + await sleep(100); + } + return !isProcessAlive(pid); +} + +async function terminateGatewayProcessTree(pid: number, graceMs: number): Promise { + if (process.platform !== "win32") { + killProcessTree(pid, { graceMs }); + return; + } + const taskkillPath = path.join( + process.env.SystemRoot ?? "C:\\Windows", + "System32", + "taskkill.exe", + ); + spawnSync(taskkillPath, ["/T", "/PID", String(pid)], { + stdio: "ignore", + timeout: 5_000, + windowsHide: true, + }); + if (await waitForProcessExit(pid, graceMs)) { + return; + } + spawnSync(taskkillPath, ["/F", "/T", "/PID", String(pid)], { + stdio: "ignore", + timeout: 5_000, + windowsHide: true, + }); + await waitForProcessExit(pid, 5_000); +} + +async function waitForGatewayPortRelease(port: number, timeoutMs = 5_000): Promise { + const deadline = Date.now() + timeoutMs; + while (Date.now() < deadline) { + const diagnostics = await inspectPortUsage(port).catch(() => null); + if (diagnostics?.status === "free") { + return true; + } + await sleep(250); + } + return false; +} + +async function terminateBusyPortListeners(port: number): Promise { + const diagnostics = await inspectPortUsage(port).catch(() => null); + if (diagnostics?.status !== "busy") { + return []; + } + const pids = Array.from( + new Set( + diagnostics.listeners + .map((listener) => listener.pid) + .filter((pid): pid is number => typeof pid === "number" && Number.isFinite(pid) && pid > 0), + ), + ); + for (const pid of pids) { + await terminateGatewayProcessTree(pid, 300); + } 
+ return pids; +} + +async function resolveFallbackRuntime(env: GatewayServiceEnv): Promise { + const port = (await resolveScheduledTaskPort(env)) ?? resolveConfiguredGatewayPort(env); + if (!port) { + return { + status: "unknown", + detail: "Startup-folder login item installed; gateway port unknown.", + }; + } + const diagnostics = await inspectPortUsage(port).catch(() => null); + if (!diagnostics) { + return { + status: "unknown", + detail: `Startup-folder login item installed; could not inspect port ${port}.`, + }; + } + const listener = diagnostics.listeners.find(hasListenerPid); + return { + status: diagnostics.status === "busy" ? "running" : "stopped", + ...(listener?.pid ? { pid: listener.pid } : {}), + detail: + diagnostics.status === "busy" + ? `Startup-folder login item installed; listener detected on port ${port}.` + : `Startup-folder login item installed; no listener detected on port ${port}.`, + }; +} + +async function stopStartupEntry( + env: GatewayServiceEnv, + stdout: NodeJS.WritableStream, +): Promise { + const runtime = await resolveFallbackRuntime(env); + if (typeof runtime.pid === "number" && runtime.pid > 0) { + await terminateGatewayProcessTree(runtime.pid, 300); + } + stdout.write(`${formatLine("Stopped Windows login item", resolveTaskName(env))}\n`); +} + +async function terminateInstalledStartupRuntime(env: GatewayServiceEnv): Promise { + if (!(await isStartupEntryInstalled(env))) { + return; + } + const runtime = await resolveFallbackRuntime(env); + if (typeof runtime.pid === "number" && runtime.pid > 0) { + await terminateGatewayProcessTree(runtime.pid, 300); + } +} + +async function restartStartupEntry( + env: GatewayServiceEnv, + stdout: NodeJS.WritableStream, +): Promise { + const runtime = await resolveFallbackRuntime(env); + if (typeof runtime.pid === "number" && runtime.pid > 0) { + await terminateGatewayProcessTree(runtime.pid, 300); + } + launchFallbackTaskScript(resolveTaskScriptPath(env)); + 
stdout.write(`${formatLine("Restarted Windows login item", resolveTaskName(env))}\n`); + return { outcome: "completed" }; +} + export async function installScheduledTask({ env, stdout, @@ -262,10 +590,23 @@ export async function installScheduledTask({ } if (create.code !== 0) { const detail = create.stderr || create.stdout; - const hint = /access is denied/i.test(detail) - ? " Run PowerShell as Administrator or rerun without installing the daemon." - : ""; - throw new Error(`schtasks create failed: ${detail}${hint}`.trim()); + if (shouldFallbackToStartupEntry({ code: create.code, detail })) { + const startupEntryPath = resolveStartupEntryPath(env); + await fs.mkdir(path.dirname(startupEntryPath), { recursive: true }); + const launcher = buildStartupLauncherScript({ description: taskDescription, scriptPath }); + await fs.writeFile(startupEntryPath, launcher, "utf8"); + launchFallbackTaskScript(scriptPath); + writeFormattedLines( + stdout, + [ + { label: "Installed Windows login item", value: startupEntryPath }, + { label: "Task script", value: scriptPath }, + ], + { leadingBlankLine: true }, + ); + return { scriptPath }; + } + throw new Error(`schtasks create failed: ${detail}`.trim()); } await execSchtasks(["/Run", "/TN", taskName]); @@ -287,7 +628,16 @@ export async function uninstallScheduledTask({ }: GatewayServiceManageArgs): Promise { await assertSchtasksAvailable(); const taskName = resolveTaskName(env); - await execSchtasks(["/Delete", "/F", "/TN", taskName]); + const taskInstalled = await isRegisteredScheduledTask(env).catch(() => false); + if (taskInstalled) { + await execSchtasks(["/Delete", "/F", "/TN", taskName]); + } + + const startupEntryPath = resolveStartupEntryPath(env); + try { + await fs.unlink(startupEntryPath); + stdout.write(`${formatLine("Removed Windows login item", startupEntryPath)}\n`); + } catch {} const scriptPath = resolveTaskScriptPath(env); try { @@ -304,34 +654,90 @@ function isTaskNotRunning(res: { stdout: string; stderr: string; 
code: number }) } export async function stopScheduledTask({ stdout, env }: GatewayServiceControlArgs): Promise { - await assertSchtasksAvailable(); - const taskName = resolveTaskName(env ?? (process.env as GatewayServiceEnv)); + const effectiveEnv = env ?? (process.env as GatewayServiceEnv); + try { + await assertSchtasksAvailable(); + } catch (err) { + if (await isStartupEntryInstalled(effectiveEnv)) { + await stopStartupEntry(effectiveEnv, stdout); + return; + } + throw err; + } + if (!(await isRegisteredScheduledTask(effectiveEnv))) { + if (await isStartupEntryInstalled(effectiveEnv)) { + await stopStartupEntry(effectiveEnv, stdout); + return; + } + } + const taskName = resolveTaskName(effectiveEnv); const res = await execSchtasks(["/End", "/TN", taskName]); if (res.code !== 0 && !isTaskNotRunning(res)) { throw new Error(`schtasks end failed: ${res.stderr || res.stdout}`.trim()); } + const stopPort = await resolveScheduledTaskPort(effectiveEnv); + await terminateScheduledTaskGatewayListeners(effectiveEnv); + await terminateInstalledStartupRuntime(effectiveEnv); + if (stopPort) { + const released = await waitForGatewayPortRelease(stopPort); + if (!released) { + await terminateBusyPortListeners(stopPort); + const releasedAfterForce = await waitForGatewayPortRelease(stopPort, 2_000); + if (!releasedAfterForce) { + throw new Error(`gateway port ${stopPort} is still busy after stop`); + } + } + } stdout.write(`${formatLine("Stopped Scheduled Task", taskName)}\n`); } export async function restartScheduledTask({ stdout, env, -}: GatewayServiceControlArgs): Promise { - await assertSchtasksAvailable(); - const taskName = resolveTaskName(env ?? (process.env as GatewayServiceEnv)); +}: GatewayServiceControlArgs): Promise { + const effectiveEnv = env ?? 
(process.env as GatewayServiceEnv); + try { + await assertSchtasksAvailable(); + } catch (err) { + if (await isStartupEntryInstalled(effectiveEnv)) { + return await restartStartupEntry(effectiveEnv, stdout); + } + throw err; + } + if (!(await isRegisteredScheduledTask(effectiveEnv))) { + if (await isStartupEntryInstalled(effectiveEnv)) { + return await restartStartupEntry(effectiveEnv, stdout); + } + } + const taskName = resolveTaskName(effectiveEnv); await execSchtasks(["/End", "/TN", taskName]); + const restartPort = await resolveScheduledTaskPort(effectiveEnv); + await terminateScheduledTaskGatewayListeners(effectiveEnv); + await terminateInstalledStartupRuntime(effectiveEnv); + if (restartPort) { + const released = await waitForGatewayPortRelease(restartPort); + if (!released) { + await terminateBusyPortListeners(restartPort); + const releasedAfterForce = await waitForGatewayPortRelease(restartPort, 2_000); + if (!releasedAfterForce) { + throw new Error(`gateway port ${restartPort} is still busy before restart`); + } + } + } const res = await execSchtasks(["/Run", "/TN", taskName]); if (res.code !== 0) { throw new Error(`schtasks run failed: ${res.stderr || res.stdout}`.trim()); } stdout.write(`${formatLine("Restarted Scheduled Task", taskName)}\n`); + return { outcome: "completed" }; } export async function isScheduledTaskInstalled(args: GatewayServiceEnvArgs): Promise { - await assertSchtasksAvailable(); - const taskName = resolveTaskName(args.env ?? (process.env as GatewayServiceEnv)); - const res = await execSchtasks(["/Query", "/TN", taskName]); - return res.code === 0; + const effectiveEnv = args.env ?? 
(process.env as GatewayServiceEnv); + if (await isRegisteredScheduledTask(effectiveEnv)) { + return true; + } + return await isStartupEntryInstalled(effectiveEnv); } export async function readScheduledTaskRuntime( @@ -340,6 +746,9 @@ export async function readScheduledTaskRuntime( try { await assertSchtasksAvailable(); } catch (err) { + if (await isStartupEntryInstalled(env)) { + return await resolveFallbackRuntime(env); + } return { status: "unknown", detail: String(err), @@ -348,6 +757,9 @@ export async function readScheduledTaskRuntime( const taskName = resolveTaskName(env); const res = await execSchtasks(["/Query", "/TN", taskName, "/V", "/FO", "LIST"]); if (res.code !== 0) { + if (await isStartupEntryInstalled(env)) { + return await resolveFallbackRuntime(env); + } const detail = (res.stderr || res.stdout).trim(); const missing = detail.toLowerCase().includes("cannot find the file"); return { diff --git a/src/daemon/service-audit.test.ts b/src/daemon/service-audit.test.ts index ffdd0fa526d..f7e87b6a518 100644 --- a/src/daemon/service-audit.test.ts +++ b/src/daemon/service-audit.test.ts @@ -6,6 +6,53 @@ import { } from "./service-audit.js"; import { buildMinimalServicePath } from "./service-env.js"; +function hasIssue( + audit: Awaited>, + code: (typeof SERVICE_AUDIT_CODES)[keyof typeof SERVICE_AUDIT_CODES], +) { + return audit.issues.some((issue) => issue.code === code); +} + +function createGatewayAudit({ + expectedGatewayToken, + path = "/usr/local/bin:/usr/bin:/bin", + serviceToken, + environmentValueSources, +}: { + expectedGatewayToken?: string; + path?: string; + serviceToken?: string; + environmentValueSources?: Record; +} = {}) { + return auditGatewayServiceConfig({ + env: { HOME: "/tmp" }, + platform: "linux", + expectedGatewayToken, + command: { + programArguments: ["/usr/bin/node", "gateway"], + environment: { + PATH: path, + ...(serviceToken ? { OPENCLAW_GATEWAY_TOKEN: serviceToken } : {}), + }, + ...(environmentValueSources ? 
{ environmentValueSources } : {}), + }, + }); +} + +function expectTokenAudit( + audit: Awaited>, + { + embedded, + mismatch, + }: { + embedded: boolean; + mismatch: boolean; + }, +) { + expect(hasIssue(audit, SERVICE_AUDIT_CODES.gatewayTokenEmbedded)).toBe(embedded); + expect(hasIssue(audit, SERVICE_AUDIT_CODES.gatewayTokenMismatch)).toBe(mismatch); +} + describe("auditGatewayServiceConfig", () => { it("flags bun runtime", async () => { const audit = await auditGatewayServiceConfig({ @@ -66,89 +113,37 @@ describe("auditGatewayServiceConfig", () => { }); it("flags gateway token mismatch when service token is stale", async () => { - const audit = await auditGatewayServiceConfig({ - env: { HOME: "/tmp" }, - platform: "linux", + const audit = await createGatewayAudit({ expectedGatewayToken: "new-token", - command: { - programArguments: ["/usr/bin/node", "gateway"], - environment: { - PATH: "/usr/local/bin:/usr/bin:/bin", - OPENCLAW_GATEWAY_TOKEN: "old-token", - }, - }, + serviceToken: "old-token", }); - expect( - audit.issues.some((issue) => issue.code === SERVICE_AUDIT_CODES.gatewayTokenEmbedded), - ).toBe(true); - expect( - audit.issues.some((issue) => issue.code === SERVICE_AUDIT_CODES.gatewayTokenMismatch), - ).toBe(true); + expectTokenAudit(audit, { embedded: true, mismatch: true }); }); it("flags embedded service token even when it matches config token", async () => { - const audit = await auditGatewayServiceConfig({ - env: { HOME: "/tmp" }, - platform: "linux", + const audit = await createGatewayAudit({ expectedGatewayToken: "new-token", - command: { - programArguments: ["/usr/bin/node", "gateway"], - environment: { - PATH: "/usr/local/bin:/usr/bin:/bin", - OPENCLAW_GATEWAY_TOKEN: "new-token", - }, - }, + serviceToken: "new-token", }); - expect( - audit.issues.some((issue) => issue.code === SERVICE_AUDIT_CODES.gatewayTokenEmbedded), - ).toBe(true); - expect( - audit.issues.some((issue) => issue.code === SERVICE_AUDIT_CODES.gatewayTokenMismatch), - 
).toBe(false); + expectTokenAudit(audit, { embedded: true, mismatch: false }); }); it("does not flag token issues when service token is not embedded", async () => { - const audit = await auditGatewayServiceConfig({ - env: { HOME: "/tmp" }, - platform: "linux", + const audit = await createGatewayAudit({ expectedGatewayToken: "new-token", - command: { - programArguments: ["/usr/bin/node", "gateway"], - environment: { - PATH: "/usr/local/bin:/usr/bin:/bin", - }, - }, }); - expect( - audit.issues.some((issue) => issue.code === SERVICE_AUDIT_CODES.gatewayTokenEmbedded), - ).toBe(false); - expect( - audit.issues.some((issue) => issue.code === SERVICE_AUDIT_CODES.gatewayTokenMismatch), - ).toBe(false); + expectTokenAudit(audit, { embedded: false, mismatch: false }); }); it("does not treat EnvironmentFile-backed tokens as embedded", async () => { - const audit = await auditGatewayServiceConfig({ - env: { HOME: "/tmp" }, - platform: "linux", + const audit = await createGatewayAudit({ expectedGatewayToken: "new-token", - command: { - programArguments: ["/usr/bin/node", "gateway"], - environment: { - PATH: "/usr/local/bin:/usr/bin:/bin", - OPENCLAW_GATEWAY_TOKEN: "old-token", - }, - environmentValueSources: { - OPENCLAW_GATEWAY_TOKEN: "file", - }, + serviceToken: "old-token", + environmentValueSources: { + OPENCLAW_GATEWAY_TOKEN: "file", }, }); - expect( - audit.issues.some((issue) => issue.code === SERVICE_AUDIT_CODES.gatewayTokenEmbedded), - ).toBe(false); - expect( - audit.issues.some((issue) => issue.code === SERVICE_AUDIT_CODES.gatewayTokenMismatch), - ).toBe(false); + expectTokenAudit(audit, { embedded: false, mismatch: false }); }); }); diff --git a/src/daemon/service-audit.ts b/src/daemon/service-audit.ts index 61f5c94f683..8524e79da47 100644 --- a/src/daemon/service-audit.ts +++ b/src/daemon/service-audit.ts @@ -362,7 +362,7 @@ async function auditGatewayRuntime( issues.push({ code: SERVICE_AUDIT_CODES.gatewayRuntimeNodeSystemMissing, message: - "System Node 22+ not 
found; install it before migrating away from version managers.", + "System Node 22 LTS (22.16+) or Node 24 not found; install it before migrating away from version managers.", level: "recommended", }); } diff --git a/src/daemon/service-types.ts b/src/daemon/service-types.ts index ae7d8d1a28f..202930bd6ce 100644 --- a/src/daemon/service-types.ts +++ b/src/daemon/service-types.ts @@ -19,6 +19,8 @@ export type GatewayServiceControlArgs = { env?: GatewayServiceEnv; }; +export type GatewayServiceRestartResult = { outcome: "completed" } | { outcome: "scheduled" }; + export type GatewayServiceEnvArgs = { env?: GatewayServiceEnv; }; diff --git a/src/daemon/service.test.ts b/src/daemon/service.test.ts index 19811e49699..ea2c53e8e1a 100644 --- a/src/daemon/service.test.ts +++ b/src/daemon/service.test.ts @@ -1,5 +1,5 @@ import { afterEach, describe, expect, it } from "vitest"; -import { resolveGatewayService } from "./service.js"; +import { describeGatewayServiceRestart, resolveGatewayService } from "./service.js"; const originalPlatformDescriptor = Object.getOwnPropertyDescriptor(process, "platform"); @@ -37,4 +37,13 @@ describe("resolveGatewayService", () => { setPlatform("aix"); expect(() => resolveGatewayService()).toThrow("Gateway service install not supported on aix"); }); + + it("describes scheduled restart handoffs consistently", () => { + expect(describeGatewayServiceRestart("Gateway", { outcome: "scheduled" })).toEqual({ + scheduled: true, + daemonActionResult: "scheduled", + message: "restart scheduled, gateway will restart momentarily", + progressMessage: "Gateway service restart scheduled.", + }); + }); }); diff --git a/src/daemon/service.ts b/src/daemon/service.ts index 9685ed1ece5..8083ce4b5e1 100644 --- a/src/daemon/service.ts +++ b/src/daemon/service.ts @@ -24,6 +24,7 @@ import type { GatewayServiceEnvArgs, GatewayServiceInstallArgs, GatewayServiceManageArgs, + GatewayServiceRestartResult, } from "./service-types.js"; import { installSystemdService, @@ -41,6 
+42,7 @@ export type { GatewayServiceEnvArgs, GatewayServiceInstallArgs, GatewayServiceManageArgs, + GatewayServiceRestartResult, } from "./service-types.js"; function ignoreInstallResult( @@ -58,12 +60,37 @@ export type GatewayService = { install: (args: GatewayServiceInstallArgs) => Promise; uninstall: (args: GatewayServiceManageArgs) => Promise; stop: (args: GatewayServiceControlArgs) => Promise; - restart: (args: GatewayServiceControlArgs) => Promise; + restart: (args: GatewayServiceControlArgs) => Promise; isLoaded: (args: GatewayServiceEnvArgs) => Promise; readCommand: (env: GatewayServiceEnv) => Promise; readRuntime: (env: GatewayServiceEnv) => Promise; }; +export function describeGatewayServiceRestart( + serviceNoun: string, + result: GatewayServiceRestartResult, +): { + scheduled: boolean; + daemonActionResult: "restarted" | "scheduled"; + message: string; + progressMessage: string; +} { + if (result.outcome === "scheduled") { + return { + scheduled: true, + daemonActionResult: "scheduled", + message: `restart scheduled, ${serviceNoun.toLowerCase()} will restart momentarily`, + progressMessage: `${serviceNoun} service restart scheduled.`, + }; + } + return { + scheduled: false, + daemonActionResult: "restarted", + message: `${serviceNoun} service restarted.`, + progressMessage: `${serviceNoun} service restarted.`, + }; +} + type SupportedGatewayServicePlatform = "darwin" | "linux" | "win32"; const GATEWAY_SERVICE_REGISTRY: Record = { diff --git a/src/daemon/systemd.test.ts b/src/daemon/systemd.test.ts index 1d72adaaf43..0041107264a 100644 --- a/src/daemon/systemd.test.ts +++ b/src/daemon/systemd.test.ts @@ -25,6 +25,10 @@ type ExecFileError = Error & { code?: string | number; }; +const TEST_SERVICE_HOME = "/home/test"; +const TEST_MANAGED_HOME = "/tmp/openclaw-test-home"; +const GATEWAY_SERVICE = "openclaw-gateway.service"; + const createExecFileError = ( message: string, options: { stderr?: string; code?: string | number } = {}, @@ -58,6 +62,48 @@ 
function pathLikeToString(pathname: unknown): string { return ""; } +function assertUserSystemctlArgs(args: string[], ...command: string[]) { + expect(args).toEqual(["--user", ...command]); +} + +function assertMachineUserSystemctlArgs(args: string[], user: string, ...command: string[]) { + expect(args).toEqual(["--machine", `${user}@`, "--user", ...command]); +} + +async function readManagedServiceEnabled(env: NodeJS.ProcessEnv = { HOME: TEST_MANAGED_HOME }) { + const { isSystemdServiceEnabled } = await import("./systemd.js"); + vi.spyOn(fs, "access").mockResolvedValue(undefined); + return isSystemdServiceEnabled({ env }); +} + +function mockReadGatewayServiceFile( + unitLines: string[], + extraFiles: Record = {}, +) { + return vi.spyOn(fs, "readFile").mockImplementation(async (pathname) => { + const pathValue = pathLikeToString(pathname); + if (pathValue.endsWith(`/${GATEWAY_SERVICE}`)) { + return unitLines.join("\n"); + } + const extraFile = extraFiles[pathValue]; + if (typeof extraFile === "string") { + return extraFile; + } + if (extraFile instanceof Error) { + throw extraFile; + } + throw new Error(`unexpected readFile path: ${pathValue}`); + }); +} + +async function expectExecStartWithoutEnvironment(envFileLine: string) { + mockReadGatewayServiceFile(["[Service]", "ExecStart=/usr/bin/openclaw gateway run", envFileLine]); + + const command = await readSystemdServiceExecStart({ HOME: TEST_SERVICE_HOME }); + expect(command?.programArguments).toEqual(["/usr/bin/openclaw", "gateway", "run"]); + expect(command?.environment).toBeUndefined(); +} + const assertRestartSuccess = async (env: NodeJS.ProcessEnv) => { const { write, stdout } = createWritableStreamMock(); await restartSystemdService({ stdout, env }); @@ -118,24 +164,18 @@ describe("systemd availability", () => { }); describe("isSystemdServiceEnabled", () => { - const mockManagedUnitPresent = () => { - vi.spyOn(fs, "access").mockResolvedValue(undefined); - }; - beforeEach(() => { vi.restoreAllMocks(); 
execFileMock.mockReset(); }); it("returns false when systemctl is not present", async () => { - const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); execFileMock.mockImplementation((_cmd, _args, _opts, cb) => { const err = new Error("spawn systemctl EACCES") as Error & { code?: string }; err.code = "EACCES"; cb(err, "", ""); }); - const result = await isSystemdServiceEnabled({ env: { HOME: "/tmp/openclaw-test-home" } }); + const result = await readManagedServiceEnabled(); expect(result).toBe(false); }); @@ -152,55 +192,45 @@ describe("isSystemdServiceEnabled", () => { }); it("calls systemctl is-enabled when systemctl is present", async () => { - const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); execFileMock.mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "is-enabled", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "is-enabled", GATEWAY_SERVICE); cb(null, "enabled", ""); }); - const result = await isSystemdServiceEnabled({ env: { HOME: "/tmp/openclaw-test-home" } }); + const result = await readManagedServiceEnabled(); expect(result).toBe(true); }); it("returns false when systemctl reports disabled", async () => { - const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); execFileMock.mockImplementationOnce((_cmd, _args, _opts, cb) => { const err = new Error("disabled") as Error & { code?: number }; err.code = 1; cb(err, "disabled", ""); }); - const result = await isSystemdServiceEnabled({ env: { HOME: "/tmp/openclaw-test-home" } }); + const result = await readManagedServiceEnabled(); expect(result).toBe(false); }); it("returns false for the WSL2 Ubuntu 24.04 wrapper-only is-enabled failure", async () => { - const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); execFileMock.mockImplementationOnce((_cmd, args, _opts, cb) => { - 
expect(args).toEqual(["--user", "is-enabled", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "is-enabled", GATEWAY_SERVICE); const err = new Error( - "Command failed: systemctl --user is-enabled openclaw-gateway.service", + `Command failed: systemctl --user is-enabled ${GATEWAY_SERVICE}`, ) as Error & { code?: number }; err.code = 1; cb(err, "", ""); }); - await expect( - isSystemdServiceEnabled({ env: { HOME: "/tmp/openclaw-test-home" } }), - ).rejects.toThrow( - "systemctl is-enabled unavailable: Command failed: systemctl --user is-enabled openclaw-gateway.service", + await expect(readManagedServiceEnabled()).rejects.toThrow( + `systemctl is-enabled unavailable: Command failed: systemctl --user is-enabled ${GATEWAY_SERVICE}`, ); }); it("returns false when is-enabled cannot connect to the user bus without machine fallback", async () => { - const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); vi.spyOn(os, "userInfo").mockImplementationOnce(() => { throw new Error("no user info"); }); execFileMock.mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "is-enabled", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "is-enabled", GATEWAY_SERVICE); cb( createExecFileError("Failed to connect to bus", { stderr: "Failed to connect to bus" }), "", @@ -209,18 +239,14 @@ describe("isSystemdServiceEnabled", () => { }); await expect( - isSystemdServiceEnabled({ - env: { HOME: "/tmp/openclaw-test-home", USER: "", LOGNAME: "" }, - }), + readManagedServiceEnabled({ HOME: TEST_MANAGED_HOME, USER: "", LOGNAME: "" }), ).rejects.toThrow("systemctl is-enabled unavailable: Failed to connect to bus"); }); it("returns false when both direct and machine-scope is-enabled checks report bus unavailability", async () => { - const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); execFileMock .mockImplementationOnce((_cmd, args, _opts, cb) => { - 
expect(args).toEqual(["--user", "is-enabled", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "is-enabled", GATEWAY_SERVICE); cb( createExecFileError("Failed to connect to bus", { stderr: "Failed to connect to bus" }), "", @@ -228,13 +254,7 @@ describe("isSystemdServiceEnabled", () => { ); }) .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual([ - "--machine", - "debian@", - "--user", - "is-enabled", - "openclaw-gateway.service", - ]); + assertMachineUserSystemctlArgs(args, "debian", "is-enabled", GATEWAY_SERVICE); cb( createExecFileError("Failed to connect to user scope bus via local transport", { stderr: @@ -246,32 +266,28 @@ describe("isSystemdServiceEnabled", () => { }); await expect( - isSystemdServiceEnabled({ - env: { HOME: "/tmp/openclaw-test-home", USER: "debian" }, - }), + readManagedServiceEnabled({ HOME: TEST_MANAGED_HOME, USER: "debian" }), ).rejects.toThrow("systemctl is-enabled unavailable: Failed to connect to user scope bus"); }); it("throws when generic wrapper errors report infrastructure failures", async () => { - const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); execFileMock.mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "is-enabled", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "is-enabled", GATEWAY_SERVICE); const err = new Error( - "Command failed: systemctl --user is-enabled openclaw-gateway.service", + `Command failed: systemctl --user is-enabled ${GATEWAY_SERVICE}`, ) as Error & { code?: number }; err.code = 1; cb(err, "", "read-only file system"); }); - await expect( - isSystemdServiceEnabled({ env: { HOME: "/tmp/openclaw-test-home" } }), - ).rejects.toThrow("systemctl is-enabled unavailable: read-only file system"); + await expect(readManagedServiceEnabled()).rejects.toThrow( + "systemctl is-enabled unavailable: read-only file system", + ); }); it("throws when systemctl is-enabled fails for non-state 
errors", async () => { const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); + vi.spyOn(fs, "access").mockResolvedValue(undefined); execFileMock .mockImplementationOnce((_cmd, args, _opts, cb) => { expect(args).toEqual(["--user", "is-enabled", "openclaw-gateway.service"]); @@ -294,7 +310,7 @@ describe("isSystemdServiceEnabled", () => { it("returns false when systemctl is-enabled exits with code 4 (not-found)", async () => { const { isSystemdServiceEnabled } = await import("./systemd.js"); - mockManagedUnitPresent(); + vi.spyOn(fs, "access").mockResolvedValue(undefined); execFileMock.mockImplementationOnce((_cmd, _args, _opts, cb) => { // On Ubuntu 24.04, `systemctl --user is-enabled ` exits with // code 4 and prints "not-found" to stdout when the unit doesn't exist. @@ -463,82 +479,38 @@ describe("readSystemdServiceExecStart", () => { }); it("loads OPENCLAW_GATEWAY_TOKEN from EnvironmentFile", async () => { - const readFileSpy = vi.spyOn(fs, "readFile").mockImplementation(async (pathname) => { - const pathValue = pathLikeToString(pathname); - if (pathValue.endsWith("/openclaw-gateway.service")) { - return [ - "[Service]", - "ExecStart=/usr/bin/openclaw gateway run", - "EnvironmentFile=%h/.openclaw/.env", - ].join("\n"); - } - if (pathValue === "/home/test/.openclaw/.env") { - return "OPENCLAW_GATEWAY_TOKEN=env-file-token\n"; - } - throw new Error(`unexpected readFile path: ${pathValue}`); - }); + const readFileSpy = mockReadGatewayServiceFile( + ["[Service]", "ExecStart=/usr/bin/openclaw gateway run", "EnvironmentFile=%h/.openclaw/.env"], + { [`${TEST_SERVICE_HOME}/.openclaw/.env`]: "OPENCLAW_GATEWAY_TOKEN=env-file-token\n" }, + ); - const command = await readSystemdServiceExecStart({ HOME: "/home/test" }); + const command = await readSystemdServiceExecStart({ HOME: TEST_SERVICE_HOME }); expect(command?.environment?.OPENCLAW_GATEWAY_TOKEN).toBe("env-file-token"); expect(readFileSpy).toHaveBeenCalledTimes(2); }); it("lets 
EnvironmentFile override inline Environment values", async () => { - vi.spyOn(fs, "readFile").mockImplementation(async (pathname) => { - const pathValue = pathLikeToString(pathname); - if (pathValue.endsWith("/openclaw-gateway.service")) { - return [ - "[Service]", - "ExecStart=/usr/bin/openclaw gateway run", - "EnvironmentFile=%h/.openclaw/.env", - 'Environment="OPENCLAW_GATEWAY_TOKEN=inline-token"', - ].join("\n"); - } - if (pathValue === "/home/test/.openclaw/.env") { - return "OPENCLAW_GATEWAY_TOKEN=env-file-token\n"; - } - throw new Error(`unexpected readFile path: ${pathValue}`); - }); + mockReadGatewayServiceFile( + [ + "[Service]", + "ExecStart=/usr/bin/openclaw gateway run", + "EnvironmentFile=%h/.openclaw/.env", + 'Environment="OPENCLAW_GATEWAY_TOKEN=inline-token"', + ], + { [`${TEST_SERVICE_HOME}/.openclaw/.env`]: "OPENCLAW_GATEWAY_TOKEN=env-file-token\n" }, + ); - const command = await readSystemdServiceExecStart({ HOME: "/home/test" }); + const command = await readSystemdServiceExecStart({ HOME: TEST_SERVICE_HOME }); expect(command?.environment?.OPENCLAW_GATEWAY_TOKEN).toBe("env-file-token"); expect(command?.environmentValueSources?.OPENCLAW_GATEWAY_TOKEN).toBe("file"); }); it("ignores missing optional EnvironmentFile entries", async () => { - vi.spyOn(fs, "readFile").mockImplementation(async (pathname) => { - const pathValue = pathLikeToString(pathname); - if (pathValue.endsWith("/openclaw-gateway.service")) { - return [ - "[Service]", - "ExecStart=/usr/bin/openclaw gateway run", - "EnvironmentFile=-%h/.openclaw/missing.env", - ].join("\n"); - } - throw new Error(`missing: ${pathValue}`); - }); - - const command = await readSystemdServiceExecStart({ HOME: "/home/test" }); - expect(command?.programArguments).toEqual(["/usr/bin/openclaw", "gateway", "run"]); - expect(command?.environment).toBeUndefined(); + await expectExecStartWithoutEnvironment("EnvironmentFile=-%h/.openclaw/missing.env"); }); it("keeps parsing when non-optional EnvironmentFile 
entries are missing", async () => { - vi.spyOn(fs, "readFile").mockImplementation(async (pathname) => { - const pathValue = pathLikeToString(pathname); - if (pathValue.endsWith("/openclaw-gateway.service")) { - return [ - "[Service]", - "ExecStart=/usr/bin/openclaw gateway run", - "EnvironmentFile=%h/.openclaw/missing.env", - ].join("\n"); - } - throw new Error(`missing: ${pathValue}`); - }); - - const command = await readSystemdServiceExecStart({ HOME: "/home/test" }); - expect(command?.programArguments).toEqual(["/usr/bin/openclaw", "gateway", "run"]); - expect(command?.environment).toBeUndefined(); + await expectExecStartWithoutEnvironment("EnvironmentFile=%h/.openclaw/missing.env"); }); it("supports multiple EnvironmentFile entries and quoted paths", async () => { @@ -631,7 +603,7 @@ describe("readSystemdServiceExecStart", () => { describe("systemd service control", () => { const assertMachineRestartArgs = (args: string[]) => { - expect(args).toEqual(["--machine", "debian@", "--user", "restart", "openclaw-gateway.service"]); + assertMachineUserSystemctlArgs(args, "debian", "restart", GATEWAY_SERVICE); }; beforeEach(() => { @@ -642,7 +614,7 @@ describe("systemd service control", () => { execFileMock .mockImplementationOnce((_cmd, _args, _opts, cb) => cb(null, "", "")) .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "stop", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "stop", GATEWAY_SERVICE); cb(null, "", ""); }); const write = vi.fn(); @@ -664,7 +636,7 @@ describe("systemd service control", () => { ), ) .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "stop", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "stop", GATEWAY_SERVICE); cb(null, "", ""); }); @@ -678,7 +650,7 @@ describe("systemd service control", () => { execFileMock .mockImplementationOnce((_cmd, _args, _opts, cb) => cb(null, "", "")) .mockImplementationOnce((_cmd, args, _opts, cb) => { - 
expect(args).toEqual(["--user", "restart", "openclaw-gateway-work.service"]); + assertUserSystemctlArgs(args, "restart", "openclaw-gateway-work.service"); cb(null, "", ""); }); await assertRestartSuccess({ OPENCLAW_PROFILE: "work" }); @@ -724,7 +696,7 @@ describe("systemd service control", () => { it("targets the sudo caller's user scope when SUDO_USER is set", async () => { execFileMock .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--machine", "debian@", "--user", "status"]); + assertMachineUserSystemctlArgs(args, "debian", "status"); cb(null, "", ""); }) .mockImplementationOnce((_cmd, args, _opts, cb) => { @@ -737,11 +709,11 @@ describe("systemd service control", () => { it("keeps direct --user scope when SUDO_USER is root", async () => { execFileMock .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "status"]); + assertUserSystemctlArgs(args, "status"); cb(null, "", ""); }) .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "restart", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "restart", GATEWAY_SERVICE); cb(null, "", ""); }); await assertRestartSuccess({ SUDO_USER: "root", USER: "root" }); @@ -750,7 +722,7 @@ describe("systemd service control", () => { it("falls back to machine user scope for restart when user bus env is missing", async () => { execFileMock .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "status"]); + assertUserSystemctlArgs(args, "status"); const err = createExecFileError("Failed to connect to user scope bus", { stderr: "Failed to connect to user scope bus via local transport: $DBUS_SESSION_BUS_ADDRESS and $XDG_RUNTIME_DIR not defined", @@ -758,11 +730,11 @@ describe("systemd service control", () => { cb(err, "", ""); }) .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--machine", "debian@", "--user", "status"]); + assertMachineUserSystemctlArgs(args, 
"debian", "status"); cb(null, "", ""); }) .mockImplementationOnce((_cmd, args, _opts, cb) => { - expect(args).toEqual(["--user", "restart", "openclaw-gateway.service"]); + assertUserSystemctlArgs(args, "restart", GATEWAY_SERVICE); const err = createExecFileError("Failed to connect to user scope bus", { stderr: "Failed to connect to user scope bus", }); diff --git a/src/daemon/systemd.ts b/src/daemon/systemd.ts index bce7593e24e..62ab2dfa146 100644 --- a/src/daemon/systemd.ts +++ b/src/daemon/systemd.ts @@ -20,6 +20,7 @@ import type { GatewayServiceEnvArgs, GatewayServiceInstallArgs, GatewayServiceManageArgs, + GatewayServiceRestartResult, } from "./service-types.js"; import { enableSystemdUserLinger, @@ -570,13 +571,14 @@ export async function stopSystemdService({ export async function restartSystemdService({ stdout, env, -}: GatewayServiceControlArgs): Promise { +}: GatewayServiceControlArgs): Promise { await runSystemdServiceAction({ stdout, env, action: "restart", label: "Restarted systemd service", }); + return { outcome: "completed" }; } export async function isSystemdServiceEnabled(args: GatewayServiceEnvArgs): Promise { diff --git a/src/daemon/test-helpers/schtasks-base-mocks.ts b/src/daemon/test-helpers/schtasks-base-mocks.ts new file mode 100644 index 00000000000..e3f0f950482 --- /dev/null +++ b/src/daemon/test-helpers/schtasks-base-mocks.ts @@ -0,0 +1,22 @@ +import { vi } from "vitest"; +import { + inspectPortUsage, + killProcessTree, + schtasksCalls, + schtasksResponses, +} from "./schtasks-fixtures.js"; + +vi.mock("../schtasks-exec.js", () => ({ + execSchtasks: async (argv: string[]) => { + schtasksCalls.push(argv); + return schtasksResponses.shift() ?? 
{ code: 0, stdout: "", stderr: "" }; + }, +})); + +vi.mock("../../infra/ports.js", () => ({ + inspectPortUsage: (port: number) => inspectPortUsage(port), +})); + +vi.mock("../../process/kill-tree.js", () => ({ + killProcessTree: (pid: number, opts?: { graceMs?: number }) => killProcessTree(pid, opts), +})); diff --git a/src/daemon/test-helpers/schtasks-fixtures.ts b/src/daemon/test-helpers/schtasks-fixtures.ts new file mode 100644 index 00000000000..9755acefae7 --- /dev/null +++ b/src/daemon/test-helpers/schtasks-fixtures.ts @@ -0,0 +1,57 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { vi } from "vitest"; +import type { PortUsage } from "../../infra/ports-types.js"; +import type { killProcessTree as killProcessTreeImpl } from "../../process/kill-tree.js"; +import type { MockFn } from "../../test-utils/vitest-mock-fn.js"; +import { resolveTaskScriptPath } from "../schtasks.js"; + +export const schtasksResponses: Array<{ code: number; stdout: string; stderr: string }> = []; +export const schtasksCalls: string[][] = []; + +export const inspectPortUsage: MockFn<(port: number) => Promise> = vi.fn(); +export const killProcessTree: MockFn = vi.fn(); + +export async function withWindowsEnv( + prefix: string, + run: (params: { tmpDir: string; env: Record }) => Promise, +) { + const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), prefix)); + const env = { + USERPROFILE: tmpDir, + APPDATA: path.join(tmpDir, "AppData", "Roaming"), + OPENCLAW_PROFILE: "default", + OPENCLAW_GATEWAY_PORT: "18789", + }; + try { + await run({ tmpDir, env }); + } finally { + await fs.rm(tmpDir, { recursive: true, force: true }); + } +} + +export function resetSchtasksBaseMocks() { + schtasksResponses.length = 0; + schtasksCalls.length = 0; + inspectPortUsage.mockReset(); + killProcessTree.mockReset(); +} + +export async function writeGatewayScript( + env: Record, + port = Number(env.OPENCLAW_GATEWAY_PORT || "18789"), +) { + const scriptPath 
= resolveTaskScriptPath(env); + await fs.mkdir(path.dirname(scriptPath), { recursive: true }); + await fs.writeFile( + scriptPath, + [ + "@echo off", + `set "OPENCLAW_GATEWAY_PORT=${port}"`, + `"C:\\Program Files\\nodejs\\node.exe" "C:\\Users\\steipete\\AppData\\Roaming\\npm\\node_modules\\openclaw\\dist\\index.js" gateway --port ${port}`, + "", + ].join("\r\n"), + "utf8", + ); +} diff --git a/src/discord/accounts.test.ts b/src/discord/accounts.test.ts index 6fd11965a07..1f6d70b1ea0 100644 --- a/src/discord/accounts.test.ts +++ b/src/discord/accounts.test.ts @@ -1,5 +1,5 @@ import { describe, expect, it } from "vitest"; -import { resolveDiscordAccount } from "./accounts.js"; +import { resolveDiscordAccount, resolveDiscordMaxLinesPerMessage } from "./accounts.js"; describe("resolveDiscordAccount allowFrom precedence", () => { it("prefers accounts.default.allowFrom over top-level for default account", () => { @@ -56,3 +56,62 @@ describe("resolveDiscordAccount allowFrom precedence", () => { expect(resolved.config.allowFrom).toBeUndefined(); }); }); + +describe("resolveDiscordMaxLinesPerMessage", () => { + it("falls back to merged root discord maxLinesPerMessage when runtime config omits it", () => { + const resolved = resolveDiscordMaxLinesPerMessage({ + cfg: { + channels: { + discord: { + maxLinesPerMessage: 120, + accounts: { + default: { token: "token-default" }, + }, + }, + }, + }, + discordConfig: {}, + accountId: "default", + }); + + expect(resolved).toBe(120); + }); + + it("prefers explicit runtime discord maxLinesPerMessage over merged config", () => { + const resolved = resolveDiscordMaxLinesPerMessage({ + cfg: { + channels: { + discord: { + maxLinesPerMessage: 120, + accounts: { + default: { token: "token-default", maxLinesPerMessage: 80 }, + }, + }, + }, + }, + discordConfig: { maxLinesPerMessage: 55 }, + accountId: "default", + }); + + expect(resolved).toBe(55); + }); + + it("uses per-account discord maxLinesPerMessage over the root value when runtime 
config omits it", () => { + const resolved = resolveDiscordMaxLinesPerMessage({ + cfg: { + channels: { + discord: { + maxLinesPerMessage: 120, + accounts: { + work: { token: "token-work", maxLinesPerMessage: 80 }, + }, + }, + }, + }, + discordConfig: {}, + accountId: "work", + }); + + expect(resolved).toBe(80); + }); +}); diff --git a/src/discord/accounts.ts b/src/discord/accounts.ts index 75eeff40b3e..b4e71c78343 100644 --- a/src/discord/accounts.ts +++ b/src/discord/accounts.ts @@ -68,6 +68,20 @@ export function resolveDiscordAccount(params: { }; } +export function resolveDiscordMaxLinesPerMessage(params: { + cfg: OpenClawConfig; + discordConfig?: DiscordAccountConfig | null; + accountId?: string | null; +}): number | undefined { + if (typeof params.discordConfig?.maxLinesPerMessage === "number") { + return params.discordConfig.maxLinesPerMessage; + } + return resolveDiscordAccount({ + cfg: params.cfg, + accountId: params.accountId, + }).config.maxLinesPerMessage; +} + export function listEnabledDiscordAccounts(cfg: OpenClawConfig): ResolvedDiscordAccount[] { return listDiscordAccountIds(cfg) .map((accountId) => resolveDiscordAccount({ cfg, accountId })) diff --git a/src/discord/client.test.ts b/src/discord/client.test.ts new file mode 100644 index 00000000000..3dc156670e7 --- /dev/null +++ b/src/discord/client.test.ts @@ -0,0 +1,91 @@ +import type { RequestClient } from "@buape/carbon"; +import { describe, expect, it } from "vitest"; +import type { OpenClawConfig } from "../config/config.js"; +import { createDiscordRestClient } from "./client.js"; + +describe("createDiscordRestClient", () => { + const fakeRest = {} as RequestClient; + + it("uses explicit token without resolving config token SecretRefs", () => { + const cfg = { + channels: { + discord: { + token: { + source: "exec", + provider: "vault", + id: "discord/bot-token", + }, + }, + }, + } as OpenClawConfig; + + const result = createDiscordRestClient( + { + token: "Bot explicit-token", + rest: fakeRest, 
+ }, + cfg, + ); + + expect(result.token).toBe("explicit-token"); + expect(result.rest).toBe(fakeRest); + expect(result.account.accountId).toBe("default"); + }); + + it("keeps account retry config when explicit token is provided", () => { + const cfg = { + channels: { + discord: { + accounts: { + ops: { + token: { + source: "exec", + provider: "vault", + id: "discord/ops-token", + }, + retry: { + attempts: 7, + }, + }, + }, + }, + }, + } as OpenClawConfig; + + const result = createDiscordRestClient( + { + accountId: "ops", + token: "Bot explicit-account-token", + rest: fakeRest, + }, + cfg, + ); + + expect(result.token).toBe("explicit-account-token"); + expect(result.account.accountId).toBe("ops"); + expect(result.account.config.retry).toMatchObject({ attempts: 7 }); + }); + + it("still throws when no explicit token is provided and config token is unresolved", () => { + const cfg = { + channels: { + discord: { + token: { + source: "file", + provider: "default", + id: "/discord/token", + }, + }, + }, + } as OpenClawConfig; + + expect(() => + createDiscordRestClient( + { + rest: fakeRest, + }, + cfg, + ), + ).toThrow(/unresolved SecretRef/i); + }); +}); diff --git a/src/discord/client.ts b/src/discord/client.ts index 4f754fa8624..62d917cebb6 100644 --- a/src/discord/client.ts +++ b/src/discord/client.ts @@ -2,10 +2,16 @@ import { RequestClient } from "@buape/carbon"; import { loadConfig } from "../config/config.js"; import { createDiscordRetryRunner, type RetryRunner } from "../infra/retry-policy.js"; import type { RetryConfig } from "../infra/retry.js"; -import { resolveDiscordAccount } from "./accounts.js"; +import { normalizeAccountId } from "../routing/session-key.js"; +import { + mergeDiscordAccountConfig, + resolveDiscordAccount, + type ResolvedDiscordAccount, +} from "./accounts.js"; import { normalizeDiscordToken } from "./token.js"; export type DiscordClientOpts = { + cfg?: ReturnType; token?: string; accountId?: string; rest?: RequestClient; @@ -13,11 +19,7 
@@ export type DiscordClientOpts = { verbose?: boolean; }; -function resolveToken(params: { explicit?: string; accountId: string; fallbackToken?: string }) { - const explicit = normalizeDiscordToken(params.explicit, "channels.discord.token"); - if (explicit) { - return explicit; - } +function resolveToken(params: { accountId: string; fallbackToken?: string }) { const fallback = normalizeDiscordToken(params.fallbackToken, "channels.discord.token"); if (!fallback) { throw new Error( @@ -31,22 +33,48 @@ function resolveRest(token: string, rest?: RequestClient) { return rest ?? new RequestClient(token); } -export function createDiscordRestClient(opts: DiscordClientOpts, cfg = loadConfig()) { - const account = resolveDiscordAccount({ cfg, accountId: opts.accountId }); - const token = resolveToken({ - explicit: opts.token, - accountId: account.accountId, - fallbackToken: account.token, - }); +function resolveAccountWithoutToken(params: { + cfg: ReturnType; + accountId?: string; +}): ResolvedDiscordAccount { + const accountId = normalizeAccountId(params.accountId); + const merged = mergeDiscordAccountConfig(params.cfg, accountId); + const baseEnabled = params.cfg.channels?.discord?.enabled !== false; + const accountEnabled = merged.enabled !== false; + return { + accountId, + enabled: baseEnabled && accountEnabled, + name: merged.name?.trim() || undefined, + token: "", + tokenSource: "none", + config: merged, + }; +} + +export function createDiscordRestClient( + opts: DiscordClientOpts, + cfg?: ReturnType, +) { + const resolvedCfg = opts.cfg ?? cfg ?? loadConfig(); + const explicitToken = normalizeDiscordToken(opts.token, "channels.discord.token"); + const account = explicitToken + ? resolveAccountWithoutToken({ cfg: resolvedCfg, accountId: opts.accountId }) + : resolveDiscordAccount({ cfg: resolvedCfg, accountId: opts.accountId }); + const token = + explicitToken ?? 
+ resolveToken({ + accountId: account.accountId, + fallbackToken: account.token, + }); const rest = resolveRest(token, opts.rest); return { token, rest, account }; } export function createDiscordClient( opts: DiscordClientOpts, - cfg = loadConfig(), + cfg?: ReturnType, ): { token: string; rest: RequestClient; request: RetryRunner } { - const { token, rest, account } = createDiscordRestClient(opts, cfg); + const { token, rest, account } = createDiscordRestClient(opts, opts.cfg ?? cfg); const request = createDiscordRetryRunner({ retry: opts.retry, configRetry: account.config.retry, @@ -56,5 +84,5 @@ export function createDiscordClient( } export function resolveDiscordRest(opts: DiscordClientOpts) { - return createDiscordRestClient(opts).rest; + return createDiscordRestClient(opts, opts.cfg).rest; } diff --git a/src/discord/exec-approvals.ts b/src/discord/exec-approvals.ts new file mode 100644 index 00000000000..f4be9a22e0c --- /dev/null +++ b/src/discord/exec-approvals.ts @@ -0,0 +1,23 @@ +import type { ReplyPayload } from "../auto-reply/types.js"; +import type { OpenClawConfig } from "../config/config.js"; +import { getExecApprovalReplyMetadata } from "../infra/exec-approval-reply.js"; +import { resolveDiscordAccount } from "./accounts.js"; + +export function isDiscordExecApprovalClientEnabled(params: { + cfg: OpenClawConfig; + accountId?: string | null; +}): boolean { + const config = resolveDiscordAccount(params).config.execApprovals; + return Boolean(config?.enabled && (config.approvers?.length ?? 
0) > 0); +} + +export function shouldSuppressLocalDiscordExecApprovalPrompt(params: { + cfg: OpenClawConfig; + accountId?: string | null; + payload: ReplyPayload; +}): boolean { + return ( + isDiscordExecApprovalClientEnabled(params) && + getExecApprovalReplyMetadata(params.payload) !== null + ); +} diff --git a/src/discord/monitor.test.ts b/src/discord/monitor.test.ts index 10c7dc66747..d3289155699 100644 --- a/src/discord/monitor.test.ts +++ b/src/discord/monitor.test.ts @@ -38,6 +38,7 @@ const makeEntries = ( requireMention: value.requireMention, reactionNotifications: value.reactionNotifications, users: value.users, + roles: value.roles, channels: value.channels, }; } @@ -246,6 +247,18 @@ describe("discord guild/channel resolution", () => { expect(resolved?.slug).toBe("friends-of-openclaw"); }); + it("resolves guild entry by raw guild id when guild object is missing", () => { + const guildEntries = makeEntries({ + "123": { slug: "friends-of-openclaw" }, + }); + const resolved = resolveDiscordGuildEntry({ + guildId: "123", + guildEntries, + }); + expect(resolved?.id).toBe("123"); + expect(resolved?.slug).toBe("friends-of-openclaw"); + }); + it("resolves guild entry by slug key", () => { const guildEntries = makeEntries({ "friends-of-openclaw": { slug: "friends-of-openclaw" }, @@ -730,6 +743,17 @@ describe("discord reaction notification gating", () => { }, expected: true, }, + { + name: "all mode blocks non-allowlisted guild member", + input: { + mode: "all" as const, + botId: "bot-1", + messageAuthorId: "user-1", + userId: "user-2", + guildInfo: { users: ["trusted-user"] }, + }, + expected: false, + }, { name: "own mode with bot-authored message", input: { @@ -750,6 +774,17 @@ describe("discord reaction notification gating", () => { }, expected: false, }, + { + name: "own mode still blocks member outside users allowlist", + input: { + mode: "own" as const, + botId: "bot-1", + messageAuthorId: "bot-1", + userId: "user-3", + guildInfo: { users: ["trusted-user"] }, 
+ }, + expected: false, + }, { name: "allowlist mode without match", input: { @@ -769,7 +804,7 @@ describe("discord reaction notification gating", () => { messageAuthorId: "user-1", userId: "123", userName: "steipete", - allowlist: ["123", "other"] as string[], + guildInfo: { users: ["123", "other"] }, }, expected: true, }, @@ -781,7 +816,7 @@ describe("discord reaction notification gating", () => { messageAuthorId: "user-1", userId: "999", userName: "trusted-user", - allowlist: ["trusted-user"] as string[], + guildInfo: { users: ["trusted-user"] }, }, expected: false, }, @@ -793,21 +828,29 @@ describe("discord reaction notification gating", () => { messageAuthorId: "user-1", userId: "999", userName: "trusted-user", - allowlist: ["trusted-user"] as string[], + guildInfo: { users: ["trusted-user"] }, allowNameMatching: true, }, expected: true, }, + { + name: "allowlist mode matches allowed role", + input: { + mode: "allowlist" as const, + botId: "bot-1", + messageAuthorId: "user-1", + userId: "999", + guildInfo: { roles: ["role:trusted-role"] }, + memberRoleIds: ["trusted-role"], + }, + expected: true, + }, ]); for (const testCase of cases) { expect( shouldEmitDiscordReactionNotification({ ...testCase.input, - allowlist: - "allowlist" in testCase.input && testCase.input.allowlist - ? [...testCase.input.allowlist] - : undefined, }), testCase.name, ).toBe(testCase.expected); @@ -863,6 +906,7 @@ function makeReactionEvent(overrides?: { messageAuthorId?: string; messageFetch?: ReturnType; guild?: { name?: string; id?: string }; + memberRoleIds?: string[]; }) { const userId = overrides?.userId ?? "user-1"; const messageId = overrides?.messageId ?? "msg-1"; @@ -882,6 +926,7 @@ function makeReactionEvent(overrides?: { message_id: messageId, emoji: { name: overrides?.emojiName ?? "👍", id: null }, guild: overrides?.guild, + rawMember: overrides?.memberRoleIds ? 
{ roles: overrides.memberRoleIds } : undefined, user: { id: userId, bot: false, @@ -1059,7 +1104,31 @@ describe("discord DM reaction handling", () => { expect(enqueueSystemEventSpy).not.toHaveBeenCalled(); }); - it("still processes guild reactions (no regression)", async () => { + it("blocks guild reactions for sender outside users allowlist", async () => { + const data = makeReactionEvent({ + guildId: "guild-123", + userId: "attacker-user", + botAsAuthor: true, + guild: { id: "guild-123", name: "Test Guild" }, + }); + const client = makeReactionClient({ channelType: ChannelType.GuildText }); + const listener = new DiscordReactionListener( + makeReactionListenerParams({ + guildEntries: makeEntries({ + "guild-123": { + users: ["user:trusted-user"], + }, + }), + }), + ); + + await listener.handle(data, client); + + expect(enqueueSystemEventSpy).not.toHaveBeenCalled(); + expect(resolveAgentRouteMock).not.toHaveBeenCalled(); + }); + + it("allows guild reactions for sender in channel role allowlist override", async () => { resolveAgentRouteMock.mockReturnValueOnce({ agentId: "default", channel: "discord", @@ -1069,11 +1138,27 @@ describe("discord DM reaction handling", () => { const data = makeReactionEvent({ guildId: "guild-123", + userId: "member-user", botAsAuthor: true, - guild: { name: "Test Guild" }, + guild: { id: "guild-123", name: "Test Guild" }, + memberRoleIds: ["trusted-role"], }); const client = makeReactionClient({ channelType: ChannelType.GuildText }); - const listener = new DiscordReactionListener(makeReactionListenerParams()); + const listener = new DiscordReactionListener( + makeReactionListenerParams({ + guildEntries: makeEntries({ + "guild-123": { + roles: ["role:blocked-role"], + channels: { + "channel-1": { + allow: true, + roles: ["role:trusted-role"], + }, + }, + }, + }), + }), + ); await listener.handle(data, client); diff --git a/src/discord/monitor/agent-components.ts b/src/discord/monitor/agent-components.ts index deeb9b35221..80239ea51d7 
100644 --- a/src/discord/monitor/agent-components.ts +++ b/src/discord/monitor/agent-components.ts @@ -43,6 +43,7 @@ import { readStoreAllowFromForDmPolicy, resolvePinnedMainDmOwnerFromAllowlist, } from "../../security/dm-policy-shared.js"; +import { resolveDiscordMaxLinesPerMessage } from "../accounts.js"; import { resolveDiscordComponentEntry, resolveDiscordModalEntry } from "../components-registry.js"; import { createDiscordFormModal, @@ -359,6 +360,7 @@ async function ensureAgentComponentInteractionAllowed(params: { }): Promise<{ parentId: string | undefined } | null> { const guildInfo = resolveDiscordGuildEntry({ guild: params.interaction.guild ?? undefined, + guildId: params.rawGuildId, guildEntries: params.ctx.guildEntries, }); const channelCtx = resolveDiscordChannelContext(params.interaction); @@ -1008,6 +1010,7 @@ async function dispatchDiscordComponentEvent(params: { deliver: async (payload) => { const replyToId = replyReference.use(); await deliverDiscordReply({ + cfg: ctx.cfg, replies: [payload], target: deliverTarget, token, @@ -1017,7 +1020,11 @@ async function dispatchDiscordComponentEvent(params: { replyToId, replyToMode, textLimit, - maxLinesPerMessage: ctx.discordConfig?.maxLinesPerMessage, + maxLinesPerMessage: resolveDiscordMaxLinesPerMessage({ + cfg: ctx.cfg, + discordConfig: ctx.discordConfig, + accountId, + }), tableMode, chunkMode: resolveChunkMode(ctx.cfg, "discord", accountId), mediaLocalRoots, @@ -1088,6 +1095,7 @@ async function handleDiscordComponentEvent(params: { const { channelId, user, replyOpts, rawGuildId, memberRoleIds } = interactionCtx; const guildInfo = resolveDiscordGuildEntry({ guild: params.interaction.guild ?? 
undefined, + guildId: rawGuildId, guildEntries: params.ctx.guildEntries, }); const channelCtx = resolveDiscordChannelContext(params.interaction); @@ -1240,6 +1248,7 @@ async function handleDiscordModalTrigger(params: { const { channelId, user, replyOpts, rawGuildId, memberRoleIds } = interactionCtx; const guildInfo = resolveDiscordGuildEntry({ guild: params.interaction.guild ?? undefined, + guildId: rawGuildId, guildEntries: params.ctx.guildEntries, }); const channelCtx = resolveDiscordChannelContext(params.interaction); @@ -1690,6 +1699,7 @@ class DiscordComponentModal extends Modal { const { channelId, user, replyOpts, rawGuildId, memberRoleIds } = interactionCtx; const guildInfo = resolveDiscordGuildEntry({ guild: interaction.guild ?? undefined, + guildId: rawGuildId, guildEntries: this.ctx.guildEntries, }); const channelCtx = resolveDiscordChannelContext(interaction); diff --git a/src/discord/monitor/allow-list.ts b/src/discord/monitor/allow-list.ts index 5432cb5d128..353ab8635be 100644 --- a/src/discord/monitor/allow-list.ts +++ b/src/discord/monitor/allow-list.ts @@ -19,33 +19,7 @@ export type DiscordAllowListMatch = AllowlistMatch<"wildcard" | "id" | "name" | const DISCORD_OWNER_ALLOWLIST_PREFIXES = ["discord:", "user:", "pk:"]; -export type DiscordGuildEntryResolved = { - id?: string; - slug?: string; - requireMention?: boolean; - ignoreOtherMentions?: boolean; - reactionNotifications?: "off" | "own" | "all" | "allowlist"; - users?: string[]; - roles?: string[]; - channels?: Record< - string, - { - allow?: boolean; - requireMention?: boolean; - ignoreOtherMentions?: boolean; - skills?: string[]; - enabled?: boolean; - users?: string[]; - roles?: string[]; - systemPrompt?: string; - includeThreadStarter?: boolean; - autoThread?: boolean; - } - >; -}; - -export type DiscordChannelConfigResolved = { - allowed: boolean; +type DiscordChannelOverrideConfig = { requireMention?: boolean; ignoreOtherMentions?: boolean; skills?: string[]; @@ -55,6 +29,22 @@ export 
type DiscordChannelConfigResolved = { systemPrompt?: string; includeThreadStarter?: boolean; autoThread?: boolean; + autoArchiveDuration?: "60" | "1440" | "4320" | "10080" | 60 | 1440 | 4320 | 10080; +}; + +export type DiscordGuildEntryResolved = { + id?: string; + slug?: string; + requireMention?: boolean; + ignoreOtherMentions?: boolean; + reactionNotifications?: "off" | "own" | "all" | "allowlist"; + users?: string[]; + roles?: string[]; + channels?: Record; +}; + +export type DiscordChannelConfigResolved = DiscordChannelOverrideConfig & { + allowed: boolean; matchKey?: string; matchSource?: ChannelMatchSource; }; @@ -101,6 +91,21 @@ export function normalizeDiscordSlug(value: string) { .replace(/^-+|-+$/g, ""); } +function resolveDiscordAllowListNameMatch( + list: DiscordAllowList, + candidate: { name?: string; tag?: string }, +): { matchKey: string; matchSource: "name" | "tag" } | null { + const nameSlug = candidate.name ? normalizeDiscordSlug(candidate.name) : ""; + if (nameSlug && list.names.has(nameSlug)) { + return { matchKey: nameSlug, matchSource: "name" }; + } + const tagSlug = candidate.tag ? normalizeDiscordSlug(candidate.tag) : ""; + if (tagSlug && list.names.has(tagSlug)) { + return { matchKey: tagSlug, matchSource: "tag" }; + } + return null; +} + export function allowListMatches( list: DiscordAllowList, candidate: { id?: string; name?: string; tag?: string }, @@ -113,11 +118,7 @@ export function allowListMatches( return true; } if (params?.allowNameMatching === true) { - const slug = candidate.name ? 
normalizeDiscordSlug(candidate.name) : ""; - if (slug && list.names.has(slug)) { - return true; - } - if (candidate.tag && list.names.has(normalizeDiscordSlug(candidate.tag))) { + if (resolveDiscordAllowListNameMatch(list, candidate)) { return true; } } @@ -137,13 +138,9 @@ export function resolveDiscordAllowListMatch(params: { return { allowed: true, matchKey: candidate.id, matchSource: "id" }; } if (params.allowNameMatching === true) { - const nameSlug = candidate.name ? normalizeDiscordSlug(candidate.name) : ""; - if (nameSlug && allowList.names.has(nameSlug)) { - return { allowed: true, matchKey: nameSlug, matchSource: "name" }; - } - const tagSlug = candidate.tag ? normalizeDiscordSlug(candidate.tag) : ""; - if (tagSlug && allowList.names.has(tagSlug)) { - return { allowed: true, matchKey: tagSlug, matchSource: "tag" }; + const namedMatch = resolveDiscordAllowListNameMatch(allowList, candidate); + if (namedMatch) { + return { allowed: true, ...namedMatch }; } } return { allowed: false }; @@ -324,25 +321,30 @@ export function resolveDiscordCommandAuthorized(params: { export function resolveDiscordGuildEntry(params: { guild?: Guild | Guild | null; + guildId?: string | null; guildEntries?: Record; }): DiscordGuildEntryResolved | null { const guild = params.guild; const entries = params.guildEntries; - if (!guild || !entries) { + const guildId = params.guildId?.trim() || guild?.id; + if (!entries) { return null; } - const byId = entries[guild.id]; + const byId = guildId ? entries[guildId] : undefined; if (byId) { - return { ...byId, id: guild.id }; + return { ...byId, id: guildId }; + } + if (!guild) { + return null; } const slug = normalizeDiscordSlug(guild.name ?? ""); const bySlug = entries[slug]; if (bySlug) { - return { ...bySlug, id: guild.id, slug: slug || bySlug.slug }; + return { ...bySlug, id: guildId ?? 
guild.id, slug: slug || bySlug.slug }; } const wildcard = entries["*"]; if (wildcard) { - return { ...wildcard, id: guild.id, slug: slug || wildcard.slug }; + return { ...wildcard, id: guildId ?? guild.id, slug: slug || wildcard.slug }; } return null; } @@ -401,6 +403,7 @@ function resolveDiscordChannelConfigEntry( systemPrompt: entry.systemPrompt, includeThreadStarter: entry.includeThreadStarter, autoThread: entry.autoThread, + autoArchiveDuration: entry.autoArchiveDuration, }; return resolved; } @@ -553,6 +556,9 @@ export function shouldEmitDiscordReactionNotification(params: { userId: string; userName?: string; userTag?: string; + channelConfig?: DiscordChannelConfigResolved | null; + guildInfo?: DiscordGuildEntryResolved | null; + memberRoleIds?: string[]; allowlist?: string[]; allowNameMatching?: boolean; }) { @@ -560,26 +566,31 @@ export function shouldEmitDiscordReactionNotification(params: { if (mode === "off") { return false; } + const accessGuildInfo = + params.guildInfo ?? + (params.allowlist ? ({ users: params.allowlist } satisfies DiscordGuildEntryResolved) : null); + const { hasAccessRestrictions, memberAllowed } = resolveDiscordMemberAccessState({ + channelConfig: params.channelConfig, + guildInfo: accessGuildInfo, + memberRoleIds: params.memberRoleIds ?? 
[], + sender: { + id: params.userId, + name: params.userName, + tag: params.userTag, + }, + allowNameMatching: params.allowNameMatching, + }); + if (mode === "allowlist") { + return hasAccessRestrictions && memberAllowed; + } + if (hasAccessRestrictions && !memberAllowed) { + return false; + } if (mode === "all") { return true; } if (mode === "own") { return Boolean(params.botId && params.messageAuthorId === params.botId); } - if (mode === "allowlist") { - const list = normalizeDiscordAllowList(params.allowlist, ["discord:", "user:", "pk:"]); - if (!list) { - return false; - } - return allowListMatches( - list, - { - id: params.userId, - name: params.userName, - tag: params.userTag, - }, - { allowNameMatching: params.allowNameMatching }, - ); - } return false; } diff --git a/src/discord/monitor/auto-presence.test.ts b/src/discord/monitor/auto-presence.test.ts index b5a83d5242d..d901a76d642 100644 --- a/src/discord/monitor/auto-presence.test.ts +++ b/src/discord/monitor/auto-presence.test.ts @@ -29,45 +29,33 @@ function createStore(params?: { }; } +function expectExhaustedDecision(params: { failureCounts: Record }) { + const now = Date.now(); + const decision = resolveDiscordAutoPresenceDecision({ + discordConfig: { + autoPresence: { + enabled: true, + exhaustedText: "token exhausted", + }, + }, + authStore: createStore({ cooldownUntil: now + 60_000, failureCounts: params.failureCounts }), + gatewayConnected: true, + now, + }); + + expect(decision).toBeTruthy(); + expect(decision?.state).toBe("exhausted"); + expect(decision?.presence.status).toBe("dnd"); + expect(decision?.presence.activities[0]?.state).toBe("token exhausted"); +} + describe("discord auto presence", () => { it("maps exhausted runtime signal to dnd", () => { - const now = Date.now(); - const decision = resolveDiscordAutoPresenceDecision({ - discordConfig: { - autoPresence: { - enabled: true, - exhaustedText: "token exhausted", - }, - }, - authStore: createStore({ cooldownUntil: now + 60_000, 
failureCounts: { rate_limit: 2 } }), - gatewayConnected: true, - now, - }); - - expect(decision).toBeTruthy(); - expect(decision?.state).toBe("exhausted"); - expect(decision?.presence.status).toBe("dnd"); - expect(decision?.presence.activities[0]?.state).toBe("token exhausted"); + expectExhaustedDecision({ failureCounts: { rate_limit: 2 } }); }); it("treats overloaded cooldown as exhausted", () => { - const now = Date.now(); - const decision = resolveDiscordAutoPresenceDecision({ - discordConfig: { - autoPresence: { - enabled: true, - exhaustedText: "token exhausted", - }, - }, - authStore: createStore({ cooldownUntil: now + 60_000, failureCounts: { overloaded: 2 } }), - gatewayConnected: true, - now, - }); - - expect(decision).toBeTruthy(); - expect(decision?.state).toBe("exhausted"); - expect(decision?.presence.status).toBe("dnd"); - expect(decision?.presence.activities[0]?.state).toBe("token exhausted"); + expectExhaustedDecision({ failureCounts: { overloaded: 2 } }); }); it("recovers from exhausted to online once a profile becomes usable", () => { diff --git a/src/discord/monitor/exec-approvals.test.ts b/src/discord/monitor/exec-approvals.test.ts index f5e607022ee..c7cb72b82ec 100644 --- a/src/discord/monitor/exec-approvals.test.ts +++ b/src/discord/monitor/exec-approvals.test.ts @@ -116,6 +116,62 @@ function createHandler(config: DiscordExecApprovalConfig, accountId = "default") }); } +function mockSuccessfulDmDelivery(params?: { + noteChannelId?: string; + expectedNoteText?: string; + throwOnUnexpectedRoute?: boolean; +}) { + mockRestPost.mockImplementation( + async (route: string, requestParams?: { body?: { content?: string } }) => { + if (params?.noteChannelId && route === Routes.channelMessages(params.noteChannelId)) { + if (params.expectedNoteText) { + expect(requestParams?.body?.content).toContain(params.expectedNoteText); + } + return { id: "note-1", channel_id: params.noteChannelId }; + } + if (route === Routes.userChannels()) { + return { id: "dm-1" 
}; + } + if (route === Routes.channelMessages("dm-1")) { + return { id: "msg-1", channel_id: "dm-1" }; + } + if (params?.throwOnUnexpectedRoute) { + throw new Error(`unexpected route: ${route}`); + } + return { id: "msg-unknown" }; + }, + ); +} + +async function expectGatewayAuthStart(params: { + handler: DiscordExecApprovalHandler; + expectedUrl: string; + expectedSource: "cli" | "env"; + expectedToken?: string; + expectedPassword?: string; +}) { + await params.handler.start(); + + expect(mockResolveGatewayConnectionAuth).toHaveBeenCalledWith( + expect.objectContaining({ + env: process.env, + urlOverride: params.expectedUrl, + urlOverrideSource: params.expectedSource, + }), + ); + + const expectedClientParams: Record = { + url: params.expectedUrl, + }; + if (params.expectedToken !== undefined) { + expectedClientParams.token = params.expectedToken; + } + if (params.expectedPassword !== undefined) { + expectedClientParams.password = params.expectedPassword; + } + expect(mockGatewayClientCtor).toHaveBeenCalledWith(expect.objectContaining(expectedClientParams)); +} + type ExecApprovalHandlerInternals = { pending: Map< string, @@ -470,15 +526,15 @@ describe("ExecApprovalButton", () => { function createMockInteraction(userId: string) { const reply = vi.fn().mockResolvedValue(undefined); - const update = vi.fn().mockResolvedValue(undefined); + const acknowledge = vi.fn().mockResolvedValue(undefined); const followUp = vi.fn().mockResolvedValue(undefined); const interaction = { userId, reply, - update, + acknowledge, followUp, } as unknown as ButtonInteraction; - return { interaction, reply, update, followUp }; + return { interaction, reply, acknowledge, followUp }; } it("denies unauthorized users with ephemeral message", async () => { @@ -486,7 +542,7 @@ describe("ExecApprovalButton", () => { const ctx: ExecApprovalButtonContext = { handler }; const button = new ExecApprovalButton(ctx); - const { interaction, reply, update } = createMockInteraction("999"); + const { 
interaction, reply, acknowledge } = createMockInteraction("999"); const data: ComponentData = { id: "test-approval", action: "allow-once" }; await button.run(interaction, data); @@ -495,7 +551,7 @@ describe("ExecApprovalButton", () => { content: "⛔ You are not authorized to approve exec requests.", ephemeral: true, }); - expect(update).not.toHaveBeenCalled(); + expect(acknowledge).not.toHaveBeenCalled(); // oxlint-disable-next-line typescript/unbound-method -- vi.fn() mock expect(handler.resolveApproval).not.toHaveBeenCalled(); }); @@ -505,50 +561,45 @@ describe("ExecApprovalButton", () => { const ctx: ExecApprovalButtonContext = { handler }; const button = new ExecApprovalButton(ctx); - const { interaction, reply, update } = createMockInteraction("222"); + const { interaction, reply, acknowledge } = createMockInteraction("222"); const data: ComponentData = { id: "test-approval", action: "allow-once" }; await button.run(interaction, data); expect(reply).not.toHaveBeenCalled(); - expect(update).toHaveBeenCalledWith({ - content: "Submitting decision: **Allowed (once)**...", - components: [], - }); + expect(acknowledge).toHaveBeenCalledTimes(1); // oxlint-disable-next-line typescript/unbound-method -- vi.fn() mock expect(handler.resolveApproval).toHaveBeenCalledWith("test-approval", "allow-once"); }); - it("shows correct label for allow-always", async () => { + it("acknowledges allow-always interactions before resolving", async () => { const handler = createMockHandler(["111"]); const ctx: ExecApprovalButtonContext = { handler }; const button = new ExecApprovalButton(ctx); - const { interaction, update } = createMockInteraction("111"); + const { interaction, acknowledge } = createMockInteraction("111"); const data: ComponentData = { id: "test-approval", action: "allow-always" }; await button.run(interaction, data); - expect(update).toHaveBeenCalledWith({ - content: "Submitting decision: **Allowed (always)**...", - components: [], - }); + 
expect(acknowledge).toHaveBeenCalledTimes(1); + // oxlint-disable-next-line typescript/unbound-method -- vi.fn() mock + expect(handler.resolveApproval).toHaveBeenCalledWith("test-approval", "allow-always"); }); - it("shows correct label for deny", async () => { + it("acknowledges deny interactions before resolving", async () => { const handler = createMockHandler(["111"]); const ctx: ExecApprovalButtonContext = { handler }; const button = new ExecApprovalButton(ctx); - const { interaction, update } = createMockInteraction("111"); + const { interaction, acknowledge } = createMockInteraction("111"); const data: ComponentData = { id: "test-approval", action: "deny" }; await button.run(interaction, data); - expect(update).toHaveBeenCalledWith({ - content: "Submitting decision: **Denied**...", - components: [], - }); + expect(acknowledge).toHaveBeenCalledTimes(1); + // oxlint-disable-next-line typescript/unbound-method -- vi.fn() mock + expect(handler.resolveApproval).toHaveBeenCalledWith("test-approval", "deny"); }); it("handles invalid data gracefully", async () => { @@ -556,18 +607,20 @@ describe("ExecApprovalButton", () => { const ctx: ExecApprovalButtonContext = { handler }; const button = new ExecApprovalButton(ctx); - const { interaction, update } = createMockInteraction("111"); + const { interaction, acknowledge, reply } = createMockInteraction("111"); const data: ComponentData = { id: "", action: "invalid" }; await button.run(interaction, data); - expect(update).toHaveBeenCalledWith({ + expect(reply).toHaveBeenCalledWith({ content: "This approval is no longer valid.", - components: [], + ephemeral: true, }); + expect(acknowledge).not.toHaveBeenCalled(); // oxlint-disable-next-line typescript/unbound-method -- vi.fn() mock expect(handler.resolveApproval).not.toHaveBeenCalled(); }); + it("follows up with error when resolve fails", async () => { const handler = createMockHandler(["111"]); handler.resolveApproval = vi.fn().mockResolvedValue(false); @@ -581,7 +634,7 
@@ describe("ExecApprovalButton", () => { expect(followUp).toHaveBeenCalledWith({ content: - "Failed to submit approval decision. The request may have expired or already been resolved.", + "Failed to submit approval decision for **Allowed (once)**. The request may have expired or already been resolved.", ephemeral: true, }); }); @@ -596,14 +649,14 @@ describe("ExecApprovalButton", () => { const ctx: ExecApprovalButtonContext = { handler }; const button = new ExecApprovalButton(ctx); - const { interaction, update, reply } = createMockInteraction("111"); + const { interaction, acknowledge, reply } = createMockInteraction("111"); const data: ComponentData = { id: "test-approval", action: "allow-once" }; await button.run(interaction, data); // Should match because getApprovers returns [111] and button does String(id) === userId expect(reply).not.toHaveBeenCalled(); - expect(update).toHaveBeenCalled(); + expect(acknowledge).toHaveBeenCalled(); }); }); @@ -775,15 +828,7 @@ describe("DiscordExecApprovalHandler delivery routing", () => { }); const internals = getHandlerInternals(handler); - mockRestPost.mockImplementation(async (route: string) => { - if (route === Routes.userChannels()) { - return { id: "dm-1" }; - } - if (route === Routes.channelMessages("dm-1")) { - return { id: "msg-1", channel_id: "dm-1" }; - } - return { id: "msg-unknown" }; - }); + mockSuccessfulDmDelivery(); const request = createRequest({ sessionKey: "agent:main:discord:dm:123" }); await internals.handleApprovalRequested(request); @@ -803,6 +848,62 @@ describe("DiscordExecApprovalHandler delivery routing", () => { clearPendingTimeouts(handler); }); + + it("posts an in-channel note when target is dm and the request came from a non-DM discord conversation", async () => { + const handler = createHandler({ + enabled: true, + approvers: ["123"], + target: "dm", + }); + const internals = getHandlerInternals(handler); + + mockSuccessfulDmDelivery({ + noteChannelId: "999888777", + expectedNoteText: "I sent 
the allowed approvers DMs", + throwOnUnexpectedRoute: true, + }); + + await internals.handleApprovalRequested(createRequest()); + + expect(mockRestPost).toHaveBeenCalledWith( + Routes.channelMessages("999888777"), + expect.objectContaining({ + body: expect.objectContaining({ + content: expect.stringContaining("I sent the allowed approvers DMs"), + }), + }), + ); + expect(mockRestPost).toHaveBeenCalledWith( + Routes.channelMessages("dm-1"), + expect.objectContaining({ + body: expect.any(Object), + }), + ); + + clearPendingTimeouts(handler); + }); + + it("does not post an in-channel note when the request already came from a discord DM", async () => { + const handler = createHandler({ + enabled: true, + approvers: ["123"], + target: "dm", + }); + const internals = getHandlerInternals(handler); + + mockSuccessfulDmDelivery({ throwOnUnexpectedRoute: true }); + + await internals.handleApprovalRequested( + createRequest({ sessionKey: "agent:main:discord:dm:123" }), + ); + + expect(mockRestPost).not.toHaveBeenCalledWith( + Routes.channelMessages("999888777"), + expect.anything(), + ); + + clearPendingTimeouts(handler); + }); }); describe("DiscordExecApprovalHandler gateway auth resolution", () => { @@ -819,22 +920,13 @@ describe("DiscordExecApprovalHandler gateway auth resolution", () => { cfg: { session: { store: STORE_PATH } }, }); - await handler.start(); - - expect(mockResolveGatewayConnectionAuth).toHaveBeenCalledWith( - expect.objectContaining({ - env: process.env, - urlOverride: "wss://override.example/ws", - urlOverrideSource: "cli", - }), - ); - expect(mockGatewayClientCtor).toHaveBeenCalledWith( - expect.objectContaining({ - url: "wss://override.example/ws", - token: "resolved-token", - password: "resolved-password", // pragma: allowlist secret - }), - ); + await expectGatewayAuthStart({ + handler, + expectedUrl: "wss://override.example/ws", + expectedSource: "cli", + expectedToken: "resolved-token", + expectedPassword: "resolved-password", // pragma: allowlist 
secret + }); await handler.stop(); }); @@ -850,20 +942,11 @@ describe("DiscordExecApprovalHandler gateway auth resolution", () => { cfg: { session: { store: STORE_PATH } }, }); - await handler.start(); - - expect(mockResolveGatewayConnectionAuth).toHaveBeenCalledWith( - expect.objectContaining({ - env: process.env, - urlOverride: "wss://gateway-from-env.example/ws", - urlOverrideSource: "env", - }), - ); - expect(mockGatewayClientCtor).toHaveBeenCalledWith( - expect.objectContaining({ - url: "wss://gateway-from-env.example/ws", - }), - ); + await expectGatewayAuthStart({ + handler, + expectedUrl: "wss://gateway-from-env.example/ws", + expectedSource: "env", + }); await handler.stop(); } finally { diff --git a/src/discord/monitor/exec-approvals.ts b/src/discord/monitor/exec-approvals.ts index 5564b126e3c..8dd3156e991 100644 --- a/src/discord/monitor/exec-approvals.ts +++ b/src/discord/monitor/exec-approvals.ts @@ -13,10 +13,11 @@ import { ButtonStyle, Routes } from "discord-api-types/v10"; import type { OpenClawConfig } from "../../config/config.js"; import { loadSessionStore, resolveStorePath } from "../../config/sessions.js"; import type { DiscordExecApprovalConfig } from "../../config/types.discord.js"; -import { buildGatewayConnectionDetails } from "../../gateway/call.js"; import { GatewayClient } from "../../gateway/client.js"; -import { resolveGatewayConnectionAuth } from "../../gateway/connection-auth.js"; +import { createOperatorApprovalsGatewayClient } from "../../gateway/operator-approvals-client.js"; import type { EventFrame } from "../../gateway/protocol/index.js"; +import { resolveExecApprovalCommandDisplay } from "../../infra/exec-approval-command-display.js"; +import { getExecApprovalApproverDmNoticeText } from "../../infra/exec-approval-reply.js"; import type { ExecApprovalDecision, ExecApprovalRequest, @@ -26,11 +27,7 @@ import { logDebug, logError } from "../../logger.js"; import { normalizeAccountId, resolveAgentIdFromSessionKey } from 
"../../routing/session-key.js"; import type { RuntimeEnv } from "../../runtime.js"; import { compileSafeRegex, testRegexWithBoundedInput } from "../../security/safe-regex.js"; -import { - GATEWAY_CLIENT_MODES, - GATEWAY_CLIENT_NAMES, - normalizeMessageChannel, -} from "../../utils/message-channel.js"; +import { normalizeMessageChannel } from "../../utils/message-channel.js"; import { createDiscordClient, stripUndefinedFields } from "../send.shared.js"; import { DiscordUiContainer } from "../ui.js"; @@ -47,6 +44,12 @@ export function extractDiscordChannelId(sessionKey?: string | null): string | nu return match ? match[1] : null; } +function buildDiscordApprovalDmRedirectNotice(): { content: string } { + return { + content: getExecApprovalApproverDmNoticeText(), + }; +} + type PendingApproval = { discordMessageId: string; discordChannelId: string; @@ -103,6 +106,7 @@ type ExecApprovalContainerParams = { title: string; description?: string; commandPreview: string; + commandSecondaryPreview?: string | null; metadataLines?: string[]; actionRow?: Row - - - +
+ ${THEME_MODE_OPTIONS.map( + (opt) => html` + + `, + )}
`; } -function renderSunIcon() { +export function renderSidebarConnectionStatus(state: AppViewState) { + const label = state.connected ? t("common.online") : t("common.offline"); + const toneClass = state.connected + ? "sidebar-connection-status--online" + : "sidebar-connection-status--offline"; + return html` - + `; } -function renderMoonIcon() { - return html` - - `; -} +export function renderThemeToggle(state: AppViewState) { + const setOpen = (orb: HTMLElement, nextOpen: boolean) => { + orb.classList.toggle("theme-orb--open", nextOpen); + const trigger = orb.querySelector(".theme-orb__trigger"); + const menu = orb.querySelector(".theme-orb__menu"); + if (trigger) { + trigger.setAttribute("aria-expanded", nextOpen ? "true" : "false"); + } + if (menu) { + menu.setAttribute("aria-hidden", nextOpen ? "false" : "true"); + } + }; + + const toggleOpen = (e: Event) => { + const orb = (e.currentTarget as HTMLElement).closest(".theme-orb"); + if (!orb) { + return; + } + const isOpen = orb.classList.contains("theme-orb--open"); + if (isOpen) { + setOpen(orb, false); + } else { + setOpen(orb, true); + const close = (ev: MouseEvent) => { + if (!orb.contains(ev.target as Node)) { + setOpen(orb, false); + document.removeEventListener("click", close); + } + }; + requestAnimationFrame(() => document.addEventListener("click", close)); + } + }; + + const pick = (opt: ThemeOption, e: Event) => { + const orb = (e.currentTarget as HTMLElement).closest(".theme-orb"); + if (orb) { + setOpen(orb, false); + } + if (opt.id !== state.theme) { + const context: ThemeTransitionContext = { element: orb ?? undefined }; + state.setTheme(opt.id, context); + } + }; -function renderMonitorIcon() { return html` - +
+ + +
`; } diff --git a/ui/src/ui/app-render.ts b/ui/src/ui/app-render.ts index 7fbe38c9ca7..643edfca521 100644 --- a/ui/src/ui/app-render.ts +++ b/ui/src/ui/app-render.ts @@ -1,9 +1,18 @@ import { html, nothing } from "lit"; -import { parseAgentSessionKey } from "../../../src/routing/session-key.js"; +import { + buildAgentMainSessionKey, + parseAgentSessionKey, +} from "../../../src/routing/session-key.js"; import { t } from "../i18n/index.ts"; import { refreshChatAvatar } from "./app-chat.ts"; import { renderUsageTab } from "./app-render-usage-tab.ts"; -import { renderChatControls, renderTab, renderThemeToggle } from "./app-render.helpers.ts"; +import { + renderChatControls, + renderChatSessionSelect, + renderTab, + renderSidebarConnectionStatus, + renderTopbarThemeModeToggle, +} from "./app-render.helpers.ts"; import type { AppViewState } from "./app-view-state.ts"; import { loadAgentFileContent, loadAgentFiles, saveAgentFile } from "./controllers/agent-files.ts"; import { loadAgentIdentities, loadAgentIdentity } from "./controllers/agent-identity.ts"; @@ -16,6 +25,7 @@ import { ensureAgentConfigEntry, findAgentConfigEntryIndex, loadConfig, + openConfigFile, runUpdate, saveConfig, updateConfigFormValue, @@ -65,9 +75,11 @@ import { updateSkillEdit, updateSkillEnabled, } from "./controllers/skills.ts"; +import "./components/dashboard-header.ts"; import { buildExternalLinkRel, EXTERNAL_LINK_TARGET } from "./external-link.ts"; import { icons } from "./icons.ts"; import { normalizeBasePath, TAB_GROUPS, subtitleForTab, titleForTab } from "./navigation.ts"; +import { agentLogoUrl } from "./views/agents-utils.ts"; import { resolveAgentConfig, resolveConfiguredCronModelSuggestions, @@ -75,23 +87,53 @@ import { resolveModelPrimary, sortLocaleStrings, } from "./views/agents-utils.ts"; -import { renderAgents } from "./views/agents.ts"; -import { renderChannels } from "./views/channels.ts"; import { renderChat } from "./views/chat.ts"; +import { renderCommandPalette } from 
"./views/command-palette.ts"; import { renderConfig } from "./views/config.ts"; -import { renderCron } from "./views/cron.ts"; -import { renderDebug } from "./views/debug.ts"; import { renderExecApprovalPrompt } from "./views/exec-approval.ts"; import { renderGatewayUrlConfirmation } from "./views/gateway-url-confirmation.ts"; -import { renderInstances } from "./views/instances.ts"; -import { renderLogs } from "./views/logs.ts"; -import { renderNodes } from "./views/nodes.ts"; +import { renderLoginGate } from "./views/login-gate.ts"; import { renderOverview } from "./views/overview.ts"; -import { renderSessions } from "./views/sessions.ts"; -import { renderSkills } from "./views/skills.ts"; -const AVATAR_DATA_RE = /^data:/i; -const AVATAR_HTTP_RE = /^https?:\/\//i; +// Lazy-loaded view modules – deferred so the initial bundle stays small. +// Each loader resolves once; subsequent calls return the cached module. +type LazyState = { mod: T | null; promise: Promise | null }; + +let _pendingUpdate: (() => void) | undefined; + +function createLazy(loader: () => Promise): () => T | null { + const s: LazyState = { mod: null, promise: null }; + return () => { + if (s.mod) { + return s.mod; + } + if (!s.promise) { + s.promise = loader().then((m) => { + s.mod = m; + _pendingUpdate?.(); + return m; + }); + } + return null; + }; +} + +const lazyAgents = createLazy(() => import("./views/agents.ts")); +const lazyChannels = createLazy(() => import("./views/channels.ts")); +const lazyCron = createLazy(() => import("./views/cron.ts")); +const lazyDebug = createLazy(() => import("./views/debug.ts")); +const lazyInstances = createLazy(() => import("./views/instances.ts")); +const lazyLogs = createLazy(() => import("./views/logs.ts")); +const lazyNodes = createLazy(() => import("./views/nodes.ts")); +const lazySessions = createLazy(() => import("./views/sessions.ts")); +const lazySkills = createLazy(() => import("./views/skills.ts")); + +function lazyRender(getter: () => M | null, 
render: (mod: M) => unknown) { + const mod = getter(); + return mod ? render(mod) : nothing; +} + +const UPDATE_BANNER_DISMISS_KEY = "openclaw:control-ui:update-banner-dismissed:v1"; const CRON_THINKING_SUGGESTIONS = ["off", "minimal", "low", "medium", "high"]; const CRON_TIMEZONE_SUGGESTIONS = [ "UTC", @@ -130,6 +172,99 @@ function uniquePreserveOrder(values: string[]): string[] { return output; } +type DismissedUpdateBanner = { + latestVersion: string; + channel: string | null; + dismissedAtMs: number; +}; + +function loadDismissedUpdateBanner(): DismissedUpdateBanner | null { + try { + const raw = localStorage.getItem(UPDATE_BANNER_DISMISS_KEY); + if (!raw) { + return null; + } + const parsed = JSON.parse(raw) as Partial; + if (!parsed || typeof parsed.latestVersion !== "string") { + return null; + } + return { + latestVersion: parsed.latestVersion, + channel: typeof parsed.channel === "string" ? parsed.channel : null, + dismissedAtMs: typeof parsed.dismissedAtMs === "number" ? parsed.dismissedAtMs : Date.now(), + }; + } catch { + return null; + } +} + +function isUpdateBannerDismissed(updateAvailable: unknown): boolean { + const dismissed = loadDismissedUpdateBanner(); + if (!dismissed) { + return false; + } + const info = updateAvailable as { latestVersion?: unknown; channel?: unknown }; + const latestVersion = info && typeof info.latestVersion === "string" ? info.latestVersion : null; + const channel = info && typeof info.channel === "string" ? info.channel : null; + return Boolean( + latestVersion && dismissed.latestVersion === latestVersion && dismissed.channel === channel, + ); +} + +function dismissUpdateBanner(updateAvailable: unknown) { + const info = updateAvailable as { latestVersion?: unknown; channel?: unknown }; + const latestVersion = info && typeof info.latestVersion === "string" ? info.latestVersion : null; + if (!latestVersion) { + return; + } + const channel = info && typeof info.channel === "string" ? 
info.channel : null; + const payload: DismissedUpdateBanner = { + latestVersion, + channel, + dismissedAtMs: Date.now(), + }; + try { + localStorage.setItem(UPDATE_BANNER_DISMISS_KEY, JSON.stringify(payload)); + } catch { + // ignore + } +} + +const AVATAR_DATA_RE = /^data:/i; +const AVATAR_HTTP_RE = /^https?:\/\//i; +const COMMUNICATION_SECTION_KEYS = ["channels", "messages", "broadcast", "talk", "audio"] as const; +const APPEARANCE_SECTION_KEYS = ["__appearance__", "ui", "wizard"] as const; +const AUTOMATION_SECTION_KEYS = [ + "commands", + "hooks", + "bindings", + "cron", + "approvals", + "plugins", +] as const; +const INFRASTRUCTURE_SECTION_KEYS = [ + "gateway", + "web", + "browser", + "nodeHost", + "canvasHost", + "discovery", + "media", +] as const; +const AI_AGENTS_SECTION_KEYS = [ + "agents", + "models", + "skills", + "tools", + "memory", + "session", +] as const; +type CommunicationSectionKey = (typeof COMMUNICATION_SECTION_KEYS)[number]; +type AppearanceSectionKey = (typeof APPEARANCE_SECTION_KEYS)[number]; +type AutomationSectionKey = (typeof AUTOMATION_SECTION_KEYS)[number]; +type InfrastructureSectionKey = (typeof INFRASTRUCTURE_SECTION_KEYS)[number]; +type AiAgentsSectionKey = (typeof AI_AGENTS_SECTION_KEYS)[number]; + function resolveAssistantAvatarUrl(state: AppViewState): string | undefined { const list = state.agentsList?.agents ?? []; const parsed = parseAgentSessionKey(state.sessionKey); @@ -147,22 +282,30 @@ function resolveAssistantAvatarUrl(state: AppViewState): string | undefined { } export function renderApp(state: AppViewState) { - const openClawVersion = - (typeof state.hello?.server?.version === "string" && state.hello.server.version.trim()) || - state.updateAvailable?.currentVersion || - t("common.na"); - const availableUpdate = - state.updateAvailable && - state.updateAvailable.latestVersion !== state.updateAvailable.currentVersion - ? state.updateAvailable - : null; - const versionStatusClass = availableUpdate ? 
"warn" : "ok"; + const updatableState = state as AppViewState & { requestUpdate?: () => void }; + const requestHostUpdate = + typeof updatableState.requestUpdate === "function" + ? () => updatableState.requestUpdate?.() + : undefined; + _pendingUpdate = requestHostUpdate; + + // Gate: require successful gateway connection before showing the dashboard. + // The gateway URL confirmation overlay is always rendered so URL-param flows still work. + if (!state.connected) { + return html` + ${renderLoginGate(state)} + ${renderGatewayUrlConfirmation(state)} + `; + } + const presenceCount = state.presenceEntries.length; const sessionsCount = state.sessionsResult?.count ?? null; const cronNext = state.cronStatus?.nextWakeAtMs ?? null; const chatDisabledReason = state.connected ? null : t("chat.disconnected"); const isChat = state.tab === "chat"; const chatFocus = isChat && (state.settings.chatFocusMode || state.onboarding); + const navDrawerOpen = Boolean(state.navDrawerOpen && !chatFocus && !state.onboarding); + const navCollapsed = Boolean(state.settings.navCollapsed && !navDrawerOpen); const showThinking = state.onboarding ? false : state.settings.chatShowThinking; const assistantAvatarUrl = resolveAssistantAvatarUrl(state); const chatAvatarUrl = state.chatAvatarUrl ?? assistantAvatarUrl ?? null; @@ -234,114 +377,232 @@ export function renderApp(state: AppViewState) { : rawDeliveryToSuggestions; return html` -
+ ${renderCommandPalette({ + open: state.paletteOpen, + query: state.paletteQuery, + activeIndex: state.paletteActiveIndex, + onToggle: () => { + state.paletteOpen = !state.paletteOpen; + }, + onQueryChange: (q) => { + state.paletteQuery = q; + }, + onActiveIndexChange: (i) => { + state.paletteActiveIndex = i; + }, + onNavigate: (tab) => { + state.setTab(tab as import("./navigation.ts").Tab); + }, + onSlashCommand: (cmd) => { + state.setTab("chat" as import("./navigation.ts").Tab); + state.chatMessage = cmd.endsWith(" ") ? cmd : `${cmd} `; + }, + })} +
+
-
+
-
- -
-
OPENCLAW
-
Gateway Dashboard
-
+
+
-
-
-
- - ${t("common.version")} - ${openClawVersion} +
+ +
${renderTopbarThemeModeToggle(state)}
-
- - ${t("common.health")} - ${state.connected ? t("common.ok") : t("common.offline")} -
- ${renderThemeToggle(state)}
- +
${ - availableUpdate + state.updateAvailable && + state.updateAvailable.latestVersion !== state.updateAvailable.currentVersion && + !isUpdateBannerDismissed(state.updateAvailable) ? html`` : nothing } -
-
- ${state.tab === "usage" ? nothing : html`
${titleForTab(state.tab)}
`} - ${state.tab === "usage" ? nothing : html`
${subtitleForTab(state.tab)}
`} -
-
- ${state.lastError ? html`
${state.lastError}
` : nothing} - ${isChat ? renderChatControls(state) : nothing} -
-
+ ${ + state.tab === "config" + ? nothing + : html`
+
+ ${ + isChat + ? renderChatSessionSelect(state) + : html`
${titleForTab(state.tab)}
` + } + ${isChat ? nothing : html`
${subtitleForTab(state.tab)}
`} +
+
+ ${state.lastError ? html`
${state.lastError}
` : nothing} + ${isChat ? renderChatControls(state) : nothing} +
+
` + } ${ state.tab === "overview" @@ -357,6 +618,16 @@ export function renderApp(state: AppViewState) { cronEnabled: state.cronStatus?.enabled ?? null, cronNext, lastChannelsRefresh: state.channelsLastSuccess, + usageResult: state.usageResult, + sessionsResult: state.sessionsResult, + skillsReport: state.skillsReport, + cronJobs: state.cronJobs, + cronStatus: state.cronStatus, + attentionItems: state.attentionItems, + eventLog: state.eventLog, + overviewLogLines: state.overviewLogLines, + showGatewayToken: state.overviewShowGatewayToken, + showGatewayPassword: state.overviewShowGatewayPassword, onSettingsChange: (next) => state.applySettings(next), onPasswordChange: (next) => (state.password = next), onSessionKeyChange: (next) => { @@ -370,84 +641,123 @@ export function renderApp(state: AppViewState) { }); void state.loadAssistantIdentity(); }, + onToggleGatewayTokenVisibility: () => { + state.overviewShowGatewayToken = !state.overviewShowGatewayToken; + }, + onToggleGatewayPasswordVisibility: () => { + state.overviewShowGatewayPassword = !state.overviewShowGatewayPassword; + }, onConnect: () => state.connect(), onRefresh: () => state.loadOverview(), + onNavigate: (tab) => state.setTab(tab as import("./navigation.ts").Tab), + onRefreshLogs: () => state.loadOverview(), }) : nothing } ${ state.tab === "channels" - ? 
renderChannels({ - connected: state.connected, - loading: state.channelsLoading, - snapshot: state.channelsSnapshot, - lastError: state.channelsError, - lastSuccessAt: state.channelsLastSuccess, - whatsappMessage: state.whatsappLoginMessage, - whatsappQrDataUrl: state.whatsappLoginQrDataUrl, - whatsappConnected: state.whatsappLoginConnected, - whatsappBusy: state.whatsappBusy, - configSchema: state.configSchema, - configSchemaLoading: state.configSchemaLoading, - configForm: state.configForm, - configUiHints: state.configUiHints, - configSaving: state.configSaving, - configFormDirty: state.configFormDirty, - nostrProfileFormState: state.nostrProfileFormState, - nostrProfileAccountId: state.nostrProfileAccountId, - onRefresh: (probe) => loadChannels(state, probe), - onWhatsAppStart: (force) => state.handleWhatsAppStart(force), - onWhatsAppWait: () => state.handleWhatsAppWait(), - onWhatsAppLogout: () => state.handleWhatsAppLogout(), - onConfigPatch: (path, value) => updateConfigFormValue(state, path, value), - onConfigSave: () => state.handleChannelConfigSave(), - onConfigReload: () => state.handleChannelConfigReload(), - onNostrProfileEdit: (accountId, profile) => - state.handleNostrProfileEdit(accountId, profile), - onNostrProfileCancel: () => state.handleNostrProfileCancel(), - onNostrProfileFieldChange: (field, value) => - state.handleNostrProfileFieldChange(field, value), - onNostrProfileSave: () => state.handleNostrProfileSave(), - onNostrProfileImport: () => state.handleNostrProfileImport(), - onNostrProfileToggleAdvanced: () => state.handleNostrProfileToggleAdvanced(), - }) + ? 
lazyRender(lazyChannels, (m) => + m.renderChannels({ + connected: state.connected, + loading: state.channelsLoading, + snapshot: state.channelsSnapshot, + lastError: state.channelsError, + lastSuccessAt: state.channelsLastSuccess, + whatsappMessage: state.whatsappLoginMessage, + whatsappQrDataUrl: state.whatsappLoginQrDataUrl, + whatsappConnected: state.whatsappLoginConnected, + whatsappBusy: state.whatsappBusy, + configSchema: state.configSchema, + configSchemaLoading: state.configSchemaLoading, + configForm: state.configForm, + configUiHints: state.configUiHints, + configSaving: state.configSaving, + configFormDirty: state.configFormDirty, + nostrProfileFormState: state.nostrProfileFormState, + nostrProfileAccountId: state.nostrProfileAccountId, + onRefresh: (probe) => loadChannels(state, probe), + onWhatsAppStart: (force) => state.handleWhatsAppStart(force), + onWhatsAppWait: () => state.handleWhatsAppWait(), + onWhatsAppLogout: () => state.handleWhatsAppLogout(), + onConfigPatch: (path, value) => updateConfigFormValue(state, path, value), + onConfigSave: () => state.handleChannelConfigSave(), + onConfigReload: () => state.handleChannelConfigReload(), + onNostrProfileEdit: (accountId, profile) => + state.handleNostrProfileEdit(accountId, profile), + onNostrProfileCancel: () => state.handleNostrProfileCancel(), + onNostrProfileFieldChange: (field, value) => + state.handleNostrProfileFieldChange(field, value), + onNostrProfileSave: () => state.handleNostrProfileSave(), + onNostrProfileImport: () => state.handleNostrProfileImport(), + onNostrProfileToggleAdvanced: () => state.handleNostrProfileToggleAdvanced(), + }), + ) : nothing } ${ state.tab === "instances" - ? renderInstances({ - loading: state.presenceLoading, - entries: state.presenceEntries, - lastError: state.presenceError, - statusMessage: state.presenceStatus, - onRefresh: () => loadPresence(state), - }) + ? 
lazyRender(lazyInstances, (m) => + m.renderInstances({ + loading: state.presenceLoading, + entries: state.presenceEntries, + lastError: state.presenceError, + statusMessage: state.presenceStatus, + onRefresh: () => loadPresence(state), + }), + ) : nothing } ${ state.tab === "sessions" - ? renderSessions({ - loading: state.sessionsLoading, - result: state.sessionsResult, - error: state.sessionsError, - activeMinutes: state.sessionsFilterActive, - limit: state.sessionsFilterLimit, - includeGlobal: state.sessionsIncludeGlobal, - includeUnknown: state.sessionsIncludeUnknown, - basePath: state.basePath, - onFiltersChange: (next) => { - state.sessionsFilterActive = next.activeMinutes; - state.sessionsFilterLimit = next.limit; - state.sessionsIncludeGlobal = next.includeGlobal; - state.sessionsIncludeUnknown = next.includeUnknown; - }, - onRefresh: () => loadSessions(state), - onPatch: (key, patch) => patchSession(state, key, patch), - onDelete: (key) => deleteSessionAndRefresh(state, key), - }) + ? 
lazyRender(lazySessions, (m) => + m.renderSessions({ + loading: state.sessionsLoading, + result: state.sessionsResult, + error: state.sessionsError, + activeMinutes: state.sessionsFilterActive, + limit: state.sessionsFilterLimit, + includeGlobal: state.sessionsIncludeGlobal, + includeUnknown: state.sessionsIncludeUnknown, + basePath: state.basePath, + searchQuery: state.sessionsSearchQuery, + sortColumn: state.sessionsSortColumn, + sortDir: state.sessionsSortDir, + page: state.sessionsPage, + pageSize: state.sessionsPageSize, + actionsOpenKey: state.sessionsActionsOpenKey, + onFiltersChange: (next) => { + state.sessionsFilterActive = next.activeMinutes; + state.sessionsFilterLimit = next.limit; + state.sessionsIncludeGlobal = next.includeGlobal; + state.sessionsIncludeUnknown = next.includeUnknown; + }, + onSearchChange: (q) => { + state.sessionsSearchQuery = q; + state.sessionsPage = 0; + }, + onSortChange: (col, dir) => { + state.sessionsSortColumn = col; + state.sessionsSortDir = dir; + state.sessionsPage = 0; + }, + onPageChange: (p) => { + state.sessionsPage = p; + }, + onPageSizeChange: (s) => { + state.sessionsPageSize = s; + state.sessionsPage = 0; + }, + onActionsOpenChange: (key) => { + state.sessionsActionsOpenKey = key; + }, + onRefresh: () => loadSessions(state), + onPatch: (key, patch) => patchSession(state, key, patch), + onDelete: (key) => deleteSessionAndRefresh(state, key), + }), + ) : nothing } @@ -455,499 +765,559 @@ export function renderApp(state: AppViewState) { ${ state.tab === "cron" - ? 
renderCron({ - basePath: state.basePath, - loading: state.cronLoading, - jobsLoadingMore: state.cronJobsLoadingMore, - status: state.cronStatus, - jobs: visibleCronJobs, - jobsTotal: state.cronJobsTotal, - jobsHasMore: state.cronJobsHasMore, - jobsQuery: state.cronJobsQuery, - jobsEnabledFilter: state.cronJobsEnabledFilter, - jobsScheduleKindFilter: state.cronJobsScheduleKindFilter, - jobsLastStatusFilter: state.cronJobsLastStatusFilter, - jobsSortBy: state.cronJobsSortBy, - jobsSortDir: state.cronJobsSortDir, - error: state.cronError, - busy: state.cronBusy, - form: state.cronForm, - fieldErrors: state.cronFieldErrors, - canSubmit: !hasCronFormErrors(state.cronFieldErrors), - editingJobId: state.cronEditingJobId, - channels: state.channelsSnapshot?.channelMeta?.length - ? state.channelsSnapshot.channelMeta.map((entry) => entry.id) - : (state.channelsSnapshot?.channelOrder ?? []), - channelLabels: state.channelsSnapshot?.channelLabels ?? {}, - channelMeta: state.channelsSnapshot?.channelMeta ?? 
[], - runsJobId: state.cronRunsJobId, - runs: state.cronRuns, - runsTotal: state.cronRunsTotal, - runsHasMore: state.cronRunsHasMore, - runsLoadingMore: state.cronRunsLoadingMore, - runsScope: state.cronRunsScope, - runsStatuses: state.cronRunsStatuses, - runsDeliveryStatuses: state.cronRunsDeliveryStatuses, - runsStatusFilter: state.cronRunsStatusFilter, - runsQuery: state.cronRunsQuery, - runsSortDir: state.cronRunsSortDir, - agentSuggestions: cronAgentSuggestions, - modelSuggestions: cronModelSuggestions, - thinkingSuggestions: CRON_THINKING_SUGGESTIONS, - timezoneSuggestions: CRON_TIMEZONE_SUGGESTIONS, - deliveryToSuggestions, - accountSuggestions, - onFormChange: (patch) => { - state.cronForm = normalizeCronFormState({ ...state.cronForm, ...patch }); - state.cronFieldErrors = validateCronForm(state.cronForm); - }, - onRefresh: () => state.loadCron(), - onAdd: () => addCronJob(state), - onEdit: (job) => startCronEdit(state, job), - onClone: (job) => startCronClone(state, job), - onCancelEdit: () => cancelCronEdit(state), - onToggle: (job, enabled) => toggleCronJob(state, job, enabled), - onRun: (job, mode) => runCronJob(state, job, mode ?? "force"), - onRemove: (job) => removeCronJob(state, job), - onLoadRuns: async (jobId) => { - updateCronRunsFilter(state, { cronRunsScope: "job" }); - await loadCronRuns(state, jobId); - }, - onLoadMoreJobs: () => loadMoreCronJobs(state), - onJobsFiltersChange: async (patch) => { - updateCronJobsFilter(state, patch); - const shouldReload = - typeof patch.cronJobsQuery === "string" || - Boolean(patch.cronJobsEnabledFilter) || - Boolean(patch.cronJobsSortBy) || - Boolean(patch.cronJobsSortDir); - if (shouldReload) { + ? 
lazyRender(lazyCron, (m) => + m.renderCron({ + basePath: state.basePath, + loading: state.cronLoading, + status: state.cronStatus, + jobs: visibleCronJobs, + jobsLoadingMore: state.cronJobsLoadingMore, + jobsTotal: state.cronJobsTotal, + jobsHasMore: state.cronJobsHasMore, + jobsQuery: state.cronJobsQuery, + jobsEnabledFilter: state.cronJobsEnabledFilter, + jobsScheduleKindFilter: state.cronJobsScheduleKindFilter, + jobsLastStatusFilter: state.cronJobsLastStatusFilter, + jobsSortBy: state.cronJobsSortBy, + jobsSortDir: state.cronJobsSortDir, + editingJobId: state.cronEditingJobId, + error: state.cronError, + busy: state.cronBusy, + form: state.cronForm, + channels: state.channelsSnapshot?.channelMeta?.length + ? state.channelsSnapshot.channelMeta.map((entry) => entry.id) + : (state.channelsSnapshot?.channelOrder ?? []), + channelLabels: state.channelsSnapshot?.channelLabels ?? {}, + channelMeta: state.channelsSnapshot?.channelMeta ?? [], + runsJobId: state.cronRunsJobId, + runs: state.cronRuns, + runsTotal: state.cronRunsTotal, + runsHasMore: state.cronRunsHasMore, + runsLoadingMore: state.cronRunsLoadingMore, + runsScope: state.cronRunsScope, + runsStatuses: state.cronRunsStatuses, + runsDeliveryStatuses: state.cronRunsDeliveryStatuses, + runsStatusFilter: state.cronRunsStatusFilter, + runsQuery: state.cronRunsQuery, + runsSortDir: state.cronRunsSortDir, + fieldErrors: state.cronFieldErrors, + canSubmit: !hasCronFormErrors(state.cronFieldErrors), + agentSuggestions: cronAgentSuggestions, + modelSuggestions: cronModelSuggestions, + thinkingSuggestions: CRON_THINKING_SUGGESTIONS, + timezoneSuggestions: CRON_TIMEZONE_SUGGESTIONS, + deliveryToSuggestions, + accountSuggestions, + onFormChange: (patch) => { + state.cronForm = normalizeCronFormState({ ...state.cronForm, ...patch }); + state.cronFieldErrors = validateCronForm(state.cronForm); + }, + onRefresh: () => state.loadCron(), + onAdd: () => addCronJob(state), + onEdit: (job) => startCronEdit(state, job), + 
onClone: (job) => startCronClone(state, job), + onCancelEdit: () => cancelCronEdit(state), + onToggle: (job, enabled) => toggleCronJob(state, job, enabled), + onRun: (job, mode) => runCronJob(state, job, mode ?? "force"), + onRemove: (job) => removeCronJob(state, job), + onLoadRuns: async (jobId) => { + updateCronRunsFilter(state, { cronRunsScope: "job" }); + await loadCronRuns(state, jobId); + }, + onLoadMoreJobs: () => loadMoreCronJobs(state), + onJobsFiltersChange: async (patch) => { + updateCronJobsFilter(state, patch); + const shouldReload = + typeof patch.cronJobsQuery === "string" || + Boolean(patch.cronJobsEnabledFilter) || + Boolean(patch.cronJobsSortBy) || + Boolean(patch.cronJobsSortDir); + if (shouldReload) { + await reloadCronJobs(state); + } + }, + onJobsFiltersReset: async () => { + updateCronJobsFilter(state, { + cronJobsQuery: "", + cronJobsEnabledFilter: "all", + cronJobsScheduleKindFilter: "all", + cronJobsLastStatusFilter: "all", + cronJobsSortBy: "nextRunAtMs", + cronJobsSortDir: "asc", + }); await reloadCronJobs(state); - } - }, - onJobsFiltersReset: async () => { - updateCronJobsFilter(state, { - cronJobsQuery: "", - cronJobsEnabledFilter: "all", - cronJobsScheduleKindFilter: "all", - cronJobsLastStatusFilter: "all", - cronJobsSortBy: "nextRunAtMs", - cronJobsSortDir: "asc", - }); - await reloadCronJobs(state); - }, - onLoadMoreRuns: () => loadMoreCronRuns(state), - onRunsFiltersChange: async (patch) => { - updateCronRunsFilter(state, patch); - if (state.cronRunsScope === "all") { - await loadCronRuns(state, null); - return; - } - await loadCronRuns(state, state.cronRunsJobId); - }, - }) + }, + onLoadMoreRuns: () => loadMoreCronRuns(state), + onRunsFiltersChange: async (patch) => { + updateCronRunsFilter(state, patch); + if (state.cronRunsScope === "all") { + await loadCronRuns(state, null); + return; + } + await loadCronRuns(state, state.cronRunsJobId); + }, + }), + ) : nothing } ${ state.tab === "agents" - ? 
renderAgents({ - loading: state.agentsLoading, - error: state.agentsError, - agentsList: state.agentsList, - selectedAgentId: resolvedAgentId, - activePanel: state.agentsPanel, - configForm: configValue, - configLoading: state.configLoading, - configSaving: state.configSaving, - configDirty: state.configFormDirty, - channelsLoading: state.channelsLoading, - channelsError: state.channelsError, - channelsSnapshot: state.channelsSnapshot, - channelsLastSuccess: state.channelsLastSuccess, - cronLoading: state.cronLoading, - cronStatus: state.cronStatus, - cronJobs: state.cronJobs, - cronError: state.cronError, - agentFilesLoading: state.agentFilesLoading, - agentFilesError: state.agentFilesError, - agentFilesList: state.agentFilesList, - agentFileActive: state.agentFileActive, - agentFileContents: state.agentFileContents, - agentFileDrafts: state.agentFileDrafts, - agentFileSaving: state.agentFileSaving, - agentIdentityLoading: state.agentIdentityLoading, - agentIdentityError: state.agentIdentityError, - agentIdentityById: state.agentIdentityById, - agentSkillsLoading: state.agentSkillsLoading, - agentSkillsReport: state.agentSkillsReport, - agentSkillsError: state.agentSkillsError, - agentSkillsAgentId: state.agentSkillsAgentId, - toolsCatalogLoading: state.toolsCatalogLoading, - toolsCatalogError: state.toolsCatalogError, - toolsCatalogResult: state.toolsCatalogResult, - skillsFilter: state.skillsFilter, - onRefresh: async () => { - await loadAgents(state); - const nextSelected = - state.agentsSelectedId ?? - state.agentsList?.defaultId ?? - state.agentsList?.agents?.[0]?.id ?? - null; - await loadToolsCatalog(state, nextSelected); - const agentIds = state.agentsList?.agents?.map((entry) => entry.id) ?? 
[]; - if (agentIds.length > 0) { - void loadAgentIdentities(state, agentIds); - } - }, - onSelectAgent: (agentId) => { - if (state.agentsSelectedId === agentId) { - return; - } - state.agentsSelectedId = agentId; - state.agentFilesList = null; - state.agentFilesError = null; - state.agentFilesLoading = false; - state.agentFileActive = null; - state.agentFileContents = {}; - state.agentFileDrafts = {}; - state.agentSkillsReport = null; - state.agentSkillsError = null; - state.agentSkillsAgentId = null; - void loadAgentIdentity(state, agentId); - if (state.agentsPanel === "tools") { - void loadToolsCatalog(state, agentId); - } - if (state.agentsPanel === "files") { - void loadAgentFiles(state, agentId); - } - if (state.agentsPanel === "skills") { - void loadAgentSkills(state, agentId); - } - }, - onSelectPanel: (panel) => { - state.agentsPanel = panel; - if (panel === "files" && resolvedAgentId) { - if (state.agentFilesList?.agentId !== resolvedAgentId) { - state.agentFilesList = null; - state.agentFilesError = null; - state.agentFileActive = null; - state.agentFileContents = {}; - state.agentFileDrafts = {}; - void loadAgentFiles(state, resolvedAgentId); + ? lazyRender(lazyAgents, (m) => + m.renderAgents({ + basePath: state.basePath ?? 
"", + loading: state.agentsLoading, + error: state.agentsError, + agentsList: state.agentsList, + selectedAgentId: resolvedAgentId, + activePanel: state.agentsPanel, + config: { + form: configValue, + loading: state.configLoading, + saving: state.configSaving, + dirty: state.configFormDirty, + }, + channels: { + snapshot: state.channelsSnapshot, + loading: state.channelsLoading, + error: state.channelsError, + lastSuccess: state.channelsLastSuccess, + }, + cron: { + status: state.cronStatus, + jobs: state.cronJobs, + loading: state.cronLoading, + error: state.cronError, + }, + agentFiles: { + list: state.agentFilesList, + loading: state.agentFilesLoading, + error: state.agentFilesError, + active: state.agentFileActive, + contents: state.agentFileContents, + drafts: state.agentFileDrafts, + saving: state.agentFileSaving, + }, + agentIdentityLoading: state.agentIdentityLoading, + agentIdentityError: state.agentIdentityError, + agentIdentityById: state.agentIdentityById, + agentSkills: { + report: state.agentSkillsReport, + loading: state.agentSkillsLoading, + error: state.agentSkillsError, + agentId: state.agentSkillsAgentId, + filter: state.skillsFilter, + }, + toolsCatalog: { + loading: state.toolsCatalogLoading, + error: state.toolsCatalogError, + result: state.toolsCatalogResult, + }, + onRefresh: async () => { + await loadAgents(state); + const agentIds = state.agentsList?.agents?.map((entry) => entry.id) ?? []; + if (agentIds.length > 0) { + void loadAgentIdentities(state, agentIds); } - } - if (panel === "tools") { - void loadToolsCatalog(state, resolvedAgentId); - } - if (panel === "skills") { + const refreshedAgentId = + state.agentsSelectedId ?? + state.agentsList?.defaultId ?? + state.agentsList?.agents?.[0]?.id ?? 
+ null; + if (state.agentsPanel === "files" && refreshedAgentId) { + void loadAgentFiles(state, refreshedAgentId); + } + if (state.agentsPanel === "skills" && refreshedAgentId) { + void loadAgentSkills(state, refreshedAgentId); + } + if (state.agentsPanel === "tools" && refreshedAgentId) { + void loadToolsCatalog(state, refreshedAgentId); + } + if (state.agentsPanel === "channels") { + void loadChannels(state, false); + } + if (state.agentsPanel === "cron") { + void state.loadCron(); + } + }, + onSelectAgent: (agentId) => { + if (state.agentsSelectedId === agentId) { + return; + } + state.agentsSelectedId = agentId; + state.agentFilesList = null; + state.agentFilesError = null; + state.agentFilesLoading = false; + state.agentFileActive = null; + state.agentFileContents = {}; + state.agentFileDrafts = {}; + state.agentSkillsReport = null; + state.agentSkillsError = null; + state.agentSkillsAgentId = null; + state.toolsCatalogResult = null; + state.toolsCatalogError = null; + state.toolsCatalogLoading = false; + void loadAgentIdentity(state, agentId); + if (state.agentsPanel === "files") { + void loadAgentFiles(state, agentId); + } + if (state.agentsPanel === "tools") { + void loadToolsCatalog(state, agentId); + } + if (state.agentsPanel === "skills") { + void loadAgentSkills(state, agentId); + } + }, + onSelectPanel: (panel) => { + state.agentsPanel = panel; + if (panel === "files" && resolvedAgentId) { + if (state.agentFilesList?.agentId !== resolvedAgentId) { + state.agentFilesList = null; + state.agentFilesError = null; + state.agentFileActive = null; + state.agentFileContents = {}; + state.agentFileDrafts = {}; + void loadAgentFiles(state, resolvedAgentId); + } + } + if (panel === "skills") { + if (resolvedAgentId) { + void loadAgentSkills(state, resolvedAgentId); + } + } + if (panel === "tools" && resolvedAgentId) { + if ( + state.toolsCatalogResult?.agentId !== resolvedAgentId || + state.toolsCatalogError + ) { + void loadToolsCatalog(state, resolvedAgentId); 
+ } + } + if (panel === "channels") { + void loadChannels(state, false); + } + if (panel === "cron") { + void state.loadCron(); + } + }, + onLoadFiles: (agentId) => loadAgentFiles(state, agentId), + onSelectFile: (name) => { + state.agentFileActive = name; + if (!resolvedAgentId) { + return; + } + void loadAgentFileContent(state, resolvedAgentId, name); + }, + onFileDraftChange: (name, content) => { + state.agentFileDrafts = { ...state.agentFileDrafts, [name]: content }; + }, + onFileReset: (name) => { + const base = state.agentFileContents[name] ?? ""; + state.agentFileDrafts = { ...state.agentFileDrafts, [name]: base }; + }, + onFileSave: (name) => { + if (!resolvedAgentId) { + return; + } + const content = + state.agentFileDrafts[name] ?? state.agentFileContents[name] ?? ""; + void saveAgentFile(state, resolvedAgentId, name, content); + }, + onToolsProfileChange: (agentId, profile, clearAllow) => { + const index = + profile || clearAllow ? ensureAgentIndex(agentId) : findAgentIndex(agentId); + if (index < 0) { + return; + } + const basePath = ["agents", "list", index, "tools"]; + if (profile) { + updateConfigFormValue(state, [...basePath, "profile"], profile); + } else { + removeConfigFormValue(state, [...basePath, "profile"]); + } + if (clearAllow) { + removeConfigFormValue(state, [...basePath, "allow"]); + } + }, + onToolsOverridesChange: (agentId, alsoAllow, deny) => { + const index = + alsoAllow.length > 0 || deny.length > 0 + ? 
ensureAgentIndex(agentId) + : findAgentIndex(agentId); + if (index < 0) { + return; + } + const basePath = ["agents", "list", index, "tools"]; + if (alsoAllow.length > 0) { + updateConfigFormValue(state, [...basePath, "alsoAllow"], alsoAllow); + } else { + removeConfigFormValue(state, [...basePath, "alsoAllow"]); + } + if (deny.length > 0) { + updateConfigFormValue(state, [...basePath, "deny"], deny); + } else { + removeConfigFormValue(state, [...basePath, "deny"]); + } + }, + onConfigReload: () => loadConfig(state), + onConfigSave: () => saveAgentsConfig(state), + onChannelsRefresh: () => loadChannels(state, false), + onCronRefresh: () => state.loadCron(), + onCronRunNow: (jobId) => { + const job = state.cronJobs.find((entry) => entry.id === jobId); + if (!job) { + return; + } + void runCronJob(state, job, "force"); + }, + onSkillsFilterChange: (next) => (state.skillsFilter = next), + onSkillsRefresh: () => { if (resolvedAgentId) { void loadAgentSkills(state, resolvedAgentId); } - } - if (panel === "channels") { - void loadChannels(state, false); - } - if (panel === "cron") { - void state.loadCron(); - } - }, - onLoadFiles: (agentId) => loadAgentFiles(state, agentId), - onSelectFile: (name) => { - state.agentFileActive = name; - if (!resolvedAgentId) { - return; - } - void loadAgentFileContent(state, resolvedAgentId, name); - }, - onFileDraftChange: (name, content) => { - state.agentFileDrafts = { ...state.agentFileDrafts, [name]: content }; - }, - onFileReset: (name) => { - const base = state.agentFileContents[name] ?? ""; - state.agentFileDrafts = { ...state.agentFileDrafts, [name]: base }; - }, - onFileSave: (name) => { - if (!resolvedAgentId) { - return; - } - const content = - state.agentFileDrafts[name] ?? state.agentFileContents[name] ?? ""; - void saveAgentFile(state, resolvedAgentId, name, content); - }, - onToolsProfileChange: (agentId, profile, clearAllow) => { - const index = - profile || clearAllow ? 
ensureAgentIndex(agentId) : findAgentIndex(agentId); - if (index < 0) { - return; - } - const basePath = ["agents", "list", index, "tools"]; - if (profile) { - updateConfigFormValue(state, [...basePath, "profile"], profile); - } else { - removeConfigFormValue(state, [...basePath, "profile"]); - } - if (clearAllow) { - removeConfigFormValue(state, [...basePath, "allow"]); - } - }, - onToolsOverridesChange: (agentId, alsoAllow, deny) => { - const index = - alsoAllow.length > 0 || deny.length > 0 - ? ensureAgentIndex(agentId) - : findAgentIndex(agentId); - if (index < 0) { - return; - } - const basePath = ["agents", "list", index, "tools"]; - if (alsoAllow.length > 0) { - updateConfigFormValue(state, [...basePath, "alsoAllow"], alsoAllow); - } else { - removeConfigFormValue(state, [...basePath, "alsoAllow"]); - } - if (deny.length > 0) { - updateConfigFormValue(state, [...basePath, "deny"], deny); - } else { - removeConfigFormValue(state, [...basePath, "deny"]); - } - }, - onConfigReload: () => loadConfig(state), - onConfigSave: () => saveAgentsConfig(state), - onChannelsRefresh: () => loadChannels(state, false), - onCronRefresh: () => state.loadCron(), - onSkillsFilterChange: (next) => (state.skillsFilter = next), - onSkillsRefresh: () => { - if (resolvedAgentId) { - void loadAgentSkills(state, resolvedAgentId); - } - }, - onAgentSkillToggle: (agentId, skillName, enabled) => { - const index = ensureAgentIndex(agentId); - if (index < 0) { - return; - } - const list = (getCurrentConfigValue() as { agents?: { list?: unknown[] } } | null) - ?.agents?.list; - const entry = Array.isArray(list) - ? (list[index] as { skills?: unknown }) - : undefined; - const normalizedSkill = skillName.trim(); - if (!normalizedSkill) { - return; - } - const allSkills = - state.agentSkillsReport?.skills?.map((skill) => skill.name).filter(Boolean) ?? - []; - const existing = Array.isArray(entry?.skills) - ? 
entry.skills.map((name) => String(name).trim()).filter(Boolean) - : undefined; - const base = existing ?? allSkills; - const next = new Set(base); - if (enabled) { - next.add(normalizedSkill); - } else { - next.delete(normalizedSkill); - } - updateConfigFormValue(state, ["agents", "list", index, "skills"], [...next]); - }, - onAgentSkillsClear: (agentId) => { - const index = findAgentIndex(agentId); - if (index < 0) { - return; - } - removeConfigFormValue(state, ["agents", "list", index, "skills"]); - }, - onAgentSkillsDisableAll: (agentId) => { - const index = ensureAgentIndex(agentId); - if (index < 0) { - return; - } - updateConfigFormValue(state, ["agents", "list", index, "skills"], []); - }, - onModelChange: (agentId, modelId) => { - const index = modelId ? ensureAgentIndex(agentId) : findAgentIndex(agentId); - if (index < 0) { - return; - } - const list = (getCurrentConfigValue() as { agents?: { list?: unknown[] } } | null) - ?.agents?.list; - const basePath = ["agents", "list", index, "model"]; - if (!modelId) { - removeConfigFormValue(state, basePath); - return; - } - const entry = Array.isArray(list) - ? (list[index] as { model?: unknown }) - : undefined; - const existing = entry?.model; - if (existing && typeof existing === "object" && !Array.isArray(existing)) { - const fallbacks = (existing as { fallbacks?: unknown }).fallbacks; - const next = { - primary: modelId, - ...(Array.isArray(fallbacks) ? { fallbacks } : {}), - }; - updateConfigFormValue(state, basePath, next); - } else { - updateConfigFormValue(state, basePath, modelId); - } - }, - onModelFallbacksChange: (agentId, fallbacks) => { - const normalized = fallbacks.map((name) => name.trim()).filter(Boolean); - const currentConfig = getCurrentConfigValue(); - const resolvedConfig = resolveAgentConfig(currentConfig, agentId); - const effectivePrimary = - resolveModelPrimary(resolvedConfig.entry?.model) ?? 
- resolveModelPrimary(resolvedConfig.defaults?.model); - const effectiveFallbacks = resolveEffectiveModelFallbacks( - resolvedConfig.entry?.model, - resolvedConfig.defaults?.model, - ); - const index = - normalized.length > 0 - ? effectivePrimary - ? ensureAgentIndex(agentId) - : -1 - : (effectiveFallbacks?.length ?? 0) > 0 || findAgentIndex(agentId) >= 0 - ? ensureAgentIndex(agentId) - : -1; - if (index < 0) { - return; - } - const list = (getCurrentConfigValue() as { agents?: { list?: unknown[] } } | null) - ?.agents?.list; - const basePath = ["agents", "list", index, "model"]; - const entry = Array.isArray(list) - ? (list[index] as { model?: unknown }) - : undefined; - const existing = entry?.model; - const resolvePrimary = () => { - if (typeof existing === "string") { - return existing.trim() || null; + }, + onAgentSkillToggle: (agentId, skillName, enabled) => { + const index = ensureAgentIndex(agentId); + if (index < 0) { + return; } - if (existing && typeof existing === "object" && !Array.isArray(existing)) { - const primary = (existing as { primary?: unknown }).primary; - if (typeof primary === "string") { - const trimmed = primary.trim(); - return trimmed || null; - } + const list = ( + getCurrentConfigValue() as { agents?: { list?: unknown[] } } | null + )?.agents?.list; + const entry = Array.isArray(list) + ? (list[index] as { skills?: unknown }) + : undefined; + const normalizedSkill = skillName.trim(); + if (!normalizedSkill) { + return; } - return null; - }; - const primary = resolvePrimary() ?? effectivePrimary; - if (normalized.length === 0) { - if (primary) { - updateConfigFormValue(state, basePath, primary); + const allSkills = + state.agentSkillsReport?.skills?.map((skill) => skill.name).filter(Boolean) ?? + []; + const existing = Array.isArray(entry?.skills) + ? entry.skills.map((name) => String(name).trim()).filter(Boolean) + : undefined; + const base = existing ?? 
allSkills; + const next = new Set(base); + if (enabled) { + next.add(normalizedSkill); } else { - removeConfigFormValue(state, basePath); + next.delete(normalizedSkill); } - return; - } - if (!primary) { - return; - } - updateConfigFormValue(state, basePath, { primary, fallbacks: normalized }); - }, - }) + updateConfigFormValue(state, ["agents", "list", index, "skills"], [...next]); + }, + onAgentSkillsClear: (agentId) => { + const index = findAgentIndex(agentId); + if (index < 0) { + return; + } + removeConfigFormValue(state, ["agents", "list", index, "skills"]); + }, + onAgentSkillsDisableAll: (agentId) => { + const index = ensureAgentIndex(agentId); + if (index < 0) { + return; + } + updateConfigFormValue(state, ["agents", "list", index, "skills"], []); + }, + onModelChange: (agentId, modelId) => { + const index = modelId ? ensureAgentIndex(agentId) : findAgentIndex(agentId); + if (index < 0) { + return; + } + const list = ( + getCurrentConfigValue() as { agents?: { list?: unknown[] } } | null + )?.agents?.list; + const basePath = ["agents", "list", index, "model"]; + if (!modelId) { + removeConfigFormValue(state, basePath); + return; + } + const entry = Array.isArray(list) + ? (list[index] as { model?: unknown }) + : undefined; + const existing = entry?.model; + if (existing && typeof existing === "object" && !Array.isArray(existing)) { + const fallbacks = (existing as { fallbacks?: unknown }).fallbacks; + const next = { + primary: modelId, + ...(Array.isArray(fallbacks) ? { fallbacks } : {}), + }; + updateConfigFormValue(state, basePath, next); + } else { + updateConfigFormValue(state, basePath, modelId); + } + }, + onModelFallbacksChange: (agentId, fallbacks) => { + const normalized = fallbacks.map((name) => name.trim()).filter(Boolean); + const currentConfig = getCurrentConfigValue(); + const resolvedConfig = resolveAgentConfig(currentConfig, agentId); + const effectivePrimary = + resolveModelPrimary(resolvedConfig.entry?.model) ?? 
+ resolveModelPrimary(resolvedConfig.defaults?.model); + const effectiveFallbacks = resolveEffectiveModelFallbacks( + resolvedConfig.entry?.model, + resolvedConfig.defaults?.model, + ); + const index = + normalized.length > 0 + ? effectivePrimary + ? ensureAgentIndex(agentId) + : -1 + : (effectiveFallbacks?.length ?? 0) > 0 || findAgentIndex(agentId) >= 0 + ? ensureAgentIndex(agentId) + : -1; + if (index < 0) { + return; + } + const list = ( + getCurrentConfigValue() as { agents?: { list?: unknown[] } } | null + )?.agents?.list; + const basePath = ["agents", "list", index, "model"]; + const entry = Array.isArray(list) + ? (list[index] as { model?: unknown }) + : undefined; + const existing = entry?.model; + const resolvePrimary = () => { + if (typeof existing === "string") { + return existing.trim() || null; + } + if (existing && typeof existing === "object" && !Array.isArray(existing)) { + const primary = (existing as { primary?: unknown }).primary; + if (typeof primary === "string") { + const trimmed = primary.trim(); + return trimmed || null; + } + } + return null; + }; + const primary = resolvePrimary() ?? effectivePrimary; + if (normalized.length === 0) { + if (primary) { + updateConfigFormValue(state, basePath, primary); + } else { + removeConfigFormValue(state, basePath); + } + return; + } + if (!primary) { + return; + } + updateConfigFormValue(state, basePath, { primary, fallbacks: normalized }); + }, + onSetDefault: (agentId) => { + if (!configValue) { + return; + } + updateConfigFormValue(state, ["agents", "defaultId"], agentId); + }, + }), + ) : nothing } ${ state.tab === "skills" - ? 
renderSkills({ - loading: state.skillsLoading, - report: state.skillsReport, - error: state.skillsError, - filter: state.skillsFilter, - edits: state.skillEdits, - messages: state.skillMessages, - busyKey: state.skillsBusyKey, - onFilterChange: (next) => (state.skillsFilter = next), - onRefresh: () => loadSkills(state, { clearMessages: true }), - onToggle: (key, enabled) => updateSkillEnabled(state, key, enabled), - onEdit: (key, value) => updateSkillEdit(state, key, value), - onSaveKey: (key) => saveSkillApiKey(state, key), - onInstall: (skillKey, name, installId) => - installSkill(state, skillKey, name, installId), - }) + ? lazyRender(lazySkills, (m) => + m.renderSkills({ + connected: state.connected, + loading: state.skillsLoading, + report: state.skillsReport, + error: state.skillsError, + filter: state.skillsFilter, + edits: state.skillEdits, + messages: state.skillMessages, + busyKey: state.skillsBusyKey, + onFilterChange: (next) => (state.skillsFilter = next), + onRefresh: () => loadSkills(state, { clearMessages: true }), + onToggle: (key, enabled) => updateSkillEnabled(state, key, enabled), + onEdit: (key, value) => updateSkillEdit(state, key, value), + onSaveKey: (key) => saveSkillApiKey(state, key), + onInstall: (skillKey, name, installId) => + installSkill(state, skillKey, name, installId), + }), + ) : nothing } ${ state.tab === "nodes" - ? renderNodes({ - loading: state.nodesLoading, - nodes: state.nodes, - devicesLoading: state.devicesLoading, - devicesError: state.devicesError, - devicesList: state.devicesList, - configForm: - state.configForm ?? 
- (state.configSnapshot?.config as Record | null), - configLoading: state.configLoading, - configSaving: state.configSaving, - configDirty: state.configFormDirty, - configFormMode: state.configFormMode, - execApprovalsLoading: state.execApprovalsLoading, - execApprovalsSaving: state.execApprovalsSaving, - execApprovalsDirty: state.execApprovalsDirty, - execApprovalsSnapshot: state.execApprovalsSnapshot, - execApprovalsForm: state.execApprovalsForm, - execApprovalsSelectedAgent: state.execApprovalsSelectedAgent, - execApprovalsTarget: state.execApprovalsTarget, - execApprovalsTargetNodeId: state.execApprovalsTargetNodeId, - onRefresh: () => loadNodes(state), - onDevicesRefresh: () => loadDevices(state), - onDeviceApprove: (requestId) => approveDevicePairing(state, requestId), - onDeviceReject: (requestId) => rejectDevicePairing(state, requestId), - onDeviceRotate: (deviceId, role, scopes) => - rotateDeviceToken(state, { deviceId, role, scopes }), - onDeviceRevoke: (deviceId, role) => revokeDeviceToken(state, { deviceId, role }), - onLoadConfig: () => loadConfig(state), - onLoadExecApprovals: () => { - const target = - state.execApprovalsTarget === "node" && state.execApprovalsTargetNodeId - ? 
{ kind: "node" as const, nodeId: state.execApprovalsTargetNodeId } - : { kind: "gateway" as const }; - return loadExecApprovals(state, target); - }, - onBindDefault: (nodeId) => { - if (nodeId) { - updateConfigFormValue(state, ["tools", "exec", "node"], nodeId); - } else { - removeConfigFormValue(state, ["tools", "exec", "node"]); - } - }, - onBindAgent: (agentIndex, nodeId) => { - const basePath = ["agents", "list", agentIndex, "tools", "exec", "node"]; - if (nodeId) { - updateConfigFormValue(state, basePath, nodeId); - } else { - removeConfigFormValue(state, basePath); - } - }, - onSaveBindings: () => saveConfig(state), - onExecApprovalsTargetChange: (kind, nodeId) => { - state.execApprovalsTarget = kind; - state.execApprovalsTargetNodeId = nodeId; - state.execApprovalsSnapshot = null; - state.execApprovalsForm = null; - state.execApprovalsDirty = false; - state.execApprovalsSelectedAgent = null; - }, - onExecApprovalsSelectAgent: (agentId) => { - state.execApprovalsSelectedAgent = agentId; - }, - onExecApprovalsPatch: (path, value) => - updateExecApprovalsFormValue(state, path, value), - onExecApprovalsRemove: (path) => removeExecApprovalsFormValue(state, path), - onSaveExecApprovals: () => { - const target = - state.execApprovalsTarget === "node" && state.execApprovalsTargetNodeId - ? { kind: "node" as const, nodeId: state.execApprovalsTargetNodeId } - : { kind: "gateway" as const }; - return saveExecApprovals(state, target); - }, - }) + ? lazyRender(lazyNodes, (m) => + m.renderNodes({ + loading: state.nodesLoading, + nodes: state.nodes, + devicesLoading: state.devicesLoading, + devicesError: state.devicesError, + devicesList: state.devicesList, + configForm: + state.configForm ?? 
+ (state.configSnapshot?.config as Record | null), + configLoading: state.configLoading, + configSaving: state.configSaving, + configDirty: state.configFormDirty, + configFormMode: state.configFormMode, + execApprovalsLoading: state.execApprovalsLoading, + execApprovalsSaving: state.execApprovalsSaving, + execApprovalsDirty: state.execApprovalsDirty, + execApprovalsSnapshot: state.execApprovalsSnapshot, + execApprovalsForm: state.execApprovalsForm, + execApprovalsSelectedAgent: state.execApprovalsSelectedAgent, + execApprovalsTarget: state.execApprovalsTarget, + execApprovalsTargetNodeId: state.execApprovalsTargetNodeId, + onRefresh: () => loadNodes(state), + onDevicesRefresh: () => loadDevices(state), + onDeviceApprove: (requestId) => approveDevicePairing(state, requestId), + onDeviceReject: (requestId) => rejectDevicePairing(state, requestId), + onDeviceRotate: (deviceId, role, scopes) => + rotateDeviceToken(state, { deviceId, role, scopes }), + onDeviceRevoke: (deviceId, role) => revokeDeviceToken(state, { deviceId, role }), + onLoadConfig: () => loadConfig(state), + onLoadExecApprovals: () => { + const target = + state.execApprovalsTarget === "node" && state.execApprovalsTargetNodeId + ? 
{ kind: "node" as const, nodeId: state.execApprovalsTargetNodeId } + : { kind: "gateway" as const }; + return loadExecApprovals(state, target); + }, + onBindDefault: (nodeId) => { + if (nodeId) { + updateConfigFormValue(state, ["tools", "exec", "node"], nodeId); + } else { + removeConfigFormValue(state, ["tools", "exec", "node"]); + } + }, + onBindAgent: (agentIndex, nodeId) => { + const basePath = ["agents", "list", agentIndex, "tools", "exec", "node"]; + if (nodeId) { + updateConfigFormValue(state, basePath, nodeId); + } else { + removeConfigFormValue(state, basePath); + } + }, + onSaveBindings: () => saveConfig(state), + onExecApprovalsTargetChange: (kind, nodeId) => { + state.execApprovalsTarget = kind; + state.execApprovalsTargetNodeId = nodeId; + state.execApprovalsSnapshot = null; + state.execApprovalsForm = null; + state.execApprovalsDirty = false; + state.execApprovalsSelectedAgent = null; + }, + onExecApprovalsSelectAgent: (agentId) => { + state.execApprovalsSelectedAgent = agentId; + }, + onExecApprovalsPatch: (path, value) => + updateExecApprovalsFormValue(state, path, value), + onExecApprovalsRemove: (path) => removeExecApprovalsFormValue(state, path), + onSaveExecApprovals: () => { + const target = + state.execApprovalsTarget === "node" && state.execApprovalsTargetNodeId + ? 
{ kind: "node" as const, nodeId: state.execApprovalsTargetNodeId } + : { kind: "gateway" as const }; + return saveExecApprovals(state, target); + }, + }), + ) : nothing } @@ -1008,7 +1378,9 @@ export function renderApp(state: AppViewState) { }); }, onChatScroll: (event) => state.handleChatScroll(event), + getDraft: () => state.chatMessage, onDraftChange: (next) => (state.chatMessage = next), + onRequestUpdate: requestHostUpdate, attachments: state.chatAttachments, onAttachmentsChange: (next) => (state.chatAttachments = next), onSend: () => state.handleSendChat(), @@ -1016,6 +1388,45 @@ export function renderApp(state: AppViewState) { onAbort: () => void state.handleAbortChat(), onQueueRemove: (id) => state.removeQueuedMessage(id), onNewSession: () => state.handleSendChat("/new", { restoreDraft: true }), + onClearHistory: async () => { + if (!state.client || !state.connected) { + return; + } + try { + await state.client.request("sessions.reset", { key: state.sessionKey }); + state.chatMessages = []; + state.chatStream = null; + state.chatRunId = null; + await loadChatHistory(state); + } catch (err) { + state.lastError = String(err); + } + }, + agentsList: state.agentsList, + currentAgentId: resolvedAgentId ?? 
"main", + onAgentChange: (agentId: string) => { + state.sessionKey = buildAgentMainSessionKey({ agentId }); + state.chatMessages = []; + state.chatStream = null; + state.chatRunId = null; + state.applySettings({ + ...state.settings, + sessionKey: state.sessionKey, + lastActiveSessionKey: state.sessionKey, + }); + void loadChatHistory(state); + void state.loadAssistantIdentity(); + }, + onNavigateToAgent: () => { + state.agentsSelectedId = resolvedAgentId; + state.setTab("agents" as import("./navigation.ts").Tab); + }, + onSessionSelect: (key: string) => { + state.setSessionKey(key); + state.chatMessages = []; + void loadChatHistory(state); + void state.loadAssistantIdentity(); + }, showNewMessages: state.chatNewMessagesBelow && !state.chatManualRefreshInFlight, onScrollToBottom: () => state.scrollToBottom(), // Sidebar props for tool output viewing @@ -1028,6 +1439,7 @@ export function renderApp(state: AppViewState) { onSplitRatioChange: (ratio: number) => state.handleSplitRatioChange(ratio), assistantName: state.assistantName, assistantAvatar: state.assistantAvatar, + basePath: state.basePath ?? 
"", }) : nothing } @@ -1048,11 +1460,48 @@ export function renderApp(state: AppViewState) { schemaLoading: state.configSchemaLoading, uiHints: state.configUiHints, formMode: state.configFormMode, + showModeToggle: true, formValue: state.configForm, originalValue: state.configFormOriginal, searchQuery: state.configSearchQuery, - activeSection: state.configActiveSection, - activeSubsection: state.configActiveSubsection, + activeSection: + state.configActiveSection && + (COMMUNICATION_SECTION_KEYS.includes( + state.configActiveSection as CommunicationSectionKey, + ) || + APPEARANCE_SECTION_KEYS.includes( + state.configActiveSection as AppearanceSectionKey, + ) || + AUTOMATION_SECTION_KEYS.includes( + state.configActiveSection as AutomationSectionKey, + ) || + INFRASTRUCTURE_SECTION_KEYS.includes( + state.configActiveSection as InfrastructureSectionKey, + ) || + AI_AGENTS_SECTION_KEYS.includes( + state.configActiveSection as AiAgentsSectionKey, + )) + ? null + : state.configActiveSection, + activeSubsection: + state.configActiveSection && + (COMMUNICATION_SECTION_KEYS.includes( + state.configActiveSection as CommunicationSectionKey, + ) || + APPEARANCE_SECTION_KEYS.includes( + state.configActiveSection as AppearanceSectionKey, + ) || + AUTOMATION_SECTION_KEYS.includes( + state.configActiveSection as AutomationSectionKey, + ) || + INFRASTRUCTURE_SECTION_KEYS.includes( + state.configActiveSection as InfrastructureSectionKey, + ) || + AI_AGENTS_SECTION_KEYS.includes( + state.configActiveSection as AiAgentsSectionKey, + )) + ? null + : state.configActiveSubsection, onRawChange: (next) => { state.configRaw = next; }, @@ -1068,56 +1517,400 @@ export function renderApp(state: AppViewState) { onSave: () => saveConfig(state), onApply: () => applyConfig(state), onUpdate: () => runUpdate(state), + onOpenFile: () => openConfigFile(state), + version: state.hello?.server?.version ?? 
"", + theme: state.theme, + themeMode: state.themeMode, + setTheme: (t, ctx) => state.setTheme(t, ctx), + setThemeMode: (m, ctx) => state.setThemeMode(m, ctx), + gatewayUrl: state.settings.gatewayUrl, + assistantName: state.assistantName, + configPath: state.configSnapshot?.path ?? null, + excludeSections: [ + ...COMMUNICATION_SECTION_KEYS, + ...AUTOMATION_SECTION_KEYS, + ...INFRASTRUCTURE_SECTION_KEYS, + ...AI_AGENTS_SECTION_KEYS, + "ui", + "wizard", + ], + includeVirtualSections: false, + }) + : nothing + } + + ${ + state.tab === "communications" + ? renderConfig({ + raw: state.configRaw, + originalRaw: state.configRawOriginal, + valid: state.configValid, + issues: state.configIssues, + loading: state.configLoading, + saving: state.configSaving, + applying: state.configApplying, + updating: state.updateRunning, + connected: state.connected, + schema: state.configSchema, + schemaLoading: state.configSchemaLoading, + uiHints: state.configUiHints, + formMode: state.communicationsFormMode, + formValue: state.configForm, + originalValue: state.configFormOriginal, + searchQuery: state.communicationsSearchQuery, + activeSection: + state.communicationsActiveSection && + !COMMUNICATION_SECTION_KEYS.includes( + state.communicationsActiveSection as CommunicationSectionKey, + ) + ? null + : state.communicationsActiveSection, + activeSubsection: + state.communicationsActiveSection && + !COMMUNICATION_SECTION_KEYS.includes( + state.communicationsActiveSection as CommunicationSectionKey, + ) + ? 
null + : state.communicationsActiveSubsection, + onRawChange: (next) => { + state.configRaw = next; + }, + onFormModeChange: (mode) => (state.communicationsFormMode = mode), + onFormPatch: (path, value) => updateConfigFormValue(state, path, value), + onSearchChange: (query) => (state.communicationsSearchQuery = query), + onSectionChange: (section) => { + state.communicationsActiveSection = section; + state.communicationsActiveSubsection = null; + }, + onSubsectionChange: (section) => (state.communicationsActiveSubsection = section), + onReload: () => loadConfig(state), + onSave: () => saveConfig(state), + onApply: () => applyConfig(state), + onUpdate: () => runUpdate(state), + onOpenFile: () => openConfigFile(state), + version: state.hello?.server?.version ?? "", + theme: state.theme, + themeMode: state.themeMode, + setTheme: (t, ctx) => state.setTheme(t, ctx), + setThemeMode: (m, ctx) => state.setThemeMode(m, ctx), + gatewayUrl: state.settings.gatewayUrl, + assistantName: state.assistantName, + configPath: state.configSnapshot?.path ?? null, + navRootLabel: "Communication", + includeSections: [...COMMUNICATION_SECTION_KEYS], + includeVirtualSections: false, + }) + : nothing + } + + ${ + state.tab === "appearance" + ? renderConfig({ + raw: state.configRaw, + originalRaw: state.configRawOriginal, + valid: state.configValid, + issues: state.configIssues, + loading: state.configLoading, + saving: state.configSaving, + applying: state.configApplying, + updating: state.updateRunning, + connected: state.connected, + schema: state.configSchema, + schemaLoading: state.configSchemaLoading, + uiHints: state.configUiHints, + formMode: state.appearanceFormMode, + formValue: state.configForm, + originalValue: state.configFormOriginal, + searchQuery: state.appearanceSearchQuery, + activeSection: + state.appearanceActiveSection && + !APPEARANCE_SECTION_KEYS.includes( + state.appearanceActiveSection as AppearanceSectionKey, + ) + ? 
null + : state.appearanceActiveSection, + activeSubsection: + state.appearanceActiveSection && + !APPEARANCE_SECTION_KEYS.includes( + state.appearanceActiveSection as AppearanceSectionKey, + ) + ? null + : state.appearanceActiveSubsection, + onRawChange: (next) => { + state.configRaw = next; + }, + onFormModeChange: (mode) => (state.appearanceFormMode = mode), + onFormPatch: (path, value) => updateConfigFormValue(state, path, value), + onSearchChange: (query) => (state.appearanceSearchQuery = query), + onSectionChange: (section) => { + state.appearanceActiveSection = section; + state.appearanceActiveSubsection = null; + }, + onSubsectionChange: (section) => (state.appearanceActiveSubsection = section), + onReload: () => loadConfig(state), + onSave: () => saveConfig(state), + onApply: () => applyConfig(state), + onUpdate: () => runUpdate(state), + onOpenFile: () => openConfigFile(state), + version: state.hello?.server?.version ?? "", + theme: state.theme, + themeMode: state.themeMode, + setTheme: (t, ctx) => state.setTheme(t, ctx), + setThemeMode: (m, ctx) => state.setThemeMode(m, ctx), + gatewayUrl: state.settings.gatewayUrl, + assistantName: state.assistantName, + configPath: state.configSnapshot?.path ?? null, + navRootLabel: "Appearance", + includeSections: [...APPEARANCE_SECTION_KEYS], + includeVirtualSections: true, + }) + : nothing + } + + ${ + state.tab === "automation" + ? 
renderConfig({ + raw: state.configRaw, + originalRaw: state.configRawOriginal, + valid: state.configValid, + issues: state.configIssues, + loading: state.configLoading, + saving: state.configSaving, + applying: state.configApplying, + updating: state.updateRunning, + connected: state.connected, + schema: state.configSchema, + schemaLoading: state.configSchemaLoading, + uiHints: state.configUiHints, + formMode: state.automationFormMode, + formValue: state.configForm, + originalValue: state.configFormOriginal, + searchQuery: state.automationSearchQuery, + activeSection: + state.automationActiveSection && + !AUTOMATION_SECTION_KEYS.includes( + state.automationActiveSection as AutomationSectionKey, + ) + ? null + : state.automationActiveSection, + activeSubsection: + state.automationActiveSection && + !AUTOMATION_SECTION_KEYS.includes( + state.automationActiveSection as AutomationSectionKey, + ) + ? null + : state.automationActiveSubsection, + onRawChange: (next) => { + state.configRaw = next; + }, + onFormModeChange: (mode) => (state.automationFormMode = mode), + onFormPatch: (path, value) => updateConfigFormValue(state, path, value), + onSearchChange: (query) => (state.automationSearchQuery = query), + onSectionChange: (section) => { + state.automationActiveSection = section; + state.automationActiveSubsection = null; + }, + onSubsectionChange: (section) => (state.automationActiveSubsection = section), + onReload: () => loadConfig(state), + onSave: () => saveConfig(state), + onApply: () => applyConfig(state), + onUpdate: () => runUpdate(state), + onOpenFile: () => openConfigFile(state), + version: state.hello?.server?.version ?? "", + theme: state.theme, + themeMode: state.themeMode, + setTheme: (t, ctx) => state.setTheme(t, ctx), + setThemeMode: (m, ctx) => state.setThemeMode(m, ctx), + gatewayUrl: state.settings.gatewayUrl, + assistantName: state.assistantName, + configPath: state.configSnapshot?.path ?? 
null, + navRootLabel: "Automation", + includeSections: [...AUTOMATION_SECTION_KEYS], + includeVirtualSections: false, + }) + : nothing + } + + ${ + state.tab === "infrastructure" + ? renderConfig({ + raw: state.configRaw, + originalRaw: state.configRawOriginal, + valid: state.configValid, + issues: state.configIssues, + loading: state.configLoading, + saving: state.configSaving, + applying: state.configApplying, + updating: state.updateRunning, + connected: state.connected, + schema: state.configSchema, + schemaLoading: state.configSchemaLoading, + uiHints: state.configUiHints, + formMode: state.infrastructureFormMode, + formValue: state.configForm, + originalValue: state.configFormOriginal, + searchQuery: state.infrastructureSearchQuery, + activeSection: + state.infrastructureActiveSection && + !INFRASTRUCTURE_SECTION_KEYS.includes( + state.infrastructureActiveSection as InfrastructureSectionKey, + ) + ? null + : state.infrastructureActiveSection, + activeSubsection: + state.infrastructureActiveSection && + !INFRASTRUCTURE_SECTION_KEYS.includes( + state.infrastructureActiveSection as InfrastructureSectionKey, + ) + ? null + : state.infrastructureActiveSubsection, + onRawChange: (next) => { + state.configRaw = next; + }, + onFormModeChange: (mode) => (state.infrastructureFormMode = mode), + onFormPatch: (path, value) => updateConfigFormValue(state, path, value), + onSearchChange: (query) => (state.infrastructureSearchQuery = query), + onSectionChange: (section) => { + state.infrastructureActiveSection = section; + state.infrastructureActiveSubsection = null; + }, + onSubsectionChange: (section) => (state.infrastructureActiveSubsection = section), + onReload: () => loadConfig(state), + onSave: () => saveConfig(state), + onApply: () => applyConfig(state), + onUpdate: () => runUpdate(state), + onOpenFile: () => openConfigFile(state), + version: state.hello?.server?.version ?? 
"", + theme: state.theme, + themeMode: state.themeMode, + setTheme: (t, ctx) => state.setTheme(t, ctx), + setThemeMode: (m, ctx) => state.setThemeMode(m, ctx), + gatewayUrl: state.settings.gatewayUrl, + assistantName: state.assistantName, + configPath: state.configSnapshot?.path ?? null, + navRootLabel: "Infrastructure", + includeSections: [...INFRASTRUCTURE_SECTION_KEYS], + includeVirtualSections: false, + }) + : nothing + } + + ${ + state.tab === "aiAgents" + ? renderConfig({ + raw: state.configRaw, + originalRaw: state.configRawOriginal, + valid: state.configValid, + issues: state.configIssues, + loading: state.configLoading, + saving: state.configSaving, + applying: state.configApplying, + updating: state.updateRunning, + connected: state.connected, + schema: state.configSchema, + schemaLoading: state.configSchemaLoading, + uiHints: state.configUiHints, + formMode: state.aiAgentsFormMode, + formValue: state.configForm, + originalValue: state.configFormOriginal, + searchQuery: state.aiAgentsSearchQuery, + activeSection: + state.aiAgentsActiveSection && + !AI_AGENTS_SECTION_KEYS.includes( + state.aiAgentsActiveSection as AiAgentsSectionKey, + ) + ? null + : state.aiAgentsActiveSection, + activeSubsection: + state.aiAgentsActiveSection && + !AI_AGENTS_SECTION_KEYS.includes( + state.aiAgentsActiveSection as AiAgentsSectionKey, + ) + ? 
null + : state.aiAgentsActiveSubsection, + onRawChange: (next) => { + state.configRaw = next; + }, + onFormModeChange: (mode) => (state.aiAgentsFormMode = mode), + onFormPatch: (path, value) => updateConfigFormValue(state, path, value), + onSearchChange: (query) => (state.aiAgentsSearchQuery = query), + onSectionChange: (section) => { + state.aiAgentsActiveSection = section; + state.aiAgentsActiveSubsection = null; + }, + onSubsectionChange: (section) => (state.aiAgentsActiveSubsection = section), + onReload: () => loadConfig(state), + onSave: () => saveConfig(state), + onApply: () => applyConfig(state), + onUpdate: () => runUpdate(state), + onOpenFile: () => openConfigFile(state), + version: state.hello?.server?.version ?? "", + theme: state.theme, + themeMode: state.themeMode, + setTheme: (t, ctx) => state.setTheme(t, ctx), + setThemeMode: (m, ctx) => state.setThemeMode(m, ctx), + gatewayUrl: state.settings.gatewayUrl, + assistantName: state.assistantName, + configPath: state.configSnapshot?.path ?? null, + navRootLabel: "AI & Agents", + includeSections: [...AI_AGENTS_SECTION_KEYS], + includeVirtualSections: false, }) : nothing } ${ state.tab === "debug" - ? renderDebug({ - loading: state.debugLoading, - status: state.debugStatus, - health: state.debugHealth, - models: state.debugModels, - heartbeat: state.debugHeartbeat, - eventLog: state.eventLog, - callMethod: state.debugCallMethod, - callParams: state.debugCallParams, - callResult: state.debugCallResult, - callError: state.debugCallError, - onCallMethodChange: (next) => (state.debugCallMethod = next), - onCallParamsChange: (next) => (state.debugCallParams = next), - onRefresh: () => loadDebug(state), - onCall: () => callDebugMethod(state), - }) + ? 
lazyRender(lazyDebug, (m) => + m.renderDebug({ + loading: state.debugLoading, + status: state.debugStatus, + health: state.debugHealth, + models: state.debugModels, + heartbeat: state.debugHeartbeat, + eventLog: state.eventLog, + methods: (state.hello?.features?.methods ?? []).toSorted(), + callMethod: state.debugCallMethod, + callParams: state.debugCallParams, + callResult: state.debugCallResult, + callError: state.debugCallError, + onCallMethodChange: (next) => (state.debugCallMethod = next), + onCallParamsChange: (next) => (state.debugCallParams = next), + onRefresh: () => loadDebug(state), + onCall: () => callDebugMethod(state), + }), + ) : nothing } ${ state.tab === "logs" - ? renderLogs({ - loading: state.logsLoading, - error: state.logsError, - file: state.logsFile, - entries: state.logsEntries, - filterText: state.logsFilterText, - levelFilters: state.logsLevelFilters, - autoFollow: state.logsAutoFollow, - truncated: state.logsTruncated, - onFilterTextChange: (next) => (state.logsFilterText = next), - onLevelToggle: (level, enabled) => { - state.logsLevelFilters = { ...state.logsLevelFilters, [level]: enabled }; - }, - onToggleAutoFollow: (next) => (state.logsAutoFollow = next), - onRefresh: () => loadLogs(state, { reset: true }), - onExport: (lines, label) => state.exportLogs(lines, label), - onScroll: (event) => state.handleLogsScroll(event), - }) + ? 
lazyRender(lazyLogs, (m) => + m.renderLogs({ + loading: state.logsLoading, + error: state.logsError, + file: state.logsFile, + entries: state.logsEntries, + filterText: state.logsFilterText, + levelFilters: state.logsLevelFilters, + autoFollow: state.logsAutoFollow, + truncated: state.logsTruncated, + onFilterTextChange: (next) => (state.logsFilterText = next), + onLevelToggle: (level, enabled) => { + state.logsLevelFilters = { ...state.logsLevelFilters, [level]: enabled }; + }, + onToggleAutoFollow: (next) => (state.logsAutoFollow = next), + onRefresh: () => loadLogs(state, { reset: true }), + onExport: (lines, label) => state.exportLogs(lines, label), + onScroll: (event) => state.handleLogsScroll(event), + }), + ) : nothing }
${renderExecApprovalPrompt(state)} ${renderGatewayUrlConfirmation(state)} + ${nothing}
`; } diff --git a/ui/src/ui/app-settings.test.ts b/ui/src/ui/app-settings.test.ts index 48411bbe5b0..e259031d76e 100644 --- a/ui/src/ui/app-settings.test.ts +++ b/ui/src/ui/app-settings.test.ts @@ -1,26 +1,107 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; -import { setTabFromRoute } from "./app-settings.ts"; -import type { Tab } from "./navigation.ts"; +import { + applyResolvedTheme, + applySettings, + attachThemeListener, + setTabFromRoute, + syncThemeWithSettings, +} from "./app-settings.ts"; +import type { ThemeMode, ThemeName } from "./theme.ts"; -type SettingsHost = Parameters[0] & { +type Tab = + | "agents" + | "overview" + | "channels" + | "instances" + | "sessions" + | "usage" + | "cron" + | "skills" + | "nodes" + | "chat" + | "config" + | "communications" + | "appearance" + | "automation" + | "infrastructure" + | "aiAgents" + | "debug" + | "logs"; + +type SettingsHost = { + settings: { + gatewayUrl: string; + token: string; + sessionKey: string; + lastActiveSessionKey: string; + theme: ThemeName; + themeMode: ThemeMode; + chatFocusMode: boolean; + chatShowThinking: boolean; + splitRatio: number; + navCollapsed: boolean; + navWidth: number; + navGroupsCollapsed: Record; + }; + theme: ThemeName & ThemeMode; + themeMode: ThemeMode; + themeResolved: import("./theme.ts").ResolvedTheme; + applySessionKey: string; + sessionKey: string; + tab: Tab; + connected: boolean; + chatHasAutoScrolled: boolean; + logsAtBottom: boolean; + eventLog: unknown[]; + eventLogBuffer: unknown[]; + basePath: string; + themeMedia: MediaQueryList | null; + themeMediaHandler: ((event: MediaQueryListEvent) => void) | null; logsPollInterval: number | null; debugPollInterval: number | null; }; +function createStorageMock(): Storage { + const store = new Map(); + return { + get length() { + return store.size; + }, + clear() { + store.clear(); + }, + getItem(key: string) { + return store.get(key) ?? 
null; + }, + key(index: number) { + return Array.from(store.keys())[index] ?? null; + }, + removeItem(key: string) { + store.delete(key); + }, + setItem(key: string, value: string) { + store.set(key, String(value)); + }, + }; +} + const createHost = (tab: Tab): SettingsHost => ({ settings: { gatewayUrl: "", token: "", sessionKey: "main", lastActiveSessionKey: "main", - theme: "system", + theme: "claw", + themeMode: "system", chatFocusMode: false, chatShowThinking: true, splitRatio: 0.6, navCollapsed: false, + navWidth: 220, navGroupsCollapsed: {}, }, - theme: "system", + theme: "claw" as unknown as ThemeName & ThemeMode, + themeMode: "system", themeResolved: "dark", applySessionKey: "main", sessionKey: "main", @@ -39,11 +120,12 @@ const createHost = (tab: Tab): SettingsHost => ({ describe("setTabFromRoute", () => { beforeEach(() => { - vi.useFakeTimers(); + vi.stubGlobal("localStorage", createStorageMock()); + vi.stubGlobal("navigator", { language: "en-US" } as Navigator); }); afterEach(() => { - vi.useRealTimers(); + vi.unstubAllGlobals(); }); it("starts and stops log polling based on the tab", () => { @@ -67,4 +149,76 @@ describe("setTabFromRoute", () => { setTabFromRoute(host, "chat"); expect(host.debugPollInterval).toBeNull(); }); + + it("re-resolves the active palette when only themeMode changes", () => { + const host = createHost("chat"); + host.settings.theme = "knot"; + host.settings.themeMode = "dark"; + host.theme = "knot" as unknown as ThemeName & ThemeMode; + host.themeMode = "dark"; + host.themeResolved = "openknot"; + + applySettings(host, { + ...host.settings, + themeMode: "light", + }); + + expect(host.theme).toBe("knot"); + expect(host.themeMode).toBe("light"); + expect(host.themeResolved).toBe("openknot-light"); + }); + + it("syncs both theme family and mode from persisted settings", () => { + const host = createHost("chat"); + host.settings.theme = "dash"; + host.settings.themeMode = "light"; + + syncThemeWithSettings(host); + + 
expect(host.theme).toBe("dash"); + expect(host.themeMode).toBe("light"); + expect(host.themeResolved).toBe("dash-light"); + }); + + it("applies named system themes on OS preference changes", () => { + const listeners: Array<(event: MediaQueryListEvent) => void> = []; + const matchMedia = vi.fn().mockReturnValue({ + matches: false, + addEventListener: (_name: string, handler: (event: MediaQueryListEvent) => void) => { + listeners.push(handler); + }, + removeEventListener: vi.fn(), + }); + vi.stubGlobal("matchMedia", matchMedia); + Object.defineProperty(window, "matchMedia", { + configurable: true, + value: matchMedia, + }); + + const host = createHost("chat"); + host.theme = "knot" as unknown as ThemeName & ThemeMode; + host.themeMode = "system"; + + attachThemeListener(host); + listeners[0]?.({ matches: true } as MediaQueryListEvent); + expect(host.themeResolved).toBe("openknot"); + + listeners[0]?.({ matches: false } as MediaQueryListEvent); + expect(host.themeResolved).toBe("openknot"); + }); + + it("normalizes light family themes to the shared light CSS token", () => { + const root = { + dataset: {} as DOMStringMap, + style: { colorScheme: "" } as CSSStyleDeclaration & { colorScheme: string }, + }; + vi.stubGlobal("document", { documentElement: root } as Document); + + const host = createHost("chat"); + applyResolvedTheme(host, "dash-light"); + + expect(host.themeResolved).toBe("dash-light"); + expect(root.dataset.theme).toBe("dash-light"); + expect(root.style.colorScheme).toBe("light"); + }); }); diff --git a/ui/src/ui/app-settings.ts b/ui/src/ui/app-settings.ts index 55dd59ace0d..50575826813 100644 --- a/ui/src/ui/app-settings.ts +++ b/ui/src/ui/app-settings.ts @@ -1,3 +1,4 @@ +import { roleScopesAllow } from "../../../src/shared/operator-scope-compat.js"; import { refreshChat } from "./app-chat.ts"; import { startLogsPolling, @@ -9,15 +10,10 @@ import { scheduleChatScroll, scheduleLogsScroll } from "./app-scroll.ts"; import type { OpenClawApp } from 
"./app.ts"; import { loadAgentIdentities, loadAgentIdentity } from "./controllers/agent-identity.ts"; import { loadAgentSkills } from "./controllers/agent-skills.ts"; -import { loadAgents, loadToolsCatalog } from "./controllers/agents.ts"; +import { loadAgents } from "./controllers/agents.ts"; import { loadChannels } from "./controllers/channels.ts"; import { loadConfig, loadConfigSchema } from "./controllers/config.ts"; -import { - loadCronJobs, - loadCronModelSuggestions, - loadCronRuns, - loadCronStatus, -} from "./controllers/cron.ts"; +import { loadCronJobs, loadCronRuns, loadCronStatus } from "./controllers/cron.ts"; import { loadDebug } from "./controllers/debug.ts"; import { loadDevices } from "./controllers/devices.ts"; import { loadExecApprovals } from "./controllers/exec-approvals.ts"; @@ -26,6 +22,7 @@ import { loadNodes } from "./controllers/nodes.ts"; import { loadPresence } from "./controllers/presence.ts"; import { loadSessions } from "./controllers/sessions.ts"; import { loadSkills } from "./controllers/skills.ts"; +import { loadUsage } from "./controllers/usage.ts"; import { inferBasePathFromPathname, normalizeBasePath, @@ -36,13 +33,15 @@ import { } from "./navigation.ts"; import { saveSettings, type UiSettings } from "./storage.ts"; import { startThemeTransition, type ThemeTransitionContext } from "./theme-transition.ts"; -import { resolveTheme, type ResolvedTheme, type ThemeMode } from "./theme.ts"; -import type { AgentsListResult } from "./types.ts"; +import { resolveTheme, type ResolvedTheme, type ThemeMode, type ThemeName } from "./theme.ts"; +import type { AgentsListResult, AttentionItem } from "./types.ts"; +import { resetChatViewState } from "./views/chat.ts"; type SettingsHost = { settings: UiSettings; password?: string; - theme: ThemeMode; + theme: ThemeName; + themeMode: ThemeMode; themeResolved: ResolvedTheme; applySessionKey: string; sessionKey: string; @@ -56,9 +55,8 @@ type SettingsHost = { agentsList?: AgentsListResult | null; 
agentsSelectedId?: string | null; agentsPanel?: "overview" | "files" | "tools" | "skills" | "channels" | "cron"; - themeMedia: MediaQueryList | null; - themeMediaHandler: ((event: MediaQueryListEvent) => void) | null; pendingGatewayUrl?: string | null; + systemThemeCleanup?: (() => void) | null; pendingGatewayToken?: string | null; }; @@ -69,9 +67,10 @@ export function applySettings(host: SettingsHost, next: UiSettings) { }; host.settings = normalized; saveSettings(normalized); - if (next.theme !== host.theme) { + if (next.theme !== host.theme || next.themeMode !== host.themeMode) { host.theme = next.theme; - applyResolvedTheme(host, resolveTheme(next.theme)); + host.themeMode = next.themeMode; + applyResolvedTheme(host, resolveTheme(next.theme, next.themeMode)); } host.applySessionKey = host.settings.lastActiveSessionKey; } @@ -166,18 +165,36 @@ export function setTab(host: SettingsHost, next: Tab) { applyTabSelection(host, next, { refreshPolicy: "always", syncUrl: true }); } -export function setTheme(host: SettingsHost, next: ThemeMode, context?: ThemeTransitionContext) { +export function setTheme(host: SettingsHost, next: ThemeName, context?: ThemeTransitionContext) { + const resolved = resolveTheme(next, host.themeMode); const applyTheme = () => { - host.theme = next; applySettings(host, { ...host.settings, theme: next }); - applyResolvedTheme(host, resolveTheme(next)); }; startThemeTransition({ - nextTheme: next, + nextTheme: resolved, applyTheme, context, - currentTheme: host.theme, + currentTheme: host.themeResolved, }); + syncSystemThemeListener(host); +} + +export function setThemeMode( + host: SettingsHost, + next: ThemeMode, + context?: ThemeTransitionContext, +) { + const resolved = resolveTheme(host.theme, next); + const applyMode = () => { + applySettings(host, { ...host.settings, themeMode: next }); + }; + startThemeTransition({ + nextTheme: resolved, + applyTheme: applyMode, + context, + currentTheme: host.themeResolved, + }); + 
syncSystemThemeListener(host); } export async function refreshActiveTab(host: SettingsHost) { @@ -201,7 +218,6 @@ export async function refreshActiveTab(host: SettingsHost) { } if (host.tab === "agents") { await loadAgents(host as unknown as OpenClawApp); - await loadToolsCatalog(host as unknown as OpenClawApp); await loadConfig(host as unknown as OpenClawApp); const agentIds = host.agentsList?.agents?.map((entry) => entry.id) ?? []; if (agentIds.length > 0) { @@ -235,7 +251,14 @@ export async function refreshActiveTab(host: SettingsHost) { !host.chatHasAutoScrolled, ); } - if (host.tab === "config") { + if ( + host.tab === "config" || + host.tab === "communications" || + host.tab === "appearance" || + host.tab === "automation" || + host.tab === "infrastructure" || + host.tab === "aiAgents" + ) { await loadConfigSchema(host as unknown as OpenClawApp); await loadConfig(host as unknown as OpenClawApp); } @@ -262,8 +285,19 @@ export function inferBasePath() { } export function syncThemeWithSettings(host: SettingsHost) { - host.theme = host.settings.theme ?? "system"; - applyResolvedTheme(host, resolveTheme(host.theme)); + host.theme = host.settings.theme ?? "claw"; + host.themeMode = host.settings.themeMode ?? "system"; + applyResolvedTheme(host, resolveTheme(host.theme, host.themeMode)); + syncSystemThemeListener(host); +} + +export function attachThemeListener(host: SettingsHost) { + syncSystemThemeListener(host); +} + +export function detachThemeListener(host: SettingsHost) { + host.systemThemeCleanup?.(); + host.systemThemeCleanup = null; } export function applyResolvedTheme(host: SettingsHost, resolved: ResolvedTheme) { @@ -272,45 +306,45 @@ export function applyResolvedTheme(host: SettingsHost, resolved: ResolvedTheme) return; } const root = document.documentElement; + const themeMode = resolved.endsWith("light") ? 
"light" : "dark"; root.dataset.theme = resolved; - root.style.colorScheme = resolved; + root.dataset.themeMode = themeMode; + root.style.colorScheme = themeMode; } -export function attachThemeListener(host: SettingsHost) { - if (typeof window === "undefined" || typeof window.matchMedia !== "function") { +function syncSystemThemeListener(host: SettingsHost) { + // Clean up existing listener if mode is not "system" + if (host.themeMode !== "system") { + host.systemThemeCleanup?.(); + host.systemThemeCleanup = null; return; } - host.themeMedia = window.matchMedia("(prefers-color-scheme: dark)"); - host.themeMediaHandler = (event) => { - if (host.theme !== "system") { + + // Skip if listener already attached for this host + if (host.systemThemeCleanup) { + return; + } + + if (typeof globalThis.matchMedia !== "function") { + return; + } + + const mql = globalThis.matchMedia("(prefers-color-scheme: light)"); + const onChange = () => { + if (host.themeMode !== "system") { return; } - applyResolvedTheme(host, event.matches ? 
"dark" : "light"); + applyResolvedTheme(host, resolveTheme(host.theme, "system")); }; - if (typeof host.themeMedia.addEventListener === "function") { - host.themeMedia.addEventListener("change", host.themeMediaHandler); + if (typeof mql.addEventListener === "function") { + mql.addEventListener("change", onChange); + host.systemThemeCleanup = () => mql.removeEventListener("change", onChange); return; } - const legacy = host.themeMedia as MediaQueryList & { - addListener: (cb: (event: MediaQueryListEvent) => void) => void; - }; - legacy.addListener(host.themeMediaHandler); -} - -export function detachThemeListener(host: SettingsHost) { - if (!host.themeMedia || !host.themeMediaHandler) { - return; + if (typeof mql.addListener === "function") { + mql.addListener(onChange); + host.systemThemeCleanup = () => mql.removeListener(onChange); } - if (typeof host.themeMedia.removeEventListener === "function") { - host.themeMedia.removeEventListener("change", host.themeMediaHandler); - return; - } - const legacy = host.themeMedia as MediaQueryList & { - removeListener: (cb: (event: MediaQueryListEvent) => void) => void; - }; - legacy.removeListener(host.themeMediaHandler); - host.themeMedia = null; - host.themeMediaHandler = null; } export function syncTabWithLocation(host: SettingsHost, replace: boolean) { @@ -354,9 +388,16 @@ function applyTabSelection( next: Tab, options: { refreshPolicy: "always" | "connected"; syncUrl?: boolean }, ) { + const prev = host.tab; if (host.tab !== next) { host.tab = next; } + + // Cleanup chat module state when navigating away from chat + if (prev === "chat" && next !== "chat") { + resetChatViewState(); + } + if (next === "chat") { host.chatHasAutoScrolled = false; } @@ -419,13 +460,143 @@ export function syncUrlWithSessionKey(host: SettingsHost, sessionKey: string, re } export async function loadOverview(host: SettingsHost) { - await Promise.all([ - loadChannels(host as unknown as OpenClawApp, false), - loadPresence(host as unknown as 
OpenClawApp), - loadSessions(host as unknown as OpenClawApp), - loadCronStatus(host as unknown as OpenClawApp), - loadDebug(host as unknown as OpenClawApp), + const app = host as unknown as OpenClawApp; + await Promise.allSettled([ + loadChannels(app, false), + loadPresence(app), + loadSessions(app), + loadCronStatus(app), + loadCronJobs(app), + loadDebug(app), + loadSkills(app), + loadUsage(app), + loadOverviewLogs(app), ]); + buildAttentionItems(app); +} + +export function hasOperatorReadAccess( + auth: { role?: string; scopes?: readonly string[] } | null, +): boolean { + if (!auth?.scopes) { + return false; + } + return roleScopesAllow({ + role: auth.role ?? "operator", + requestedScopes: ["operator.read"], + allowedScopes: auth.scopes, + }); +} + +export function hasMissingSkillDependencies( + missing: Record | null | undefined, +): boolean { + if (!missing) { + return false; + } + return Object.values(missing).some((value) => Array.isArray(value) && value.length > 0); +} + +async function loadOverviewLogs(host: OpenClawApp) { + if (!host.client || !host.connected) { + return; + } + try { + const res = await host.client.request("logs.tail", { + cursor: host.overviewLogCursor || undefined, + limit: 100, + maxBytes: 50_000, + }); + const payload = res as { + cursor?: number; + lines?: unknown; + }; + const lines = Array.isArray(payload.lines) + ? 
payload.lines.filter((line): line is string => typeof line === "string") + : []; + host.overviewLogLines = [...host.overviewLogLines, ...lines].slice(-500); + if (typeof payload.cursor === "number") { + host.overviewLogCursor = payload.cursor; + } + } catch { + /* non-critical */ + } +} + +function buildAttentionItems(host: OpenClawApp) { + const items: AttentionItem[] = []; + + if (host.lastError) { + items.push({ + severity: "error", + icon: "x", + title: "Gateway Error", + description: host.lastError, + }); + } + + const hello = host.hello; + const auth = (hello as { auth?: { role?: string; scopes?: string[] } } | null)?.auth ?? null; + if (auth?.scopes && !hasOperatorReadAccess(auth)) { + items.push({ + severity: "warning", + icon: "key", + title: "Missing operator.read scope", + description: + "This connection does not have the operator.read scope. Some features may be unavailable.", + href: "https://docs.openclaw.ai/web/dashboard", + external: true, + }); + } + + const skills = host.skillsReport?.skills ?? []; + const missingDeps = skills.filter((s) => !s.disabled && hasMissingSkillDependencies(s.missing)); + if (missingDeps.length > 0) { + const names = missingDeps.slice(0, 3).map((s) => s.name); + const more = missingDeps.length > 3 ? ` +${missingDeps.length - 3} more` : ""; + items.push({ + severity: "warning", + icon: "zap", + title: "Skills with missing dependencies", + description: `${names.join(", ")}${more}`, + }); + } + + const blocked = skills.filter((s) => s.blockedByAllowlist); + if (blocked.length > 0) { + items.push({ + severity: "warning", + icon: "shield", + title: `${blocked.length} skill${blocked.length > 1 ? "s" : ""} blocked`, + description: blocked.map((s) => s.name).join(", "), + }); + } + + const cronJobs = host.cronJobs ?? 
[]; + const failedCron = cronJobs.filter((j) => j.state?.lastStatus === "error"); + if (failedCron.length > 0) { + items.push({ + severity: "error", + icon: "clock", + title: `${failedCron.length} cron job${failedCron.length > 1 ? "s" : ""} failed`, + description: failedCron.map((j) => j.name).join(", "), + }); + } + + const now = Date.now(); + const overdue = cronJobs.filter( + (j) => j.enabled && j.state?.nextRunAtMs != null && now - j.state.nextRunAtMs > 300_000, + ); + if (overdue.length > 0) { + items.push({ + severity: "warning", + icon: "clock", + title: `${overdue.length} overdue job${overdue.length > 1 ? "s" : ""}`, + description: overdue.map((j) => j.name).join(", "), + }); + } + + host.attentionItems = items; } export async function loadChannelsTab(host: SettingsHost) { @@ -437,18 +608,12 @@ export async function loadChannelsTab(host: SettingsHost) { } export async function loadCron(host: SettingsHost) { - const cronHost = host as unknown as OpenClawApp; + const app = host as unknown as OpenClawApp; + const activeCronJobId = app.cronRunsScope === "job" ? 
app.cronRunsJobId : null; await Promise.all([ - loadChannels(host as unknown as OpenClawApp, false), - loadCronStatus(cronHost), - loadCronJobs(cronHost), - loadCronModelSuggestions(cronHost), + loadChannels(app, false), + loadCronStatus(app), + loadCronJobs(app), + loadCronRuns(app, activeCronJobId), ]); - if (cronHost.cronRunsScope === "all") { - await loadCronRuns(cronHost, null); - return; - } - if (cronHost.cronRunsJobId) { - await loadCronRuns(cronHost, cronHost.cronRunsJobId); - } } diff --git a/ui/src/ui/app-view-state.ts b/ui/src/ui/app-view-state.ts index 2029bd8f8f4..ad2910625b6 100644 --- a/ui/src/ui/app-view-state.ts +++ b/ui/src/ui/app-view-state.ts @@ -9,17 +9,19 @@ import type { GatewayBrowserClient, GatewayHelloOk } from "./gateway.ts"; import type { Tab } from "./navigation.ts"; import type { UiSettings } from "./storage.ts"; import type { ThemeTransitionContext } from "./theme-transition.ts"; -import type { ThemeMode } from "./theme.ts"; +import type { ResolvedTheme, ThemeMode, ThemeName } from "./theme.ts"; import type { AgentsListResult, AgentsFilesListResult, AgentIdentityResult, + AttentionItem, ChannelsStatusSnapshot, ConfigSnapshot, ConfigUiHints, - HealthSnapshot, + HealthSummary, LogEntry, LogLevel, + ModelCatalogEntry, NostrProfile, PresenceEntry, SessionsUsageResult, @@ -27,8 +29,8 @@ import type { SessionUsageTimeSeries, SessionsListResult, SkillStatusReport, - ToolsCatalogResult, StatusSummary, + ToolsCatalogResult, } from "./types.ts"; import type { ChatAttachment, ChatQueueItem } from "./ui-types.ts"; import type { NostrProfileFormState } from "./views/channels.nostr-profile-form.ts"; @@ -37,12 +39,16 @@ import type { SessionLogEntry } from "./views/usage.ts"; export type AppViewState = { settings: UiSettings; password: string; + loginShowGatewayToken: boolean; + loginShowGatewayPassword: boolean; tab: Tab; onboarding: boolean; basePath: string; connected: boolean; - theme: ThemeMode; - themeResolved: "light" | "dark"; + theme: 
ThemeName; + themeMode: ThemeMode; + themeResolved: ResolvedTheme; + themeOrder: ThemeName[]; hello: GatewayHelloOk | null; lastError: string | null; lastErrorCode: string | null; @@ -65,11 +71,15 @@ export type AppViewState = { fallbackStatus: FallbackStatus | null; chatAvatarUrl: string | null; chatThinkingLevel: string | null; + chatModelOverrides: Record; + chatModelsLoading: boolean; + chatModelCatalog: ModelCatalogEntry[]; chatQueue: ChatQueueItem[]; chatManualRefreshInFlight: boolean; nodesLoading: boolean; nodes: Array>; chatNewMessagesBelow: boolean; + navDrawerOpen: boolean; sidebarOpen: boolean; sidebarContent: string | null; sidebarError: string | null; @@ -110,6 +120,26 @@ export type AppViewState = { configSearchQuery: string; configActiveSection: string | null; configActiveSubsection: string | null; + communicationsFormMode: "form" | "raw"; + communicationsSearchQuery: string; + communicationsActiveSection: string | null; + communicationsActiveSubsection: string | null; + appearanceFormMode: "form" | "raw"; + appearanceSearchQuery: string; + appearanceActiveSection: string | null; + appearanceActiveSubsection: string | null; + automationFormMode: "form" | "raw"; + automationSearchQuery: string; + automationActiveSection: string | null; + automationActiveSubsection: string | null; + infrastructureFormMode: "form" | "raw"; + infrastructureSearchQuery: string; + infrastructureActiveSection: string | null; + infrastructureActiveSubsection: string | null; + aiAgentsFormMode: "form" | "raw"; + aiAgentsSearchQuery: string; + aiAgentsActiveSection: string | null; + aiAgentsActiveSubsection: string | null; channelsLoading: boolean; channelsSnapshot: ChannelsStatusSnapshot | null; channelsError: string | null; @@ -155,6 +185,12 @@ export type AppViewState = { sessionsIncludeGlobal: boolean; sessionsIncludeUnknown: boolean; sessionsHideCron: boolean; + sessionsSearchQuery: string; + sessionsSortColumn: "key" | "kind" | "updated" | "tokens"; + sessionsSortDir: 
"asc" | "desc"; + sessionsPage: number; + sessionsPageSize: number; + sessionsActionsOpenKey: string | null; usageLoading: boolean; usageResult: SessionsUsageResult | null; usageCostSummary: CostUsageSummary | null; @@ -233,10 +269,13 @@ export type AppViewState = { skillEdits: Record; skillMessages: Record; skillsBusyKey: string | null; + healthLoading: boolean; + healthResult: HealthSummary | null; + healthError: string | null; debugLoading: boolean; debugStatus: StatusSummary | null; - debugHealth: HealthSnapshot | null; - debugModels: unknown[]; + debugHealth: HealthSummary | null; + debugModels: ModelCatalogEntry[]; debugHeartbeat: unknown; debugCallMethod: string; debugCallParams: string; @@ -256,11 +295,21 @@ export type AppViewState = { logsMaxBytes: number; logsAtBottom: boolean; updateAvailable: import("./types.js").UpdateAvailable | null; + attentionItems: AttentionItem[]; + paletteOpen: boolean; + paletteQuery: string; + paletteActiveIndex: number; + streamMode: boolean; + overviewShowGatewayToken: boolean; + overviewShowGatewayPassword: boolean; + overviewLogLines: string[]; + overviewLogCursor: number; client: GatewayBrowserClient | null; refreshSessionsAfterChat: Set; connect: () => void; setTab: (tab: Tab) => void; - setTheme: (theme: ThemeMode, context?: ThemeTransitionContext) => void; + setTheme: (theme: ThemeName, context?: ThemeTransitionContext) => void; + setThemeMode: (mode: ThemeMode, context?: ThemeTransitionContext) => void; applySettings: (next: UiSettings) => void; loadOverview: () => Promise; loadAssistantIdentity: () => Promise; diff --git a/ui/src/ui/app.ts b/ui/src/ui/app.ts index 6467ca9e394..1b3971a41f6 100644 --- a/ui/src/ui/app.ts +++ b/ui/src/ui/app.ts @@ -42,6 +42,7 @@ import { loadOverview as loadOverviewInternal, setTab as setTabInternal, setTheme as setThemeInternal, + setThemeMode as setThemeModeInternal, onPopState as onPopStateInternal, } from "./app-settings.ts"; import { @@ -52,8 +53,8 @@ import { } from 
"./app-tool-stream.ts"; import type { AppViewState } from "./app-view-state.ts"; import { normalizeAssistantIdentity } from "./assistant-identity.ts"; +import { exportChatMarkdown } from "./chat/export.ts"; import { loadAssistantIdentity as loadAssistantIdentityInternal } from "./controllers/assistant-identity.ts"; -import type { CronFieldErrors } from "./controllers/cron.ts"; import type { DevicePairingList } from "./controllers/devices.ts"; import type { ExecApprovalRequest } from "./controllers/exec-approval.ts"; import type { ExecApprovalsFile, ExecApprovalsSnapshot } from "./controllers/exec-approvals.ts"; @@ -61,7 +62,7 @@ import type { SkillMessage } from "./controllers/skills.ts"; import type { GatewayBrowserClient, GatewayHelloOk } from "./gateway.ts"; import type { Tab } from "./navigation.ts"; import { loadSettings, type UiSettings } from "./storage.ts"; -import type { ResolvedTheme, ThemeMode } from "./theme.ts"; +import { VALID_THEME_NAMES, type ResolvedTheme, type ThemeMode, type ThemeName } from "./theme.ts"; import type { AgentsListResult, AgentsFilesListResult, @@ -71,16 +72,17 @@ import type { CronJob, CronRunLogEntry, CronStatus, - HealthSnapshot, + HealthSummary, LogEntry, LogLevel, + ModelCatalogEntry, PresenceEntry, ChannelsStatusSnapshot, SessionsListResult, SkillStatusReport, - ToolsCatalogResult, StatusSummary, NostrProfile, + ToolsCatalogResult, } from "./types.ts"; import { type ChatAttachment, type ChatQueueItem, type CronFormState } from "./ui-types.ts"; import { generateUUID } from "./uuid.ts"; @@ -120,11 +122,15 @@ export class OpenClawApp extends LitElement { } } @state() password = ""; + @state() loginShowGatewayToken = false; + @state() loginShowGatewayPassword = false; @state() tab: Tab = "chat"; @state() onboarding = resolveOnboardingMode(); @state() connected = false; - @state() theme: ThemeMode = this.settings.theme ?? "system"; + @state() theme: ThemeName = this.settings.theme ?? 
"claw"; + @state() themeMode: ThemeMode = this.settings.themeMode ?? "system"; @state() themeResolved: ResolvedTheme = "dark"; + @state() themeOrder: ThemeName[] = this.buildThemeOrder(this.theme); @state() hello: GatewayHelloOk | null = null; @state() lastError: string | null = null; @state() lastErrorCode: string | null = null; @@ -152,9 +158,16 @@ export class OpenClawApp extends LitElement { @state() fallbackStatus: FallbackStatus | null = null; @state() chatAvatarUrl: string | null = null; @state() chatThinkingLevel: string | null = null; + @state() chatModelOverrides: Record = {}; + @state() chatModelsLoading = false; + @state() chatModelCatalog: ModelCatalogEntry[] = []; @state() chatQueue: ChatQueueItem[] = []; @state() chatAttachments: ChatAttachment[] = []; @state() chatManualRefreshInFlight = false; + @state() navDrawerOpen = false; + + onSlashAction?: (action: string) => void; + // Sidebar state for tool output viewing @state() sidebarOpen = false; @state() sidebarContent: string | null = null; @@ -201,6 +214,26 @@ export class OpenClawApp extends LitElement { @state() configSearchQuery = ""; @state() configActiveSection: string | null = null; @state() configActiveSubsection: string | null = null; + @state() communicationsFormMode: "form" | "raw" = "form"; + @state() communicationsSearchQuery = ""; + @state() communicationsActiveSection: string | null = null; + @state() communicationsActiveSubsection: string | null = null; + @state() appearanceFormMode: "form" | "raw" = "form"; + @state() appearanceSearchQuery = ""; + @state() appearanceActiveSection: string | null = null; + @state() appearanceActiveSubsection: string | null = null; + @state() automationFormMode: "form" | "raw" = "form"; + @state() automationSearchQuery = ""; + @state() automationActiveSection: string | null = null; + @state() automationActiveSubsection: string | null = null; + @state() infrastructureFormMode: "form" | "raw" = "form"; + @state() infrastructureSearchQuery = ""; + 
@state() infrastructureActiveSection: string | null = null; + @state() infrastructureActiveSubsection: string | null = null; + @state() aiAgentsFormMode: "form" | "raw" = "form"; + @state() aiAgentsSearchQuery = ""; + @state() aiAgentsActiveSection: string | null = null; + @state() aiAgentsActiveSubsection: string | null = null; @state() channelsLoading = false; @state() channelsSnapshot: ChannelsStatusSnapshot | null = null; @@ -250,6 +283,12 @@ export class OpenClawApp extends LitElement { @state() sessionsIncludeGlobal = true; @state() sessionsIncludeUnknown = false; @state() sessionsHideCron = true; + @state() sessionsSearchQuery = ""; + @state() sessionsSortColumn: "key" | "kind" | "updated" | "tokens" = "updated"; + @state() sessionsSortDir: "asc" | "desc" = "desc"; + @state() sessionsPage = 0; + @state() sessionsPageSize = 10; + @state() sessionsActionsOpenKey: string | null = null; @state() usageLoading = false; @state() usageResult: import("./types.js").SessionsUsageResult | null = null; @@ -324,7 +363,7 @@ export class OpenClawApp extends LitElement { @state() cronStatus: CronStatus | null = null; @state() cronError: string | null = null; @state() cronForm: CronFormState = { ...DEFAULT_CRON_FORM }; - @state() cronFieldErrors: CronFieldErrors = {}; + @state() cronFieldErrors: import("./controllers/cron.js").CronFieldErrors = {}; @state() cronEditingJobId: string | null = null; @state() cronRunsJobId: string | null = null; @state() cronRunsLoadingMore = false; @@ -344,6 +383,16 @@ export class OpenClawApp extends LitElement { @state() updateAvailable: import("./types.js").UpdateAvailable | null = null; + // Overview dashboard state + @state() attentionItems: import("./types.js").AttentionItem[] = []; + @state() paletteOpen = false; + @state() paletteQuery = ""; + @state() paletteActiveIndex = 0; + @state() overviewShowGatewayToken = false; + @state() overviewShowGatewayPassword = false; + @state() overviewLogLines: string[] = []; + @state() 
overviewLogCursor = 0; + @state() skillsLoading = false; @state() skillsReport: SkillStatusReport | null = null; @state() skillsError: string | null = null; @@ -352,10 +401,14 @@ export class OpenClawApp extends LitElement { @state() skillsBusyKey: string | null = null; @state() skillMessages: Record = {}; + @state() healthLoading = false; + @state() healthResult: HealthSummary | null = null; + @state() healthError: string | null = null; + @state() debugLoading = false; @state() debugStatus: StatusSummary | null = null; - @state() debugHealth: HealthSnapshot | null = null; - @state() debugModels: unknown[] = []; + @state() debugHealth: HealthSummary | null = null; + @state() debugModels: ModelCatalogEntry[] = []; @state() debugHeartbeat: unknown = null; @state() debugCallMethod = ""; @state() debugCallParams = "{}"; @@ -394,9 +447,17 @@ export class OpenClawApp extends LitElement { basePath = ""; private popStateHandler = () => onPopStateInternal(this as unknown as Parameters[0]); - private themeMedia: MediaQueryList | null = null; - private themeMediaHandler: ((event: MediaQueryListEvent) => void) | null = null; private topbarObserver: ResizeObserver | null = null; + private globalKeydownHandler = (e: KeyboardEvent) => { + if ((e.metaKey || e.ctrlKey) && !e.shiftKey && e.key === "k") { + e.preventDefault(); + this.paletteOpen = !this.paletteOpen; + if (this.paletteOpen) { + this.paletteQuery = ""; + this.paletteActiveIndex = 0; + } + } + }; createRenderRoot() { return this; @@ -404,6 +465,20 @@ export class OpenClawApp extends LitElement { connectedCallback() { super.connectedCallback(); + this.onSlashAction = (action: string) => { + switch (action) { + case "toggle-focus": + this.applySettings({ + ...this.settings, + chatFocusMode: !this.settings.chatFocusMode, + }); + break; + case "export": + exportChatMarkdown(this.chatMessages, this.assistantName); + break; + } + }; + document.addEventListener("keydown", this.globalKeydownHandler); handleConnected(this as 
unknown as Parameters[0]); } @@ -412,6 +487,7 @@ export class OpenClawApp extends LitElement { } disconnectedCallback() { + document.removeEventListener("keydown", this.globalKeydownHandler); handleDisconnected(this as unknown as Parameters[0]); super.disconnectedCallback(); } @@ -469,10 +545,26 @@ export class OpenClawApp extends LitElement { setTab(next: Tab) { setTabInternal(this as unknown as Parameters[0], next); + this.navDrawerOpen = false; } - setTheme(next: ThemeMode, context?: Parameters[2]) { + setTheme(next: ThemeName, context?: Parameters[2]) { setThemeInternal(this as unknown as Parameters[0], next, context); + this.themeOrder = this.buildThemeOrder(next); + } + + setThemeMode(next: ThemeMode, context?: Parameters[2]) { + setThemeModeInternal( + this as unknown as Parameters[0], + next, + context, + ); + } + + buildThemeOrder(active: ThemeName): ThemeName[] { + const all = [...VALID_THEME_NAMES]; + const rest = all.filter((id) => id !== active); + return [active, ...rest]; } async loadOverview() { diff --git a/ui/src/ui/chat-export.ts b/ui/src/ui/chat-export.ts new file mode 100644 index 00000000000..ed5bbf931f8 --- /dev/null +++ b/ui/src/ui/chat-export.ts @@ -0,0 +1 @@ +export { exportChatMarkdown } from "./chat/export.ts"; diff --git a/ui/src/ui/chat/attachment-support.ts b/ui/src/ui/chat/attachment-support.ts new file mode 100644 index 00000000000..70deb1b4743 --- /dev/null +++ b/ui/src/ui/chat/attachment-support.ts @@ -0,0 +1,5 @@ +export const CHAT_ATTACHMENT_ACCEPT = "image/*"; + +export function isSupportedChatAttachmentMimeType(mimeType: string | null | undefined): boolean { + return typeof mimeType === "string" && mimeType.startsWith("image/"); +} diff --git a/ui/src/ui/chat/deleted-messages.ts b/ui/src/ui/chat/deleted-messages.ts new file mode 100644 index 00000000000..21094bb9e83 --- /dev/null +++ b/ui/src/ui/chat/deleted-messages.ts @@ -0,0 +1,53 @@ +const PREFIX = "openclaw:deleted:"; + +export class DeletedMessages { + private key: 
string; + private _keys = new Set(); + + constructor(sessionKey: string) { + this.key = PREFIX + sessionKey; + this.load(); + } + + has(key: string): boolean { + return this._keys.has(key); + } + + delete(key: string): void { + this._keys.add(key); + this.save(); + } + + restore(key: string): void { + this._keys.delete(key); + this.save(); + } + + clear(): void { + this._keys.clear(); + this.save(); + } + + private load(): void { + try { + const raw = localStorage.getItem(this.key); + if (!raw) { + return; + } + const arr = JSON.parse(raw); + if (Array.isArray(arr)) { + this._keys = new Set(arr.filter((s) => typeof s === "string")); + } + } catch { + // ignore + } + } + + private save(): void { + try { + localStorage.setItem(this.key, JSON.stringify([...this._keys])); + } catch { + // ignore + } + } +} diff --git a/ui/src/ui/chat/export.node.test.ts b/ui/src/ui/chat/export.node.test.ts new file mode 100644 index 00000000000..807fba8813a --- /dev/null +++ b/ui/src/ui/chat/export.node.test.ts @@ -0,0 +1,26 @@ +import { describe, expect, it } from "vitest"; +import { buildChatMarkdown } from "./export.ts"; + +describe("chat export", () => { + it("returns null for empty history", () => { + expect(buildChatMarkdown([], "Bot")).toBeNull(); + }); + + it("renders markdown headings and strips assistant thinking tags", () => { + const markdown = buildChatMarkdown( + [ + { + role: "assistant", + content: "scratchpadFinal answer", + timestamp: Date.UTC(2026, 2, 11, 12, 0, 0), + }, + ], + "Bot", + ); + + expect(markdown).toContain("# Chat with Bot"); + expect(markdown).toContain("## Bot (2026-03-11T12:00:00.000Z)"); + expect(markdown).toContain("Final answer"); + expect(markdown).not.toContain("scratchpad"); + }); +}); diff --git a/ui/src/ui/chat/export.ts b/ui/src/ui/chat/export.ts new file mode 100644 index 00000000000..4eeb545581d --- /dev/null +++ b/ui/src/ui/chat/export.ts @@ -0,0 +1,34 @@ +import { extractTextCached } from "./message-extract.ts"; + +/** + * Export chat 
history as markdown file. + */ +export function exportChatMarkdown(messages: unknown[], assistantName: string): void { + const markdown = buildChatMarkdown(messages, assistantName); + if (!markdown) { + return; + } + const blob = new Blob([markdown], { type: "text/markdown" }); + const url = URL.createObjectURL(blob); + const link = document.createElement("a"); + link.href = url; + link.download = `chat-${assistantName}-${Date.now()}.md`; + link.click(); + URL.revokeObjectURL(url); +} + +export function buildChatMarkdown(messages: unknown[], assistantName: string): string | null { + const history = Array.isArray(messages) ? messages : []; + if (history.length === 0) { + return null; + } + const lines: string[] = [`# Chat with ${assistantName}`, ""]; + for (const msg of history) { + const m = msg as Record; + const role = m.role === "user" ? "You" : m.role === "assistant" ? assistantName : "Tool"; + const content = extractTextCached(msg) ?? ""; + const ts = typeof m.timestamp === "number" ? new Date(m.timestamp).toISOString() : ""; + lines.push(`## ${role}${ts ? 
` (${ts})` : ""}`, "", content, ""); + } + return lines.join("\n"); +} diff --git a/ui/src/ui/chat/grouped-render.ts b/ui/src/ui/chat/grouped-render.ts index f64584bd190..6b584be512b 100644 --- a/ui/src/ui/chat/grouped-render.ts +++ b/ui/src/ui/chat/grouped-render.ts @@ -1,10 +1,12 @@ import { html, nothing } from "lit"; import { unsafeHTML } from "lit/directives/unsafe-html.js"; import type { AssistantIdentity } from "../assistant-identity.ts"; +import { icons } from "../icons.ts"; import { toSanitizedMarkdownHtml } from "../markdown.ts"; import { openExternalUrlSafe } from "../open-external-url.ts"; import { detectTextDirection } from "../text-direction.ts"; -import type { MessageGroup } from "../types/chat-types.ts"; +import type { MessageGroup, ToolCard } from "../types/chat-types.ts"; +import { agentLogoUrl } from "../views/agents-utils.ts"; import { renderCopyAsMarkdownButton } from "./copy-as-markdown.ts"; import { extractTextCached, @@ -12,6 +14,7 @@ import { formatReasoningMarkdown, } from "./message-extract.ts"; import { isToolResultMessage, normalizeRoleForGrouping } from "./message-normalizer.ts"; +import { isTtsSupported, speakText, stopTts, isTtsSpeaking } from "./speech.ts"; import { extractToolCards, renderToolCardSidebar } from "./tool-cards.ts"; type ImageBlock = { @@ -56,10 +59,10 @@ function extractImages(message: unknown): ImageBlock[] { return images; } -export function renderReadingIndicatorGroup(assistant?: AssistantIdentity) { +export function renderReadingIndicatorGroup(assistant?: AssistantIdentity, basePath?: string) { return html`
- ${renderAvatar("assistant", assistant)} + ${renderAvatar("assistant", assistant, basePath)}
+ +
+ +
+ ` }
diff --git a/ui/src/ui/views/agents-panels-tools-skills.ts b/ui/src/ui/views/agents-panels-tools-skills.ts index 4e25aaefc31..413c0ccae21 100644 --- a/ui/src/ui/views/agents-panels-tools-skills.ts +++ b/ui/src/ui/views/agents-panels-tools-skills.ts @@ -2,12 +2,14 @@ import { html, nothing } from "lit"; import { normalizeToolName } from "../../../../src/agents/tool-policy-shared.js"; import type { SkillStatusEntry, SkillStatusReport, ToolsCatalogResult } from "../types.ts"; import { + type AgentToolEntry, + type AgentToolSection, isAllowedByPolicy, matchesList, - PROFILE_OPTIONS, resolveAgentConfig, + resolveToolProfileOptions, resolveToolProfile, - TOOL_SECTIONS, + resolveToolSections, } from "./agents-utils.ts"; import type { SkillGroup } from "./skills-grouping.ts"; import { groupSkills } from "./skills-grouping.ts"; @@ -17,6 +19,28 @@ import { renderSkillStatusChips, } from "./skills-shared.ts"; +function renderToolBadges(section: AgentToolSection, tool: AgentToolEntry) { + const source = tool.source ?? section.source; + const pluginId = tool.pluginId ?? section.pluginId; + const badges: string[] = []; + if (source === "plugin" && pluginId) { + badges.push(`plugin:${pluginId}`); + } else if (source === "core") { + badges.push("core"); + } + if (tool.optional) { + badges.push("optional"); + } + if (badges.length === 0) { + return nothing; + } + return html` +
+ ${badges.map((badge) => html`${badge}`)} +
+ `; +} + export function renderAgentTools(params: { agentId: string; configForm: Record | null; @@ -35,6 +59,8 @@ export function renderAgentTools(params: { const agentTools = config.entry?.tools ?? {}; const globalTools = config.globalTools ?? {}; const profile = agentTools.profile ?? globalTools.profile ?? "full"; + const profileOptions = resolveToolProfileOptions(params.toolsCatalogResult); + const toolSections = resolveToolSections(params.toolsCatalogResult); const profileSource = agentTools.profile ? "agent override" : globalTools.profile @@ -43,7 +69,11 @@ export function renderAgentTools(params: { const hasAgentAllow = Array.isArray(agentTools.allow) && agentTools.allow.length > 0; const hasGlobalAllow = Array.isArray(globalTools.allow) && globalTools.allow.length > 0; const editable = - Boolean(params.configForm) && !params.configLoading && !params.configSaving && !hasAgentAllow; + Boolean(params.configForm) && + !params.configLoading && + !params.configSaving && + !hasAgentAllow && + !(params.toolsCatalogLoading && !params.toolsCatalogResult && !params.toolsCatalogError); const alsoAllow = hasAgentAllow ? [] : Array.isArray(agentTools.alsoAllow) @@ -53,17 +83,7 @@ export function renderAgentTools(params: { const basePolicy = hasAgentAllow ? { allow: agentTools.allow ?? [], deny: agentTools.deny ?? [] } : (resolveToolProfile(profile) ?? undefined); - const sections = - params.toolsCatalogResult?.groups?.length && - params.toolsCatalogResult.agentId === params.agentId - ? params.toolsCatalogResult.groups - : TOOL_SECTIONS; - const profileOptions = - params.toolsCatalogResult?.profiles?.length && - params.toolsCatalogResult.agentId === params.agentId - ? 
params.toolsCatalogResult.profiles - : PROFILE_OPTIONS; - const toolIds = sections.flatMap((section) => section.tools.map((tool) => tool.id)); + const toolIds = toolSections.flatMap((section) => section.tools.map((tool) => tool.id)); const resolveAllowed = (toolId: string) => { const baseAllowed = isAllowedByPolicy(toolId, basePolicy); @@ -152,15 +172,6 @@ export function renderAgentTools(params: { - ${ - params.toolsCatalogError - ? html` -
- Could not load runtime tool catalog. Showing fallback list. -
- ` - : nothing - } ${ !params.configForm ? html` @@ -188,6 +199,22 @@ export function renderAgentTools(params: { ` : nothing } + ${ + params.toolsCatalogLoading && !params.toolsCatalogResult && !params.toolsCatalogError + ? html` +
Loading runtime tool catalog…
+ ` + : nothing + } + ${ + params.toolsCatalogError + ? html` +
+ Could not load runtime tool catalog. Showing built-in fallback list instead. +
+ ` + : nothing + }
@@ -235,50 +262,27 @@ export function renderAgentTools(params: {
- ${sections.map( + ${toolSections.map( (section) => html`
${section.label} ${ - "source" in section && section.source === "plugin" - ? html` - plugin - ` + section.source === "plugin" && section.pluginId + ? html`plugin:${section.pluginId}` : nothing }
${section.tools.map((tool) => { const { allowed } = resolveAllowed(tool.id); - const catalogTool = tool as { - source?: "core" | "plugin"; - pluginId?: string; - optional?: boolean; - }; - const source = - catalogTool.source === "plugin" - ? catalogTool.pluginId - ? `plugin:${catalogTool.pluginId}` - : "plugin" - : "core"; - const isOptional = catalogTool.optional === true; return html`
-
- ${tool.label} - ${source} - ${ - isOptional - ? html` - optional - ` - : nothing - } -
+
${tool.label}
${tool.description}
+ ${renderToolBadges(section, tool)}
-
- - +
+
+ + + +
@@ -430,6 +437,8 @@ export function renderAgentSkills(params: { .value=${params.filter} @input=${(e: Event) => params.onFilterChange((e.target as HTMLInputElement).value)} placeholder="Search skills" + autocomplete="off" + name="agent-skills-filter" />
${filtered.length} shown
diff --git a/ui/src/ui/views/agents-utils.test.ts b/ui/src/ui/views/agents-utils.test.ts index eea9bec03c8..a9b30e549db 100644 --- a/ui/src/ui/views/agents-utils.test.ts +++ b/ui/src/ui/views/agents-utils.test.ts @@ -1,6 +1,8 @@ import { describe, expect, it } from "vitest"; import { + agentLogoUrl, resolveConfiguredCronModelSuggestions, + resolveAgentAvatarUrl, resolveEffectiveModelFallbacks, sortLocaleStrings, } from "./agents-utils.ts"; @@ -98,3 +100,34 @@ describe("sortLocaleStrings", () => { expect(sortLocaleStrings(new Set(["beta", "alpha"]))).toEqual(["alpha", "beta"]); }); }); + +describe("agentLogoUrl", () => { + it("keeps base-mounted control UI logo paths absolute to the mount", () => { + expect(agentLogoUrl("/ui")).toBe("/ui/favicon.svg"); + expect(agentLogoUrl("/apps/openclaw/")).toBe("/apps/openclaw/favicon.svg"); + }); + + it("uses a route-relative fallback before basePath bootstrap finishes", () => { + expect(agentLogoUrl("")).toBe("favicon.svg"); + }); +}); + +describe("resolveAgentAvatarUrl", () => { + it("prefers a runtime avatar URL over non-URL identity avatars", () => { + expect( + resolveAgentAvatarUrl( + { identity: { avatar: "A", avatarUrl: "/avatar/main" } }, + { + agentId: "main", + avatar: "A", + name: "Main", + }, + ), + ).toBe("/avatar/main"); + }); + + it("returns null for initials or emoji avatar values without a URL", () => { + expect(resolveAgentAvatarUrl({ identity: { avatar: "A" } })).toBeNull(); + expect(resolveAgentAvatarUrl({ identity: { avatar: "🦞" } })).toBeNull(); + }); +}); diff --git a/ui/src/ui/views/agents-utils.ts b/ui/src/ui/views/agents-utils.ts index 556b1c98247..e0c06c41386 100644 --- a/ui/src/ui/views/agents-utils.ts +++ b/ui/src/ui/views/agents-utils.ts @@ -1,18 +1,157 @@ import { html } from "lit"; -import { - listCoreToolSections, - PROFILE_OPTIONS as TOOL_PROFILE_OPTIONS, -} from "../../../../src/agents/tool-catalog.js"; import { expandToolGroups, normalizeToolName, resolveToolProfilePolicy, } from 
"../../../../src/agents/tool-policy-shared.js"; -import type { AgentIdentityResult, AgentsFilesListResult, AgentsListResult } from "../types.ts"; +import type { + AgentIdentityResult, + AgentsFilesListResult, + AgentsListResult, + ToolCatalogProfile, + ToolsCatalogResult, +} from "../types.ts"; -export const TOOL_SECTIONS = listCoreToolSections(); +export type AgentToolEntry = { + id: string; + label: string; + description: string; + source?: "core" | "plugin"; + pluginId?: string; + optional?: boolean; + defaultProfiles?: string[]; +}; -export const PROFILE_OPTIONS = TOOL_PROFILE_OPTIONS; +export type AgentToolSection = { + id: string; + label: string; + source?: "core" | "plugin"; + pluginId?: string; + tools: AgentToolEntry[]; +}; + +export const FALLBACK_TOOL_SECTIONS: AgentToolSection[] = [ + { + id: "fs", + label: "Files", + tools: [ + { id: "read", label: "read", description: "Read file contents" }, + { id: "write", label: "write", description: "Create or overwrite files" }, + { id: "edit", label: "edit", description: "Make precise edits" }, + { id: "apply_patch", label: "apply_patch", description: "Patch files (OpenAI)" }, + ], + }, + { + id: "runtime", + label: "Runtime", + tools: [ + { id: "exec", label: "exec", description: "Run shell commands" }, + { id: "process", label: "process", description: "Manage background processes" }, + ], + }, + { + id: "web", + label: "Web", + tools: [ + { id: "web_search", label: "web_search", description: "Search the web" }, + { id: "web_fetch", label: "web_fetch", description: "Fetch web content" }, + ], + }, + { + id: "memory", + label: "Memory", + tools: [ + { id: "memory_search", label: "memory_search", description: "Semantic search" }, + { id: "memory_get", label: "memory_get", description: "Read memory files" }, + ], + }, + { + id: "sessions", + label: "Sessions", + tools: [ + { id: "sessions_list", label: "sessions_list", description: "List sessions" }, + { id: "sessions_history", label: "sessions_history", 
description: "Session history" }, + { id: "sessions_send", label: "sessions_send", description: "Send to session" }, + { id: "sessions_spawn", label: "sessions_spawn", description: "Spawn sub-agent" }, + { id: "session_status", label: "session_status", description: "Session status" }, + ], + }, + { + id: "ui", + label: "UI", + tools: [ + { id: "browser", label: "browser", description: "Control web browser" }, + { id: "canvas", label: "canvas", description: "Control canvases" }, + ], + }, + { + id: "messaging", + label: "Messaging", + tools: [{ id: "message", label: "message", description: "Send messages" }], + }, + { + id: "automation", + label: "Automation", + tools: [ + { id: "cron", label: "cron", description: "Schedule tasks" }, + { id: "gateway", label: "gateway", description: "Gateway control" }, + ], + }, + { + id: "nodes", + label: "Nodes", + tools: [{ id: "nodes", label: "nodes", description: "Nodes + devices" }], + }, + { + id: "agents", + label: "Agents", + tools: [{ id: "agents_list", label: "agents_list", description: "List agents" }], + }, + { + id: "media", + label: "Media", + tools: [{ id: "image", label: "image", description: "Image understanding" }], + }, +]; + +export const PROFILE_OPTIONS = [ + { id: "minimal", label: "Minimal" }, + { id: "coding", label: "Coding" }, + { id: "messaging", label: "Messaging" }, + { id: "full", label: "Full" }, +] as const; + +export function resolveToolSections( + toolsCatalogResult: ToolsCatalogResult | null, +): AgentToolSection[] { + if (toolsCatalogResult?.groups?.length) { + return toolsCatalogResult.groups.map((group) => ({ + id: group.id, + label: group.label, + source: group.source, + pluginId: group.pluginId, + tools: group.tools.map((tool) => ({ + id: tool.id, + label: tool.label, + description: tool.description, + source: tool.source, + pluginId: tool.pluginId, + optional: tool.optional, + defaultProfiles: [...tool.defaultProfiles], + })), + })); + } + return FALLBACK_TOOL_SECTIONS; +} + +export 
function resolveToolProfileOptions( + toolsCatalogResult: ToolsCatalogResult | null, +): readonly ToolCatalogProfile[] | typeof PROFILE_OPTIONS { + if (toolsCatalogResult?.profiles?.length) { + return toolsCatalogResult.profiles; + } + return PROFILE_OPTIONS; +} type ToolPolicy = { allow?: string[]; @@ -55,6 +194,33 @@ export function normalizeAgentLabel(agent: { return agent.name?.trim() || agent.identity?.name?.trim() || agent.id; } +const AVATAR_URL_RE = /^(https?:\/\/|data:image\/|\/)/i; + +export function resolveAgentAvatarUrl( + agent: { identity?: { avatar?: string; avatarUrl?: string } }, + agentIdentity?: AgentIdentityResult | null, +): string | null { + const candidates = [ + agentIdentity?.avatar?.trim(), + agent.identity?.avatarUrl?.trim(), + agent.identity?.avatar?.trim(), + ]; + for (const candidate of candidates) { + if (!candidate) { + continue; + } + if (AVATAR_URL_RE.test(candidate)) { + return candidate; + } + } + return null; +} + +export function agentLogoUrl(basePath: string): string { + const base = basePath?.trim() ? basePath.replace(/\/$/, "") : ""; + return base ? `${base}/favicon.svg` : "favicon.svg"; +} + function isLikelyEmoji(value: string) { const trimmed = value.trim(); if (!trimmed) { @@ -106,6 +272,14 @@ export function agentBadgeText(agentId: string, defaultId: string | null) { return defaultId && agentId === defaultId ? 
"default" : null; } +export function agentAvatarHue(id: string): number { + let hash = 0; + for (let i = 0; i < id.length; i += 1) { + hash = (hash * 31 + id.charCodeAt(i)) | 0; + } + return ((hash % 360) + 360) % 360; +} + export function formatBytes(bytes?: number) { if (bytes == null || !Number.isFinite(bytes)) { return "-"; @@ -138,7 +312,7 @@ export type AgentContext = { workspace: string; model: string; identityName: string; - identityEmoji: string; + identityAvatar: string; skillsLabel: string; isDefault: boolean; }; @@ -164,14 +338,14 @@ export function buildAgentContext( agent.name?.trim() || config.entry?.name || agent.id; - const identityEmoji = resolveAgentEmoji(agent, agentIdentity) || "-"; + const identityAvatar = resolveAgentAvatarUrl(agent, agentIdentity) ? "custom" : "—"; const skillFilter = Array.isArray(config.entry?.skills) ? config.entry?.skills : null; const skillCount = skillFilter?.length ?? null; return { workspace, model: modelLabel, identityName, - identityEmoji, + identityAvatar, skillsLabel: skillFilter ? 
`${skillCount} selected` : "all skills", isDefault: Boolean(defaultId && agent.id === defaultId), }; diff --git a/ui/src/ui/views/agents.test.ts b/ui/src/ui/views/agents.test.ts new file mode 100644 index 00000000000..f763877937a --- /dev/null +++ b/ui/src/ui/views/agents.test.ts @@ -0,0 +1,174 @@ +import { render } from "lit"; +import { describe, expect, it } from "vitest"; +import { renderAgents, type AgentsProps } from "./agents.ts"; + +function createSkill() { + return { + name: "Repo Skill", + description: "Skill description", + source: "workspace", + filePath: "/tmp/skill", + baseDir: "/tmp", + skillKey: "repo-skill", + always: false, + disabled: false, + blockedByAllowlist: false, + eligible: true, + requirements: { + bins: [], + env: [], + config: [], + os: [], + }, + missing: { + bins: [], + env: [], + config: [], + os: [], + }, + configChecks: [], + install: [], + }; +} + +function createProps(overrides: Partial = {}): AgentsProps { + return { + basePath: "", + loading: false, + error: null, + agentsList: { + defaultId: "alpha", + mainKey: "main", + scope: "workspace", + agents: [{ id: "alpha", name: "Alpha" } as never, { id: "beta", name: "Beta" } as never], + }, + selectedAgentId: "beta", + activePanel: "overview", + config: { + form: null, + loading: false, + saving: false, + dirty: false, + }, + channels: { + snapshot: null, + loading: false, + error: null, + lastSuccess: null, + }, + cron: { + status: null, + jobs: [], + loading: false, + error: null, + }, + agentFiles: { + list: null, + loading: false, + error: null, + active: null, + contents: {}, + drafts: {}, + saving: false, + }, + agentIdentityLoading: false, + agentIdentityError: null, + agentIdentityById: {}, + agentSkills: { + report: null, + loading: false, + error: null, + agentId: null, + filter: "", + }, + toolsCatalog: { + loading: false, + error: null, + result: null, + }, + onRefresh: () => undefined, + onSelectAgent: () => undefined, + onSelectPanel: () => undefined, + onLoadFiles: 
() => undefined, + onSelectFile: () => undefined, + onFileDraftChange: () => undefined, + onFileReset: () => undefined, + onFileSave: () => undefined, + onToolsProfileChange: () => undefined, + onToolsOverridesChange: () => undefined, + onConfigReload: () => undefined, + onConfigSave: () => undefined, + onModelChange: () => undefined, + onModelFallbacksChange: () => undefined, + onChannelsRefresh: () => undefined, + onCronRefresh: () => undefined, + onCronRunNow: () => undefined, + onSkillsFilterChange: () => undefined, + onSkillsRefresh: () => undefined, + onAgentSkillToggle: () => undefined, + onAgentSkillsClear: () => undefined, + onAgentSkillsDisableAll: () => undefined, + onSetDefault: () => undefined, + ...overrides, + }; +} + +describe("renderAgents", () => { + it("shows the skills count only for the selected agent's report", async () => { + const container = document.createElement("div"); + render( + renderAgents( + createProps({ + agentSkills: { + report: { + workspaceDir: "/tmp/workspace", + managedSkillsDir: "/tmp/skills", + skills: [createSkill()], + }, + loading: false, + error: null, + agentId: "alpha", + filter: "", + }, + }), + ), + container, + ); + await Promise.resolve(); + + const skillsTab = Array.from(container.querySelectorAll(".agent-tab")).find( + (button) => button.textContent?.includes("Skills"), + ); + + expect(skillsTab?.textContent?.trim()).toBe("Skills"); + }); + + it("shows the selected agent's skills count when the report matches", async () => { + const container = document.createElement("div"); + render( + renderAgents( + createProps({ + agentSkills: { + report: { + workspaceDir: "/tmp/workspace", + managedSkillsDir: "/tmp/skills", + skills: [createSkill()], + }, + loading: false, + error: null, + agentId: "beta", + filter: "", + }, + }), + ), + container, + ); + await Promise.resolve(); + + const skillsTab = Array.from(container.querySelectorAll(".agent-tab")).find( + (button) => button.textContent?.includes("Skills"), + ); + + 
expect(skillsTab?.textContent?.trim()).toContain("1"); + }); +}); diff --git a/ui/src/ui/views/agents.ts b/ui/src/ui/views/agents.ts index 891190d9abb..4e8b9a065ba 100644 --- a/ui/src/ui/views/agents.ts +++ b/ui/src/ui/views/agents.ts @@ -9,64 +9,78 @@ import type { SkillStatusReport, ToolsCatalogResult, } from "../types.ts"; +import { renderAgentOverview } from "./agents-panels-overview.ts"; import { renderAgentFiles, renderAgentChannels, renderAgentCron, } from "./agents-panels-status-files.ts"; import { renderAgentTools, renderAgentSkills } from "./agents-panels-tools-skills.ts"; -import { - agentBadgeText, - buildAgentContext, - buildModelOptions, - normalizeAgentLabel, - normalizeModelValue, - parseFallbackList, - resolveAgentConfig, - resolveAgentEmoji, - resolveEffectiveModelFallbacks, - resolveModelLabel, - resolveModelPrimary, -} from "./agents-utils.ts"; +import { agentBadgeText, buildAgentContext, normalizeAgentLabel } from "./agents-utils.ts"; export type AgentsPanel = "overview" | "files" | "tools" | "skills" | "channels" | "cron"; +export type ConfigState = { + form: Record | null; + loading: boolean; + saving: boolean; + dirty: boolean; +}; + +export type ChannelsState = { + snapshot: ChannelsStatusSnapshot | null; + loading: boolean; + error: string | null; + lastSuccess: number | null; +}; + +export type CronState = { + status: CronStatus | null; + jobs: CronJob[]; + loading: boolean; + error: string | null; +}; + +export type AgentFilesState = { + list: AgentsFilesListResult | null; + loading: boolean; + error: string | null; + active: string | null; + contents: Record; + drafts: Record; + saving: boolean; +}; + +export type AgentSkillsState = { + report: SkillStatusReport | null; + loading: boolean; + error: string | null; + agentId: string | null; + filter: string; +}; + +export type ToolsCatalogState = { + loading: boolean; + error: string | null; + result: ToolsCatalogResult | null; +}; + export type AgentsProps = { + basePath: string; 
loading: boolean; error: string | null; agentsList: AgentsListResult | null; selectedAgentId: string | null; activePanel: AgentsPanel; - configForm: Record | null; - configLoading: boolean; - configSaving: boolean; - configDirty: boolean; - channelsLoading: boolean; - channelsError: string | null; - channelsSnapshot: ChannelsStatusSnapshot | null; - channelsLastSuccess: number | null; - cronLoading: boolean; - cronStatus: CronStatus | null; - cronJobs: CronJob[]; - cronError: string | null; - agentFilesLoading: boolean; - agentFilesError: string | null; - agentFilesList: AgentsFilesListResult | null; - agentFileActive: string | null; - agentFileContents: Record; - agentFileDrafts: Record; - agentFileSaving: boolean; + config: ConfigState; + channels: ChannelsState; + cron: CronState; + agentFiles: AgentFilesState; agentIdentityLoading: boolean; agentIdentityError: string | null; agentIdentityById: Record; - agentSkillsLoading: boolean; - agentSkillsReport: SkillStatusReport | null; - agentSkillsError: string | null; - agentSkillsAgentId: string | null; - toolsCatalogLoading: boolean; - toolsCatalogError: string | null; - toolsCatalogResult: ToolsCatalogResult | null; - skillsFilter: string; + agentSkills: AgentSkillsState; + toolsCatalog: ToolsCatalogState; onRefresh: () => void; onSelectAgent: (agentId: string) => void; onSelectPanel: (panel: AgentsPanel) => void; @@ -83,20 +97,13 @@ export type AgentsProps = { onModelFallbacksChange: (agentId: string, fallbacks: string[]) => void; onChannelsRefresh: () => void; onCronRefresh: () => void; + onCronRunNow: (jobId: string) => void; onSkillsFilterChange: (next: string) => void; onSkillsRefresh: () => void; onAgentSkillToggle: (agentId: string, skillName: string, enabled: boolean) => void; onAgentSkillsClear: (agentId: string) => void; onAgentSkillsDisableAll: (agentId: string) => void; -}; - -export type AgentContext = { - workspace: string; - model: string; - identityName: string; - identityEmoji: string; - 
skillsLabel: string; - isDefault: boolean; + onSetDefault: (agentId: string) => void; }; export function renderAgents(props: AgentsProps) { @@ -106,50 +113,101 @@ export function renderAgents(props: AgentsProps) { const selectedAgent = selectedId ? (agents.find((agent) => agent.id === selectedId) ?? null) : null; + const selectedSkillCount = + selectedId && props.agentSkills.agentId === selectedId + ? (props.agentSkills.report?.skills?.length ?? null) + : null; + + const channelEntryCount = props.channels.snapshot + ? Object.keys(props.channels.snapshot.channelAccounts ?? {}).length + : null; + const cronJobCount = selectedId + ? props.cron.jobs.filter((j) => j.agentId === selectedId).length + : null; + const tabCounts: Record = { + files: props.agentFiles.list?.files?.length ?? null, + skills: selectedSkillCount, + channels: channelEntryCount, + cron: cronJobCount || null, + }; return html`
-
-
-
-
Agents
-
${agents.length} configured.
+
+
+ Agent +
+
+ +
+
+ ${ + selectedAgent + ? html` +
+ + ${ + actionsMenuOpen + ? html` +
+ + +
+ ` + : nothing + } +
+ ` + : nothing + } + +
-
${ props.error - ? html`
${props.error}
` + ? html`
${props.error}
` : nothing } -
- ${ - agents.length === 0 - ? html` -
No agents found.
- ` - : agents.map((agent) => { - const badge = agentBadgeText(agent.id, defaultId); - const emoji = resolveAgentEmoji(agent, props.agentIdentityById[agent.id] ?? null); - return html` - - `; - }) - } -
${ @@ -161,29 +219,26 @@ export function renderAgents(props: AgentsProps) {
` : html` - ${renderAgentHeader( - selectedAgent, - defaultId, - props.agentIdentityById[selectedAgent.id] ?? null, - )} - ${renderAgentTabs(props.activePanel, (panel) => props.onSelectPanel(panel))} + ${renderAgentTabs(props.activePanel, (panel) => props.onSelectPanel(panel), tabCounts)} ${ props.activePanel === "overview" ? renderAgentOverview({ agent: selectedAgent, + basePath: props.basePath, defaultId, - configForm: props.configForm, - agentFilesList: props.agentFilesList, + configForm: props.config.form, + agentFilesList: props.agentFiles.list, agentIdentity: props.agentIdentityById[selectedAgent.id] ?? null, agentIdentityError: props.agentIdentityError, agentIdentityLoading: props.agentIdentityLoading, - configLoading: props.configLoading, - configSaving: props.configSaving, - configDirty: props.configDirty, + configLoading: props.config.loading, + configSaving: props.config.saving, + configDirty: props.config.dirty, onConfigReload: props.onConfigReload, onConfigSave: props.onConfigSave, onModelChange: props.onModelChange, onModelFallbacksChange: props.onModelFallbacksChange, + onSelectPanel: props.onSelectPanel, }) : nothing } @@ -191,13 +246,13 @@ export function renderAgents(props: AgentsProps) { props.activePanel === "files" ? 
renderAgentFiles({ agentId: selectedAgent.id, - agentFilesList: props.agentFilesList, - agentFilesLoading: props.agentFilesLoading, - agentFilesError: props.agentFilesError, - agentFileActive: props.agentFileActive, - agentFileContents: props.agentFileContents, - agentFileDrafts: props.agentFileDrafts, - agentFileSaving: props.agentFileSaving, + agentFilesList: props.agentFiles.list, + agentFilesLoading: props.agentFiles.loading, + agentFilesError: props.agentFiles.error, + agentFileActive: props.agentFiles.active, + agentFileContents: props.agentFiles.contents, + agentFileDrafts: props.agentFiles.drafts, + agentFileSaving: props.agentFiles.saving, onLoadFiles: props.onLoadFiles, onSelectFile: props.onSelectFile, onFileDraftChange: props.onFileDraftChange, @@ -210,13 +265,13 @@ export function renderAgents(props: AgentsProps) { props.activePanel === "tools" ? renderAgentTools({ agentId: selectedAgent.id, - configForm: props.configForm, - configLoading: props.configLoading, - configSaving: props.configSaving, - configDirty: props.configDirty, - toolsCatalogLoading: props.toolsCatalogLoading, - toolsCatalogError: props.toolsCatalogError, - toolsCatalogResult: props.toolsCatalogResult, + configForm: props.config.form, + configLoading: props.config.loading, + configSaving: props.config.saving, + configDirty: props.config.dirty, + toolsCatalogLoading: props.toolsCatalog.loading, + toolsCatalogError: props.toolsCatalog.error, + toolsCatalogResult: props.toolsCatalog.result, onProfileChange: props.onToolsProfileChange, onOverridesChange: props.onToolsOverridesChange, onConfigReload: props.onConfigReload, @@ -228,15 +283,15 @@ export function renderAgents(props: AgentsProps) { props.activePanel === "skills" ? 
renderAgentSkills({ agentId: selectedAgent.id, - report: props.agentSkillsReport, - loading: props.agentSkillsLoading, - error: props.agentSkillsError, - activeAgentId: props.agentSkillsAgentId, - configForm: props.configForm, - configLoading: props.configLoading, - configSaving: props.configSaving, - configDirty: props.configDirty, - filter: props.skillsFilter, + report: props.agentSkills.report, + loading: props.agentSkills.loading, + error: props.agentSkills.error, + activeAgentId: props.agentSkills.agentId, + configForm: props.config.form, + configLoading: props.config.loading, + configSaving: props.config.saving, + configDirty: props.config.dirty, + filter: props.agentSkills.filter, onFilterChange: props.onSkillsFilterChange, onRefresh: props.onSkillsRefresh, onToggle: props.onAgentSkillToggle, @@ -252,16 +307,16 @@ export function renderAgents(props: AgentsProps) { ? renderAgentChannels({ context: buildAgentContext( selectedAgent, - props.configForm, - props.agentFilesList, + props.config.form, + props.agentFiles.list, defaultId, props.agentIdentityById[selectedAgent.id] ?? null, ), - configForm: props.configForm, - snapshot: props.channelsSnapshot, - loading: props.channelsLoading, - error: props.channelsError, - lastSuccess: props.channelsLastSuccess, + configForm: props.config.form, + snapshot: props.channels.snapshot, + loading: props.channels.loading, + error: props.channels.error, + lastSuccess: props.channels.lastSuccess, onRefresh: props.onChannelsRefresh, }) : nothing @@ -271,17 +326,18 @@ export function renderAgents(props: AgentsProps) { ? renderAgentCron({ context: buildAgentContext( selectedAgent, - props.configForm, - props.agentFilesList, + props.config.form, + props.agentFiles.list, defaultId, props.agentIdentityById[selectedAgent.id] ?? 
null, ), agentId: selectedAgent.id, - jobs: props.cronJobs, - status: props.cronStatus, - loading: props.cronLoading, - error: props.cronError, + jobs: props.cron.jobs, + status: props.cron.status, + loading: props.cron.loading, + error: props.cron.error, onRefresh: props.onCronRefresh, + onRunNow: props.onCronRunNow, }) : nothing } @@ -292,33 +348,13 @@ export function renderAgents(props: AgentsProps) { `; } -function renderAgentHeader( - agent: AgentsListResult["agents"][number], - defaultId: string | null, - agentIdentity: AgentIdentityResult | null, -) { - const badge = agentBadgeText(agent.id, defaultId); - const displayName = normalizeAgentLabel(agent); - const subtitle = agent.identity?.theme?.trim() || "Agent workspace and routing."; - const emoji = resolveAgentEmoji(agent, agentIdentity); - return html` -
-
-
${emoji || displayName.slice(0, 1)}
-
-
${displayName}
-
${subtitle}
-
-
-
-
${agent.id}
- ${badge ? html`${badge}` : nothing} -
-
- `; -} +let actionsMenuOpen = false; -function renderAgentTabs(active: AgentsPanel, onSelect: (panel: AgentsPanel) => void) { +function renderAgentTabs( + active: AgentsPanel, + onSelect: (panel: AgentsPanel) => void, + counts: Record, +) { const tabs: Array<{ id: AgentsPanel; label: string }> = [ { id: "overview", label: "Overview" }, { id: "files", label: "Files" }, @@ -336,164 +372,10 @@ function renderAgentTabs(active: AgentsPanel, onSelect: (panel: AgentsPanel) => type="button" @click=${() => onSelect(tab.id)} > - ${tab.label} + ${tab.label}${counts[tab.id] != null ? html`${counts[tab.id]}` : nothing} `, )}
`; } - -function renderAgentOverview(params: { - agent: AgentsListResult["agents"][number]; - defaultId: string | null; - configForm: Record | null; - agentFilesList: AgentsFilesListResult | null; - agentIdentity: AgentIdentityResult | null; - agentIdentityLoading: boolean; - agentIdentityError: string | null; - configLoading: boolean; - configSaving: boolean; - configDirty: boolean; - onConfigReload: () => void; - onConfigSave: () => void; - onModelChange: (agentId: string, modelId: string | null) => void; - onModelFallbacksChange: (agentId: string, fallbacks: string[]) => void; -}) { - const { - agent, - configForm, - agentFilesList, - agentIdentity, - agentIdentityLoading, - agentIdentityError, - configLoading, - configSaving, - configDirty, - onConfigReload, - onConfigSave, - onModelChange, - onModelFallbacksChange, - } = params; - const config = resolveAgentConfig(configForm, agent.id); - const workspaceFromFiles = - agentFilesList && agentFilesList.agentId === agent.id ? agentFilesList.workspace : null; - const workspace = - workspaceFromFiles || config.entry?.workspace || config.defaults?.workspace || "default"; - const model = config.entry?.model - ? resolveModelLabel(config.entry?.model) - : resolveModelLabel(config.defaults?.model); - const defaultModel = resolveModelLabel(config.defaults?.model); - const modelPrimary = - resolveModelPrimary(config.entry?.model) || (model !== "-" ? normalizeModelValue(model) : null); - const defaultPrimary = - resolveModelPrimary(config.defaults?.model) || - (defaultModel !== "-" ? normalizeModelValue(defaultModel) : null); - const effectivePrimary = modelPrimary ?? defaultPrimary ?? null; - const modelFallbacks = resolveEffectiveModelFallbacks( - config.entry?.model, - config.defaults?.model, - ); - const fallbackText = modelFallbacks ? 
modelFallbacks.join(", ") : ""; - const identityName = - agentIdentity?.name?.trim() || - agent.identity?.name?.trim() || - agent.name?.trim() || - config.entry?.name || - "-"; - const resolvedEmoji = resolveAgentEmoji(agent, agentIdentity); - const identityEmoji = resolvedEmoji || "-"; - const skillFilter = Array.isArray(config.entry?.skills) ? config.entry?.skills : null; - const skillCount = skillFilter?.length ?? null; - const identityStatus = agentIdentityLoading - ? "Loading…" - : agentIdentityError - ? "Unavailable" - : ""; - const isDefault = Boolean(params.defaultId && agent.id === params.defaultId); - - return html` -
-
Overview
-
Workspace paths and identity metadata.
-
-
-
Workspace
-
${workspace}
-
-
-
Primary Model
-
${model}
-
-
-
Identity Name
-
${identityName}
- ${identityStatus ? html`
${identityStatus}
` : nothing} -
-
-
Default
-
${isDefault ? "yes" : "no"}
-
-
-
Identity Emoji
-
${identityEmoji}
-
-
-
Skills Filter
-
${skillFilter ? `${skillCount} selected` : "all skills"}
-
-
- -
-
Model Selection
-
- - -
-
- - -
-
-
- `; -} diff --git a/ui/src/ui/views/bottom-tabs.ts b/ui/src/ui/views/bottom-tabs.ts new file mode 100644 index 00000000000..b8dfbebf39c --- /dev/null +++ b/ui/src/ui/views/bottom-tabs.ts @@ -0,0 +1,33 @@ +import { html } from "lit"; +import { icons } from "../icons.ts"; +import type { Tab } from "../navigation.ts"; + +export type BottomTabsProps = { + activeTab: Tab; + onTabChange: (tab: Tab) => void; +}; + +const BOTTOM_TABS: Array<{ id: Tab; label: string; icon: keyof typeof icons }> = [ + { id: "overview", label: "Dashboard", icon: "barChart" }, + { id: "chat", label: "Chat", icon: "messageSquare" }, + { id: "sessions", label: "Sessions", icon: "fileText" }, + { id: "config", label: "Settings", icon: "settings" }, +]; + +export function renderBottomTabs(props: BottomTabsProps) { + return html` + + `; +} diff --git a/ui/src/ui/views/chat.browser.test.ts b/ui/src/ui/views/chat.browser.test.ts new file mode 100644 index 00000000000..be2b5ab277e --- /dev/null +++ b/ui/src/ui/views/chat.browser.test.ts @@ -0,0 +1,82 @@ +import { render } from "lit"; +import { afterEach, describe, expect, it } from "vitest"; +import "../../styles.css"; +import { renderChat, type ChatProps } from "./chat.ts"; + +function createProps(overrides: Partial = {}): ChatProps { + return { + sessionKey: "main", + onSessionKeyChange: () => undefined, + thinkingLevel: null, + showThinking: false, + loading: false, + sending: false, + canAbort: false, + compactionStatus: null, + fallbackStatus: null, + messages: [], + toolMessages: [], + streamSegments: [], + stream: null, + streamStartedAt: null, + assistantAvatarUrl: null, + draft: "", + queue: [], + connected: true, + canSend: true, + disabledReason: null, + error: null, + sessions: { + ts: 0, + path: "", + count: 1, + defaults: { model: "gpt-5", contextTokens: null }, + sessions: [ + { + key: "main", + kind: "direct", + updatedAt: null, + inputTokens: 3_800, + contextTokens: 4_000, + }, + ], + }, + focusMode: false, + assistantName: 
"OpenClaw", + assistantAvatar: null, + onRefresh: () => undefined, + onToggleFocusMode: () => undefined, + onDraftChange: () => undefined, + onSend: () => undefined, + onQueueRemove: () => undefined, + onNewSession: () => undefined, + agentsList: null, + currentAgentId: "", + onAgentChange: () => undefined, + ...overrides, + }; +} + +describe("chat context notice", () => { + afterEach(() => { + document.body.innerHTML = ""; + }); + + it("keeps the warning icon badge-sized", async () => { + const container = document.createElement("div"); + document.body.append(container); + render(renderChat(createProps()), container); + await new Promise((resolve) => requestAnimationFrame(() => resolve())); + + const icon = container.querySelector(".context-notice__icon"); + expect(icon).not.toBeNull(); + if (!icon) { + return; + } + + const iconStyle = getComputedStyle(icon); + expect(iconStyle.width).toBe("16px"); + expect(iconStyle.height).toBe("16px"); + expect(icon.getBoundingClientRect().width).toBeLessThan(24); + }); +}); diff --git a/ui/src/ui/views/chat.test.ts b/ui/src/ui/views/chat.test.ts index d67acd77485..b21936e0bb8 100644 --- a/ui/src/ui/views/chat.test.ts +++ b/ui/src/ui/views/chat.test.ts @@ -1,5 +1,11 @@ +/* @vitest-environment jsdom */ + import { render } from "lit"; import { describe, expect, it, vi } from "vitest"; +import { renderChatSessionSelect } from "../app-render.helpers.ts"; +import type { AppViewState } from "../app-view-state.ts"; +import type { GatewayBrowserClient } from "../gateway.ts"; +import type { ModelCatalogEntry } from "../types.ts"; import type { SessionsListResult } from "../types.ts"; import { renderChat, type ChatProps } from "./chat.ts"; @@ -13,6 +19,104 @@ function createSessions(): SessionsListResult { }; } +function createChatHeaderState( + overrides: { + model?: string | null; + models?: ModelCatalogEntry[]; + omitSessionFromList?: boolean; + } = {}, +): { state: AppViewState; request: ReturnType } { + let currentModel = 
overrides.model ?? null; + const omitSessionFromList = overrides.omitSessionFromList ?? false; + const catalog = overrides.models ?? [ + { id: "gpt-5", name: "GPT-5", provider: "openai" }, + { id: "gpt-5-mini", name: "GPT-5 Mini", provider: "openai" }, + ]; + const request = vi.fn(async (method: string, params: Record) => { + if (method === "sessions.patch") { + currentModel = (params.model as string | null | undefined) ?? null; + return { ok: true, key: "main" }; + } + if (method === "chat.history") { + return { messages: [], thinkingLevel: null }; + } + if (method === "sessions.list") { + return { + ts: 0, + path: "", + count: omitSessionFromList ? 0 : 1, + defaults: { model: "gpt-5", contextTokens: null }, + sessions: omitSessionFromList + ? [] + : [{ key: "main", kind: "direct", updatedAt: null, model: currentModel }], + }; + } + if (method === "models.list") { + return { models: catalog }; + } + throw new Error(`Unexpected request: ${method}`); + }); + const state = { + sessionKey: "main", + connected: true, + sessionsHideCron: true, + sessionsResult: { + ts: 0, + path: "", + count: omitSessionFromList ? 0 : 1, + defaults: { model: "gpt-5", contextTokens: null }, + sessions: omitSessionFromList + ? 
[] + : [{ key: "main", kind: "direct", updatedAt: null, model: currentModel }], + }, + chatModelOverrides: {}, + chatModelCatalog: catalog, + chatModelsLoading: false, + client: { request } as unknown as GatewayBrowserClient, + settings: { + gatewayUrl: "", + token: "", + locale: "en", + sessionKey: "main", + lastActiveSessionKey: "main", + theme: "claw", + themeMode: "dark", + splitRatio: 0.6, + navCollapsed: false, + navGroupsCollapsed: {}, + chatFocusMode: false, + chatShowThinking: false, + }, + chatMessage: "", + chatStream: null, + chatStreamStartedAt: null, + chatRunId: null, + chatQueue: [], + chatMessages: [], + chatLoading: false, + chatThinkingLevel: null, + lastError: null, + chatAvatarUrl: null, + basePath: "", + hello: null, + agentsList: null, + applySettings(next: AppViewState["settings"]) { + state.settings = next; + }, + loadAssistantIdentity: vi.fn(), + resetToolStream: vi.fn(), + resetChatScroll: vi.fn(), + } as unknown as AppViewState & { + client: GatewayBrowserClient; + settings: AppViewState["settings"]; + }; + return { state, request }; +} + +function flushTasks() { + return new Promise((resolve) => setTimeout(resolve, 0)); +} + function createProps(overrides: Partial = {}): ChatProps { return { sessionKey: "main", @@ -46,11 +150,103 @@ function createProps(overrides: Partial = {}): ChatProps { onSend: () => undefined, onQueueRemove: () => undefined, onNewSession: () => undefined, + agentsList: null, + currentAgentId: "", + onAgentChange: () => undefined, ...overrides, }; } describe("chat view", () => { + it("uses the assistant avatar URL for the welcome state when the identity avatar is only initials", () => { + const container = document.createElement("div"); + render( + renderChat( + createProps({ + assistantName: "Assistant", + assistantAvatar: "A", + assistantAvatarUrl: "/avatar/main", + }), + ), + container, + ); + + const welcomeImage = container.querySelector(".agent-chat__welcome > img"); + expect(welcomeImage).not.toBeNull(); + 
expect(welcomeImage?.getAttribute("src")).toBe("/avatar/main"); + }); + + it("falls back to the bundled logo in the welcome state when the assistant avatar is not a URL", () => { + const container = document.createElement("div"); + render( + renderChat( + createProps({ + assistantName: "Assistant", + assistantAvatar: "A", + assistantAvatarUrl: null, + }), + ), + container, + ); + + const welcomeImage = container.querySelector(".agent-chat__welcome > img"); + const logoImage = container.querySelector( + ".agent-chat__welcome .agent-chat__avatar--logo img", + ); + expect(welcomeImage).toBeNull(); + expect(logoImage).not.toBeNull(); + expect(logoImage?.getAttribute("src")).toBe("favicon.svg"); + }); + + it("keeps the welcome logo fallback under the mounted base path", () => { + const container = document.createElement("div"); + render( + renderChat( + createProps({ + assistantName: "Assistant", + assistantAvatar: "A", + assistantAvatarUrl: null, + basePath: "/openclaw/", + }), + ), + container, + ); + + const logoImage = container.querySelector( + ".agent-chat__welcome .agent-chat__avatar--logo img", + ); + expect(logoImage).not.toBeNull(); + expect(logoImage?.getAttribute("src")).toBe("/openclaw/favicon.svg"); + }); + + it("keeps grouped assistant avatar fallbacks under the mounted base path", () => { + const container = document.createElement("div"); + render( + renderChat( + createProps({ + assistantName: "Assistant", + assistantAvatar: "A", + assistantAvatarUrl: null, + basePath: "/openclaw/", + messages: [ + { + role: "assistant", + content: "hello", + timestamp: 1000, + }, + ], + }), + ), + container, + ); + + const groupedLogo = container.querySelector( + ".chat-group.assistant .chat-avatar--logo", + ); + expect(groupedLogo).not.toBeNull(); + expect(groupedLogo?.getAttribute("src")).toBe("/openclaw/favicon.svg"); + }); + it("renders compacting indicator as a badge", () => { const container = document.createElement("div"); render( @@ -189,15 +385,14 @@ 
describe("chat view", () => { renderChat( createProps({ canAbort: true, + sending: true, onAbort, }), ), container, ); - const stopButton = Array.from(container.querySelectorAll("button")).find( - (btn) => btn.textContent?.trim() === "Stop", - ); + const stopButton = container.querySelector('button[title="Stop"]'); expect(stopButton).not.toBeUndefined(); stopButton?.dispatchEvent(new MouseEvent("click", { bubbles: true })); expect(onAbort).toHaveBeenCalledTimes(1); @@ -217,8 +412,8 @@ describe("chat view", () => { container, ); - const newSessionButton = Array.from(container.querySelectorAll("button")).find( - (btn) => btn.textContent?.trim() === "New session", + const newSessionButton = container.querySelector( + 'button[title="New session"]', ); expect(newSessionButton).not.toBeUndefined(); newSessionButton?.dispatchEvent(new MouseEvent("click", { bubbles: true })); @@ -283,4 +478,173 @@ describe("chat view", () => { expect(senderLabels).toContain("Iris"); expect(senderLabels).toContain("Joaquin De Rojas"); }); + + it("opens delete confirm on the left for user messages", () => { + try { + localStorage.removeItem("openclaw:skipDeleteConfirm"); + } catch { + /* noop */ + } + const container = document.createElement("div"); + render( + renderChat( + createProps({ + messages: [ + { + role: "user", + content: "hello from user", + timestamp: 1000, + }, + ], + }), + ), + container, + ); + + const deleteButton = container.querySelector( + ".chat-group.user .chat-group-delete", + ); + expect(deleteButton).not.toBeNull(); + deleteButton?.dispatchEvent(new MouseEvent("click", { bubbles: true })); + + const confirm = container.querySelector(".chat-group.user .chat-delete-confirm"); + expect(confirm).not.toBeNull(); + expect(confirm?.classList.contains("chat-delete-confirm--left")).toBe(true); + }); + + it("opens delete confirm on the right for assistant messages", () => { + try { + localStorage.removeItem("openclaw:skipDeleteConfirm"); + } catch { + /* noop */ + } + const 
container = document.createElement("div"); + render( + renderChat( + createProps({ + messages: [ + { + role: "assistant", + content: "hello from assistant", + timestamp: 1000, + }, + ], + }), + ), + container, + ); + + const deleteButton = container.querySelector( + ".chat-group.assistant .chat-group-delete", + ); + expect(deleteButton).not.toBeNull(); + deleteButton?.dispatchEvent(new MouseEvent("click", { bubbles: true })); + + const confirm = container.querySelector( + ".chat-group.assistant .chat-delete-confirm", + ); + expect(confirm).not.toBeNull(); + expect(confirm?.classList.contains("chat-delete-confirm--right")).toBe(true); + }); + + it("patches the current session model from the chat header picker", async () => { + vi.stubGlobal( + "fetch", + vi.fn().mockResolvedValue({ + ok: false, + } satisfies Partial), + ); + const { state, request } = createChatHeaderState(); + const container = document.createElement("div"); + render(renderChatSessionSelect(state), container); + + const modelSelect = container.querySelector( + 'select[data-chat-model-select="true"]', + ); + expect(modelSelect).not.toBeNull(); + expect(modelSelect?.value).toBe(""); + + modelSelect!.value = "gpt-5-mini"; + modelSelect!.dispatchEvent(new Event("change", { bubbles: true })); + await flushTasks(); + + expect(request).toHaveBeenCalledWith("sessions.patch", { + key: "main", + model: "gpt-5-mini", + }); + expect(request).not.toHaveBeenCalledWith("chat.history", expect.anything()); + expect(state.sessionsResult?.sessions[0]?.model).toBe("gpt-5-mini"); + vi.unstubAllGlobals(); + }); + + it("clears the session model override back to the default model", async () => { + vi.stubGlobal( + "fetch", + vi.fn().mockResolvedValue({ + ok: false, + } satisfies Partial), + ); + const { state, request } = createChatHeaderState({ model: "gpt-5-mini" }); + const container = document.createElement("div"); + render(renderChatSessionSelect(state), container); + + const modelSelect = container.querySelector( + 
'select[data-chat-model-select="true"]', + ); + expect(modelSelect).not.toBeNull(); + expect(modelSelect?.value).toBe("gpt-5-mini"); + + modelSelect!.value = ""; + modelSelect!.dispatchEvent(new Event("change", { bubbles: true })); + await flushTasks(); + + expect(request).toHaveBeenCalledWith("sessions.patch", { + key: "main", + model: null, + }); + expect(state.sessionsResult?.sessions[0]?.model).toBeNull(); + vi.unstubAllGlobals(); + }); + + it("disables the chat header model picker while a run is active", () => { + const { state } = createChatHeaderState(); + state.chatRunId = "run-123"; + state.chatStream = "Working"; + const container = document.createElement("div"); + render(renderChatSessionSelect(state), container); + + const modelSelect = container.querySelector( + 'select[data-chat-model-select="true"]', + ); + expect(modelSelect).not.toBeNull(); + expect(modelSelect?.disabled).toBe(true); + }); + + it("keeps the selected model visible when the active session is absent from sessions.list", async () => { + vi.stubGlobal( + "fetch", + vi.fn().mockResolvedValue({ + ok: false, + } satisfies Partial), + ); + const { state } = createChatHeaderState({ omitSessionFromList: true }); + const container = document.createElement("div"); + render(renderChatSessionSelect(state), container); + + const modelSelect = container.querySelector( + 'select[data-chat-model-select="true"]', + ); + expect(modelSelect).not.toBeNull(); + + modelSelect!.value = "gpt-5-mini"; + modelSelect!.dispatchEvent(new Event("change", { bubbles: true })); + await flushTasks(); + render(renderChatSessionSelect(state), container); + + const rerendered = container.querySelector( + 'select[data-chat-model-select="true"]', + ); + expect(rerendered?.value).toBe("gpt-5-mini"); + vi.unstubAllGlobals(); + }); }); diff --git a/ui/src/ui/views/chat.ts b/ui/src/ui/views/chat.ts index 516042c27f1..1d0b877d042 100644 --- a/ui/src/ui/views/chat.ts +++ b/ui/src/ui/views/chat.ts @@ -1,17 +1,37 @@ -import { 
html, nothing } from "lit"; +import { html, nothing, type TemplateResult } from "lit"; import { ref } from "lit/directives/ref.js"; import { repeat } from "lit/directives/repeat.js"; +import { + CHAT_ATTACHMENT_ACCEPT, + isSupportedChatAttachmentMimeType, +} from "../chat/attachment-support.ts"; +import { DeletedMessages } from "../chat/deleted-messages.ts"; +import { exportChatMarkdown } from "../chat/export.ts"; import { renderMessageGroup, renderReadingIndicatorGroup, renderStreamingGroup, } from "../chat/grouped-render.ts"; +import { InputHistory } from "../chat/input-history.ts"; import { normalizeMessage, normalizeRoleForGrouping } from "../chat/message-normalizer.ts"; +import { PinnedMessages } from "../chat/pinned-messages.ts"; +import { getPinnedMessageSummary } from "../chat/pinned-summary.ts"; +import { messageMatchesSearchQuery } from "../chat/search-match.ts"; +import { getOrCreateSessionCacheValue } from "../chat/session-cache.ts"; +import { + CATEGORY_LABELS, + SLASH_COMMANDS, + getSlashCommandCompletions, + type SlashCommandCategory, + type SlashCommandDef, +} from "../chat/slash-commands.ts"; +import { isSttSupported, startStt, stopStt } from "../chat/speech.ts"; import { icons } from "../icons.ts"; import { detectTextDirection } from "../text-direction.ts"; -import type { SessionsListResult } from "../types.ts"; +import type { GatewaySessionRow, SessionsListResult } from "../types.ts"; import type { ChatItem, MessageGroup } from "../types/chat-types.ts"; import type { ChatAttachment, ChatQueueItem } from "../ui-types.ts"; +import { agentLogoUrl, resolveAgentAvatarUrl } from "./agents-utils.ts"; import { renderMarkdownSidebar } from "./markdown-sidebar.ts"; import "../components/resizable-divider.ts"; @@ -54,49 +74,124 @@ export type ChatProps = { disabledReason: string | null; error: string | null; sessions: SessionsListResult | null; - // Focus mode focusMode: boolean; - // Sidebar state sidebarOpen?: boolean; sidebarContent?: string | null; 
sidebarError?: string | null; splitRatio?: number; assistantName: string; assistantAvatar: string | null; - // Image attachments attachments?: ChatAttachment[]; onAttachmentsChange?: (attachments: ChatAttachment[]) => void; - // Scroll control showNewMessages?: boolean; onScrollToBottom?: () => void; - // Event handlers onRefresh: () => void; onToggleFocusMode: () => void; + getDraft?: () => string; onDraftChange: (next: string) => void; + onRequestUpdate?: () => void; onSend: () => void; onAbort?: () => void; onQueueRemove: (id: string) => void; onNewSession: () => void; + onClearHistory?: () => void; + agentsList: { + agents: Array<{ id: string; name?: string; identity?: { name?: string; avatarUrl?: string } }>; + defaultId?: string; + } | null; + currentAgentId: string; + onAgentChange: (agentId: string) => void; + onNavigateToAgent?: () => void; + onSessionSelect?: (sessionKey: string) => void; onOpenSidebar?: (content: string) => void; onCloseSidebar?: () => void; onSplitRatioChange?: (ratio: number) => void; onChatScroll?: (event: Event) => void; + basePath?: string; }; const COMPACTION_TOAST_DURATION_MS = 5000; const FALLBACK_TOAST_DURATION_MS = 8000; +// Persistent instances keyed by session +const inputHistories = new Map(); +const pinnedMessagesMap = new Map(); +const deletedMessagesMap = new Map(); + +function getInputHistory(sessionKey: string): InputHistory { + return getOrCreateSessionCacheValue(inputHistories, sessionKey, () => new InputHistory()); +} + +function getPinnedMessages(sessionKey: string): PinnedMessages { + return getOrCreateSessionCacheValue( + pinnedMessagesMap, + sessionKey, + () => new PinnedMessages(sessionKey), + ); +} + +function getDeletedMessages(sessionKey: string): DeletedMessages { + return getOrCreateSessionCacheValue( + deletedMessagesMap, + sessionKey, + () => new DeletedMessages(sessionKey), + ); +} + +interface ChatEphemeralState { + sttRecording: boolean; + sttInterimText: string; + slashMenuOpen: boolean; + 
slashMenuItems: SlashCommandDef[]; + slashMenuIndex: number; + slashMenuMode: "command" | "args"; + slashMenuCommand: SlashCommandDef | null; + slashMenuArgItems: string[]; + searchOpen: boolean; + searchQuery: string; + pinnedExpanded: boolean; +} + +function createChatEphemeralState(): ChatEphemeralState { + return { + sttRecording: false, + sttInterimText: "", + slashMenuOpen: false, + slashMenuItems: [], + slashMenuIndex: 0, + slashMenuMode: "command", + slashMenuCommand: null, + slashMenuArgItems: [], + searchOpen: false, + searchQuery: "", + pinnedExpanded: false, + }; +} + +const vs = createChatEphemeralState(); + +/** + * Reset chat view ephemeral state when navigating away. + * Stops STT recording and clears search/slash UI that should not survive navigation. + */ +export function resetChatViewState() { + if (vs.sttRecording) { + stopStt(); + } + Object.assign(vs, createChatEphemeralState()); +} + +export const cleanupChatModuleState = resetChatViewState; + function adjustTextareaHeight(el: HTMLTextAreaElement) { el.style.height = "auto"; - el.style.height = `${el.scrollHeight}px`; + el.style.height = `${Math.min(el.scrollHeight, 150)}px`; } function renderCompactionIndicator(status: CompactionIndicatorStatus | null | undefined) { if (!status) { return nothing; } - - // Show "compacting..." while active if (status.active) { return html`
@@ -104,8 +199,6 @@ function renderCompactionIndicator(status: CompactionIndicatorStatus | null | un
`; } - - // Show "compaction complete" briefly after completion if (status.completedAt) { const elapsed = Date.now() - status.completedAt; if (elapsed < COMPACTION_TOAST_DURATION_MS) { @@ -116,7 +209,6 @@ function renderCompactionIndicator(status: CompactionIndicatorStatus | null | un `; } } - return nothing; } @@ -148,17 +240,59 @@ function renderFallbackIndicator(status: FallbackIndicatorStatus | null | undefi : "compaction-indicator compaction-indicator--fallback"; const icon = phase === "cleared" ? icons.check : icons.brain; return html` -
+
${icon} ${message}
`; } +/** + * Compact notice when context usage reaches 85%+. + * Progressively shifts from amber (85%) to red (95%+). + */ +function renderContextNotice( + session: GatewaySessionRow | undefined, + defaultContextTokens: number | null, +) { + const used = session?.inputTokens ?? 0; + const limit = session?.contextTokens ?? defaultContextTokens ?? 0; + if (!used || !limit) { + return nothing; + } + const ratio = used / limit; + if (ratio < 0.85) { + return nothing; + } + const pct = Math.min(Math.round(ratio * 100), 100); + // Lerp from amber (#d97706) at 85% to red (#dc2626) at 95%+ + const t = Math.min(Math.max((ratio - 0.85) / 0.1, 0), 1); + // RGB: amber(217,119,6) → red(220,38,38) + const r = Math.round(217 + (220 - 217) * t); + const g = Math.round(119 + (38 - 119) * t); + const b = Math.round(6 + (38 - 6) * t); + const color = `rgb(${r}, ${g}, ${b})`; + const bgOpacity = 0.08 + 0.08 * t; + const bg = `rgba(${r}, ${g}, ${b}, ${bgOpacity})`; + return html` +
+ + ${pct}% context used + ${formatTokensCompact(used)} / ${formatTokensCompact(limit)} +
+ `; +} + +/** Format token count compactly (e.g. 128000 → "128k"). */ +function formatTokensCompact(n: number): string { + if (n >= 1_000_000) { + return `${(n / 1_000_000).toFixed(1).replace(/\.0$/, "")}M`; + } + if (n >= 1_000) { + return `${(n / 1_000).toFixed(1).replace(/\.0$/, "")}k`; + } + return String(n); +} + function generateAttachmentId(): string { return `att-${Date.now()}-${Math.random().toString(36).slice(2, 9)}`; } @@ -168,7 +302,6 @@ function handlePaste(e: ClipboardEvent, props: ChatProps) { if (!items || !props.onAttachmentsChange) { return; } - const imageItems: DataTransferItem[] = []; for (let i = 0; i < items.length; i++) { const item = items[i]; @@ -176,19 +309,15 @@ function handlePaste(e: ClipboardEvent, props: ChatProps) { imageItems.push(item); } } - if (imageItems.length === 0) { return; } - e.preventDefault(); - for (const item of imageItems) { const file = item.getAsFile(); if (!file) { continue; } - const reader = new FileReader(); reader.addEventListener("load", () => { const dataUrl = reader.result as string; @@ -204,33 +333,86 @@ function handlePaste(e: ClipboardEvent, props: ChatProps) { } } -function renderAttachmentPreview(props: ChatProps) { +function handleFileSelect(e: Event, props: ChatProps) { + const input = e.target as HTMLInputElement; + if (!input.files || !props.onAttachmentsChange) { + return; + } + const current = props.attachments ?? 
[]; + const additions: ChatAttachment[] = []; + let pending = 0; + for (const file of input.files) { + if (!isSupportedChatAttachmentMimeType(file.type)) { + continue; + } + pending++; + const reader = new FileReader(); + reader.addEventListener("load", () => { + additions.push({ + id: generateAttachmentId(), + dataUrl: reader.result as string, + mimeType: file.type, + }); + pending--; + if (pending === 0) { + props.onAttachmentsChange?.([...current, ...additions]); + } + }); + reader.readAsDataURL(file); + } + input.value = ""; +} + +function handleDrop(e: DragEvent, props: ChatProps) { + e.preventDefault(); + const files = e.dataTransfer?.files; + if (!files || !props.onAttachmentsChange) { + return; + } + const current = props.attachments ?? []; + const additions: ChatAttachment[] = []; + let pending = 0; + for (const file of files) { + if (!isSupportedChatAttachmentMimeType(file.type)) { + continue; + } + pending++; + const reader = new FileReader(); + reader.addEventListener("load", () => { + additions.push({ + id: generateAttachmentId(), + dataUrl: reader.result as string, + mimeType: file.type, + }); + pending--; + if (pending === 0) { + props.onAttachmentsChange?.([...current, ...additions]); + } + }); + reader.readAsDataURL(file); + } +} + +function renderAttachmentPreview(props: ChatProps): TemplateResult | typeof nothing { const attachments = props.attachments ?? []; if (attachments.length === 0) { return nothing; } - return html` -
+
${attachments.map( (att) => html` -
- Attachment preview +
+ Attachment preview + >×
`, )} @@ -238,6 +420,384 @@ function renderAttachmentPreview(props: ChatProps) { `; } +function resetSlashMenuState(): void { + vs.slashMenuMode = "command"; + vs.slashMenuCommand = null; + vs.slashMenuArgItems = []; + vs.slashMenuItems = []; +} + +function updateSlashMenu(value: string, requestUpdate: () => void): void { + // Arg mode: /command + const argMatch = value.match(/^\/(\S+)\s(.*)$/); + if (argMatch) { + const cmdName = argMatch[1].toLowerCase(); + const argFilter = argMatch[2].toLowerCase(); + const cmd = SLASH_COMMANDS.find((c) => c.name === cmdName); + if (cmd?.argOptions?.length) { + const filtered = argFilter + ? cmd.argOptions.filter((opt) => opt.toLowerCase().startsWith(argFilter)) + : cmd.argOptions; + if (filtered.length > 0) { + vs.slashMenuMode = "args"; + vs.slashMenuCommand = cmd; + vs.slashMenuArgItems = filtered; + vs.slashMenuOpen = true; + vs.slashMenuIndex = 0; + vs.slashMenuItems = []; + requestUpdate(); + return; + } + } + vs.slashMenuOpen = false; + resetSlashMenuState(); + requestUpdate(); + return; + } + + // Command mode: /partial-command + const match = value.match(/^\/(\S*)$/); + if (match) { + const items = getSlashCommandCompletions(match[1]); + vs.slashMenuItems = items; + vs.slashMenuOpen = items.length > 0; + vs.slashMenuIndex = 0; + vs.slashMenuMode = "command"; + vs.slashMenuCommand = null; + vs.slashMenuArgItems = []; + } else { + vs.slashMenuOpen = false; + resetSlashMenuState(); + } + requestUpdate(); +} + +function selectSlashCommand( + cmd: SlashCommandDef, + props: ChatProps, + requestUpdate: () => void, +): void { + // Transition to arg picker when the command has fixed options + if (cmd.argOptions?.length) { + props.onDraftChange(`/${cmd.name} `); + vs.slashMenuMode = "args"; + vs.slashMenuCommand = cmd; + vs.slashMenuArgItems = cmd.argOptions; + vs.slashMenuOpen = true; + vs.slashMenuIndex = 0; + vs.slashMenuItems = []; + requestUpdate(); + return; + } + + vs.slashMenuOpen = false; + resetSlashMenuState(); + + if 
(cmd.executeLocal && !cmd.args) { + props.onDraftChange(`/${cmd.name}`); + requestUpdate(); + props.onSend(); + } else { + props.onDraftChange(`/${cmd.name} `); + requestUpdate(); + } +} + +function tabCompleteSlashCommand( + cmd: SlashCommandDef, + props: ChatProps, + requestUpdate: () => void, +): void { + // Tab: fill in the command text without executing + if (cmd.argOptions?.length) { + props.onDraftChange(`/${cmd.name} `); + vs.slashMenuMode = "args"; + vs.slashMenuCommand = cmd; + vs.slashMenuArgItems = cmd.argOptions; + vs.slashMenuOpen = true; + vs.slashMenuIndex = 0; + vs.slashMenuItems = []; + requestUpdate(); + return; + } + + vs.slashMenuOpen = false; + resetSlashMenuState(); + props.onDraftChange(cmd.args ? `/${cmd.name} ` : `/${cmd.name}`); + requestUpdate(); +} + +function selectSlashArg( + arg: string, + props: ChatProps, + requestUpdate: () => void, + execute: boolean, +): void { + const cmdName = vs.slashMenuCommand?.name ?? ""; + vs.slashMenuOpen = false; + resetSlashMenuState(); + props.onDraftChange(`/${cmdName} ${arg}`); + requestUpdate(); + if (execute) { + props.onSend(); + } +} + +function tokenEstimate(draft: string): string | null { + if (draft.length < 100) { + return null; + } + return `~${Math.ceil(draft.length / 4)} tokens`; +} + +/** + * Export chat markdown - delegates to shared utility. + */ +function exportMarkdown(props: ChatProps): void { + exportChatMarkdown(props.messages, props.assistantName); +} + +const WELCOME_SUGGESTIONS = [ + "What can you do?", + "Summarize my recent sessions", + "Help me configure a channel", + "Check system health", +]; + +function renderWelcomeState(props: ChatProps): TemplateResult { + const name = props.assistantName || "Assistant"; + const avatar = resolveAgentAvatarUrl({ + identity: { + avatar: props.assistantAvatar ?? undefined, + avatarUrl: props.assistantAvatarUrl ?? undefined, + }, + }); + const logoUrl = agentLogoUrl(props.basePath ?? ""); + + return html` +
+
+ ${ + avatar + ? html`${name}` + : html`` + } +

${name}

+
+ Ready to chat +
+

+ Type a message below · / for commands +

+
+ ${WELCOME_SUGGESTIONS.map( + (text) => html` + + `, + )} +
+
+ `; +} + +function renderSearchBar(requestUpdate: () => void): TemplateResult | typeof nothing { + if (!vs.searchOpen) { + return nothing; + } + return html` + + `; +} + +function renderPinnedSection( + props: ChatProps, + pinned: PinnedMessages, + requestUpdate: () => void, +): TemplateResult | typeof nothing { + const messages = Array.isArray(props.messages) ? props.messages : []; + const entries: Array<{ index: number; text: string; role: string }> = []; + for (const idx of pinned.indices) { + const msg = messages[idx] as Record | undefined; + if (!msg) { + continue; + } + const text = getPinnedMessageSummary(msg); + const role = typeof msg.role === "string" ? msg.role : "unknown"; + entries.push({ index: idx, text, role }); + } + if (entries.length === 0) { + return nothing; + } + return html` +
+ + ${ + vs.pinnedExpanded + ? html` +
+ ${entries.map( + ({ index, text, role }) => html` +
+ ${role === "user" ? "You" : "Assistant"} + ${text.slice(0, 100)}${text.length > 100 ? "..." : ""} + +
+ `, + )} +
+ ` + : nothing + } +
+ `; +} + +function renderSlashMenu( + requestUpdate: () => void, + props: ChatProps, +): TemplateResult | typeof nothing { + if (!vs.slashMenuOpen) { + return nothing; + } + + // Arg-picker mode: show options for the selected command + if (vs.slashMenuMode === "args" && vs.slashMenuCommand && vs.slashMenuArgItems.length > 0) { + return html` +
+
+
/${vs.slashMenuCommand.name} ${vs.slashMenuCommand.description}
+ ${vs.slashMenuArgItems.map( + (arg, i) => html` +
selectSlashArg(arg, props, requestUpdate, true)} + @mouseenter=${() => { + vs.slashMenuIndex = i; + requestUpdate(); + }} + > + ${vs.slashMenuCommand?.icon ? html`${icons[vs.slashMenuCommand.icon]}` : nothing} + ${arg} + /${vs.slashMenuCommand?.name} ${arg} +
+ `, + )} +
+ +
+ `; + } + + // Command mode: show grouped commands + if (vs.slashMenuItems.length === 0) { + return nothing; + } + + const grouped = new Map< + SlashCommandCategory, + Array<{ cmd: SlashCommandDef; globalIdx: number }> + >(); + for (let i = 0; i < vs.slashMenuItems.length; i++) { + const cmd = vs.slashMenuItems[i]; + const cat = cmd.category ?? "session"; + let list = grouped.get(cat); + if (!list) { + list = []; + grouped.set(cat, list); + } + list.push({ cmd, globalIdx: i }); + } + + const sections: TemplateResult[] = []; + for (const [cat, entries] of grouped) { + sections.push(html` +
+
${CATEGORY_LABELS[cat]}
+ ${entries.map( + ({ cmd, globalIdx }) => html` +
selectSlashCommand(cmd, props, requestUpdate)} + @mouseenter=${() => { + vs.slashMenuIndex = globalIdx; + requestUpdate(); + }} + > + ${cmd.icon ? html`${icons[cmd.icon]}` : nothing} + /${cmd.name} + ${cmd.args ? html`${cmd.args}` : nothing} + ${cmd.description} + ${ + cmd.argOptions?.length + ? html`${cmd.argOptions.length} options` + : cmd.executeLocal && !cmd.args + ? html` + instant + ` + : nothing + } +
+ `, + )} +
+ `); + } + + return html` +
+ ${sections} + +
+ `; +} + export function renderChat(props: ChatProps) { const canCompose = props.connected; const isBusy = props.sending || props.stream !== null; @@ -247,34 +807,101 @@ export function renderChat(props: ChatProps) { const showReasoning = props.showThinking && reasoningLevel !== "off"; const assistantIdentity = { name: props.assistantName, - avatar: props.assistantAvatar ?? props.assistantAvatarUrl ?? null, + avatar: + resolveAgentAvatarUrl({ + identity: { + avatar: props.assistantAvatar ?? undefined, + avatarUrl: props.assistantAvatarUrl ?? undefined, + }, + }) ?? null, }; - + const pinned = getPinnedMessages(props.sessionKey); + const deleted = getDeletedMessages(props.sessionKey); + const inputHistory = getInputHistory(props.sessionKey); const hasAttachments = (props.attachments?.length ?? 0) > 0; - const composePlaceholder = props.connected + const tokens = tokenEstimate(props.draft); + + const placeholder = props.connected ? hasAttachments ? "Add a message or paste more images..." - : "Message (↩ to send, Shift+↩ for line breaks, paste images)" - : "Connect to the gateway to start chatting…"; + : `Message ${props.assistantName || "agent"} (Enter to send)` + : "Connect to the gateway to start chatting..."; + + const requestUpdate = props.onRequestUpdate ?? (() => {}); + const getDraft = props.getDraft ?? (() => props.draft); const splitRatio = props.splitRatio ?? 0.6; const sidebarOpen = Boolean(props.sidebarOpen && props.onCloseSidebar); + + const handleCodeBlockCopy = (e: Event) => { + const btn = (e.target as HTMLElement).closest(".code-block-copy"); + if (!btn) { + return; + } + const code = (btn as HTMLElement).dataset.code ?? ""; + navigator.clipboard.writeText(code).then( + () => { + btn.classList.add("copied"); + setTimeout(() => btn.classList.remove("copied"), 1500); + }, + () => {}, + ); + }; + + const chatItems = buildChatItems(props); + const isEmpty = chatItems.length === 0 && !props.loading; + const thread = html`
+
${ props.loading ? html` -
Loading chat…
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ` + : nothing + } + ${isEmpty && !vs.searchOpen ? renderWelcomeState(props) : nothing} + ${ + isEmpty && vs.searchOpen + ? html` +
No matching messages
` : nothing } ${repeat( - buildChatItems(props), + chatItems, (item) => item.key, (item) => { if (item.kind === "divider") { @@ -286,39 +913,168 @@ export function renderChat(props: ChatProps) {
`; } - if (item.kind === "reading-indicator") { - return renderReadingIndicatorGroup(assistantIdentity); + return renderReadingIndicatorGroup(assistantIdentity, props.basePath); } - if (item.kind === "stream") { return renderStreamingGroup( item.text, item.startedAt, props.onOpenSidebar, assistantIdentity, + props.basePath, ); } - if (item.kind === "group") { + if (deleted.has(item.key)) { + return nothing; + } return renderMessageGroup(item, { onOpenSidebar: props.onOpenSidebar, showReasoning, assistantName: props.assistantName, assistantAvatar: assistantIdentity.avatar, + basePath: props.basePath, + contextWindow: + activeSession?.contextTokens ?? props.sessions?.defaults?.contextTokens ?? null, + onDelete: () => { + deleted.delete(item.key); + requestUpdate(); + }, }); } - return nothing; }, )} +
`; - return html` -
- ${props.disabledReason ? html`
${props.disabledReason}
` : nothing} + const handleKeyDown = (e: KeyboardEvent) => { + // Slash menu navigation — arg mode + if (vs.slashMenuOpen && vs.slashMenuMode === "args" && vs.slashMenuArgItems.length > 0) { + const len = vs.slashMenuArgItems.length; + switch (e.key) { + case "ArrowDown": + e.preventDefault(); + vs.slashMenuIndex = (vs.slashMenuIndex + 1) % len; + requestUpdate(); + return; + case "ArrowUp": + e.preventDefault(); + vs.slashMenuIndex = (vs.slashMenuIndex - 1 + len) % len; + requestUpdate(); + return; + case "Tab": + e.preventDefault(); + selectSlashArg(vs.slashMenuArgItems[vs.slashMenuIndex], props, requestUpdate, false); + return; + case "Enter": + e.preventDefault(); + selectSlashArg(vs.slashMenuArgItems[vs.slashMenuIndex], props, requestUpdate, true); + return; + case "Escape": + e.preventDefault(); + vs.slashMenuOpen = false; + resetSlashMenuState(); + requestUpdate(); + return; + } + } + // Slash menu navigation — command mode + if (vs.slashMenuOpen && vs.slashMenuItems.length > 0) { + const len = vs.slashMenuItems.length; + switch (e.key) { + case "ArrowDown": + e.preventDefault(); + vs.slashMenuIndex = (vs.slashMenuIndex + 1) % len; + requestUpdate(); + return; + case "ArrowUp": + e.preventDefault(); + vs.slashMenuIndex = (vs.slashMenuIndex - 1 + len) % len; + requestUpdate(); + return; + case "Tab": + e.preventDefault(); + tabCompleteSlashCommand(vs.slashMenuItems[vs.slashMenuIndex], props, requestUpdate); + return; + case "Enter": + e.preventDefault(); + selectSlashCommand(vs.slashMenuItems[vs.slashMenuIndex], props, requestUpdate); + return; + case "Escape": + e.preventDefault(); + vs.slashMenuOpen = false; + resetSlashMenuState(); + requestUpdate(); + return; + } + } + + // Input history (only when input is empty) + if (!props.draft.trim()) { + if (e.key === "ArrowUp") { + const prev = inputHistory.up(); + if (prev !== null) { + e.preventDefault(); + props.onDraftChange(prev); + } + return; + } + if (e.key === "ArrowDown") { + const next = 
inputHistory.down(); + e.preventDefault(); + props.onDraftChange(next ?? ""); + return; + } + } + + // Cmd+F for search + if ((e.metaKey || e.ctrlKey) && !e.shiftKey && e.key === "f") { + e.preventDefault(); + vs.searchOpen = !vs.searchOpen; + if (!vs.searchOpen) { + vs.searchQuery = ""; + } + requestUpdate(); + return; + } + + // Send on Enter (without shift) + if (e.key === "Enter" && !e.shiftKey) { + if (e.isComposing || e.keyCode === 229) { + return; + } + if (!props.connected) { + return; + } + e.preventDefault(); + if (canCompose) { + if (props.draft.trim()) { + inputHistory.push(props.draft); + } + props.onSend(); + } + } + }; + + const handleInput = (e: Event) => { + const target = e.target as HTMLTextAreaElement; + adjustTextareaHeight(target); + updateSlashMenu(target.value, requestUpdate); + inputHistory.reset(); + props.onDraftChange(target.value); + }; + + return html` +
handleDrop(e, props)} + @dragover=${(e: DragEvent) => e.preventDefault()} + > + ${props.disabledReason ? html`
${props.disabledReason}
` : nothing} ${props.error ? html`
${props.error}
` : nothing} ${ @@ -337,9 +1093,10 @@ export function renderChat(props: ChatProps) { : nothing } -
+ ${renderSearchBar(requestUpdate)} + ${renderPinnedSection(props, pinned, requestUpdate)} + +
- New messages ${icons.arrowDown} + ${icons.arrowDown} New messages ` : nothing } -
+ +
+ ${renderSlashMenu(requestUpdate, props)} ${renderAttachmentPreview(props)} -
- -
+ + handleFileSelect(e, props)} + /> + + ${vs.sttRecording && vs.sttInterimText ? html`
${vs.sttInterimText}
` : nothing} + + + +
+
- + + ${ + isSttSupported() + ? html` + + ` + : nothing + } + + ${tokens ? html`${tokens}` : nothing} +
+ +
+ ${nothing /* search hidden for now */} + ${ + canAbort + ? nothing + : html` + + ` + } + + + ${ + canAbort && (isBusy || props.sending) + ? html` + + ` + : html` + + ` + }
@@ -567,6 +1413,11 @@ function buildChatItems(props: ChatProps): Array { continue; } + // Apply search filter if active + if (vs.searchOpen && vs.searchQuery.trim() && !messageMatchesSearchQuery(msg, vs.searchQuery)) { + continue; + } + items.push({ kind: "message", key: messageKey(msg, i), diff --git a/ui/src/ui/views/command-palette.ts b/ui/src/ui/views/command-palette.ts new file mode 100644 index 00000000000..ec79f022873 --- /dev/null +++ b/ui/src/ui/views/command-palette.ts @@ -0,0 +1,263 @@ +import { html, nothing } from "lit"; +import { ref } from "lit/directives/ref.js"; +import { t } from "../../i18n/index.ts"; +import { SLASH_COMMANDS } from "../chat/slash-commands.ts"; +import { icons, type IconName } from "../icons.ts"; + +type PaletteItem = { + id: string; + label: string; + icon: IconName; + category: "search" | "navigation" | "skills"; + action: string; + description?: string; +}; + +const SLASH_PALETTE_ITEMS: PaletteItem[] = SLASH_COMMANDS.map((command) => ({ + id: `slash:${command.name}`, + label: `/${command.name}`, + icon: command.icon ?? 
"terminal", + category: "search", + action: `/${command.name}`, + description: command.description, +})); + +const PALETTE_ITEMS: PaletteItem[] = [ + ...SLASH_PALETTE_ITEMS, + { + id: "nav-overview", + label: "Overview", + icon: "barChart", + category: "navigation", + action: "nav:overview", + }, + { + id: "nav-sessions", + label: "Sessions", + icon: "fileText", + category: "navigation", + action: "nav:sessions", + }, + { + id: "nav-cron", + label: "Scheduled", + icon: "scrollText", + category: "navigation", + action: "nav:cron", + }, + { id: "nav-skills", label: "Skills", icon: "zap", category: "navigation", action: "nav:skills" }, + { + id: "nav-config", + label: "Settings", + icon: "settings", + category: "navigation", + action: "nav:config", + }, + { + id: "nav-agents", + label: "Agents", + icon: "folder", + category: "navigation", + action: "nav:agents", + }, + { + id: "skill-shell", + label: "Shell Command", + icon: "monitor", + category: "skills", + action: "/skill shell", + description: "Run shell", + }, + { + id: "skill-debug", + label: "Debug Mode", + icon: "bug", + category: "skills", + action: "/verbose full", + description: "Toggle debug", + }, +]; + +export function getPaletteItems(): readonly PaletteItem[] { + return PALETTE_ITEMS; +} + +export type CommandPaletteProps = { + open: boolean; + query: string; + activeIndex: number; + onToggle: () => void; + onQueryChange: (query: string) => void; + onActiveIndexChange: (index: number) => void; + onNavigate: (tab: string) => void; + onSlashCommand: (command: string) => void; +}; + +function filteredItems(query: string): PaletteItem[] { + if (!query) { + return PALETTE_ITEMS; + } + const q = query.toLowerCase(); + return PALETTE_ITEMS.filter( + (item) => + item.label.toLowerCase().includes(q) || + (item.description?.toLowerCase().includes(q) ?? 
false), + ); +} + +function groupItems(items: PaletteItem[]): Array<[string, PaletteItem[]]> { + const map = new Map(); + for (const item of items) { + const group = map.get(item.category) ?? []; + group.push(item); + map.set(item.category, group); + } + return [...map.entries()]; +} + +let previouslyFocused: Element | null = null; + +function saveFocus() { + previouslyFocused = document.activeElement; +} + +function restoreFocus() { + if (previouslyFocused && previouslyFocused instanceof HTMLElement) { + requestAnimationFrame(() => previouslyFocused && (previouslyFocused as HTMLElement).focus()); + } + previouslyFocused = null; +} + +function selectItem(item: PaletteItem, props: CommandPaletteProps) { + if (item.action.startsWith("nav:")) { + props.onNavigate(item.action.slice(4)); + } else { + props.onSlashCommand(item.action); + } + props.onToggle(); + restoreFocus(); +} + +function scrollActiveIntoView() { + requestAnimationFrame(() => { + const el = document.querySelector(".cmd-palette__item--active"); + el?.scrollIntoView({ block: "nearest" }); + }); +} + +function handleKeydown(e: KeyboardEvent, props: CommandPaletteProps) { + const items = filteredItems(props.query); + if (items.length === 0 && (e.key === "ArrowDown" || e.key === "ArrowUp" || e.key === "Enter")) { + return; + } + switch (e.key) { + case "ArrowDown": + e.preventDefault(); + props.onActiveIndexChange((props.activeIndex + 1) % items.length); + scrollActiveIntoView(); + break; + case "ArrowUp": + e.preventDefault(); + props.onActiveIndexChange((props.activeIndex - 1 + items.length) % items.length); + scrollActiveIntoView(); + break; + case "Enter": + e.preventDefault(); + if (items[props.activeIndex]) { + selectItem(items[props.activeIndex], props); + } + break; + case "Escape": + e.preventDefault(); + props.onToggle(); + restoreFocus(); + break; + } +} + +const CATEGORY_LABELS: Record = { + search: "Search", + navigation: "Navigation", + skills: "Skills", +}; + +function focusInput(el: Element 
| undefined) { + if (el) { + saveFocus(); + requestAnimationFrame(() => (el as HTMLInputElement).focus()); + } +} + +export function renderCommandPalette(props: CommandPaletteProps) { + if (!props.open) { + return nothing; + } + + const items = filteredItems(props.query); + const grouped = groupItems(items); + + return html` +
{ + props.onToggle(); + restoreFocus(); + }}> +
e.stopPropagation()} + @keydown=${(e: KeyboardEvent) => handleKeydown(e, props)} + > + { + props.onQueryChange((e.target as HTMLInputElement).value); + props.onActiveIndexChange(0); + }} + /> +
+ ${ + grouped.length === 0 + ? html`
+ ${icons.search} + ${t("overview.palette.noResults")} +
` + : grouped.map( + ([category, groupedItems]) => html` +
${CATEGORY_LABELS[category] ?? category}
+ ${groupedItems.map((item) => { + const globalIndex = items.indexOf(item); + const isActive = globalIndex === props.activeIndex; + return html` +
{ + e.stopPropagation(); + selectItem(item, props); + }} + @mouseenter=${() => props.onActiveIndexChange(globalIndex)} + > + ${icons[item.icon]} + ${item.label} + ${ + item.description + ? html`${item.description}` + : nothing + } +
+ `; + })} + `, + ) + } +
+ +
+
+ `; +} diff --git a/ui/src/ui/views/config-form.analyze.ts b/ui/src/ui/views/config-form.analyze.ts index 05c3bb5f1f0..82071bb4f6b 100644 --- a/ui/src/ui/views/config-form.analyze.ts +++ b/ui/src/ui/views/config-form.analyze.ts @@ -249,11 +249,21 @@ function normalizeUnion( return res; } - const primitiveTypes = new Set(["string", "number", "integer", "boolean"]); + const renderableUnionTypes = new Set([ + "string", + "number", + "integer", + "boolean", + "object", + "array", + ]); if ( remaining.length > 0 && literals.length === 0 && - remaining.every((entry) => entry.type && primitiveTypes.has(String(entry.type))) + remaining.every((entry) => { + const type = schemaType(entry); + return Boolean(type) && renderableUnionTypes.has(String(type)); + }) ) { return { schema: { diff --git a/ui/src/ui/views/config-form.node.ts b/ui/src/ui/views/config-form.node.ts index bd02be896ea..e7758e1c29a 100644 --- a/ui/src/ui/views/config-form.node.ts +++ b/ui/src/ui/views/config-form.node.ts @@ -1,10 +1,13 @@ import { html, nothing, type TemplateResult } from "lit"; +import { icons as sharedIcons } from "../icons.ts"; import type { ConfigUiHints } from "../types.ts"; import { defaultValue, + hasSensitiveConfigData, hintForPath, humanize, pathKey, + REDACTED_PLACEHOLDER, schemaType, type JsonSchema, } from "./config-form.shared.ts"; @@ -100,11 +103,77 @@ type FieldMeta = { tags: string[]; }; +type SensitiveRenderParams = { + path: Array; + value: unknown; + hints: ConfigUiHints; + revealSensitive: boolean; + isSensitivePathRevealed?: (path: Array) => boolean; +}; + +type SensitiveRenderState = { + isSensitive: boolean; + isRedacted: boolean; + isRevealed: boolean; + canReveal: boolean; +}; + export type ConfigSearchCriteria = { text: string; tags: string[]; }; +function getSensitiveRenderState(params: SensitiveRenderParams): SensitiveRenderState { + const isSensitive = hasSensitiveConfigData(params.value, params.path, params.hints); + const isRevealed = + isSensitive && + 
(params.revealSensitive || (params.isSensitivePathRevealed?.(params.path) ?? false)); + return { + isSensitive, + isRedacted: isSensitive && !isRevealed, + isRevealed, + canReveal: isSensitive, + }; +} + +function renderSensitiveToggleButton(params: { + path: Array; + state: SensitiveRenderState; + disabled: boolean; + onToggleSensitivePath?: (path: Array) => void; +}): TemplateResult | typeof nothing { + const { state } = params; + if (!state.isSensitive || !params.onToggleSensitivePath) { + return nothing; + } + return html` + + `; +} + function hasSearchCriteria(criteria: ConfigSearchCriteria | undefined): boolean { return Boolean(criteria && (criteria.text.length > 0 || criteria.tags.length > 0)); } @@ -331,6 +400,9 @@ export function renderNode(params: { disabled: boolean; showLabel?: boolean; searchCriteria?: ConfigSearchCriteria; + revealSensitive?: boolean; + isSensitivePathRevealed?: (path: Array) => boolean; + onToggleSensitivePath?: (path: Array) => void; onPatch: (path: Array, value: unknown) => void; }): TemplateResult | typeof nothing { const { schema, value, path, hints, unsupported, disabled, onPatch } = params; @@ -440,6 +512,20 @@ export function renderNode(params: { }); } } + + // Complex union (e.g. array | object) — render as JSON textarea + return renderJsonTextarea({ + schema, + value, + path, + hints, + disabled, + showLabel, + revealSensitive: params.revealSensitive ?? 
false, + isSensitivePathRevealed: params.isSensitivePathRevealed, + onToggleSensitivePath: params.onToggleSensitivePath, + onPatch, + }); } // Enum - use segmented for small, dropdown for large @@ -537,6 +623,9 @@ function renderTextInput(params: { disabled: boolean; showLabel?: boolean; searchCriteria?: ConfigSearchCriteria; + revealSensitive?: boolean; + isSensitivePathRevealed?: (path: Array) => boolean; + onToggleSensitivePath?: (path: Array) => void; inputType: "text" | "number"; onPatch: (path: Array, value: unknown) => void; }): TemplateResult { @@ -544,17 +633,22 @@ function renderTextInput(params: { const showLabel = params.showLabel ?? true; const hint = hintForPath(path, hints); const { label, help, tags } = resolveFieldMeta(path, schema, hints); - const isSensitive = - (hint?.sensitive ?? false) && !/^\$\{[^}]*\}$/.test(String(value ?? "").trim()); - const placeholder = - hint?.placeholder ?? - // oxlint-disable typescript/no-base-to-string - (isSensitive - ? "••••" - : schema.default !== undefined - ? `Default: ${String(schema.default)}` - : ""); - const displayValue = value ?? ""; + const sensitiveState = getSensitiveRenderState({ + path, + value, + hints, + revealSensitive: params.revealSensitive ?? false, + isSensitivePathRevealed: params.isSensitivePathRevealed, + }); + const placeholder = sensitiveState.isRedacted + ? REDACTED_PLACEHOLDER + : (hint?.placeholder ?? + // oxlint-disable typescript/no-base-to-string + (schema.default !== undefined ? `Default: ${String(schema.default)}` : "")); + const displayValue = sensitiveState.isRedacted ? "" : (value ?? ""); + const effectiveDisabled = disabled || sensitiveState.isRedacted; + const effectiveInputType = + sensitiveState.isSensitive && !sensitiveState.isRedacted ? "text" : inputType; return html`
@@ -563,12 +657,16 @@ function renderTextInput(params: { ${renderTags(tags)}
{ + if (sensitiveState.isRedacted) { + return; + } const raw = (e.target as HTMLInputElement).value; if (inputType === "number") { if (raw.trim() === "") { @@ -582,13 +680,19 @@ function renderTextInput(params: { onPatch(path, raw); }} @change=${(e: Event) => { - if (inputType === "number") { + if (inputType === "number" || sensitiveState.isRedacted) { return; } const raw = (e.target as HTMLInputElement).value; onPatch(path, raw.trim()); }} /> + ${renderSensitiveToggleButton({ + path, + state: sensitiveState, + disabled, + onToggleSensitivePath: params.onToggleSensitivePath, + })} ${ schema.default !== undefined ? html` @@ -596,7 +700,7 @@ function renderTextInput(params: { type="button" class="cfg-input__reset" title="Reset to default" - ?disabled=${disabled} + ?disabled=${effectiveDisabled} @click=${() => onPatch(path, schema.default)} >↺ ` @@ -702,6 +806,73 @@ function renderSelect(params: { `; } +function renderJsonTextarea(params: { + schema: JsonSchema; + value: unknown; + path: Array; + hints: ConfigUiHints; + disabled: boolean; + showLabel?: boolean; + revealSensitive?: boolean; + isSensitivePathRevealed?: (path: Array) => boolean; + onToggleSensitivePath?: (path: Array) => void; + onPatch: (path: Array, value: unknown) => void; +}): TemplateResult { + const { schema, value, path, hints, disabled, onPatch } = params; + const showLabel = params.showLabel ?? true; + const { label, help, tags } = resolveFieldMeta(path, schema, hints); + const fallback = jsonValue(value); + const sensitiveState = getSensitiveRenderState({ + path, + value, + hints, + revealSensitive: params.revealSensitive ?? false, + isSensitivePathRevealed: params.isSensitivePathRevealed, + }); + const displayValue = sensitiveState.isRedacted ? "" : fallback; + const effectiveDisabled = disabled || sensitiveState.isRedacted; + + return html` +
+ ${showLabel ? html`` : nothing} + ${help ? html`
${help}
` : nothing} + ${renderTags(tags)} +
+ + ${renderSensitiveToggleButton({ + path, + state: sensitiveState, + disabled, + onToggleSensitivePath: params.onToggleSensitivePath, + })} +
+
+ `; +} + function renderObject(params: { schema: JsonSchema; value: unknown; @@ -711,9 +882,24 @@ function renderObject(params: { disabled: boolean; showLabel?: boolean; searchCriteria?: ConfigSearchCriteria; + revealSensitive?: boolean; + isSensitivePathRevealed?: (path: Array) => boolean; + onToggleSensitivePath?: (path: Array) => void; onPatch: (path: Array, value: unknown) => void; }): TemplateResult { - const { schema, value, path, hints, unsupported, disabled, onPatch, searchCriteria } = params; + const { + schema, + value, + path, + hints, + unsupported, + disabled, + onPatch, + searchCriteria, + revealSensitive, + isSensitivePathRevealed, + onToggleSensitivePath, + } = params; const showLabel = params.showLabel ?? true; const { label, help, tags } = resolveFieldMeta(path, schema, hints); const selfMatched = @@ -754,6 +940,9 @@ function renderObject(params: { unsupported, disabled, searchCriteria: childSearchCriteria, + revealSensitive, + isSensitivePathRevealed, + onToggleSensitivePath, onPatch, }), )} @@ -768,6 +957,9 @@ function renderObject(params: { disabled, reservedKeys: reserved, searchCriteria: childSearchCriteria, + revealSensitive, + isSensitivePathRevealed, + onToggleSensitivePath, onPatch, }) : nothing @@ -818,9 +1010,24 @@ function renderArray(params: { disabled: boolean; showLabel?: boolean; searchCriteria?: ConfigSearchCriteria; + revealSensitive?: boolean; + isSensitivePathRevealed?: (path: Array) => boolean; + onToggleSensitivePath?: (path: Array) => void; onPatch: (path: Array, value: unknown) => void; }): TemplateResult { - const { schema, value, path, hints, unsupported, disabled, onPatch, searchCriteria } = params; + const { + schema, + value, + path, + hints, + unsupported, + disabled, + onPatch, + searchCriteria, + revealSensitive, + isSensitivePathRevealed, + onToggleSensitivePath, + } = params; const showLabel = params.showLabel ?? 
true; const { label, help, tags } = resolveFieldMeta(path, schema, hints); const selfMatched = @@ -900,6 +1107,9 @@ function renderArray(params: { disabled, searchCriteria: childSearchCriteria, showLabel: false, + revealSensitive, + isSensitivePathRevealed, + onToggleSensitivePath, onPatch, })}
@@ -922,6 +1132,9 @@ function renderMapField(params: { disabled: boolean; reservedKeys: Set; searchCriteria?: ConfigSearchCriteria; + revealSensitive?: boolean; + isSensitivePathRevealed?: (path: Array) => boolean; + onToggleSensitivePath?: (path: Array) => void; onPatch: (path: Array, value: unknown) => void; }): TemplateResult { const { @@ -934,6 +1147,9 @@ function renderMapField(params: { reservedKeys, onPatch, searchCriteria, + revealSensitive, + isSensitivePathRevealed, + onToggleSensitivePath, } = params; const anySchema = isAnySchema(schema); const entries = Object.entries(value ?? {}).filter(([key]) => !reservedKeys.has(key)); @@ -985,6 +1201,13 @@ function renderMapField(params: { ${visibleEntries.map(([key, entryValue]) => { const valuePath = [...path, key]; const fallback = jsonValue(entryValue); + const sensitiveState = getSensitiveRenderState({ + path: valuePath, + value: entryValue, + hints, + revealSensitive: revealSensitive ?? false, + isSensitivePathRevealed, + }); return html`
@@ -1028,26 +1251,40 @@ function renderMapField(params: { ${ anySchema ? html` - + rows="2" + .value=${sensitiveState.isRedacted ? "" : fallback} + ?disabled=${disabled || sensitiveState.isRedacted} + ?readonly=${sensitiveState.isRedacted} + @change=${(e: Event) => { + if (sensitiveState.isRedacted) { + return; + } + const target = e.target as HTMLTextAreaElement; + const raw = target.value.trim(); + if (!raw) { + onPatch(valuePath, undefined); + return; + } + try { + onPatch(valuePath, JSON.parse(raw)); + } catch { + target.value = fallback; + } + }} + > + ${renderSensitiveToggleButton({ + path: valuePath, + state: sensitiveState, + disabled, + onToggleSensitivePath, + })} +
` : renderNode({ schema, @@ -1058,6 +1295,9 @@ function renderMapField(params: { disabled, searchCriteria, showLabel: false, + revealSensitive, + isSensitivePathRevealed, + onToggleSensitivePath, onPatch, }) } diff --git a/ui/src/ui/views/config-form.render.ts b/ui/src/ui/views/config-form.render.ts index 124ca50a585..5f26383c2f5 100644 --- a/ui/src/ui/views/config-form.render.ts +++ b/ui/src/ui/views/config-form.render.ts @@ -13,6 +13,9 @@ export type ConfigFormProps = { searchQuery?: string; activeSection?: string | null; activeSubsection?: string | null; + revealSensitive?: boolean; + isSensitivePathRevealed?: (path: Array) => boolean; + onToggleSensitivePath?: (path: Array) => void; onPatch: (path: Array, value: unknown) => void; }; @@ -291,22 +294,16 @@ function matchesSearch(params: { const criteria = parseConfigSearchQuery(params.query); const q = criteria.text; const meta = SECTION_META[params.key]; + const sectionMetaMatches = + q && + (params.key.toLowerCase().includes(q) || + (meta?.label ? meta.label.toLowerCase().includes(q) : false) || + (meta?.description ? meta.description.toLowerCase().includes(q) : false)); - // Check key name - if (q && params.key.toLowerCase().includes(q)) { + if (sectionMetaMatches && criteria.tags.length === 0) { return true; } - // Check label and description - if (q && meta) { - if (meta.label.toLowerCase().includes(q)) { - return true; - } - if (meta.description.toLowerCase().includes(q)) { - return true; - } - } - return matchesNodeSearch({ schema: params.schema, value: params.sectionValue, @@ -431,6 +428,9 @@ export function renderConfigForm(props: ConfigFormProps) { disabled: props.disabled ?? false, showLabel: false, searchCriteria, + revealSensitive: props.revealSensitive ?? false, + isSensitivePathRevealed: props.isSensitivePathRevealed, + onToggleSensitivePath: props.onToggleSensitivePath, onPatch: props.onPatch, })}
@@ -466,6 +466,9 @@ export function renderConfigForm(props: ConfigFormProps) { disabled: props.disabled ?? false, showLabel: false, searchCriteria, + revealSensitive: props.revealSensitive ?? false, + isSensitivePathRevealed: props.isSensitivePathRevealed, + onToggleSensitivePath: props.onToggleSensitivePath, onPatch: props.onPatch, })}
diff --git a/ui/src/ui/views/config-form.shared.ts b/ui/src/ui/views/config-form.shared.ts index 366671041da..b535c49e25f 100644 --- a/ui/src/ui/views/config-form.shared.ts +++ b/ui/src/ui/views/config-form.shared.ts @@ -1,4 +1,4 @@ -import type { ConfigUiHints } from "../types.ts"; +import type { ConfigUiHint, ConfigUiHints } from "../types.ts"; export type JsonSchema = { type?: string | string[]; @@ -94,3 +94,110 @@ export function humanize(raw: string) { .replace(/\s+/g, " ") .replace(/^./, (m) => m.toUpperCase()); } + +const SENSITIVE_KEY_WHITELIST_SUFFIXES = [ + "maxtokens", + "maxoutputtokens", + "maxinputtokens", + "maxcompletiontokens", + "contexttokens", + "totaltokens", + "tokencount", + "tokenlimit", + "tokenbudget", + "passwordfile", +] as const; + +const SENSITIVE_PATTERNS = [ + /token$/i, + /password/i, + /secret/i, + /api.?key/i, + /serviceaccount(?:ref)?$/i, +]; + +const ENV_VAR_PLACEHOLDER_PATTERN = /^\$\{[^}]*\}$/; + +export const REDACTED_PLACEHOLDER = "[redacted - click reveal to view]"; + +function isEnvVarPlaceholder(value: string): boolean { + return ENV_VAR_PLACEHOLDER_PATTERN.test(value.trim()); +} + +export function isSensitiveConfigPath(path: string): boolean { + const lowerPath = path.toLowerCase(); + const whitelisted = SENSITIVE_KEY_WHITELIST_SUFFIXES.some((suffix) => lowerPath.endsWith(suffix)); + return !whitelisted && SENSITIVE_PATTERNS.some((pattern) => pattern.test(path)); +} + +function isSensitiveLeafValue(value: unknown): boolean { + if (typeof value === "string") { + return value.trim().length > 0 && !isEnvVarPlaceholder(value); + } + return value !== undefined && value !== null; +} + +function isHintSensitive(hint: ConfigUiHint | undefined): boolean { + return hint?.sensitive ?? 
false; +} + +export function hasSensitiveConfigData( + value: unknown, + path: Array, + hints: ConfigUiHints, +): boolean { + const key = pathKey(path); + const hint = hintForPath(path, hints); + const pathIsSensitive = isHintSensitive(hint) || isSensitiveConfigPath(key); + + if (pathIsSensitive && isSensitiveLeafValue(value)) { + return true; + } + + if (Array.isArray(value)) { + return value.some((item, index) => hasSensitiveConfigData(item, [...path, index], hints)); + } + + if (value && typeof value === "object") { + return Object.entries(value as Record).some(([childKey, childValue]) => + hasSensitiveConfigData(childValue, [...path, childKey], hints), + ); + } + + return false; +} + +export function countSensitiveConfigValues( + value: unknown, + path: Array, + hints: ConfigUiHints, +): number { + if (value == null) { + return 0; + } + + const key = pathKey(path); + const hint = hintForPath(path, hints); + const pathIsSensitive = isHintSensitive(hint) || isSensitiveConfigPath(key); + + if (pathIsSensitive && isSensitiveLeafValue(value)) { + return 1; + } + + if (Array.isArray(value)) { + return value.reduce( + (count, item, index) => count + countSensitiveConfigValues(item, [...path, index], hints), + 0, + ); + } + + if (value && typeof value === "object") { + return Object.entries(value as Record).reduce( + (count, [childKey, childValue]) => + count + countSensitiveConfigValues(childValue, [...path, childKey], hints), + 0, + ); + } + + return 0; +} diff --git a/ui/src/ui/views/config.browser.test.ts b/ui/src/ui/views/config.browser.test.ts index 889d046f942..4b546cfa0b7 100644 --- a/ui/src/ui/views/config.browser.test.ts +++ b/ui/src/ui/views/config.browser.test.ts @@ -1,5 +1,6 @@ import { render } from "lit"; import { describe, expect, it, vi } from "vitest"; +import type { ThemeMode, ThemeName } from "../theme.ts"; import { renderConfig } from "./config.ts"; describe("config view", () => { @@ -20,6 +21,7 @@ describe("config view", () => { schemaLoading: 
false, uiHints: {}, formMode: "form" as const, + showModeToggle: true, formValue: {}, originalValue: {}, searchQuery: "", @@ -35,6 +37,13 @@ describe("config view", () => { onApply: vi.fn(), onUpdate: vi.fn(), onSubsectionChange: vi.fn(), + version: "2026.3.11", + theme: "claw" as ThemeName, + themeMode: "system" as ThemeMode, + setTheme: vi.fn(), + setThemeMode: vi.fn(), + gatewayUrl: "", + assistantName: "OpenClaw", }); function findActionButtons(container: HTMLElement): { @@ -200,34 +209,55 @@ describe("config view", () => { expect(onSearchChange).toHaveBeenCalledWith("gateway"); }); - it("shows all tag options in compact tag picker", () => { + it("renders the top search icon inside the search input row", () => { const container = document.createElement("div"); render(renderConfig(baseProps()), container); - const options = Array.from(container.querySelectorAll(".config-search__tag-option")).map( - (option) => option.textContent?.trim(), - ); - expect(options).toContain("tag:security"); - expect(options).toContain("tag:advanced"); - expect(options).toHaveLength(15); + const icon = container.querySelector(".config-search__icon"); + expect(icon).not.toBeNull(); + expect(icon?.closest(".config-search__input-row")).not.toBeNull(); }); - it("updates search query when toggling a tag option", () => { + it("renders top tabs for root and available sections", () => { + const container = document.createElement("div"); + render( + renderConfig({ + ...baseProps(), + schema: { + type: "object", + properties: { + gateway: { type: "object", properties: {} }, + agents: { type: "object", properties: {} }, + }, + }, + }), + container, + ); + + const tabs = Array.from(container.querySelectorAll(".config-top-tabs__tab")).map((tab) => + tab.textContent?.trim(), + ); + expect(tabs).toContain("Settings"); + expect(tabs).toContain("Agents"); + expect(tabs).toContain("Gateway"); + expect(tabs).toContain("Appearance"); + }); + + it("clears the active search query", () => { const container 
= document.createElement("div"); const onSearchChange = vi.fn(); render( renderConfig({ ...baseProps(), + searchQuery: "gateway", onSearchChange, }), container, ); - const option = container.querySelector( - '.config-search__tag-option[data-tag="security"]', - ); - expect(option).toBeTruthy(); - option?.click(); - expect(onSearchChange).toHaveBeenCalledWith("tag:security"); + const clearButton = container.querySelector(".config-search__clear"); + expect(clearButton).toBeTruthy(); + clearButton?.click(); + expect(onSearchChange).toHaveBeenCalledWith(""); }); }); diff --git a/ui/src/ui/views/config.ts b/ui/src/ui/views/config.ts index 5fa88c53aac..06c0f38e892 100644 --- a/ui/src/ui/views/config.ts +++ b/ui/src/ui/views/config.ts @@ -1,8 +1,17 @@ -import { html, nothing } from "lit"; +import { html, nothing, type TemplateResult } from "lit"; +import { icons } from "../icons.ts"; +import type { ThemeTransitionContext } from "../theme-transition.ts"; +import type { ThemeMode, ThemeName } from "../theme.ts"; import type { ConfigUiHints } from "../types.ts"; -import { hintForPath, humanize, schemaType, type JsonSchema } from "./config-form.shared.ts"; +import { + countSensitiveConfigValues, + humanize, + pathKey, + REDACTED_PLACEHOLDER, + schemaType, + type JsonSchema, +} from "./config-form.shared.ts"; import { analyzeConfigSchema, renderConfigForm, SECTION_META } from "./config-form.ts"; -import { getTagFilters, replaceTagFilters } from "./config-search.ts"; export type ConfigProps = { raw: string; @@ -18,6 +27,7 @@ export type ConfigProps = { schemaLoading: boolean; uiHints: ConfigUiHints; formMode: "form" | "raw"; + showModeToggle?: boolean; formValue: Record | null; originalValue: Record | null; searchQuery: string; @@ -33,26 +43,21 @@ export type ConfigProps = { onSave: () => void; onApply: () => void; onUpdate: () => void; + onOpenFile?: () => void; + version: string; + theme: ThemeName; + themeMode: ThemeMode; + setTheme: (theme: ThemeName, context?: 
ThemeTransitionContext) => void; + setThemeMode: (mode: ThemeMode, context?: ThemeTransitionContext) => void; + gatewayUrl: string; + assistantName: string; + configPath?: string | null; + navRootLabel?: string; + includeSections?: string[]; + excludeSections?: string[]; + includeVirtualSections?: boolean; }; -const TAG_SEARCH_PRESETS = [ - "security", - "auth", - "network", - "access", - "privacy", - "observability", - "performance", - "reliability", - "storage", - "models", - "media", - "automation", - "channels", - "tools", - "advanced", -] as const; - // SVG Icons for sidebar (Lucide-style) const sidebarIcons = { all: html` @@ -273,6 +278,19 @@ const sidebarIcons = { `, + __appearance__: html` + + + + + + + + + + + + `, default: html` @@ -281,35 +299,137 @@ const sidebarIcons = { `, }; -// Section definitions -const SECTIONS: Array<{ key: string; label: string }> = [ - { key: "env", label: "Environment" }, - { key: "update", label: "Updates" }, - { key: "agents", label: "Agents" }, - { key: "auth", label: "Authentication" }, - { key: "channels", label: "Channels" }, - { key: "messages", label: "Messages" }, - { key: "commands", label: "Commands" }, - { key: "hooks", label: "Hooks" }, - { key: "skills", label: "Skills" }, - { key: "tools", label: "Tools" }, - { key: "gateway", label: "Gateway" }, - { key: "wizard", label: "Setup Wizard" }, -]; - -type SubsectionEntry = { - key: string; +// Categorised section definitions +type SectionCategory = { + id: string; label: string; - description?: string; - order: number; + sections: Array<{ key: string; label: string }>; }; -const ALL_SUBSECTION = "__all__"; +const SECTION_CATEGORIES: SectionCategory[] = [ + { + id: "core", + label: "Core", + sections: [ + { key: "env", label: "Environment" }, + { key: "auth", label: "Authentication" }, + { key: "update", label: "Updates" }, + { key: "meta", label: "Meta" }, + { key: "logging", label: "Logging" }, + ], + }, + { + id: "ai", + label: "AI & Agents", + sections: [ + { 
key: "agents", label: "Agents" }, + { key: "models", label: "Models" }, + { key: "skills", label: "Skills" }, + { key: "tools", label: "Tools" }, + { key: "memory", label: "Memory" }, + { key: "session", label: "Session" }, + ], + }, + { + id: "communication", + label: "Communication", + sections: [ + { key: "channels", label: "Channels" }, + { key: "messages", label: "Messages" }, + { key: "broadcast", label: "Broadcast" }, + { key: "talk", label: "Talk" }, + { key: "audio", label: "Audio" }, + ], + }, + { + id: "automation", + label: "Automation", + sections: [ + { key: "commands", label: "Commands" }, + { key: "hooks", label: "Hooks" }, + { key: "bindings", label: "Bindings" }, + { key: "cron", label: "Cron" }, + { key: "approvals", label: "Approvals" }, + { key: "plugins", label: "Plugins" }, + ], + }, + { + id: "infrastructure", + label: "Infrastructure", + sections: [ + { key: "gateway", label: "Gateway" }, + { key: "web", label: "Web" }, + { key: "browser", label: "Browser" }, + { key: "nodeHost", label: "NodeHost" }, + { key: "canvasHost", label: "CanvasHost" }, + { key: "discovery", label: "Discovery" }, + { key: "media", label: "Media" }, + ], + }, + { + id: "appearance", + label: "Appearance", + sections: [ + { key: "__appearance__", label: "Appearance" }, + { key: "ui", label: "UI" }, + { key: "wizard", label: "Setup Wizard" }, + ], + }, +]; + +// Flat lookup: all categorised keys +const CATEGORISED_KEYS = new Set(SECTION_CATEGORIES.flatMap((c) => c.sections.map((s) => s.key))); function getSectionIcon(key: string) { return sidebarIcons[key as keyof typeof sidebarIcons] ?? 
sidebarIcons.default; } +function scopeSchemaSections( + schema: JsonSchema | null, + params: { include?: ReadonlySet | null; exclude?: ReadonlySet | null }, +): JsonSchema | null { + if (!schema || schemaType(schema) !== "object" || !schema.properties) { + return schema; + } + const include = params.include; + const exclude = params.exclude; + const nextProps: Record = {}; + for (const [key, value] of Object.entries(schema.properties)) { + if (include && include.size > 0 && !include.has(key)) { + continue; + } + if (exclude && exclude.size > 0 && exclude.has(key)) { + continue; + } + nextProps[key] = value; + } + return { ...schema, properties: nextProps }; +} + +function scopeUnsupportedPaths( + unsupportedPaths: string[], + params: { include?: ReadonlySet | null; exclude?: ReadonlySet | null }, +): string[] { + const include = params.include; + const exclude = params.exclude; + if ((!include || include.size === 0) && (!exclude || exclude.size === 0)) { + return unsupportedPaths; + } + return unsupportedPaths.filter((entry) => { + if (entry === "") { + return true; + } + const [top] = entry.split("."); + if (include && include.size > 0) { + return include.has(top); + } + if (exclude && exclude.size > 0) { + return !exclude.has(top); + } + return true; + }); +} + function resolveSectionMeta( key: string, schema?: JsonSchema, @@ -327,26 +447,6 @@ function resolveSectionMeta( }; } -function resolveSubsections(params: { - key: string; - schema: JsonSchema | undefined; - uiHints: ConfigUiHints; -}): SubsectionEntry[] { - const { key, schema, uiHints } = params; - if (!schema || schemaType(schema) !== "object" || !schema.properties) { - return []; - } - const entries = Object.entries(schema.properties).map(([subKey, node]) => { - const hint = hintForPath([key, subKey], uiHints); - const label = hint?.label ?? node.title ?? humanize(subKey); - const description = hint?.help ?? node.description ?? ""; - const order = hint?.order ?? 
50; - return { key: subKey, label, description, order }; - }); - entries.sort((a, b) => (a.order !== b.order ? a.order - b.order : a.key.localeCompare(b.key))); - return entries; -} - function computeDiff( original: Record | null, current: Record | null, @@ -402,237 +502,280 @@ function truncateValue(value: unknown, maxLen = 40): string { return str.slice(0, maxLen - 3) + "..."; } +function renderDiffValue(path: string, value: unknown, _uiHints: ConfigUiHints): string { + return truncateValue(value); +} + +type ThemeOption = { id: ThemeName; label: string; description: string; icon: TemplateResult }; +const THEME_OPTIONS: ThemeOption[] = [ + { id: "claw", label: "Claw", description: "Chroma family", icon: icons.zap }, + { id: "knot", label: "Knot", description: "Knot family", icon: icons.link }, + { id: "dash", label: "Dash", description: "Field family", icon: icons.barChart }, +]; + +function renderAppearanceSection(props: ConfigProps) { + const MODE_OPTIONS: Array<{ + id: ThemeMode; + label: string; + description: string; + icon: TemplateResult; + }> = [ + { id: "system", label: "System", description: "Follow OS light or dark", icon: icons.monitor }, + { id: "light", label: "Light", description: "Force light mode", icon: icons.sun }, + { id: "dark", label: "Dark", description: "Force dark mode", icon: icons.moon }, + ]; + + return html` +
+
+

Theme

+

Choose a theme family.

+
+ ${THEME_OPTIONS.map( + (opt) => html` + + `, + )} +
+
+ +
+

Mode

+

Choose light or dark mode for the selected theme.

+
+ ${MODE_OPTIONS.map( + (opt) => html` + + `, + )} +
+
+ +
+

Connection

+
+
+ Gateway + ${props.gatewayUrl || "-"} +
+
+ Status + + + ${props.connected ? "Connected" : "Offline"} + +
+ ${ + props.assistantName + ? html` +
+ Assistant + ${props.assistantName} +
+ ` + : nothing + } +
+
+
+ `; +} + +interface ConfigEphemeralState { + rawRevealed: boolean; + envRevealed: boolean; + validityDismissed: boolean; + revealedSensitivePaths: Set; +} + +function createConfigEphemeralState(): ConfigEphemeralState { + return { + rawRevealed: false, + envRevealed: false, + validityDismissed: false, + revealedSensitivePaths: new Set(), + }; +} + +const cvs = createConfigEphemeralState(); + +function isSensitivePathRevealed(path: Array): boolean { + const key = pathKey(path); + return key ? cvs.revealedSensitivePaths.has(key) : false; +} + +function toggleSensitivePathReveal(path: Array) { + const key = pathKey(path); + if (!key) { + return; + } + if (cvs.revealedSensitivePaths.has(key)) { + cvs.revealedSensitivePaths.delete(key); + } else { + cvs.revealedSensitivePaths.add(key); + } +} + +export function resetConfigViewStateForTests() { + Object.assign(cvs, createConfigEphemeralState()); +} + export function renderConfig(props: ConfigProps) { + const showModeToggle = props.showModeToggle ?? false; const validity = props.valid == null ? "unknown" : props.valid ? "valid" : "invalid"; - const analysis = analyzeConfigSchema(props.schema); + const includeVirtualSections = props.includeVirtualSections ?? true; + const include = props.includeSections?.length ? new Set(props.includeSections) : null; + const exclude = props.excludeSections?.length ? new Set(props.excludeSections) : null; + const rawAnalysis = analyzeConfigSchema(props.schema); + const analysis = { + schema: scopeSchemaSections(rawAnalysis.schema, { include, exclude }), + unsupportedPaths: scopeUnsupportedPaths(rawAnalysis.unsupportedPaths, { include, exclude }), + }; const formUnsafe = analysis.schema ? analysis.unsupportedPaths.length > 0 : false; + const formMode = showModeToggle ? 
props.formMode : "form"; + const envSensitiveVisible = cvs.envRevealed; - // Get available sections from schema + // Build categorised nav from schema - only include sections that exist in the schema const schemaProps = analysis.schema?.properties ?? {}; - const availableSections = SECTIONS.filter((s) => s.key in schemaProps); - // Add any sections in schema but not in our list - const knownKeys = new Set(SECTIONS.map((s) => s.key)); + const VIRTUAL_SECTIONS = new Set(["__appearance__"]); + const visibleCategories = SECTION_CATEGORIES.map((cat) => ({ + ...cat, + sections: cat.sections.filter( + (s) => (includeVirtualSections && VIRTUAL_SECTIONS.has(s.key)) || s.key in schemaProps, + ), + })).filter((cat) => cat.sections.length > 0); + + // Catch any schema keys not in our categories const extraSections = Object.keys(schemaProps) - .filter((k) => !knownKeys.has(k)) + .filter((k) => !CATEGORISED_KEYS.has(k)) .map((k) => ({ key: k, label: k.charAt(0).toUpperCase() + k.slice(1) })); - const allSections = [...availableSections, ...extraSections]; + const otherCategory: SectionCategory | null = + extraSections.length > 0 ? { id: "other", label: "Other", sections: extraSections } : null; + const isVirtualSection = + includeVirtualSections && + props.activeSection != null && + VIRTUAL_SECTIONS.has(props.activeSection); const activeSectionSchema = - props.activeSection && analysis.schema && schemaType(analysis.schema) === "object" + props.activeSection && + !isVirtualSection && + analysis.schema && + schemaType(analysis.schema) === "object" ? analysis.schema.properties?.[props.activeSection] : undefined; - const activeSectionMeta = props.activeSection - ? resolveSectionMeta(props.activeSection, activeSectionSchema) - : null; - const subsections = props.activeSection - ? 
resolveSubsections({ - key: props.activeSection, - schema: activeSectionSchema, - uiHints: props.uiHints, - }) - : []; - const allowSubnav = - props.formMode === "form" && Boolean(props.activeSection) && subsections.length > 0; - const isAllSubsection = props.activeSubsection === ALL_SUBSECTION; - const effectiveSubsection = props.searchQuery - ? null - : isAllSubsection - ? null - : (props.activeSubsection ?? subsections[0]?.key ?? null); + const activeSectionMeta = + props.activeSection && !isVirtualSection + ? resolveSectionMeta(props.activeSection, activeSectionSchema) + : null; + // Config subsections are always rendered as a single page per section. + const effectiveSubsection = null; + + const topTabs = [ + { key: null as string | null, label: props.navRootLabel ?? "Settings" }, + ...[...visibleCategories, ...(otherCategory ? [otherCategory] : [])].flatMap((cat) => + cat.sections.map((s) => ({ key: s.key, label: s.label })), + ), + ]; // Compute diff for showing changes (works for both form and raw modes) - const diff = props.formMode === "form" ? computeDiff(props.originalValue, props.formValue) : []; - const hasRawChanges = props.formMode === "raw" && props.raw !== props.originalRaw; - const hasChanges = props.formMode === "form" ? diff.length > 0 : hasRawChanges; + const diff = formMode === "form" ? computeDiff(props.originalValue, props.formValue) : []; + const hasRawChanges = formMode === "raw" && props.raw !== props.originalRaw; + const hasChanges = formMode === "form" ? diff.length > 0 : hasRawChanges; // Save/apply buttons require actual changes to be enabled. // Note: formUnsafe warns about unsupported schema paths but shouldn't block saving. const canSaveForm = Boolean(props.formValue) && !props.loading && Boolean(analysis.schema); const canSave = - props.connected && - !props.saving && - hasChanges && - (props.formMode === "raw" ? true : canSaveForm); + props.connected && !props.saving && hasChanges && (formMode === "raw" ? 
true : canSaveForm); const canApply = props.connected && !props.applying && !props.updating && hasChanges && - (props.formMode === "raw" ? true : canSaveForm); + (formMode === "raw" ? true : canSaveForm); const canUpdate = props.connected && !props.applying && !props.updating; - const selectedTags = new Set(getTagFilters(props.searchQuery)); + + const showAppearanceOnRoot = + includeVirtualSections && + formMode === "form" && + props.activeSection === null && + Boolean(include?.has("__appearance__")); return html`
- - - -
-
${ hasChanges ? html` - ${ - props.formMode === "raw" - ? "Unsaved changes" - : `${diff.length} unsaved change${diff.length !== 1 ? "s" : ""}` - } - ` + ${ + formMode === "raw" + ? "Unsaved changes" + : `${diff.length} unsaved change${diff.length !== 1 ? "s" : ""}` + } + ` : html` No changes ` }
+ ${ + props.onOpenFile + ? html` + + ` + : nothing + }
+
+ ${ + formMode === "form" + ? html` + + ` + : nothing + } + +
+ ${topTabs.map( + (tab) => html` + + `, + )} +
+ +
+ ${ + showModeToggle + ? html` +
+ + +
+ ` + : nothing + } +
+
+ + ${ + validity === "invalid" && !cvs.validityDismissed + ? html` +
+ + + + + + Your configuration is invalid. Some settings may not work as expected. + +
+ ` + : nothing + } + ${ - hasChanges && props.formMode === "form" + hasChanges && formMode === "form" ? html`
@@ -691,11 +940,11 @@ export function renderConfig(props: ConfigProps) {
${change.path}
${truncateValue(change.from)}${renderDiffValue(change.path, change.from, props.uiHints)} ${truncateValue(change.to)}${renderDiffValue(change.path, change.to, props.uiHints)}
@@ -706,12 +955,12 @@ export function renderConfig(props: ConfigProps) { ` : nothing } - ${ - activeSectionMeta && props.formMode === "form" - ? html` -
-
- ${getSectionIcon(props.activeSection ?? "")} + ${ + activeSectionMeta && formMode === "form" + ? html` +
+
+ ${getSectionIcon(props.activeSection ?? "")}
@@ -725,43 +974,40 @@ export function renderConfig(props: ConfigProps) { : nothing }
+ ${ + props.activeSection === "env" + ? html` + + ` + : nothing + }
` - : nothing - } - ${ - allowSubnav - ? html` -
- - ${subsections.map( - (entry) => html` - - `, - )} -
- ` - : nothing - } - + : nothing + }
${ - props.formMode === "form" - ? html` + props.activeSection === "__appearance__" + ? includeVirtualSections + ? renderAppearanceSection(props) + : nothing + : formMode === "form" + ? html` + ${showAppearanceOnRoot ? renderAppearanceSection(props) : nothing} ${ props.schemaLoading ? html` @@ -780,28 +1026,75 @@ export function renderConfig(props: ConfigProps) { searchQuery: props.searchQuery, activeSection: props.activeSection, activeSubsection: effectiveSubsection, + revealSensitive: + props.activeSection === "env" ? envSensitiveVisible : false, + isSensitivePathRevealed, + onToggleSensitivePath: (path) => { + toggleSensitivePathReveal(path); + props.onRawChange(props.raw); + }, }) } - ${ - formUnsafe - ? html` -
- Form view can't safely edit some fields. Use Raw to avoid losing config entries. -
- ` - : nothing - } - ` - : html` - ` + : (() => { + const sensitiveCount = countSensitiveConfigValues( + props.formValue, + [], + props.uiHints, + ); + const blurred = sensitiveCount > 0 && !cvs.rawRevealed; + return html` + ${ + formUnsafe + ? html` +
+ Your config contains fields the form editor can't safely represent. Use Raw mode to edit those + entries. +
+ ` + : nothing + } + + `; + })() }
diff --git a/ui/src/ui/views/cron.ts b/ui/src/ui/views/cron.ts index 296a692d115..1509637b46f 100644 --- a/ui/src/ui/views/cron.ts +++ b/ui/src/ui/views/cron.ts @@ -360,7 +360,9 @@ export function renderCron(props: CronProps) { props.runsScope === "all" ? t("cron.jobList.allJobs") : (selectedJob?.name ?? props.runsJobId ?? t("cron.jobList.selectJob")); - const runs = props.runs; + const runs = props.runs.toSorted((a, b) => + props.runsSortDir === "asc" ? a.ts - b.ts : b.ts - a.ts, + ); const runStatusOptions = getRunStatusOptions(); const runDeliveryOptions = getRunDeliveryOptions(); const selectedStatusLabels = runStatusOptions @@ -372,7 +374,7 @@ export function renderCron(props: CronProps) { const statusSummary = summarizeSelection(selectedStatusLabels, t("cron.runs.allStatuses")); const deliverySummary = summarizeSelection(selectedDeliveryLabels, t("cron.runs.allDelivery")); const supportsAnnounce = - props.form.sessionTarget === "isolated" && props.form.payloadKind === "agentTurn"; + props.form.sessionTarget !== "main" && props.form.payloadKind === "agentTurn"; const selectedDeliveryMode = props.form.deliveryMode === "announce" && !supportsAnnounce ? 
"none" : props.form.deliveryMode; const blockingFields = collectBlockingFields(props.fieldErrors, props.form, selectedDeliveryMode); @@ -1569,7 +1571,7 @@ function renderJob(job: CronJob, props: CronProps) { ?disabled=${props.busy} @click=${(event: Event) => { event.stopPropagation(); - selectAnd(() => props.onLoadRuns(job.id)); + props.onLoadRuns(job.id); }} > ${t("cron.jobList.history")} diff --git a/ui/src/ui/views/debug.ts b/ui/src/ui/views/debug.ts index 9ca33725993..f63e9be8267 100644 --- a/ui/src/ui/views/debug.ts +++ b/ui/src/ui/views/debug.ts @@ -9,6 +9,7 @@ export type DebugProps = { models: unknown[]; heartbeat: unknown; eventLog: EventLogEntry[]; + methods: string[]; callMethod: string; callParams: string; callResult: string | null; @@ -33,7 +34,7 @@ export function renderDebug(props: DebugProps) { critical > 0 ? `${critical} critical` : warn > 0 ? `${warn} warnings` : "No critical issues"; return html` -
+
@@ -71,14 +72,22 @@ export function renderDebug(props: DebugProps) {
Manual RPC
Send a raw gateway method with JSON params.
-
+
${ props.lastError @@ -42,16 +62,18 @@ export function renderInstances(props: InstancesProps) { ? html`
No instances reported yet.
` - : props.entries.map((entry) => renderEntry(entry)) + : props.entries.map((entry) => renderEntry(entry, masked)) }
`; } -function renderEntry(entry: PresenceEntry) { +function renderEntry(entry: PresenceEntry, masked: boolean) { const lastInput = entry.lastInputSeconds != null ? `${entry.lastInputSeconds}s ago` : "n/a"; const mode = entry.mode ?? "unknown"; + const host = entry.host ?? "unknown host"; + const ip = entry.ip ?? null; const roles = Array.isArray(entry.roles) ? entry.roles.filter(Boolean) : []; const scopes = Array.isArray(entry.scopes) ? entry.scopes.filter(Boolean) : []; const scopesLabel = @@ -63,8 +85,12 @@ function renderEntry(entry: PresenceEntry) { return html`
-
${entry.host ?? "unknown host"}
-
${formatPresenceSummary(entry)}
+
+ ${host} +
+
+ ${ip ? html`${ip} ` : nothing}${mode} ${entry.version ?? ""} +
${mode} ${roles.map((role) => html`${role}`)} diff --git a/ui/src/ui/views/login-gate.ts b/ui/src/ui/views/login-gate.ts new file mode 100644 index 00000000000..77613822cdf --- /dev/null +++ b/ui/src/ui/views/login-gate.ts @@ -0,0 +1,133 @@ +import { html } from "lit"; +import { t } from "../../i18n/index.ts"; +import { renderThemeToggle } from "../app-render.helpers.ts"; +import type { AppViewState } from "../app-view-state.ts"; +import { icons } from "../icons.ts"; +import { normalizeBasePath } from "../navigation.ts"; +import { agentLogoUrl } from "./agents-utils.ts"; + +export function renderLoginGate(state: AppViewState) { + const basePath = normalizeBasePath(state.basePath ?? ""); + const faviconSrc = agentLogoUrl(basePath); + + return html` + + `; +} diff --git a/ui/src/ui/views/overview-attention.ts b/ui/src/ui/views/overview-attention.ts new file mode 100644 index 00000000000..8e09ce1c19f --- /dev/null +++ b/ui/src/ui/views/overview-attention.ts @@ -0,0 +1,61 @@ +import { html, nothing } from "lit"; +import { t } from "../../i18n/index.ts"; +import { buildExternalLinkRel, EXTERNAL_LINK_TARGET } from "../external-link.ts"; +import { icons, type IconName } from "../icons.ts"; +import type { AttentionItem } from "../types.ts"; + +export type OverviewAttentionProps = { + items: AttentionItem[]; +}; + +function severityClass(severity: string) { + if (severity === "error") { + return "danger"; + } + if (severity === "warning") { + return "warn"; + } + return ""; +} + +function attentionIcon(name: string) { + if (name in icons) { + return icons[name as IconName]; + } + return icons.radio; +} + +export function renderOverviewAttention(props: OverviewAttentionProps) { + if (props.items.length === 0) { + return nothing; + } + + return html` +
+
${t("overview.attention.title")}
+
+ ${props.items.map( + (item) => html` +
+ ${attentionIcon(item.icon)} +
+
${item.title}
+
${item.description}
+
+ ${ + item.href + ? html`${t("common.docs")}` + : nothing + } +
+ `, + )} +
+
+ `; +} diff --git a/ui/src/ui/views/overview-cards.ts b/ui/src/ui/views/overview-cards.ts new file mode 100644 index 00000000000..61e98e94781 --- /dev/null +++ b/ui/src/ui/views/overview-cards.ts @@ -0,0 +1,162 @@ +import { html, nothing, type TemplateResult } from "lit"; +import { unsafeHTML } from "lit/directives/unsafe-html.js"; +import { t } from "../../i18n/index.ts"; +import { formatCost, formatTokens, formatRelativeTimestamp } from "../format.ts"; +import { formatNextRun } from "../presenter.ts"; +import type { + SessionsUsageResult, + SessionsListResult, + SkillStatusReport, + CronJob, + CronStatus, +} from "../types.ts"; + +export type OverviewCardsProps = { + usageResult: SessionsUsageResult | null; + sessionsResult: SessionsListResult | null; + skillsReport: SkillStatusReport | null; + cronJobs: CronJob[]; + cronStatus: CronStatus | null; + presenceCount: number; + onNavigate: (tab: string) => void; +}; + +const DIGIT_RUN = /\d{3,}/g; + +function blurDigits(value: string): TemplateResult { + const escaped = value.replace(/&/g, "&").replace(//g, ">"); + const blurred = escaped.replace(DIGIT_RUN, (m) => `${m}`); + return html`${unsafeHTML(blurred)}`; +} + +type StatCard = { + kind: string; + tab: string; + label: string; + value: string | TemplateResult; + hint: string | TemplateResult; +}; + +function renderStatCard(card: StatCard, onNavigate: (tab: string) => void) { + return html` + + `; +} + +function renderSkeletonCards() { + return html` +
+ ${[0, 1, 2, 3].map( + (i) => html` +
+ + + +
+ `, + )} +
+ `; +} + +export function renderOverviewCards(props: OverviewCardsProps) { + const dataLoaded = + props.usageResult != null || props.sessionsResult != null || props.skillsReport != null; + if (!dataLoaded) { + return renderSkeletonCards(); + } + + const totals = props.usageResult?.totals; + const totalCost = formatCost(totals?.totalCost); + const totalTokens = formatTokens(totals?.totalTokens); + const totalMessages = totals ? String(props.usageResult?.aggregates?.messages?.total ?? 0) : "0"; + const sessionCount = props.sessionsResult?.count ?? null; + + const skills = props.skillsReport?.skills ?? []; + const enabledSkills = skills.filter((s) => !s.disabled).length; + const blockedSkills = skills.filter((s) => s.blockedByAllowlist).length; + const totalSkills = skills.length; + + const cronEnabled = props.cronStatus?.enabled ?? null; + const cronNext = props.cronStatus?.nextWakeAtMs ?? null; + const cronJobCount = props.cronJobs.length; + const failedCronCount = props.cronJobs.filter((j) => j.state?.lastStatus === "error").length; + + const cronValue = + cronEnabled == null + ? t("common.na") + : cronEnabled + ? `${cronJobCount} jobs` + : t("common.disabled"); + + const cronHint = + failedCronCount > 0 + ? html`${failedCronCount} failed` + : cronNext + ? t("overview.stats.cronNext", { time: formatNextRun(cronNext) }) + : ""; + + const cards: StatCard[] = [ + { + kind: "cost", + tab: "usage", + label: t("overview.cards.cost"), + value: totalCost, + hint: `${totalTokens} tokens · ${totalMessages} msgs`, + }, + { + kind: "sessions", + tab: "sessions", + label: t("overview.stats.sessions"), + value: String(sessionCount ?? t("common.na")), + hint: t("overview.stats.sessionsHint"), + }, + { + kind: "skills", + tab: "skills", + label: t("overview.cards.skills"), + value: `${enabledSkills}/${totalSkills}`, + hint: blockedSkills > 0 ? 
`${blockedSkills} blocked` : `${enabledSkills} active`, + }, + { + kind: "cron", + tab: "cron", + label: t("overview.stats.cron"), + value: cronValue, + hint: cronHint, + }, + ]; + + const sessions = props.sessionsResult?.sessions.slice(0, 5) ?? []; + + return html` +
+ ${cards.map((c) => renderStatCard(c, props.onNavigate))} +
+ + ${ + sessions.length > 0 + ? html` +
+

${t("overview.cards.recentSessions")}

+
    + ${sessions.map( + (s) => html` +
  • + ${blurDigits(s.displayName || s.label || s.key)} + ${s.model ?? ""} + ${s.updatedAt ? formatRelativeTimestamp(s.updatedAt) : ""} +
  • + `, + )} +
+
+ ` + : nothing + } + `; +} diff --git a/ui/src/ui/views/overview-event-log.ts b/ui/src/ui/views/overview-event-log.ts new file mode 100644 index 00000000000..04079f5243a --- /dev/null +++ b/ui/src/ui/views/overview-event-log.ts @@ -0,0 +1,42 @@ +import { html, nothing } from "lit"; +import { t } from "../../i18n/index.ts"; +import type { EventLogEntry } from "../app-events.ts"; +import { icons } from "../icons.ts"; +import { formatEventPayload } from "../presenter.ts"; + +export type OverviewEventLogProps = { + events: EventLogEntry[]; +}; + +export function renderOverviewEventLog(props: OverviewEventLogProps) { + if (props.events.length === 0) { + return nothing; + } + + const visible = props.events.slice(0, 20); + + return html` +
+ + ${icons.radio} + ${t("overview.eventLog.title")} + ${props.events.length} + +
+ ${visible.map( + (entry) => html` +
+ ${new Date(entry.ts).toLocaleTimeString()} + ${entry.event} + ${ + entry.payload + ? html`${formatEventPayload(entry.payload).slice(0, 120)}` + : nothing + } +
+ `, + )} +
+
+ `; +} diff --git a/ui/src/ui/views/overview-hints.ts b/ui/src/ui/views/overview-hints.ts index 9db33a2b577..d4599818c48 100644 --- a/ui/src/ui/views/overview-hints.ts +++ b/ui/src/ui/views/overview-hints.ts @@ -1,5 +1,33 @@ import { ConnectErrorDetailCodes } from "../../../../src/gateway/protocol/connect-error-details.js"; +const AUTH_REQUIRED_CODES = new Set([ + ConnectErrorDetailCodes.AUTH_REQUIRED, + ConnectErrorDetailCodes.AUTH_TOKEN_MISSING, + ConnectErrorDetailCodes.AUTH_PASSWORD_MISSING, + ConnectErrorDetailCodes.AUTH_TOKEN_NOT_CONFIGURED, + ConnectErrorDetailCodes.AUTH_PASSWORD_NOT_CONFIGURED, +]); + +const AUTH_FAILURE_CODES = new Set([ + ...AUTH_REQUIRED_CODES, + ConnectErrorDetailCodes.AUTH_UNAUTHORIZED, + ConnectErrorDetailCodes.AUTH_TOKEN_MISMATCH, + ConnectErrorDetailCodes.AUTH_PASSWORD_MISMATCH, + ConnectErrorDetailCodes.AUTH_DEVICE_TOKEN_MISMATCH, + ConnectErrorDetailCodes.AUTH_RATE_LIMITED, + ConnectErrorDetailCodes.AUTH_TAILSCALE_IDENTITY_MISSING, + ConnectErrorDetailCodes.AUTH_TAILSCALE_PROXY_MISSING, + ConnectErrorDetailCodes.AUTH_TAILSCALE_WHOIS_FAILED, + ConnectErrorDetailCodes.AUTH_TAILSCALE_IDENTITY_MISMATCH, +]); + +const INSECURE_CONTEXT_CODES = new Set([ + ConnectErrorDetailCodes.CONTROL_UI_DEVICE_IDENTITY_REQUIRED, + ConnectErrorDetailCodes.DEVICE_IDENTITY_REQUIRED, +]); + +type AuthHintKind = "required" | "failed"; + /** Whether the overview should show device-pairing guidance for this error. */ export function shouldShowPairingHint( connected: boolean, @@ -14,3 +42,48 @@ export function shouldShowPairingHint( } return lastError.toLowerCase().includes("pairing required"); } + +/** + * Return the overview auth hint to show, if any. + * + * Keep fallback string matching narrow so generic "connect failed" close reasons + * do not get misclassified as token/password problems. 
+ */ +export function resolveAuthHintKind(params: { + connected: boolean; + lastError: string | null; + lastErrorCode?: string | null; + hasToken: boolean; + hasPassword: boolean; +}): AuthHintKind | null { + if (params.connected || !params.lastError) { + return null; + } + if (params.lastErrorCode) { + if (!AUTH_FAILURE_CODES.has(params.lastErrorCode)) { + return null; + } + return AUTH_REQUIRED_CODES.has(params.lastErrorCode) ? "required" : "failed"; + } + + const lower = params.lastError.toLowerCase(); + if (!lower.includes("unauthorized")) { + return null; + } + return !params.hasToken && !params.hasPassword ? "required" : "failed"; +} + +export function shouldShowInsecureContextHint( + connected: boolean, + lastError: string | null, + lastErrorCode?: string | null, +): boolean { + if (connected || !lastError) { + return false; + } + if (lastErrorCode) { + return INSECURE_CONTEXT_CODES.has(lastErrorCode); + } + const lower = lastError.toLowerCase(); + return lower.includes("secure context") || lower.includes("device identity required"); +} diff --git a/ui/src/ui/views/overview-log-tail.ts b/ui/src/ui/views/overview-log-tail.ts new file mode 100644 index 00000000000..8be2aa9d5c5 --- /dev/null +++ b/ui/src/ui/views/overview-log-tail.ts @@ -0,0 +1,44 @@ +import { html, nothing } from "lit"; +import { t } from "../../i18n/index.ts"; +import { icons } from "../icons.ts"; + +/** Strip ANSI escape codes (SGR, OSC-8) for readable log display. 
*/ +function stripAnsi(text: string): string { + /* eslint-disable no-control-regex -- stripping ANSI escape sequences requires matching ESC */ + return text.replace(/\x1b\]8;;.*?\x1b\\|\x1b\]8;;\x1b\\/g, "").replace(/\x1b\[[0-9;]*m/g, ""); +} + +export type OverviewLogTailProps = { + lines: string[]; + onRefreshLogs: () => void; +}; + +export function renderOverviewLogTail(props: OverviewLogTailProps) { + if (props.lines.length === 0) { + return nothing; + } + + const displayLines = props.lines + .slice(-50) + .map((line) => stripAnsi(line)) + .join("\n"); + + return html` +
+ + ${icons.scrollText} + ${t("overview.logTail.title")} + ${props.lines.length} + { + e.preventDefault(); + e.stopPropagation(); + props.onRefreshLogs(); + }} + >${icons.loader} + +
${displayLines}
+
+ `; +} diff --git a/ui/src/ui/views/overview-quick-actions.ts b/ui/src/ui/views/overview-quick-actions.ts new file mode 100644 index 00000000000..b1358ca2e67 --- /dev/null +++ b/ui/src/ui/views/overview-quick-actions.ts @@ -0,0 +1,31 @@ +import { html } from "lit"; +import { t } from "../../i18n/index.ts"; +import { icons } from "../icons.ts"; + +export type OverviewQuickActionsProps = { + onNavigate: (tab: string) => void; + onRefresh: () => void; +}; + +export function renderOverviewQuickActions(props: OverviewQuickActionsProps) { + return html` +
+ + + + +
+ `; +} diff --git a/ui/src/ui/views/overview.node.test.ts b/ui/src/ui/views/overview.node.test.ts index 3fa65b93391..313c2edf850 100644 --- a/ui/src/ui/views/overview.node.test.ts +++ b/ui/src/ui/views/overview.node.test.ts @@ -1,6 +1,6 @@ import { describe, expect, it } from "vitest"; import { ConnectErrorDetailCodes } from "../../../../src/gateway/protocol/connect-error-details.js"; -import { shouldShowPairingHint } from "./overview-hints.ts"; +import { resolveAuthHintKind, shouldShowPairingHint } from "./overview-hints.ts"; describe("shouldShowPairingHint", () => { it("returns true for 'pairing required' close reason", () => { @@ -37,3 +37,53 @@ describe("shouldShowPairingHint", () => { ).toBe(true); }); }); + +describe("resolveAuthHintKind", () => { + it("returns required for structured auth-required codes", () => { + expect( + resolveAuthHintKind({ + connected: false, + lastError: "disconnected (4008): connect failed", + lastErrorCode: ConnectErrorDetailCodes.AUTH_TOKEN_MISSING, + hasToken: false, + hasPassword: false, + }), + ).toBe("required"); + }); + + it("returns failed for structured auth mismatch codes", () => { + expect( + resolveAuthHintKind({ + connected: false, + lastError: "disconnected (4008): connect failed", + lastErrorCode: ConnectErrorDetailCodes.AUTH_TOKEN_MISMATCH, + hasToken: true, + hasPassword: false, + }), + ).toBe("failed"); + }); + + it("does not treat generic connect failures as auth failures", () => { + expect( + resolveAuthHintKind({ + connected: false, + lastError: "disconnected (4008): connect failed", + lastErrorCode: ConnectErrorDetailCodes.CONTROL_UI_DEVICE_IDENTITY_REQUIRED, + hasToken: true, + hasPassword: false, + }), + ).toBeNull(); + }); + + it("falls back to unauthorized string matching without structured codes", () => { + expect( + resolveAuthHintKind({ + connected: false, + lastError: "disconnected (4008): unauthorized", + lastErrorCode: null, + hasToken: true, + hasPassword: false, + }), + ).toBe("failed"); + }); +}); 
diff --git a/ui/src/ui/views/overview.ts b/ui/src/ui/views/overview.ts index 6ebcb884ff6..d24aa92ce9d 100644 --- a/ui/src/ui/views/overview.ts +++ b/ui/src/ui/views/overview.ts @@ -1,12 +1,28 @@ -import { html } from "lit"; -import { ConnectErrorDetailCodes } from "../../../../src/gateway/protocol/connect-error-details.js"; +import { html, nothing } from "lit"; import { t, i18n, SUPPORTED_LOCALES, type Locale } from "../../i18n/index.ts"; +import type { EventLogEntry } from "../app-events.ts"; import { buildExternalLinkRel, EXTERNAL_LINK_TARGET } from "../external-link.ts"; import { formatRelativeTimestamp, formatDurationHuman } from "../format.ts"; import type { GatewayHelloOk } from "../gateway.ts"; -import { formatNextRun } from "../presenter.ts"; +import { icons } from "../icons.ts"; import type { UiSettings } from "../storage.ts"; -import { shouldShowPairingHint } from "./overview-hints.ts"; +import type { + AttentionItem, + CronJob, + CronStatus, + SessionsListResult, + SessionsUsageResult, + SkillStatusReport, +} from "../types.ts"; +import { renderOverviewAttention } from "./overview-attention.ts"; +import { renderOverviewCards } from "./overview-cards.ts"; +import { renderOverviewEventLog } from "./overview-event-log.ts"; +import { + resolveAuthHintKind, + shouldShowInsecureContextHint, + shouldShowPairingHint, +} from "./overview-hints.ts"; +import { renderOverviewLogTail } from "./overview-log-tail.ts"; export type OverviewProps = { connected: boolean; @@ -20,24 +36,39 @@ export type OverviewProps = { cronEnabled: boolean | null; cronNext: number | null; lastChannelsRefresh: number | null; + // New dashboard data + usageResult: SessionsUsageResult | null; + sessionsResult: SessionsListResult | null; + skillsReport: SkillStatusReport | null; + cronJobs: CronJob[]; + cronStatus: CronStatus | null; + attentionItems: AttentionItem[]; + eventLog: EventLogEntry[]; + overviewLogLines: string[]; + showGatewayToken: boolean; + showGatewayPassword: boolean; 
onSettingsChange: (next: UiSettings) => void; onPasswordChange: (next: string) => void; onSessionKeyChange: (next: string) => void; + onToggleGatewayTokenVisibility: () => void; + onToggleGatewayPasswordVisibility: () => void; onConnect: () => void; onRefresh: () => void; + onNavigate: (tab: string) => void; + onRefreshLogs: () => void; }; export function renderOverview(props: OverviewProps) { const snapshot = props.hello?.snapshot as | { uptimeMs?: number; - policy?: { tickIntervalMs?: number }; authMode?: "none" | "token" | "password" | "trusted-proxy"; } | undefined; const uptime = snapshot?.uptimeMs ? formatDurationHuman(snapshot.uptimeMs) : t("common.na"); - const tick = snapshot?.policy?.tickIntervalMs - ? `${snapshot.policy.tickIntervalMs}ms` + const tickIntervalMs = props.hello?.policy?.tickIntervalMs; + const tick = tickIntervalMs + ? `${(tickIntervalMs / 1000).toFixed(tickIntervalMs % 1000 === 0 ? 0 : 1)}s` : t("common.na"); const authMode = snapshot?.authMode; const isTrustedProxy = authMode === "trusted-proxy"; @@ -71,41 +102,17 @@ export function renderOverview(props: OverviewProps) { })(); const authHint = (() => { - if (props.connected || !props.lastError) { + const authHintKind = resolveAuthHintKind({ + connected: props.connected, + lastError: props.lastError, + lastErrorCode: props.lastErrorCode, + hasToken: Boolean(props.settings.token.trim()), + hasPassword: Boolean(props.password.trim()), + }); + if (authHintKind == null) { return null; } - const lower = props.lastError.toLowerCase(); - const authRequiredCodes = new Set([ - ConnectErrorDetailCodes.AUTH_REQUIRED, - ConnectErrorDetailCodes.AUTH_TOKEN_MISSING, - ConnectErrorDetailCodes.AUTH_PASSWORD_MISSING, - ConnectErrorDetailCodes.AUTH_TOKEN_NOT_CONFIGURED, - ConnectErrorDetailCodes.AUTH_PASSWORD_NOT_CONFIGURED, - ]); - const authFailureCodes = new Set([ - ...authRequiredCodes, - ConnectErrorDetailCodes.AUTH_UNAUTHORIZED, - ConnectErrorDetailCodes.AUTH_TOKEN_MISMATCH, - 
ConnectErrorDetailCodes.AUTH_PASSWORD_MISMATCH, - ConnectErrorDetailCodes.AUTH_DEVICE_TOKEN_MISMATCH, - ConnectErrorDetailCodes.AUTH_RATE_LIMITED, - ConnectErrorDetailCodes.AUTH_TAILSCALE_IDENTITY_MISSING, - ConnectErrorDetailCodes.AUTH_TAILSCALE_PROXY_MISSING, - ConnectErrorDetailCodes.AUTH_TAILSCALE_WHOIS_FAILED, - ConnectErrorDetailCodes.AUTH_TAILSCALE_IDENTITY_MISMATCH, - ]); - const authFailed = props.lastErrorCode - ? authFailureCodes.has(props.lastErrorCode) - : lower.includes("unauthorized") || lower.includes("connect failed"); - if (!authFailed) { - return null; - } - const hasToken = Boolean(props.settings.token.trim()); - const hasPassword = Boolean(props.password.trim()); - const isAuthRequired = props.lastErrorCode - ? authRequiredCodes.has(props.lastErrorCode) - : !hasToken && !hasPassword; - if (isAuthRequired) { + if (authHintKind === "required") { return html`
${t("overview.auth.required")} @@ -151,15 +158,7 @@ export function renderOverview(props: OverviewProps) { if (isSecureContext) { return null; } - const lower = props.lastError.toLowerCase(); - const insecureContextCode = - props.lastErrorCode === ConnectErrorDetailCodes.CONTROL_UI_DEVICE_IDENTITY_REQUIRED || - props.lastErrorCode === ConnectErrorDetailCodes.DEVICE_IDENTITY_REQUIRED; - if ( - !insecureContextCode && - !lower.includes("secure context") && - !lower.includes("device identity required") - ) { + if (!shouldShowInsecureContextHint(props.connected, props.lastError, props.lastErrorCode)) { return null; } return html` @@ -194,12 +193,12 @@ export function renderOverview(props: OverviewProps) { const currentLocale = i18n.getLocale(); return html` -
+
${t("overview.access.title")}
${t("overview.access.subtitle")}
-
-
@@ -321,45 +375,32 @@ export function renderOverview(props: OverviewProps) {
-
-
-
${t("overview.stats.instances")}
-
${props.presenceCount}
-
${t("overview.stats.instancesHint")}
-
-
-
${t("overview.stats.sessions")}
-
${props.sessionsCount ?? t("common.na")}
-
${t("overview.stats.sessionsHint")}
-
-
-
${t("overview.stats.cron")}
-
- ${props.cronEnabled == null ? t("common.na") : props.cronEnabled ? t("common.enabled") : t("common.disabled")} -
-
${t("overview.stats.cronNext", { time: formatNextRun(props.cronNext) })}
-
-
+
+ + ${renderOverviewCards({ + usageResult: props.usageResult, + sessionsResult: props.sessionsResult, + skillsReport: props.skillsReport, + cronJobs: props.cronJobs, + cronStatus: props.cronStatus, + presenceCount: props.presenceCount, + onNavigate: props.onNavigate, + })} + + ${renderOverviewAttention({ items: props.attentionItems })} + +
+ +
+ ${renderOverviewEventLog({ + events: props.eventLog, + })} + + ${renderOverviewLogTail({ + lines: props.overviewLogLines, + onRefreshLogs: props.onRefreshLogs, + })} +
-
-
${t("overview.notes.title")}
-
${t("overview.notes.subtitle")}
-
-
-
${t("overview.notes.tailscaleTitle")}
-
- ${t("overview.notes.tailscaleText")} -
-
-
-
${t("overview.notes.sessionTitle")}
-
${t("overview.notes.sessionText")}
-
-
-
${t("overview.notes.cronTitle")}
-
${t("overview.notes.cronText")}
-
-
-
`; } diff --git a/ui/src/ui/views/sessions.test.ts b/ui/src/ui/views/sessions.test.ts index 453c216592a..fe650fef8fb 100644 --- a/ui/src/ui/views/sessions.test.ts +++ b/ui/src/ui/views/sessions.test.ts @@ -23,7 +23,18 @@ function buildProps(result: SessionsListResult): SessionsProps { includeGlobal: false, includeUnknown: false, basePath: "", + searchQuery: "", + sortColumn: "updated", + sortDir: "desc", + page: 0, + pageSize: 10, + actionsOpenKey: null, onFiltersChange: () => undefined, + onSearchChange: () => undefined, + onSortChange: () => undefined, + onPageChange: () => undefined, + onPageSizeChange: () => undefined, + onActionsOpenChange: () => undefined, onRefresh: () => undefined, onPatch: () => undefined, onDelete: () => undefined, @@ -49,7 +60,7 @@ describe("sessions view", () => { await Promise.resolve(); const selects = container.querySelectorAll("select"); - const verbose = selects[1] as HTMLSelectElement | undefined; + const verbose = selects[2] as HTMLSelectElement | undefined; expect(verbose?.value).toBe("full"); expect(Array.from(verbose?.options ?? []).some((option) => option.value === "full")).toBe(true); }); @@ -72,10 +83,32 @@ describe("sessions view", () => { await Promise.resolve(); const selects = container.querySelectorAll("select"); - const reasoning = selects[2] as HTMLSelectElement | undefined; + const reasoning = selects[3] as HTMLSelectElement | undefined; expect(reasoning?.value).toBe("custom-mode"); expect( Array.from(reasoning?.options ?? 
[]).some((option) => option.value === "custom-mode"), ).toBe(true); }); + + it("renders explicit fast mode without falling back to inherit", async () => { + const container = document.createElement("div"); + render( + renderSessions( + buildProps( + buildResult({ + key: "agent:main:main", + kind: "direct", + updatedAt: Date.now(), + fastMode: true, + }), + ), + ), + container, + ); + await Promise.resolve(); + + const selects = container.querySelectorAll("select"); + const fast = selects[1] as HTMLSelectElement | undefined; + expect(fast?.value).toBe("on"); + }); }); diff --git a/ui/src/ui/views/sessions.ts b/ui/src/ui/views/sessions.ts index 6f0332f62be..2620ec35acf 100644 --- a/ui/src/ui/views/sessions.ts +++ b/ui/src/ui/views/sessions.ts @@ -1,5 +1,6 @@ import { html, nothing } from "lit"; import { formatRelativeTimestamp } from "../format.ts"; +import { icons } from "../icons.ts"; import { pathForTab } from "../navigation.ts"; import { formatSessionTokens } from "../presenter.ts"; import type { GatewaySessionRow, SessionsListResult } from "../types.ts"; @@ -13,18 +14,30 @@ export type SessionsProps = { includeGlobal: boolean; includeUnknown: boolean; basePath: string; + searchQuery: string; + sortColumn: "key" | "kind" | "updated" | "tokens"; + sortDir: "asc" | "desc"; + page: number; + pageSize: number; + actionsOpenKey: string | null; onFiltersChange: (next: { activeMinutes: string; limit: string; includeGlobal: boolean; includeUnknown: boolean; }) => void; + onSearchChange: (query: string) => void; + onSortChange: (column: "key" | "kind" | "updated" | "tokens", dir: "asc" | "desc") => void; + onPageChange: (page: number) => void; + onPageSizeChange: (size: number) => void; + onActionsOpenChange: (key: string | null) => void; onRefresh: () => void; onPatch: ( key: string, patch: { label?: string | null; thinkingLevel?: string | null; + fastMode?: boolean | null; verboseLevel?: string | null; reasoningLevel?: string | null; }, @@ -40,7 +53,13 @@ const 
VERBOSE_LEVELS = [ { value: "on", label: "on" }, { value: "full", label: "full" }, ] as const; +const FAST_LEVELS = [ + { value: "", label: "inherit" }, + { value: "on", label: "on" }, + { value: "off", label: "off" }, +] as const; const REASONING_LEVELS = ["", "off", "on", "stream"] as const; +const PAGE_SIZES = [10, 25, 50, 100] as const; function normalizeProviderId(provider?: string | null): string { if (!provider) { @@ -107,24 +126,110 @@ function resolveThinkLevelPatchValue(value: string, isBinary: boolean): string | return value; } +function filterRows(rows: GatewaySessionRow[], query: string): GatewaySessionRow[] { + const q = query.trim().toLowerCase(); + if (!q) { + return rows; + } + return rows.filter((row) => { + const key = (row.key ?? "").toLowerCase(); + const label = (row.label ?? "").toLowerCase(); + const kind = (row.kind ?? "").toLowerCase(); + const displayName = (row.displayName ?? "").toLowerCase(); + return key.includes(q) || label.includes(q) || kind.includes(q) || displayName.includes(q); + }); +} + +function sortRows( + rows: GatewaySessionRow[], + column: "key" | "kind" | "updated" | "tokens", + dir: "asc" | "desc", +): GatewaySessionRow[] { + const cmp = dir === "asc" ? 1 : -1; + return [...rows].toSorted((a, b) => { + let diff = 0; + switch (column) { + case "key": + diff = (a.key ?? "").localeCompare(b.key ?? ""); + break; + case "kind": + diff = (a.kind ?? "").localeCompare(b.kind ?? ""); + break; + case "updated": { + const au = a.updatedAt ?? 0; + const bu = b.updatedAt ?? 0; + diff = au - bu; + break; + } + case "tokens": { + const at = a.totalTokens ?? a.inputTokens ?? a.outputTokens ?? 0; + const bt = b.totalTokens ?? b.inputTokens ?? b.outputTokens ?? 
0; + diff = at - bt; + break; + } + } + return diff * cmp; + }); +} + +function paginateRows(rows: T[], page: number, pageSize: number): T[] { + const start = page * pageSize; + return rows.slice(start, start + pageSize); +} + export function renderSessions(props: SessionsProps) { - const rows = props.result?.sessions ?? []; + const rawRows = props.result?.sessions ?? []; + const filtered = filterRows(rawRows, props.searchQuery); + const sorted = sortRows(filtered, props.sortColumn, props.sortDir); + const totalRows = sorted.length; + const totalPages = Math.max(1, Math.ceil(totalRows / props.pageSize)); + const page = Math.min(props.page, totalPages - 1); + const paginated = paginateRows(sorted, page, props.pageSize); + + const sortHeader = (col: "key" | "kind" | "updated" | "tokens", label: string) => { + const isActive = props.sortColumn === col; + const nextDir = isActive && props.sortDir === "asc" ? ("desc" as const) : ("asc" as const); + return html` + props.onSortChange(col, isActive ? nextDir : "desc")} + > + ${label} + ${icons.arrowUpDown} + + `; + }; + return html` -
-
+ ${ + props.actionsOpenKey + ? html` +
props.onActionsOpenChange(null)} + aria-hidden="true" + >
+ ` + : nothing + } +
+
Sessions
-
Active session keys and per-session overrides.
+
${props.result ? `Store: ${props.result.path}` : "Active session keys and per-session overrides."}
-
-
@@ -219,6 +388,8 @@ function renderRow( basePath: string, onPatch: SessionsProps["onPatch"], onDelete: SessionsProps["onDelete"], + onActionsOpenChange: (key: string | null) => void, + actionsOpenKey: string | null, disabled: boolean, ) { const updated = row.updatedAt ? formatRelativeTimestamp(row.updatedAt) : "n/a"; @@ -226,6 +397,8 @@ function renderRow( const isBinaryThinking = isBinaryThinkingProvider(row.modelProvider); const thinking = resolveThinkLevelDisplay(rawThinking, isBinaryThinking); const thinkLevels = withCurrentOption(resolveThinkLevelOptions(row.modelProvider), thinking); + const fastMode = row.fastMode === true ? "on" : row.fastMode === false ? "off" : ""; + const fastLevels = withCurrentLabeledOption(FAST_LEVELS, fastMode); const verbose = row.verboseLevel ?? ""; const verboseLevels = withCurrentLabeledOption(VERBOSE_LEVELS, verbose); const reasoning = row.reasoningLevel ?? ""; @@ -234,36 +407,58 @@ function renderRow( typeof row.displayName === "string" && row.displayName.trim().length > 0 ? row.displayName.trim() : null; - const label = typeof row.label === "string" ? row.label.trim() : ""; - const showDisplayName = Boolean(displayName && displayName !== row.key && displayName !== label); + const showDisplayName = Boolean( + displayName && + displayName !== row.key && + displayName !== (typeof row.label === "string" ? row.label.trim() : ""), + ); const canLink = row.kind !== "global"; const chatUrl = canLink ? `${pathForTab("chat", basePath)}?session=${encodeURIComponent(row.key)}` : null; + const isMenuOpen = actionsOpenKey === row.key; + const badgeClass = + row.kind === "direct" + ? "data-table-badge--direct" + : row.kind === "group" + ? "data-table-badge--group" + : row.kind === "global" + ? "data-table-badge--global" + : "data-table-badge--unknown"; return html` -
-
- ${canLink ? html`${row.key}` : row.key} - ${showDisplayName ? html`${displayName}` : nothing} -
-
+ + +
+ ${canLink ? html`${row.key}` : row.key} + ${ + showDisplayName + ? html`${displayName}` + : nothing + } +
+ + { const value = (e.target as HTMLInputElement).value.trim(); onPatch(row.key, { label: value || null }); }} /> -
-
${row.kind}
-
${updated}
-
${formatSessionTokens(row)}
-
+ + + ${row.kind} + + ${updated} + ${formatSessionTokens(row)} + -
-
+ + + + + -
-
+ + -
-
- -
-
+ + +
+ + ${ + isMenuOpen + ? html` +
+ ${ + canLink + ? html` + onActionsOpenChange(null)} + > + Open in Chat + + ` + : nothing + } + +
+ ` + : nothing + } +
+ + `; } diff --git a/ui/src/ui/views/skills.ts b/ui/src/ui/views/skills.ts index 830f97921f8..b9338971c8e 100644 --- a/ui/src/ui/views/skills.ts +++ b/ui/src/ui/views/skills.ts @@ -10,6 +10,7 @@ import { } from "./skills-shared.ts"; export type SkillsProps = { + connected: boolean; loading: boolean; report: SkillStatusReport | null; error: string | null; @@ -40,20 +41,28 @@ export function renderSkills(props: SkillsProps) {
Skills
-
Bundled, managed, and workspace skills.
+
Installed skills and their status.
-
-
-