diff --git a/.dockerignore b/.dockerignore index 3a8e436d515..f24c490e9ad 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,5 +1,11 @@ .git .worktrees + +# Sensitive files – docker-setup.sh writes .env with OPENCLAW_GATEWAY_TOKEN +# into the project root; keep it out of the build context. +.env +.env.* + .bun-cache .bun .tmp diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b365b2ed944..00670107d00 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,7 +7,7 @@ on: concurrency: group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: ${{ github.event_name == 'pull_request' }} + cancel-in-progress: true env: FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" @@ -38,9 +38,8 @@ jobs: id: check uses: ./.github/actions/detect-docs-changes - # Detect which heavy areas are touched so PRs can skip unrelated expensive jobs. - # Push to main keeps broad coverage, but this job still needs to run so - # downstream jobs that list it in `needs` are not skipped. + # Detect which heavy areas are touched so CI can skip unrelated expensive jobs. + # Fail-safe: if detection fails, downstream jobs run. changed-scope: needs: [docs-scope] if: needs.docs-scope.outputs.docs_only != 'true' @@ -82,7 +81,7 @@ jobs: # Build dist once for Node-relevant changes and share it with downstream jobs. 
build-artifacts: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') + if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout @@ -141,7 +140,7 @@ jobs: checks: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 strategy: fail-fast: false @@ -149,6 +148,13 @@ jobs: include: - runtime: node task: test + shard_index: 1 + shard_count: 2 + command: pnpm canvas:a2ui:bundle && pnpm test + - runtime: node + task: test + shard_index: 2 + shard_count: 2 command: pnpm canvas:a2ui:bundle && pnpm test - runtime: node task: extensions @@ -160,40 +166,47 @@ jobs: task: test command: pnpm canvas:a2ui:bundle && bunx vitest run --config vitest.unit.config.ts steps: - - name: Skip bun lane on push - if: github.event_name == 'push' && matrix.runtime == 'bun' - run: echo "Skipping bun test lane on push events." + - name: Skip bun lane on pull requests + if: github.event_name == 'pull_request' && matrix.runtime == 'bun' + run: echo "Skipping Bun compatibility lane on pull requests." 
- name: Checkout - if: github.event_name != 'push' || matrix.runtime != 'bun' + if: github.event_name != 'pull_request' || matrix.runtime != 'bun' uses: actions/checkout@v6 with: submodules: false - name: Setup Node environment - if: matrix.runtime != 'bun' || github.event_name != 'push' + if: matrix.runtime != 'bun' || github.event_name != 'pull_request' uses: ./.github/actions/setup-node-env with: install-bun: "${{ matrix.runtime == 'bun' }}" use-sticky-disk: "false" - name: Configure Node test resources - if: (github.event_name != 'push' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node' + if: (github.event_name != 'pull_request' || matrix.runtime != 'bun') && matrix.task == 'test' && matrix.runtime == 'node' + env: + SHARD_COUNT: ${{ matrix.shard_count || '' }} + SHARD_INDEX: ${{ matrix.shard_index || '' }} run: | # `pnpm test` runs `scripts/test-parallel.mjs`, which spawns multiple Node processes. # Default heap limits have been too low on Linux CI (V8 OOM near 4GB). echo "OPENCLAW_TEST_WORKERS=2" >> "$GITHUB_ENV" echo "OPENCLAW_TEST_MAX_OLD_SPACE_SIZE_MB=6144" >> "$GITHUB_ENV" + if [ -n "$SHARD_COUNT" ] && [ -n "$SHARD_INDEX" ]; then + echo "OPENCLAW_TEST_SHARDS=$SHARD_COUNT" >> "$GITHUB_ENV" + echo "OPENCLAW_TEST_SHARD_INDEX=$SHARD_INDEX" >> "$GITHUB_ENV" + fi - name: Run ${{ matrix.task }} (${{ matrix.runtime }}) - if: matrix.runtime != 'bun' || github.event_name != 'push' + if: matrix.runtime != 'bun' || github.event_name != 'pull_request' run: ${{ matrix.command }} # Types, lint, and format check. 
check: name: "check" needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout @@ -239,7 +252,7 @@ jobs: compat-node22: name: "compat-node22" needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true') + if: github.event_name == 'push' && needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_node == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout @@ -272,7 +285,7 @@ jobs: skills-python: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_node == 'true' || needs.changed-scope.outputs.run_skills_python == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_skills_python == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 steps: - name: Checkout @@ -365,7 +378,7 @@ jobs: checks-windows: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_windows == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_windows == 'true' runs-on: blacksmith-32vcpu-windows-2025 timeout-minutes: 45 env: @@ -727,7 +740,7 @@ jobs: android: needs: [docs-scope, changed-scope] - if: needs.docs-scope.outputs.docs_only != 'true' && (github.event_name == 'push' || needs.changed-scope.outputs.run_android == 'true') + if: needs.docs-scope.outputs.docs_only != 'true' && needs.changed-scope.outputs.run_android == 'true' runs-on: blacksmith-16vcpu-ubuntu-2404 strategy: fail-fast: false @@ -747,23 
+760,37 @@ jobs: uses: actions/setup-java@v5 with: distribution: temurin - # setup-android's sdkmanager currently crashes on JDK 21 in CI. + # Keep sdkmanager on the stable JDK path for Linux CI runners. java-version: 17 - - name: Setup Android SDK - uses: android-actions/setup-android@v3 - with: - accept-android-sdk-licenses: false + - name: Setup Android SDK cmdline-tools + run: | + set -euo pipefail + ANDROID_SDK_ROOT="$HOME/.android-sdk" + CMDLINE_TOOLS_VERSION="12266719" + ARCHIVE="commandlinetools-linux-${CMDLINE_TOOLS_VERSION}_latest.zip" + URL="https://dl.google.com/android/repository/${ARCHIVE}" + + mkdir -p "$ANDROID_SDK_ROOT/cmdline-tools" + curl -fsSL "$URL" -o "/tmp/${ARCHIVE}" + rm -rf "$ANDROID_SDK_ROOT/cmdline-tools/latest" + unzip -q "/tmp/${ARCHIVE}" -d "$ANDROID_SDK_ROOT/cmdline-tools" + mv "$ANDROID_SDK_ROOT/cmdline-tools/cmdline-tools" "$ANDROID_SDK_ROOT/cmdline-tools/latest" + + echo "ANDROID_SDK_ROOT=$ANDROID_SDK_ROOT" >> "$GITHUB_ENV" + echo "ANDROID_HOME=$ANDROID_SDK_ROOT" >> "$GITHUB_ENV" + echo "$ANDROID_SDK_ROOT/cmdline-tools/latest/bin" >> "$GITHUB_PATH" + echo "$ANDROID_SDK_ROOT/platform-tools" >> "$GITHUB_PATH" - name: Setup Gradle - uses: gradle/actions/setup-gradle@v4 + uses: gradle/actions/setup-gradle@v5 with: gradle-version: 8.11.1 - name: Install Android SDK packages run: | - yes | sdkmanager --licenses >/dev/null - sdkmanager --install \ + yes | sdkmanager --sdk_root="${ANDROID_SDK_ROOT}" --licenses >/dev/null + sdkmanager --sdk_root="${ANDROID_SDK_ROOT}" --install \ "platform-tools" \ "platforms;android-36" \ "build-tools;36.0.0" diff --git a/.jscpd.json b/.jscpd.json new file mode 100644 index 00000000000..777b025b0c8 --- /dev/null +++ b/.jscpd.json @@ -0,0 +1,16 @@ +{ + "gitignore": true, + "noSymlinks": true, + "ignore": [ + "**/node_modules/**", + "**/dist/**", + "dist/**", + "**/.git/**", + "**/coverage/**", + "**/build/**", + "**/.build/**", + "**/.artifacts/**", + "docs/zh-CN/**", + "**/CHANGELOG.md" + ] +} diff --git 
a/AGENTS.md b/AGENTS.md index 45eed9ec2ad..f7c2f34ce39 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -201,6 +201,14 @@ ## Agent-Specific Notes - Vocabulary: "makeup" = "mac app". +- Parallels macOS retests: use the snapshot most closely named like `macOS 26.3.1 fresh` when the user asks for a clean/fresh macOS rerun; avoid older Tahoe snapshots unless explicitly requested. +- Parallels macOS smoke playbook: + - `prlctl exec` is fine for deterministic repo commands, but it can misrepresent interactive shell behavior (`PATH`, `HOME`, `curl | bash`, shebang resolution). For installer parity or shell-sensitive repros, prefer the guest Terminal or `prlctl enter`. + - Fresh Tahoe snapshot current reality: `brew` exists, `node` may not be on `PATH` in noninteractive guest exec. Use absolute `/opt/homebrew/bin/node` for repo/CLI runs when needed. + - Fresh host-served tgz install: restore fresh snapshot, install tgz as guest root with `HOME=/var/root`, then run onboarding as the desktop user via `prlctl exec --current-user`. + - For `openclaw onboard --non-interactive --secret-input-mode ref --install-daemon`, expect env-backed auth-profile refs (for example `OPENAI_API_KEY`) to be copied into the service env at install time; this path was fixed and should stay green. + - Don’t run local + gateway agent turns in parallel on the same fresh workspace/session; they can collide on the session lock. Run sequentially. + - Root-installed tarball smoke on Tahoe can still log plugin blocks for world-writable `extensions/*` under `/opt/homebrew/lib/node_modules/openclaw`; treat that as separate from onboarding/gateway health unless the task is plugin loading. - Never edit `node_modules` (global/Homebrew/npm/git installs too). Updates overwrite. Skill notes go in `tools.md` or `AGENTS.md`. - When adding a new `AGENTS.md` anywhere in the repo, also add a `CLAUDE.md` symlink pointing to it (example: `ln -s AGENTS.md CLAUDE.md`). 
- Signal: "update fly" => `fly ssh console -a flawd-bot -C "bash -lc 'cd /data/clawd/openclaw && git pull --rebase origin main'"` then `fly machines restart e825232f34d058 -a flawd-bot`. diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a8270dd154..0e61358e91e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,9 +9,13 @@ Docs: https://docs.openclaw.ai - Android/chat settings: redesign the chat settings sheet with grouped device and media sections, refresh the Connect and Voice tabs, and tighten the chat composer/session header for a denser mobile layout. (#44894) Thanks @obviyus. - Docker/timezone override: add `OPENCLAW_TZ` so `docker-setup.sh` can pin gateway and CLI containers to a chosen IANA timezone instead of inheriting the daemon default. (#34119) Thanks @Lanfei. - iOS/onboarding: add a first-run welcome pager before gateway setup, stop auto-opening the QR scanner, and show `/pair qr` instructions on the connect step. (#45054) Thanks @ngutman. +- Browser/existing-session: add an official Chrome DevTools MCP attach mode for signed-in live Chrome sessions, with docs for `chrome://inspect/#remote-debugging` enablement and direct backlinks to Chrome’s own setup guides. ### Fixes +- Browser/existing-session: accept text-only `list_pages` and `new_page` responses from Chrome DevTools MCP so live-session tab discovery and new-tab open flows keep working when the server omits structured page metadata. +- Ollama/reasoning visibility: stop promoting native `thinking` and `reasoning` fields into final assistant text so local reasoning models no longer leak internal thoughts in normal replies. (#45330) Thanks @xi7ang. +- Cron/isolated sessions: route nested cron-triggered embedded runner work onto the nested lane so isolated cron jobs no longer deadlock when compaction or other queued inner work runs. Thanks @vincentkoc. 
- Windows/gateway install: bound `schtasks` calls and fall back to the Startup-folder login item when task creation hangs, so native `openclaw gateway install` fails fast instead of wedging forever on broken Scheduled Task setups. - Windows/gateway auth: stop attaching device identity on local loopback shared-token and password gateway calls, so native Windows agent replies no longer log stale `device signature expired` fallback noise before succeeding. - Telegram/media downloads: thread the same direct or proxy transport policy into SSRF-guarded file fetches so inbound attachments keep working when Telegram falls back between env-proxy and direct networking. (#44639) Thanks @obviyus. @@ -21,6 +25,7 @@ Docs: https://docs.openclaw.ai - Agents/memory bootstrap: load only one root memory file, preferring `MEMORY.md` and using `memory.md` as a fallback, so case-insensitive Docker mounts no longer inject duplicate memory context. (#26054) Thanks @Lanfei. - Agents/OpenAI-compatible compat overrides: respect explicit user `models[].compat` opt-ins for non-native `openai-completions` endpoints so usage-in-streaming capability overrides no longer get forced off when the endpoint actually supports them. (#44432) Thanks @cheapestinference. - Agents/Azure OpenAI startup prompts: rephrase the built-in `/new`, `/reset`, and post-compaction startup instruction so Azure OpenAI deployments no longer hit HTTP 400 false positives from the content filter. (#43403) Thanks @xingsy97. +- Windows/gateway stop: resolve Startup-folder fallback listeners from the installed `gateway.cmd` port, so `openclaw gateway stop` now actually kills fallback-launched gateway processes before restart. - Config/validation: accept documented `agents.list[].params` per-agent overrides in strict config validation so `openclaw config validate` no longer rejects runtime-supported `cacheRetention`, `temperature`, and `maxTokens` settings. (#41171) Thanks @atian8179. 
- Android/onboarding QR scan: switch setup QR scanning to Google Code Scanner so onboarding uses a more reliable scanner instead of the legacy embedded ZXing flow. (#45021) Thanks @obviyus. - Config/web fetch: restore runtime validation for documented `tools.web.fetch.readability` and `tools.web.fetch.firecrawl` settings so valid web fetch configs no longer fail with unrecognized-key errors. (#42583) Thanks @stim64045-spec. @@ -28,10 +33,18 @@ Docs: https://docs.openclaw.ai - Config/discovery: accept `discovery.wideArea.domain` in strict config validation so unicast DNS-SD gateway configs no longer fail with an unrecognized-key error. (#35615) Thanks @ingyukoh. - Security/exec approvals: unwrap more `pnpm` runtime forms during approval binding, including `pnpm --reporter ... exec` and direct `pnpm node` file runs, with matching regression coverage and docs updates. - Security/exec approvals: fail closed for Perl `-M` and `-I` approval flows so preload and load-path module resolution stays outside approval-backed runtime execution unless the operator uses a broader explicit trust path. +- Security/exec approvals: recognize PowerShell `-File` and `-f` wrapper forms during inline-command extraction so approval and command-analysis paths treat file-based PowerShell launches like the existing `-Command` variants. +- Security/exec approvals: unwrap `env` dispatch wrappers inside shell-segment allowlist resolution on macOS so `env FOO=bar /path/to/bin` resolves against the effective executable instead of the wrapper token. +- Security/exec approvals: treat backslash-newline as shell line continuation during macOS shell-chain parsing so line-continued `$(` substitutions fail closed instead of slipping past command-substitution checks. +- Security/exec approvals: bind macOS skill auto-allow trust to both executable name and resolved path so same-basename binaries no longer inherit trust from unrelated skill bins. 
+- Security/external content: strip zero-width and soft-hyphen marker-splitting characters during boundary sanitization so spoofed `EXTERNAL_UNTRUSTED_CONTENT` markers fall back to the existing hardening path instead of bypassing marker normalization. - Control UI/insecure auth: preserve explicit shared token and password auth on plain-HTTP Control UI connects so LAN and reverse-proxy sessions no longer drop shared auth before the first WebSocket handshake. (#45088) Thanks @velvet-shark. - macOS/onboarding: avoid self-restarting freshly bootstrapped launchd gateways and give new daemon installs longer to become healthy, so `openclaw onboard --install-daemon` no longer false-fails on slower Macs and fresh VM snapshots. - Agents/compaction: preserve safeguard compaction summary language continuity via default and configurable custom instructions so persona drift is reduced after auto-compaction. (#10456) Thanks @keepitmello. - Agents/tool warnings: distinguish gated core tools like `apply_patch` from plugin-only unknown entries in `tools.profile` warnings, so unavailable core tools now report current runtime/provider/model/config gating instead of suggesting a missing plugin. +- Slack/probe: keep `auth.test()` bot and team metadata mapping stable while simplifying the probe result path. (#44775) Thanks @Cafexss. +- Dashboard/chat UI: restore the `chat-new-messages` class on the New messages scroll pill so the button uses its existing compact styling instead of rendering as a full-screen SVG overlay. (#44856) Thanks @Astro-Han. +- Windows/gateway status: reuse the installed service command environment when reading runtime status, so startup-fallback gateways keep reporting the configured port and running state in `gateway status --json` instead of falling back to `gateway port unknown`. ## 2026.3.12 @@ -44,6 +57,7 @@ Docs: https://docs.openclaw.ai - Docs/Kubernetes: Add a starter K8s install path with raw manifests, Kind setup, and deployment docs. 
Thanks @sallyom @dzianisv @egkristi - Agents/subagents: add `sessions_yield` so orchestrators can end the current turn immediately, skip queued tool work, and carry a hidden follow-up payload into the next session turn. (#36537) thanks @jriff - Slack/agent replies: support `channelData.slack.blocks` in the shared reply delivery path so agents can send Block Kit messages through standard Slack outbound delivery. (#44592) Thanks @vincentkoc. +- Slack/interactive replies: add opt-in Slack button and select reply directives behind `channels.slack.capabilities.interactiveReplies`, disabled by default unless explicitly enabled. (#44607) Thanks @vincentkoc. ### Fixes @@ -117,6 +131,7 @@ Docs: https://docs.openclaw.ai - Delivery/dedupe: trim completed direct-cron delivery cache correctly and keep mirrored transcript dedupe active even when transcript files contain malformed lines. (#44666) thanks @frankekn. - CLI/thinking help: add the missing `xhigh` level hints to `openclaw cron add`, `openclaw cron edit`, and `openclaw agent` so the help text matches the levels already accepted at runtime. (#44819) Thanks @kiki830621. - Agents/Anthropic replay: drop replayed assistant thinking blocks for native Anthropic and Bedrock Claude providers so persisted follow-up turns no longer fail on stored thinking blocks. (#44843) Thanks @jmcte. +- Docs/Brave pricing: escape literal dollar signs in Brave Search cost text so the docs render the free credit and per-request pricing correctly. (#44989) Thanks @keelanfh. 
## 2026.3.11 diff --git a/apps/macos/Sources/OpenClaw/ExecApprovalEvaluation.swift b/apps/macos/Sources/OpenClaw/ExecApprovalEvaluation.swift index c7d9d0928e1..a36e58db1d8 100644 --- a/apps/macos/Sources/OpenClaw/ExecApprovalEvaluation.swift +++ b/apps/macos/Sources/OpenClaw/ExecApprovalEvaluation.swift @@ -45,8 +45,8 @@ enum ExecApprovalEvaluator { let skillAllow: Bool if approvals.agent.autoAllowSkills, !allowlistResolutions.isEmpty { - let bins = await SkillBinsCache.shared.currentBins() - skillAllow = allowlistResolutions.allSatisfy { bins.contains($0.executableName) } + let bins = await SkillBinsCache.shared.currentTrust() + skillAllow = self.isSkillAutoAllowed(allowlistResolutions, trustedBinsByName: bins) } else { skillAllow = false } @@ -65,4 +65,26 @@ enum ExecApprovalEvaluator { allowlistMatch: allowlistSatisfied ? allowlistMatches.first : nil, skillAllow: skillAllow) } + + static func isSkillAutoAllowed( + _ resolutions: [ExecCommandResolution], + trustedBinsByName: [String: Set]) -> Bool + { + guard !resolutions.isEmpty, !trustedBinsByName.isEmpty else { return false } + return resolutions.allSatisfy { resolution in + guard let executableName = SkillBinsCache.normalizeSkillBinName(resolution.executableName), + let resolvedPath = SkillBinsCache.normalizeResolvedPath(resolution.resolvedPath) + else { + return false + } + return trustedBinsByName[executableName]?.contains(resolvedPath) == true + } + } + + static func _testIsSkillAutoAllowed( + _ resolutions: [ExecCommandResolution], + trustedBinsByName: [String: Set]) -> Bool + { + self.isSkillAutoAllowed(resolutions, trustedBinsByName: trustedBinsByName) + } } diff --git a/apps/macos/Sources/OpenClaw/ExecApprovals.swift b/apps/macos/Sources/OpenClaw/ExecApprovals.swift index ba49b37cd9f..7fc4385b96c 100644 --- a/apps/macos/Sources/OpenClaw/ExecApprovals.swift +++ b/apps/macos/Sources/OpenClaw/ExecApprovals.swift @@ -777,6 +777,7 @@ actor SkillBinsCache { static let shared = SkillBinsCache() private var 
bins: Set = [] + private var trustByName: [String: Set] = [:] private var lastRefresh: Date? private let refreshInterval: TimeInterval = 90 @@ -787,27 +788,90 @@ actor SkillBinsCache { return self.bins } + func currentTrust(force: Bool = false) async -> [String: Set] { + if force || self.isStale() { + await self.refresh() + } + return self.trustByName + } + func refresh() async { do { let report = try await GatewayConnection.shared.skillsStatus() - var next = Set() - for skill in report.skills { - for bin in skill.requirements.bins { - let trimmed = bin.trimmingCharacters(in: .whitespacesAndNewlines) - if !trimmed.isEmpty { next.insert(trimmed) } - } - } - self.bins = next + let trust = Self.buildTrustIndex(report: report, searchPaths: CommandResolver.preferredPaths()) + self.bins = trust.names + self.trustByName = trust.pathsByName self.lastRefresh = Date() } catch { if self.lastRefresh == nil { self.bins = [] + self.trustByName = [:] } } } + static func normalizeSkillBinName(_ value: String) -> String? { + let trimmed = value.trimmingCharacters(in: .whitespacesAndNewlines).lowercased() + return trimmed.isEmpty ? nil : trimmed + } + + static func normalizeResolvedPath(_ value: String?) -> String? { + let trimmed = value?.trimmingCharacters(in: .whitespacesAndNewlines) ?? 
"" + guard !trimmed.isEmpty else { return nil } + return URL(fileURLWithPath: trimmed).standardizedFileURL.path + } + + static func buildTrustIndex( + report: SkillsStatusReport, + searchPaths: [String]) -> SkillBinTrustIndex + { + var names = Set() + var pathsByName: [String: Set] = [:] + + for skill in report.skills { + for bin in skill.requirements.bins { + let trimmed = bin.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmed.isEmpty else { continue } + names.insert(trimmed) + + guard let name = self.normalizeSkillBinName(trimmed), + let resolvedPath = self.resolveSkillBinPath(trimmed, searchPaths: searchPaths), + let normalizedPath = self.normalizeResolvedPath(resolvedPath) + else { + continue + } + + var paths = pathsByName[name] ?? Set() + paths.insert(normalizedPath) + pathsByName[name] = paths + } + } + + return SkillBinTrustIndex(names: names, pathsByName: pathsByName) + } + + private static func resolveSkillBinPath(_ bin: String, searchPaths: [String]) -> String? { + let expanded = bin.hasPrefix("~") ? (bin as NSString).expandingTildeInPath : bin + if expanded.contains("/") || expanded.contains("\\") { + return FileManager().isExecutableFile(atPath: expanded) ? 
expanded : nil + } + return CommandResolver.findExecutable(named: expanded, searchPaths: searchPaths) + } + private func isStale() -> Bool { guard let lastRefresh else { return true } return Date().timeIntervalSince(lastRefresh) > self.refreshInterval } + + static func _testBuildTrustIndex( + report: SkillsStatusReport, + searchPaths: [String]) -> SkillBinTrustIndex + { + self.buildTrustIndex(report: report, searchPaths: searchPaths) + } +} + +struct SkillBinTrustIndex { + let names: Set + let pathsByName: [String: Set] } diff --git a/apps/macos/Sources/OpenClaw/ExecCommandResolution.swift b/apps/macos/Sources/OpenClaw/ExecCommandResolution.swift index 91a22153f3c..f89293a81aa 100644 --- a/apps/macos/Sources/OpenClaw/ExecCommandResolution.swift +++ b/apps/macos/Sources/OpenClaw/ExecCommandResolution.swift @@ -37,8 +37,7 @@ struct ExecCommandResolution { var resolutions: [ExecCommandResolution] = [] resolutions.reserveCapacity(segments.count) for segment in segments { - guard let token = self.parseFirstToken(segment), - let resolution = self.resolveExecutable(rawExecutable: token, cwd: cwd, env: env) + guard let resolution = self.resolveShellSegmentExecutable(segment, cwd: cwd, env: env) else { return [] } @@ -88,6 +87,20 @@ struct ExecCommandResolution { cwd: cwd) } + private static func resolveShellSegmentExecutable( + _ segment: String, + cwd: String?, + env: [String: String]?) -> ExecCommandResolution? + { + let tokens = self.tokenizeShellWords(segment) + guard !tokens.isEmpty else { return nil } + let effective = ExecEnvInvocationUnwrapper.unwrapDispatchWrappersForResolution(tokens) + guard let raw = effective.first?.trimmingCharacters(in: .whitespacesAndNewlines), !raw.isEmpty else { + return nil + } + return self.resolveExecutable(rawExecutable: raw, cwd: cwd, env: env) + } + private static func parseFirstToken(_ command: String) -> String? 
{ let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines) guard !trimmed.isEmpty else { return nil } @@ -102,6 +115,59 @@ struct ExecCommandResolution { return trimmed.split(whereSeparator: { $0.isWhitespace }).first.map(String.init) } + private static func tokenizeShellWords(_ command: String) -> [String] { + let trimmed = command.trimmingCharacters(in: .whitespacesAndNewlines) + guard !trimmed.isEmpty else { return [] } + + var tokens: [String] = [] + var current = "" + var inSingle = false + var inDouble = false + var escaped = false + + func appendCurrent() { + guard !current.isEmpty else { return } + tokens.append(current) + current.removeAll(keepingCapacity: true) + } + + for ch in trimmed { + if escaped { + current.append(ch) + escaped = false + continue + } + + if ch == "\\", !inSingle { + escaped = true + continue + } + + if ch == "'", !inDouble { + inSingle.toggle() + continue + } + + if ch == "\"", !inSingle { + inDouble.toggle() + continue + } + + if ch.isWhitespace, !inSingle, !inDouble { + appendCurrent() + continue + } + + current.append(ch) + } + + if escaped { + current.append("\\") + } + appendCurrent() + return tokens + } + private enum ShellTokenContext { case unquoted case doubleQuoted @@ -148,8 +214,14 @@ struct ExecCommandResolution { while idx < chars.count { let ch = chars[idx] let next: Character? = idx + 1 < chars.count ? 
chars[idx + 1] : nil + let lookahead = self.nextShellSignificantCharacter(chars: chars, after: idx, inSingle: inSingle) if escaped { + if ch == "\n" { + escaped = false + idx += 1 + continue + } current.append(ch) escaped = false idx += 1 @@ -157,6 +229,10 @@ struct ExecCommandResolution { } if ch == "\\", !inSingle { + if next == "\n" { + idx += 2 + continue + } current.append(ch) escaped = true idx += 1 @@ -177,7 +253,7 @@ struct ExecCommandResolution { continue } - if !inSingle, self.shouldFailClosedForShell(ch: ch, next: next, inDouble: inDouble) { + if !inSingle, self.shouldFailClosedForShell(ch: ch, next: lookahead, inDouble: inDouble) { // Fail closed on command/process substitution in allowlist mode, // including command substitution inside double-quoted shell strings. return nil @@ -201,6 +277,25 @@ struct ExecCommandResolution { return segments } + private static func nextShellSignificantCharacter( + chars: [Character], + after idx: Int, + inSingle: Bool) -> Character? + { + guard !inSingle else { + return idx + 1 < chars.count ? chars[idx + 1] : nil + } + var cursor = idx + 1 + while cursor < chars.count { + if chars[cursor] == "\\", cursor + 1 < chars.count, chars[cursor + 1] == "\n" { + cursor += 2 + continue + } + return chars[cursor] + } + return nil + } + private static func shouldFailClosedForShell(ch: Character, next: Character?, inDouble: Bool) -> Bool { let context: ShellTokenContext = inDouble ? 
.doubleQuoted : .unquoted guard let rules = self.shellFailClosedRules[context] else { diff --git a/apps/macos/Tests/OpenClawIPCTests/ExecAllowlistTests.swift b/apps/macos/Tests/OpenClawIPCTests/ExecAllowlistTests.swift index f12b8f717dc..fa92cc81ef5 100644 --- a/apps/macos/Tests/OpenClawIPCTests/ExecAllowlistTests.swift +++ b/apps/macos/Tests/OpenClawIPCTests/ExecAllowlistTests.swift @@ -141,6 +141,26 @@ struct ExecAllowlistTests { #expect(resolutions.isEmpty) } + @Test func `resolve for allowlist fails closed on line-continued command substitution`() { + let command = ["/bin/sh", "-lc", "echo $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-line-cont-subst)"] + let resolutions = ExecCommandResolution.resolveForAllowlist( + command: command, + rawCommand: "echo $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-line-cont-subst)", + cwd: nil, + env: ["PATH": "/usr/bin:/bin"]) + #expect(resolutions.isEmpty) + } + + @Test func `resolve for allowlist fails closed on chained line-continued command substitution`() { + let command = ["/bin/sh", "-lc", "echo ok && $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-chained-line-cont-subst)"] + let resolutions = ExecCommandResolution.resolveForAllowlist( + command: command, + rawCommand: "echo ok && $\\\n(/usr/bin/touch /tmp/openclaw-allowlist-test-chained-line-cont-subst)", + cwd: nil, + env: ["PATH": "/usr/bin:/bin"]) + #expect(resolutions.isEmpty) + } + @Test func `resolve for allowlist fails closed on quoted backticks`() { let command = ["/bin/sh", "-lc", "echo \"ok `/usr/bin/id`\""] let resolutions = ExecCommandResolution.resolveForAllowlist( @@ -208,6 +228,30 @@ struct ExecAllowlistTests { #expect(resolutions[1].executableName == "touch") } + @Test func `resolve for allowlist unwraps env dispatch wrappers inside shell segments`() { + let command = ["/bin/sh", "-lc", "env /usr/bin/touch /tmp/openclaw-allowlist-test"] + let resolutions = ExecCommandResolution.resolveForAllowlist( + command: command, + rawCommand: "env 
/usr/bin/touch /tmp/openclaw-allowlist-test", + cwd: nil, + env: ["PATH": "/usr/bin:/bin"]) + #expect(resolutions.count == 1) + #expect(resolutions[0].resolvedPath == "/usr/bin/touch") + #expect(resolutions[0].executableName == "touch") + } + + @Test func `resolve for allowlist unwraps env assignments inside shell segments`() { + let command = ["/bin/sh", "-lc", "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test"] + let resolutions = ExecCommandResolution.resolveForAllowlist( + command: command, + rawCommand: "env FOO=bar /usr/bin/touch /tmp/openclaw-allowlist-test", + cwd: nil, + env: ["PATH": "/usr/bin:/bin"]) + #expect(resolutions.count == 1) + #expect(resolutions[0].resolvedPath == "/usr/bin/touch") + #expect(resolutions[0].executableName == "touch") + } + @Test func `resolve for allowlist unwraps env to effective direct executable`() { let command = ["/usr/bin/env", "FOO=bar", "/usr/bin/printf", "ok"] let resolutions = ExecCommandResolution.resolveForAllowlist( diff --git a/apps/macos/Tests/OpenClawIPCTests/ExecSkillBinTrustTests.swift b/apps/macos/Tests/OpenClawIPCTests/ExecSkillBinTrustTests.swift new file mode 100644 index 00000000000..779b59a3499 --- /dev/null +++ b/apps/macos/Tests/OpenClawIPCTests/ExecSkillBinTrustTests.swift @@ -0,0 +1,90 @@ +import Foundation +import Testing +@testable import OpenClaw + +struct ExecSkillBinTrustTests { + @Test func `build trust index resolves skill bin paths`() throws { + let fixture = try Self.makeExecutable(named: "jq") + defer { try? FileManager.default.removeItem(at: fixture.root) } + + let trust = SkillBinsCache._testBuildTrustIndex( + report: Self.makeReport(bins: ["jq"]), + searchPaths: [fixture.root.path]) + + #expect(trust.names == ["jq"]) + #expect(trust.pathsByName["jq"] == [fixture.path]) + } + + @Test func `skill auto allow accepts trusted resolved skill bin path`() throws { + let fixture = try Self.makeExecutable(named: "jq") + defer { try? 
FileManager.default.removeItem(at: fixture.root) } + + let trust = SkillBinsCache._testBuildTrustIndex( + report: Self.makeReport(bins: ["jq"]), + searchPaths: [fixture.root.path]) + let resolution = ExecCommandResolution( + rawExecutable: "jq", + resolvedPath: fixture.path, + executableName: "jq", + cwd: nil) + + #expect(ExecApprovalEvaluator._testIsSkillAutoAllowed([resolution], trustedBinsByName: trust.pathsByName)) + } + + @Test func `skill auto allow rejects same basename at different path`() throws { + let trusted = try Self.makeExecutable(named: "jq") + let untrusted = try Self.makeExecutable(named: "jq") + defer { + try? FileManager.default.removeItem(at: trusted.root) + try? FileManager.default.removeItem(at: untrusted.root) + } + + let trust = SkillBinsCache._testBuildTrustIndex( + report: Self.makeReport(bins: ["jq"]), + searchPaths: [trusted.root.path]) + let resolution = ExecCommandResolution( + rawExecutable: "jq", + resolvedPath: untrusted.path, + executableName: "jq", + cwd: nil) + + #expect(!ExecApprovalEvaluator._testIsSkillAutoAllowed([resolution], trustedBinsByName: trust.pathsByName)) + } + + private static func makeExecutable(named name: String) throws -> (root: URL, path: String) { + let root = FileManager.default.temporaryDirectory + .appendingPathComponent("openclaw-skill-bin-\(UUID().uuidString)", isDirectory: true) + try FileManager.default.createDirectory(at: root, withIntermediateDirectories: true) + let file = root.appendingPathComponent(name) + try "#!/bin/sh\nexit 0\n".write(to: file, atomically: true, encoding: .utf8) + try FileManager.default.setAttributes( + [.posixPermissions: NSNumber(value: Int16(0o755))], + ofItemAtPath: file.path) + return (root, file.path) + } + + private static func makeReport(bins: [String]) -> SkillsStatusReport { + SkillsStatusReport( + workspaceDir: "/tmp/workspace", + managedSkillsDir: "/tmp/skills", + skills: [ + SkillStatus( + name: "test-skill", + description: "test", + source: "local", + filePath: 
"/tmp/skills/test-skill/SKILL.md", + baseDir: "/tmp/skills/test-skill", + skillKey: "test-skill", + primaryEnv: nil, + emoji: nil, + homepage: nil, + always: false, + disabled: false, + eligible: true, + requirements: SkillRequirements(bins: bins, env: [], config: []), + missing: SkillMissing(bins: [], env: [], config: []), + configChecks: [], + install: []) + ]) + } +} diff --git a/docs/brave-search.md b/docs/brave-search.md index a8bba5c3e91..4a541690431 100644 --- a/docs/brave-search.md +++ b/docs/brave-search.md @@ -73,7 +73,7 @@ await web_search({ ## Notes - OpenClaw uses the Brave **Search** plan. If you have a legacy subscription (e.g. the original Free plan with 2,000 queries/month), it remains valid but does not include newer features like LLM Context or higher rate limits. -- Each Brave plan includes **$5/month in free credit** (renewing). The Search plan costs $5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans. +- Each Brave plan includes **\$5/month in free credit** (renewing). The Search plan costs \$5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans. - The Search plan includes the LLM Context endpoint and AI inference rights. Storing results to train or tune models requires a plan with explicit storage rights. See the Brave [Terms of Service](https://api-dashboard.search.brave.com/terms-of-service). - Results are cached for 15 minutes by default (configurable via `cacheTtlMinutes`). diff --git a/docs/channels/slack.md b/docs/channels/slack.md index 7fe44cc611b..aa9127ea630 100644 --- a/docs/channels/slack.md +++ b/docs/channels/slack.md @@ -218,6 +218,55 @@ For actions/directory reads, user token can be preferred when configured. 
For wr - if encoded option values exceed Slack limits, the flow falls back to buttons - For long option payloads, Slash command argument menus use a confirm dialog before dispatching a selected value. +## Interactive replies + +Slack can render agent-authored interactive reply controls, but this feature is disabled by default. + +Enable it globally: + +```json5 +{ + channels: { + slack: { + capabilities: { + interactiveReplies: true, + }, + }, + }, +} +``` + +Or enable it for one Slack account only: + +```json5 +{ + channels: { + slack: { + accounts: { + ops: { + capabilities: { + interactiveReplies: true, + }, + }, + }, + }, + }, +} +``` + +When enabled, agents can emit Slack-only reply directives: + +- `[[slack_buttons: Approve:approve, Reject:reject]]` +- `[[slack_select: Choose a target | Canary:canary, Production:production]]` + +These directives compile into Slack Block Kit and route clicks or selections back through the existing Slack interaction event path. + +Notes: + +- This is Slack-specific UI. Other channels do not translate Slack Block Kit directives into their own button systems. +- The interactive callback values are OpenClaw-generated opaque tokens, not raw agent-authored values. +- If generated interactive blocks would exceed Slack Block Kit limits, OpenClaw falls back to the original text reply instead of sending an invalid blocks payload. + Default slash command settings: - `enabled: false` diff --git a/docs/ci.md b/docs/ci.md index 16a7e670964..e8710b87cb1 100644 --- a/docs/ci.md +++ b/docs/ci.md @@ -9,32 +9,32 @@ read_when: # CI Pipeline -The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only docs or native code changed. +The CI runs on every push to `main` and every pull request. It uses smart scoping to skip expensive jobs when only unrelated areas changed. 
## Job Overview -| Job | Purpose | When it runs | -| ----------------- | ------------------------------------------------------- | ------------------------------------------------- | -| `docs-scope` | Detect docs-only changes | Always | -| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-docs PRs | -| `check` | TypeScript types, lint, format | Push to `main`, or PRs with Node-relevant changes | -| `check-docs` | Markdown lint + broken link check | Docs changed | -| `code-analysis` | LOC threshold check (1000 lines) | PRs only | -| `secrets` | Detect leaked secrets | Always | -| `build-artifacts` | Build dist once, share with other jobs | Non-docs, node changes | -| `release-check` | Validate npm pack contents | After build | -| `checks` | Node/Bun tests + protocol check | Non-docs, node changes | -| `checks-windows` | Windows-specific tests | Non-docs, windows-relevant changes | -| `macos` | Swift lint/build/test + TS tests | PRs with macos changes | -| `android` | Gradle build + tests | Non-docs, android changes | +| Job | Purpose | When it runs | +| ----------------- | ------------------------------------------------------- | ---------------------------------- | +| `docs-scope` | Detect docs-only changes | Always | +| `changed-scope` | Detect which areas changed (node/macos/android/windows) | Non-doc changes | +| `check` | TypeScript types, lint, format | Non-docs, node changes | +| `check-docs` | Markdown lint + broken link check | Docs changed | +| `secrets` | Detect leaked secrets | Always | +| `build-artifacts` | Build dist once, share with `release-check` | Pushes to `main`, node changes | +| `release-check` | Validate npm pack contents | Pushes to `main` after build | +| `checks` | Node tests + protocol check on PRs; Bun compat on push | Non-docs, node changes | +| `compat-node22` | Minimum supported Node runtime compatibility | Pushes to `main`, node changes | +| `checks-windows` | Windows-specific tests | Non-docs, 
windows-relevant changes | +| `macos` | Swift lint/build/test + TS tests | PRs with macos changes | +| `android` | Gradle build + tests | Non-docs, android changes | ## Fail-Fast Order Jobs are ordered so cheap checks fail before expensive ones run: -1. `docs-scope` + `code-analysis` + `check` (parallel, ~1-2 min) -2. `build-artifacts` (blocked on above) -3. `checks`, `checks-windows`, `macos`, `android` (blocked on build) +1. `docs-scope` + `changed-scope` + `check` + `secrets` (parallel, cheap gates first) +2. PRs: `checks` (Linux Node test split into 2 shards), `checks-windows`, `macos`, `android` +3. Pushes to `main`: `build-artifacts` + `release-check` + Bun compat + `compat-node22` Scope logic lives in `scripts/ci-changed-scope.mjs` and is covered by unit tests in `src/scripts/ci-changed-scope.test.ts`. diff --git a/docs/gateway/openresponses-http-api.md b/docs/gateway/openresponses-http-api.md index bcba166db9d..fa86f912ef5 100644 --- a/docs/gateway/openresponses-http-api.md +++ b/docs/gateway/openresponses-http-api.md @@ -18,77 +18,16 @@ This endpoint is **disabled by default**. Enable it in config first. Under the hood, requests are executed as a normal Gateway agent run (same codepath as `openclaw agent`), so routing/permissions/config match your Gateway. -## Authentication +## Authentication, security, and routing -Uses the Gateway auth configuration. Send a bearer token: +Operational behavior matches [OpenAI Chat Completions](/gateway/openai-http-api): -- `Authorization: Bearer ` +- use `Authorization: Bearer ` with the normal Gateway auth config +- treat the endpoint as full operator access for the gateway instance +- select agents with `model: "openclaw:"`, `model: "agent:"`, or `x-openclaw-agent-id` +- use `x-openclaw-session-key` for explicit session routing -Notes: - -- When `gateway.auth.mode="token"`, use `gateway.auth.token` (or `OPENCLAW_GATEWAY_TOKEN`). 
-- When `gateway.auth.mode="password"`, use `gateway.auth.password` (or `OPENCLAW_GATEWAY_PASSWORD`). -- If `gateway.auth.rateLimit` is configured and too many auth failures occur, the endpoint returns `429` with `Retry-After`. - -## Security boundary (important) - -Treat this endpoint as a **full operator-access** surface for the gateway instance. - -- HTTP bearer auth here is not a narrow per-user scope model. -- A valid Gateway token/password for this endpoint should be treated like an owner/operator credential. -- Requests run through the same control-plane agent path as trusted operator actions. -- There is no separate non-owner/per-user tool boundary on this endpoint; once a caller passes Gateway auth here, OpenClaw treats that caller as a trusted operator for this gateway. -- If the target agent policy allows sensitive tools, this endpoint can use them. -- Keep this endpoint on loopback/tailnet/private ingress only; do not expose it directly to the public internet. - -See [Security](/gateway/security) and [Remote access](/gateway/remote). - -## Choosing an agent - -No custom headers required: encode the agent id in the OpenResponses `model` field: - -- `model: "openclaw:"` (example: `"openclaw:main"`, `"openclaw:beta"`) -- `model: "agent:"` (alias) - -Or target a specific OpenClaw agent by header: - -- `x-openclaw-agent-id: ` (default: `main`) - -Advanced: - -- `x-openclaw-session-key: ` to fully control session routing. - -## Enabling the endpoint - -Set `gateway.http.endpoints.responses.enabled` to `true`: - -```json5 -{ - gateway: { - http: { - endpoints: { - responses: { enabled: true }, - }, - }, - }, -} -``` - -## Disabling the endpoint - -Set `gateway.http.endpoints.responses.enabled` to `false`: - -```json5 -{ - gateway: { - http: { - endpoints: { - responses: { enabled: false }, - }, - }, - }, -} -``` +Enable or disable this endpoint with `gateway.http.endpoints.responses.enabled`. 
## Session behavior diff --git a/docs/help/testing.md b/docs/help/testing.md index db374bb03da..b2057e8a1da 100644 --- a/docs/help/testing.md +++ b/docs/help/testing.md @@ -53,8 +53,8 @@ Think of the suites as “increasing realism” (and increasing flakiness/cost): - No real keys required - Should be fast and stable - Pool note: - - OpenClaw uses Vitest `vmForks` on Node 22/23 for faster unit shards. - - On Node 24+, OpenClaw automatically falls back to regular `forks` to avoid Node VM linking errors (`ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`). + - OpenClaw uses Vitest `vmForks` on Node 22, 23, and 24 for faster unit shards. + - On Node 25+, OpenClaw automatically falls back to regular `forks` until the repo is re-validated there. - Override manually with `OPENCLAW_TEST_VM_FORKS=0` (force `forks`) or `OPENCLAW_TEST_VM_FORKS=1` (force `vmForks`). ### E2E (gateway smoke) diff --git a/docs/install/docker-vm-runtime.md b/docs/install/docker-vm-runtime.md new file mode 100644 index 00000000000..77436f44486 --- /dev/null +++ b/docs/install/docker-vm-runtime.md @@ -0,0 +1,138 @@ +--- +summary: "Shared Docker VM runtime steps for long-lived OpenClaw Gateway hosts" +read_when: + - You are deploying OpenClaw on a cloud VM with Docker + - You need the shared binary bake, persistence, and update flow +title: "Docker VM Runtime" +--- + +# Docker VM Runtime + +Shared runtime steps for VM-based Docker installs such as GCP, Hetzner, and similar VPS providers. + +## Bake required binaries into the image + +Installing binaries inside a running container is a trap. +Anything installed at runtime will be lost on restart. + +All external binaries required by skills must be installed at image build time. + +The examples below show three common binaries only: + +- `gog` for Gmail access +- `goplaces` for Google Places +- `wacli` for WhatsApp + +These are examples, not a complete list. +You may install as many binaries as needed using the same pattern. 
+ +If you add new skills later that depend on additional binaries, you must: + +1. Update the Dockerfile +2. Rebuild the image +3. Restart the containers + +**Example Dockerfile** + +```dockerfile +FROM node:24-bookworm + +RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/* + +# Example binary 1: Gmail CLI +RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \ + | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog + +# Example binary 2: Google Places CLI +RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \ + | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces + +# Example binary 3: WhatsApp CLI +RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \ + | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli + +# Add more binaries below using the same pattern + +WORKDIR /app +COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./ +COPY ui/package.json ./ui/package.json +COPY scripts ./scripts + +RUN corepack enable +RUN pnpm install --frozen-lockfile + +COPY . . +RUN pnpm build +RUN pnpm ui:install +RUN pnpm ui:build + +ENV NODE_ENV=production + +CMD ["node","dist/index.js"] +``` + +## Build and launch + +```bash +docker compose build +docker compose up -d openclaw-gateway +``` + +If build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. +Use a larger machine class before retrying. 
+ +Verify binaries: + +```bash +docker compose exec openclaw-gateway which gog +docker compose exec openclaw-gateway which goplaces +docker compose exec openclaw-gateway which wacli +``` + +Expected output: + +``` +/usr/local/bin/gog +/usr/local/bin/goplaces +/usr/local/bin/wacli +``` + +Verify Gateway: + +```bash +docker compose logs -f openclaw-gateway +``` + +Expected output: + +``` +[gateway] listening on ws://0.0.0.0:18789 +``` + +## What persists where + +OpenClaw runs in Docker, but Docker is not the source of truth. +All long-lived state must survive restarts, rebuilds, and reboots. + +| Component | Location | Persistence mechanism | Notes | +| ------------------- | --------------------------------- | ---------------------- | -------------------------------- | +| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens | +| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys | +| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state | +| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts | +| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login | +| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` | +| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time | +| Node runtime | Container filesystem | Docker image | Rebuilt every image build | +| OS packages | Container filesystem | Docker image | Do not install at runtime | +| Docker container | Ephemeral | Restartable | Safe to destroy | + +## Updates + +To update OpenClaw on the VM: + +```bash +git pull +docker compose build +docker compose up -d +``` diff --git a/docs/install/gcp.md b/docs/install/gcp.md index dfedfe4ba38..7ff4a00d087 100644 --- a/docs/install/gcp.md +++ b/docs/install/gcp.md @@ -281,77 +281,20 @@ services: --- -## 10) Bake 
required binaries into the image (critical) +## 10) Shared Docker VM runtime steps -Installing binaries inside a running container is a trap. -Anything installed at runtime will be lost on restart. +Use the shared runtime guide for the common Docker host flow: -All external binaries required by skills must be installed at image build time. - -The examples below show three common binaries only: - -- `gog` for Gmail access -- `goplaces` for Google Places -- `wacli` for WhatsApp - -These are examples, not a complete list. -You may install as many binaries as needed using the same pattern. - -If you add new skills later that depend on additional binaries, you must: - -1. Update the Dockerfile -2. Rebuild the image -3. Restart the containers - -**Example Dockerfile** - -```dockerfile -FROM node:24-bookworm - -RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/* - -# Example binary 1: Gmail CLI -RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog - -# Example binary 2: Google Places CLI -RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces - -# Example binary 3: WhatsApp CLI -RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli - -# Add more binaries below using the same pattern - -WORKDIR /app -COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./ -COPY ui/package.json ./ui/package.json -COPY scripts ./scripts - -RUN corepack enable -RUN pnpm install --frozen-lockfile - -COPY . . 
-RUN pnpm build -RUN pnpm ui:install -RUN pnpm ui:build - -ENV NODE_ENV=production - -CMD ["node","dist/index.js"] -``` +- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image) +- [Build and launch](/install/docker-vm-runtime#build-and-launch) +- [What persists where](/install/docker-vm-runtime#what-persists-where) +- [Updates](/install/docker-vm-runtime#updates) --- -## 11) Build and launch +## 11) GCP-specific launch notes -```bash -docker compose build -docker compose up -d openclaw-gateway -``` - -If build fails with `Killed` / `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds. +On GCP, if build fails with `Killed` or `exit code 137` during `pnpm install --frozen-lockfile`, the VM is out of memory. Use `e2-small` minimum, or `e2-medium` for more reliable first builds. When binding to LAN (`OPENCLAW_GATEWAY_BIND=lan`), configure a trusted browser origin before continuing: @@ -361,39 +304,7 @@ docker compose run --rm openclaw-cli config set gateway.controlUi.allowedOrigins If you changed the gateway port, replace `18789` with your configured port. 
-Verify binaries: - -```bash -docker compose exec openclaw-gateway which gog -docker compose exec openclaw-gateway which goplaces -docker compose exec openclaw-gateway which wacli -``` - -Expected output: - -``` -/usr/local/bin/gog -/usr/local/bin/goplaces -/usr/local/bin/wacli -``` - ---- - -## 12) Verify Gateway - -```bash -docker compose logs -f openclaw-gateway -``` - -Success: - -``` -[gateway] listening on ws://0.0.0.0:18789 -``` - ---- - -## 13) Access from your laptop +## 12) Access from your laptop Create an SSH tunnel to forward the Gateway port: @@ -420,38 +331,8 @@ docker compose run --rm openclaw-cli devices list docker compose run --rm openclaw-cli devices approve ``` ---- - -## What persists where (source of truth) - -OpenClaw runs in Docker, but Docker is not the source of truth. -All long-lived state must survive restarts, rebuilds, and reboots. - -| Component | Location | Persistence mechanism | Notes | -| ------------------- | --------------------------------- | ---------------------- | -------------------------------- | -| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens | -| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys | -| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state | -| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts | -| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login | -| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` | -| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time | -| Node runtime | Container filesystem | Docker image | Rebuilt every image build | -| OS packages | Container filesystem | Docker image | Do not install at runtime | -| Docker container | Ephemeral | Restartable | Safe to destroy | - ---- - -## Updates - -To update 
OpenClaw on the VM: - -```bash -cd ~/openclaw -git pull -docker compose build -docker compose up -d -``` +Need the shared persistence and update reference again? +See [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where) and [Docker VM Runtime updates](/install/docker-vm-runtime#updates). --- diff --git a/docs/install/hetzner.md b/docs/install/hetzner.md index 4c27840cee0..46bc76d6243 100644 --- a/docs/install/hetzner.md +++ b/docs/install/hetzner.md @@ -202,107 +202,20 @@ services: --- -## 7) Bake required binaries into the image (critical) +## 7) Shared Docker VM runtime steps -Installing binaries inside a running container is a trap. -Anything installed at runtime will be lost on restart. +Use the shared runtime guide for the common Docker host flow: -All external binaries required by skills must be installed at image build time. - -The examples below show three common binaries only: - -- `gog` for Gmail access -- `goplaces` for Google Places -- `wacli` for WhatsApp - -These are examples, not a complete list. -You may install as many binaries as needed using the same pattern. - -If you add new skills later that depend on additional binaries, you must: - -1. Update the Dockerfile -2. Rebuild the image -3. 
Restart the containers - -**Example Dockerfile** - -```dockerfile -FROM node:24-bookworm - -RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/* - -# Example binary 1: Gmail CLI -RUN curl -L https://github.com/steipete/gog/releases/latest/download/gog_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/gog - -# Example binary 2: Google Places CLI -RUN curl -L https://github.com/steipete/goplaces/releases/latest/download/goplaces_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/goplaces - -# Example binary 3: WhatsApp CLI -RUN curl -L https://github.com/steipete/wacli/releases/latest/download/wacli_Linux_x86_64.tar.gz \ - | tar -xz -C /usr/local/bin && chmod +x /usr/local/bin/wacli - -# Add more binaries below using the same pattern - -WORKDIR /app -COPY package.json pnpm-lock.yaml pnpm-workspace.yaml .npmrc ./ -COPY ui/package.json ./ui/package.json -COPY scripts ./scripts - -RUN corepack enable -RUN pnpm install --frozen-lockfile - -COPY . . 
-RUN pnpm build -RUN pnpm ui:install -RUN pnpm ui:build - -ENV NODE_ENV=production - -CMD ["node","dist/index.js"] -``` +- [Bake required binaries into the image](/install/docker-vm-runtime#bake-required-binaries-into-the-image) +- [Build and launch](/install/docker-vm-runtime#build-and-launch) +- [What persists where](/install/docker-vm-runtime#what-persists-where) +- [Updates](/install/docker-vm-runtime#updates) --- -## 8) Build and launch +## 8) Hetzner-specific access -```bash -docker compose build -docker compose up -d openclaw-gateway -``` - -Verify binaries: - -```bash -docker compose exec openclaw-gateway which gog -docker compose exec openclaw-gateway which goplaces -docker compose exec openclaw-gateway which wacli -``` - -Expected output: - -``` -/usr/local/bin/gog -/usr/local/bin/goplaces -/usr/local/bin/wacli -``` - ---- - -## 9) Verify Gateway - -```bash -docker compose logs -f openclaw-gateway -``` - -Success: - -``` -[gateway] listening on ws://0.0.0.0:18789 -``` - -From your laptop: +After the shared build and launch steps, tunnel from your laptop: ```bash ssh -N -L 18789:127.0.0.1:18789 root@YOUR_VPS_IP @@ -316,25 +229,7 @@ Paste your gateway token. --- -## What persists where (source of truth) - -OpenClaw runs in Docker, but Docker is not the source of truth. -All long-lived state must survive restarts, rebuilds, and reboots. 
- -| Component | Location | Persistence mechanism | Notes | -| ------------------- | --------------------------------- | ---------------------- | -------------------------------- | -| Gateway config | `/home/node/.openclaw/` | Host volume mount | Includes `openclaw.json`, tokens | -| Model auth profiles | `/home/node/.openclaw/` | Host volume mount | OAuth tokens, API keys | -| Skill configs | `/home/node/.openclaw/skills/` | Host volume mount | Skill-level state | -| Agent workspace | `/home/node/.openclaw/workspace/` | Host volume mount | Code and agent artifacts | -| WhatsApp session | `/home/node/.openclaw/` | Host volume mount | Preserves QR login | -| Gmail keyring | `/home/node/.openclaw/` | Host volume + password | Requires `GOG_KEYRING_PASSWORD` | -| External binaries | `/usr/local/bin/` | Docker image | Must be baked at build time | -| Node runtime | Container filesystem | Docker image | Rebuilt every image build | -| OS packages | Container filesystem | Docker image | Do not install at runtime | -| Docker container | Ephemeral | Restartable | Safe to destroy | - ---- +The shared persistence map lives in [Docker VM Runtime](/install/docker-vm-runtime#what-persists-where). ## Infrastructure as Code (Terraform) diff --git a/docs/plugins/voice-call.md b/docs/plugins/voice-call.md index 17263ca0509..14198fdba36 100644 --- a/docs/plugins/voice-call.md +++ b/docs/plugins/voice-call.md @@ -296,6 +296,12 @@ Inbound policy defaults to `disabled`. To enable inbound calls, set: } ``` +`inboundPolicy: "allowlist"` is a low-assurance caller-ID screen. The plugin +normalizes the provider-supplied `From` value and compares it to `allowFrom`. +Webhook verification authenticates provider delivery and payload integrity, but +it does not prove PSTN/VoIP caller-number ownership. Treat `allowFrom` as +caller-ID filtering, not strong caller identity. + Auto-responses use the agent system. 
Tune with: - `responseModel` diff --git a/docs/reference/api-usage-costs.md b/docs/reference/api-usage-costs.md index baf4302ac0d..bbb1d90de87 100644 --- a/docs/reference/api-usage-costs.md +++ b/docs/reference/api-usage-costs.md @@ -85,8 +85,8 @@ See [Memory](/concepts/memory). - **Kimi (Moonshot)**: `KIMI_API_KEY`, `MOONSHOT_API_KEY`, or `tools.web.search.kimi.apiKey` - **Perplexity Search API**: `PERPLEXITY_API_KEY`, `OPENROUTER_API_KEY`, or `tools.web.search.perplexity.apiKey` -**Brave Search free credit:** Each Brave plan includes $5/month in renewing -free credit. The Search plan costs $5 per 1,000 requests, so the credit covers +**Brave Search free credit:** Each Brave plan includes \$5/month in renewing +free credit. The Search plan costs \$5 per 1,000 requests, so the credit covers 1,000 requests/month at no charge. Set your usage limit in the Brave dashboard to avoid unexpected charges. diff --git a/docs/reference/test.md b/docs/reference/test.md index 6d5c5535a83..378789f6d6e 100644 --- a/docs/reference/test.md +++ b/docs/reference/test.md @@ -11,7 +11,7 @@ title: "Tests" - `pnpm test:force`: Kills any lingering gateway process holding the default control port, then runs the full Vitest suite with an isolated gateway port so server tests don’t collide with a running instance. Use this when a prior gateway run left port 18789 occupied. - `pnpm test:coverage`: Runs the unit suite with V8 coverage (via `vitest.unit.config.ts`). Global thresholds are 70% lines/branches/functions/statements. Coverage excludes integration-heavy entrypoints (CLI wiring, gateway/telegram bridges, webchat static server) to keep the target focused on unit-testable logic. -- `pnpm test` on Node 24+: OpenClaw auto-disables Vitest `vmForks` and uses `forks` to avoid `ERR_VM_MODULE_LINK_FAILURE` / `module is already linked`. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`. +- `pnpm test` on Node 22, 23, and 24 uses Vitest `vmForks` by default for faster startup. 
Node 25+ falls back to `forks` until re-validated. You can force behavior with `OPENCLAW_TEST_VM_FORKS=0|1`. - `pnpm test`: runs the fast core unit lane by default for quick local feedback. - `pnpm test:channels`: runs channel-heavy suites. - `pnpm test:extensions`: runs extension/plugin suites. diff --git a/docs/reference/wizard.md b/docs/reference/wizard.md index 60e88fe4226..bbaebbdc84f 100644 --- a/docs/reference/wizard.md +++ b/docs/reference/wizard.md @@ -167,93 +167,8 @@ openclaw onboard --non-interactive \ `--json` does **not** imply non-interactive mode. Use `--non-interactive` (and `--workspace`) for scripts. - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice gemini-api-key \ - --gemini-api-key "$GEMINI_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice zai-api-key \ - --zai-api-key "$ZAI_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice ai-gateway-api-key \ - --ai-gateway-api-key "$AI_GATEWAY_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice cloudflare-ai-gateway-api-key \ - --cloudflare-ai-gateway-account-id "your-account-id" \ - --cloudflare-ai-gateway-gateway-id "your-gateway-id" \ - --cloudflare-ai-gateway-api-key "$CLOUDFLARE_AI_GATEWAY_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice moonshot-api-key \ - --moonshot-api-key "$MOONSHOT_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice synthetic-api-key \ - --synthetic-api-key "$SYNTHETIC_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - 
- - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice opencode-zen \ - --opencode-zen-api-key "$OPENCODE_API_KEY" \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - Swap to `--auth-choice opencode-go --opencode-go-api-key "$OPENCODE_API_KEY"` for the Go catalog. - - - ```bash - openclaw onboard --non-interactive \ - --mode local \ - --auth-choice ollama \ - --custom-model-id "qwen3.5:27b" \ - --accept-risk \ - --gateway-port 18789 \ - --gateway-bind loopback - ``` - Add `--custom-base-url "http://ollama-host:11434"` to target a remote Ollama instance. - - +Provider-specific command examples live in [CLI Automation](/start/wizard-cli-automation#provider-specific-examples). +Use this reference page for flag semantics and step ordering. ### Add agent (non-interactive) diff --git a/docs/tools/browser.md b/docs/tools/browser.md index d632e713068..8a7abe93209 100644 --- a/docs/tools/browser.md +++ b/docs/tools/browser.md @@ -48,6 +48,8 @@ Gateway. - `openclaw`: managed, isolated browser (no extension required). - `chrome`: extension relay to your **system browser** (requires the OpenClaw extension to be attached to a tab). +- `existing-session`: official Chrome MCP attach flow for a running Chrome + profile. Set `browser.defaultProfile: "openclaw"` if you want managed mode by default. @@ -77,6 +79,12 @@ Browser settings live in `~/.openclaw/openclaw.json`. profiles: { openclaw: { cdpPort: 18800, color: "#FF4500" }, work: { cdpPort: 18801, color: "#0066CC" }, + chromeLive: { + cdpPort: 18802, + driver: "existing-session", + attachOnly: true, + color: "#00AA00", + }, remote: { cdpUrl: "http://10.0.0.42:9222", color: "#00AA00" }, }, }, @@ -100,6 +108,8 @@ Notes: - Default profile is `openclaw` (OpenClaw-managed standalone browser). Use `defaultProfile: "chrome"` to opt into the Chrome extension relay. - Auto-detect order: system default browser if Chromium-based; otherwise Chrome → Brave → Edge → Chromium → Chrome Canary. 
- Local `openclaw` profiles auto-assign `cdpPort`/`cdpUrl` — set those only for remote CDP. +- `driver: "existing-session"` uses Chrome DevTools MCP instead of raw CDP. Do + not set `cdpUrl` for that driver. ## Use Brave (or another Chromium-based browser) @@ -264,11 +274,13 @@ OpenClaw supports multiple named profiles (routing configs). Profiles can be: - **openclaw-managed**: a dedicated Chromium-based browser instance with its own user data directory + CDP port - **remote**: an explicit CDP URL (Chromium-based browser running elsewhere) - **extension relay**: your existing Chrome tab(s) via the local relay + Chrome extension +- **existing session**: your existing Chrome profile via Chrome DevTools MCP auto-connect Defaults: - The `openclaw` profile is auto-created if missing. - The `chrome` profile is built-in for the Chrome extension relay (points at `http://127.0.0.1:18792` by default). +- Existing-session profiles are opt-in; create them with `--driver existing-session`. - Local CDP ports allocate from **18800–18899** by default. - Deleting a profile moves its local data directory to Trash. @@ -328,6 +340,66 @@ Notes: - This mode relies on Playwright-on-CDP for most operations (screenshots/snapshots/actions). - Detach by clicking the extension icon again. + +## Chrome existing-session via MCP + +OpenClaw can also attach to a running Chrome profile through the official +Chrome DevTools MCP server. This reuses the tabs and login state already open in +that Chrome profile. + +Official background and setup references: + +- [Chrome for Developers: Use Chrome DevTools MCP with your browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session) +- [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp) + +Create a profile: + +```bash +openclaw browser create-profile \ + --name chrome-live \ + --driver existing-session \ + --color "#00AA00" +``` + +Then in Chrome: + +1. 
Open `chrome://inspect/#remote-debugging` +2. Enable remote debugging +3. Keep Chrome running and approve the connection prompt when OpenClaw attaches + +Live attach smoke test: + +```bash +openclaw browser --browser-profile chrome-live start +openclaw browser --browser-profile chrome-live status +openclaw browser --browser-profile chrome-live tabs +openclaw browser --browser-profile chrome-live snapshot --format ai +``` + +What success looks like: + +- `status` shows `driver: existing-session` +- `status` shows `running: true` +- `tabs` lists your already-open Chrome tabs +- `snapshot` returns refs from the selected live tab + +What to check if attach does not work: + +- Chrome is version `144+` +- remote debugging is enabled at `chrome://inspect/#remote-debugging` +- Chrome showed and you accepted the attach consent prompt +- the Gateway or node host can spawn `npx chrome-devtools-mcp@latest --autoConnect` + +Notes: + +- This path is higher-risk than the isolated `openclaw` profile because it can + act inside your signed-in browser session. +- OpenClaw does not launch Chrome for this driver; it attaches to an existing + session only. +- OpenClaw uses the official Chrome DevTools MCP `--autoConnect` flow here, not + the legacy default-profile remote debugging port workflow. +- Some features still require the extension relay or managed browser path, such + as PDF export and download interception. - Leave the relay loopback-only by default. If the relay must be reachable from a different network namespace (for example Gateway in WSL2, Chrome on Windows), set `browser.relayBindHost` to an explicit bind address such as `0.0.0.0` while keeping the surrounding network private and authenticated. 
WSL2 / cross-namespace example: diff --git a/docs/tools/chrome-extension.md b/docs/tools/chrome-extension.md index ce4b271ae9c..dcf2150409b 100644 --- a/docs/tools/chrome-extension.md +++ b/docs/tools/chrome-extension.md @@ -13,6 +13,13 @@ The OpenClaw Chrome extension lets the agent control your **existing Chrome tabs Attach/detach happens via a **single Chrome toolbar button**. +If you want Chrome’s official DevTools MCP attach flow instead of the OpenClaw +extension relay, use an `existing-session` browser profile instead. See +[Browser](/tools/browser#chrome-existing-session-via-mcp). For Chrome’s own +setup docs, see [Chrome for Developers: Use Chrome DevTools MCP with your +browser session](https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session) +and the [Chrome DevTools MCP README](https://github.com/ChromeDevTools/chrome-devtools-mcp). + ## What it is (concept) There are three parts: diff --git a/docs/tools/web.md b/docs/tools/web.md index e77d046ce5b..a2aa1d37bfd 100644 --- a/docs/tools/web.md +++ b/docs/tools/web.md @@ -65,8 +65,8 @@ Use `openclaw configure --section web` to set up your API key and choose a provi 2. In the dashboard, choose the **Search** plan and generate an API key. 3. Run `openclaw configure --section web` to store the key in config, or set `BRAVE_API_KEY` in your environment. -Each Brave plan includes **$5/month in free credit** (renewing). The Search -plan costs $5 per 1,000 requests, so the credit covers 1,000 queries/month. Set +Each Brave plan includes **\$5/month in free credit** (renewing). The Search +plan costs \$5 per 1,000 requests, so the credit covers 1,000 queries/month. Set your usage limit in the Brave dashboard to avoid unexpected charges. See the [Brave API portal](https://brave.com/search/api/) for current plans and pricing. 
diff --git a/extensions/acpx/src/ensure.test.ts b/extensions/acpx/src/ensure.test.ts index cae52f29f9b..c0bb5469b29 100644 --- a/extensions/acpx/src/ensure.test.ts +++ b/extensions/acpx/src/ensure.test.ts @@ -54,6 +54,49 @@ describe("acpx ensure", () => { } }); + function mockEnsureInstallFlow() { + spawnAndCollectMock + .mockResolvedValueOnce({ + stdout: "acpx 0.0.9\n", + stderr: "", + code: 0, + error: null, + }) + .mockResolvedValueOnce({ + stdout: "added 1 package\n", + stderr: "", + code: 0, + error: null, + }) + .mockResolvedValueOnce({ + stdout: `acpx ${ACPX_PINNED_VERSION}\n`, + stderr: "", + code: 0, + error: null, + }); + } + + function expectEnsureInstallCalls(stripProviderAuthEnvVars?: boolean) { + expect(spawnAndCollectMock.mock.calls[0]?.[0]).toMatchObject({ + command: "/plugin/node_modules/.bin/acpx", + args: ["--version"], + cwd: "/plugin", + stripProviderAuthEnvVars, + }); + expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({ + command: "npm", + args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`], + cwd: "/plugin", + stripProviderAuthEnvVars, + }); + expect(spawnAndCollectMock.mock.calls[2]?.[0]).toMatchObject({ + command: "/plugin/node_modules/.bin/acpx", + args: ["--version"], + cwd: "/plugin", + stripProviderAuthEnvVars, + }); + } + it("accepts the pinned acpx version", async () => { spawnAndCollectMock.mockResolvedValueOnce({ stdout: `acpx ${ACPX_PINNED_VERSION}\n`, @@ -177,25 +220,7 @@ describe("acpx ensure", () => { }); it("installs and verifies pinned acpx when precheck fails", async () => { - spawnAndCollectMock - .mockResolvedValueOnce({ - stdout: "acpx 0.0.9\n", - stderr: "", - code: 0, - error: null, - }) - .mockResolvedValueOnce({ - stdout: "added 1 package\n", - stderr: "", - code: 0, - error: null, - }) - .mockResolvedValueOnce({ - stdout: `acpx ${ACPX_PINNED_VERSION}\n`, - stderr: "", - code: 0, - error: null, - }); + mockEnsureInstallFlow(); await ensureAcpx({ command: 
"/plugin/node_modules/.bin/acpx", @@ -204,33 +229,11 @@ describe("acpx ensure", () => { }); expect(spawnAndCollectMock).toHaveBeenCalledTimes(3); - expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({ - command: "npm", - args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`], - cwd: "/plugin", - }); + expectEnsureInstallCalls(); }); it("threads stripProviderAuthEnvVars through version probes and install", async () => { - spawnAndCollectMock - .mockResolvedValueOnce({ - stdout: "acpx 0.0.9\n", - stderr: "", - code: 0, - error: null, - }) - .mockResolvedValueOnce({ - stdout: "added 1 package\n", - stderr: "", - code: 0, - error: null, - }) - .mockResolvedValueOnce({ - stdout: `acpx ${ACPX_PINNED_VERSION}\n`, - stderr: "", - code: 0, - error: null, - }); + mockEnsureInstallFlow(); await ensureAcpx({ command: "/plugin/node_modules/.bin/acpx", @@ -239,24 +242,7 @@ describe("acpx ensure", () => { stripProviderAuthEnvVars: true, }); - expect(spawnAndCollectMock.mock.calls[0]?.[0]).toMatchObject({ - command: "/plugin/node_modules/.bin/acpx", - args: ["--version"], - cwd: "/plugin", - stripProviderAuthEnvVars: true, - }); - expect(spawnAndCollectMock.mock.calls[1]?.[0]).toMatchObject({ - command: "npm", - args: ["install", "--omit=dev", "--no-save", `acpx@${ACPX_PINNED_VERSION}`], - cwd: "/plugin", - stripProviderAuthEnvVars: true, - }); - expect(spawnAndCollectMock.mock.calls[2]?.[0]).toMatchObject({ - command: "/plugin/node_modules/.bin/acpx", - args: ["--version"], - cwd: "/plugin", - stripProviderAuthEnvVars: true, - }); + expectEnsureInstallCalls(true); }); it("fails with actionable error when npm install fails", async () => { diff --git a/extensions/feishu/src/monitor.webhook-e2e.test.ts b/extensions/feishu/src/monitor.webhook-e2e.test.ts index 2e73f973408..451ebe0d2bf 100644 --- a/extensions/feishu/src/monitor.webhook-e2e.test.ts +++ b/extensions/feishu/src/monitor.webhook-e2e.test.ts @@ -1,9 +1,7 @@ import crypto from "node:crypto"; 
-import { createServer } from "node:http"; -import type { AddressInfo } from "node:net"; -import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu"; import { afterEach, describe, expect, it, vi } from "vitest"; import { createFeishuRuntimeMockModule } from "./monitor.test-mocks.js"; +import { withRunningWebhookMonitor } from "./monitor.webhook.test-helpers.js"; const probeFeishuMock = vi.hoisted(() => vi.fn()); @@ -23,61 +21,6 @@ vi.mock("./runtime.js", () => createFeishuRuntimeMockModule()); import { monitorFeishuProvider, stopFeishuMonitor } from "./monitor.js"; -async function getFreePort(): Promise { - const server = createServer(); - await new Promise((resolve) => server.listen(0, "127.0.0.1", () => resolve())); - const address = server.address() as AddressInfo | null; - if (!address) { - throw new Error("missing server address"); - } - await new Promise((resolve) => server.close(() => resolve())); - return address.port; -} - -async function waitUntilServerReady(url: string): Promise { - for (let i = 0; i < 50; i += 1) { - try { - const response = await fetch(url, { method: "GET" }); - if (response.status >= 200 && response.status < 500) { - return; - } - } catch { - // retry - } - await new Promise((resolve) => setTimeout(resolve, 20)); - } - throw new Error(`server did not start: ${url}`); -} - -function buildConfig(params: { - accountId: string; - path: string; - port: number; - verificationToken?: string; - encryptKey?: string; -}): ClawdbotConfig { - return { - channels: { - feishu: { - enabled: true, - accounts: { - [params.accountId]: { - enabled: true, - appId: "cli_test", - appSecret: "secret_test", // pragma: allowlist secret - connectionMode: "webhook", - webhookHost: "127.0.0.1", - webhookPort: params.port, - webhookPath: params.path, - encryptKey: params.encryptKey, - verificationToken: params.verificationToken, - }, - }, - }, - }, - } as ClawdbotConfig; -} - function signFeishuPayload(params: { encryptKey: string; payload: Record; @@ 
-107,43 +50,6 @@ function encryptFeishuPayload(encryptKey: string, payload: Record Promise, -) { - const port = await getFreePort(); - const cfg = buildConfig({ - accountId: params.accountId, - path: params.path, - port, - encryptKey: params.encryptKey, - verificationToken: params.verificationToken, - }); - - const abortController = new AbortController(); - const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() }; - const monitorPromise = monitorFeishuProvider({ - config: cfg, - runtime, - abortSignal: abortController.signal, - }); - - const url = `http://127.0.0.1:${port}${params.path}`; - await waitUntilServerReady(url); - - try { - await run(url); - } finally { - abortController.abort(); - await monitorPromise; - } -} - afterEach(() => { stopFeishuMonitor(); }); @@ -159,6 +65,7 @@ describe("Feishu webhook signed-request e2e", () => { verificationToken: "verify_token", encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { const payload = { type: "url_verification", challenge: "challenge-token" }; const response = await fetch(url, { @@ -185,6 +92,7 @@ describe("Feishu webhook signed-request e2e", () => { verificationToken: "verify_token", encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { const response = await fetch(url, { method: "POST", @@ -208,6 +116,7 @@ describe("Feishu webhook signed-request e2e", () => { verificationToken: "verify_token", encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { const response = await fetch(url, { method: "POST", @@ -231,6 +140,7 @@ describe("Feishu webhook signed-request e2e", () => { verificationToken: "verify_token", encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { const payload = { type: "url_verification", challenge: "challenge-token" }; const response = await fetch(url, { @@ -255,6 +165,7 @@ describe("Feishu webhook signed-request e2e", () => { verificationToken: "verify_token", encryptKey: "encrypt_key", }, + monitorFeishuProvider, 
async (url) => { const payload = { schema: "2.0", @@ -283,6 +194,7 @@ describe("Feishu webhook signed-request e2e", () => { verificationToken: "verify_token", encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { const payload = { encrypt: encryptFeishuPayload("encrypt_key", { diff --git a/extensions/feishu/src/monitor.webhook-security.test.ts b/extensions/feishu/src/monitor.webhook-security.test.ts index e9bfa8bf008..957d874cc3a 100644 --- a/extensions/feishu/src/monitor.webhook-security.test.ts +++ b/extensions/feishu/src/monitor.webhook-security.test.ts @@ -1,11 +1,13 @@ -import { createServer } from "node:http"; -import type { AddressInfo } from "node:net"; -import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu"; import { afterEach, describe, expect, it, vi } from "vitest"; import { createFeishuClientMockModule, createFeishuRuntimeMockModule, } from "./monitor.test-mocks.js"; +import { + buildWebhookConfig, + getFreePort, + withRunningWebhookMonitor, +} from "./monitor.webhook.test-helpers.js"; const probeFeishuMock = vi.hoisted(() => vi.fn()); @@ -33,98 +35,6 @@ import { stopFeishuMonitor, } from "./monitor.js"; -async function getFreePort(): Promise { - const server = createServer(); - await new Promise((resolve) => server.listen(0, "127.0.0.1", () => resolve())); - const address = server.address() as AddressInfo | null; - if (!address) { - throw new Error("missing server address"); - } - await new Promise((resolve) => server.close(() => resolve())); - return address.port; -} - -async function waitUntilServerReady(url: string): Promise { - for (let i = 0; i < 50; i += 1) { - try { - const response = await fetch(url, { method: "GET" }); - if (response.status >= 200 && response.status < 500) { - return; - } - } catch { - // retry - } - await new Promise((resolve) => setTimeout(resolve, 20)); - } - throw new Error(`server did not start: ${url}`); -} - -function buildConfig(params: { - accountId: string; - path: string; - port: number; 
- verificationToken?: string; - encryptKey?: string; -}): ClawdbotConfig { - return { - channels: { - feishu: { - enabled: true, - accounts: { - [params.accountId]: { - enabled: true, - appId: "cli_test", - appSecret: "secret_test", // pragma: allowlist secret - connectionMode: "webhook", - webhookHost: "127.0.0.1", - webhookPort: params.port, - webhookPath: params.path, - encryptKey: params.encryptKey, - verificationToken: params.verificationToken, - }, - }, - }, - }, - } as ClawdbotConfig; -} - -async function withRunningWebhookMonitor( - params: { - accountId: string; - path: string; - verificationToken: string; - encryptKey: string; - }, - run: (url: string) => Promise, -) { - const port = await getFreePort(); - const cfg = buildConfig({ - accountId: params.accountId, - path: params.path, - port, - encryptKey: params.encryptKey, - verificationToken: params.verificationToken, - }); - - const abortController = new AbortController(); - const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() }; - const monitorPromise = monitorFeishuProvider({ - config: cfg, - runtime, - abortSignal: abortController.signal, - }); - - const url = `http://127.0.0.1:${port}${params.path}`; - await waitUntilServerReady(url); - - try { - await run(url); - } finally { - abortController.abort(); - await monitorPromise; - } -} - afterEach(() => { clearFeishuWebhookRateLimitStateForTest(); stopFeishuMonitor(); @@ -134,7 +44,7 @@ describe("Feishu webhook security hardening", () => { it("rejects webhook mode without verificationToken", async () => { probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" }); - const cfg = buildConfig({ + const cfg = buildWebhookConfig({ accountId: "missing-token", path: "/hook-missing-token", port: await getFreePort(), @@ -148,7 +58,7 @@ describe("Feishu webhook security hardening", () => { it("rejects webhook mode without encryptKey", async () => { probeFeishuMock.mockResolvedValue({ ok: true, botOpenId: "bot_open_id" }); - const cfg = 
buildConfig({ + const cfg = buildWebhookConfig({ accountId: "missing-encrypt-key", path: "/hook-missing-encrypt", port: await getFreePort(), @@ -167,6 +77,7 @@ describe("Feishu webhook security hardening", () => { verificationToken: "verify_token", encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { const response = await fetch(url, { method: "POST", @@ -189,6 +100,7 @@ describe("Feishu webhook security hardening", () => { verificationToken: "verify_token", encryptKey: "encrypt_key", }, + monitorFeishuProvider, async (url) => { let saw429 = false; for (let i = 0; i < 130; i += 1) { diff --git a/extensions/feishu/src/monitor.webhook.test-helpers.ts b/extensions/feishu/src/monitor.webhook.test-helpers.ts new file mode 100644 index 00000000000..b9de2150bd4 --- /dev/null +++ b/extensions/feishu/src/monitor.webhook.test-helpers.ts @@ -0,0 +1,98 @@ +import { createServer } from "node:http"; +import type { AddressInfo } from "node:net"; +import type { ClawdbotConfig } from "openclaw/plugin-sdk/feishu"; +import { vi } from "vitest"; +import type { monitorFeishuProvider } from "./monitor.js"; + +export async function getFreePort(): Promise { + const server = createServer(); + await new Promise((resolve) => server.listen(0, "127.0.0.1", () => resolve())); + const address = server.address() as AddressInfo | null; + if (!address) { + throw new Error("missing server address"); + } + await new Promise((resolve) => server.close(() => resolve())); + return address.port; +} + +async function waitUntilServerReady(url: string): Promise { + for (let i = 0; i < 50; i += 1) { + try { + const response = await fetch(url, { method: "GET" }); + if (response.status >= 200 && response.status < 500) { + return; + } + } catch { + // retry + } + await new Promise((resolve) => setTimeout(resolve, 20)); + } + throw new Error(`server did not start: ${url}`); +} + +export function buildWebhookConfig(params: { + accountId: string; + path: string; + port: number; + 
verificationToken?: string; + encryptKey?: string; +}): ClawdbotConfig { + return { + channels: { + feishu: { + enabled: true, + accounts: { + [params.accountId]: { + enabled: true, + appId: "cli_test", + appSecret: "secret_test", // pragma: allowlist secret + connectionMode: "webhook", + webhookHost: "127.0.0.1", + webhookPort: params.port, + webhookPath: params.path, + encryptKey: params.encryptKey, + verificationToken: params.verificationToken, + }, + }, + }, + }, + } as ClawdbotConfig; +} + +export async function withRunningWebhookMonitor( + params: { + accountId: string; + path: string; + verificationToken: string; + encryptKey: string; + }, + monitor: typeof monitorFeishuProvider, + run: (url: string) => Promise, +) { + const port = await getFreePort(); + const cfg = buildWebhookConfig({ + accountId: params.accountId, + path: params.path, + port, + encryptKey: params.encryptKey, + verificationToken: params.verificationToken, + }); + + const abortController = new AbortController(); + const runtime = { log: vi.fn(), error: vi.fn(), exit: vi.fn() }; + const monitorPromise = monitor({ + config: cfg, + runtime, + abortSignal: abortController.signal, + }); + + const url = `http://127.0.0.1:${port}${params.path}`; + await waitUntilServerReady(url); + + try { + await run(url); + } finally { + abortController.abort(); + await monitorPromise; + } +} diff --git a/extensions/googlechat/package.json b/extensions/googlechat/package.json index a942ed3d673..8b6f42e371c 100644 --- a/extensions/googlechat/package.json +++ b/extensions/googlechat/package.json @@ -7,6 +7,9 @@ "dependencies": { "google-auth-library": "^10.6.1" }, + "devDependencies": { + "openclaw": "workspace:*" + }, "peerDependencies": { "openclaw": ">=2026.3.11" }, diff --git a/extensions/mattermost/src/mattermost/client.test.ts b/extensions/mattermost/src/mattermost/client.test.ts index 3d325dda527..7d49ad3c573 100644 --- a/extensions/mattermost/src/mattermost/client.test.ts +++ 
b/extensions/mattermost/src/mattermost/client.test.ts @@ -27,6 +27,28 @@ function createMockFetch(response?: { status?: number; body?: unknown; contentTy return { mockFetch: mockFetch as unknown as typeof fetch, calls }; } +function createTestClient(response?: { status?: number; body?: unknown; contentType?: string }) { + const { mockFetch, calls } = createMockFetch(response); + const client = createMattermostClient({ + baseUrl: "http://localhost:8065", + botToken: "tok", + fetchImpl: mockFetch, + }); + return { client, calls }; +} + +async function updatePostAndCapture( + update: Parameters[2], + response?: { status?: number; body?: unknown; contentType?: string }, +) { + const { client, calls } = createTestClient(response ?? { body: { id: "post1" } }); + await updateMattermostPost(client, "post1", update); + return { + calls, + body: JSON.parse(calls[0].init?.body as string) as Record, + }; +} + // ── normalizeMattermostBaseUrl ──────────────────────────────────────── describe("normalizeMattermostBaseUrl", () => { @@ -229,68 +251,38 @@ describe("createMattermostPost", () => { describe("updateMattermostPost", () => { it("sends PUT to /posts/{id}", async () => { - const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } }); - const client = createMattermostClient({ - baseUrl: "http://localhost:8065", - botToken: "tok", - fetchImpl: mockFetch, - }); - - await updateMattermostPost(client, "post1", { message: "Updated" }); + const { calls } = await updatePostAndCapture({ message: "Updated" }); expect(calls[0].url).toContain("/posts/post1"); expect(calls[0].init?.method).toBe("PUT"); }); it("includes post id in the body", async () => { - const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } }); - const client = createMattermostClient({ - baseUrl: "http://localhost:8065", - botToken: "tok", - fetchImpl: mockFetch, - }); - - await updateMattermostPost(client, "post1", { message: "Updated" }); - - const body = JSON.parse(calls[0].init?.body as 
string); + const { body } = await updatePostAndCapture({ message: "Updated" }); expect(body.id).toBe("post1"); expect(body.message).toBe("Updated"); }); it("includes props for button completion updates", async () => { - const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } }); - const client = createMattermostClient({ - baseUrl: "http://localhost:8065", - botToken: "tok", - fetchImpl: mockFetch, - }); - - await updateMattermostPost(client, "post1", { + const { body } = await updatePostAndCapture({ message: "Original message", props: { attachments: [{ text: "✓ **do_now** selected by @tony" }], }, }); - - const body = JSON.parse(calls[0].init?.body as string); expect(body.message).toBe("Original message"); - expect(body.props.attachments[0].text).toContain("✓"); - expect(body.props.attachments[0].text).toContain("do_now"); + expect(body.props).toMatchObject({ + attachments: [{ text: expect.stringContaining("✓") }], + }); + expect(body.props).toMatchObject({ + attachments: [{ text: expect.stringContaining("do_now") }], + }); }); it("omits message when not provided", async () => { - const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } }); - const client = createMattermostClient({ - baseUrl: "http://localhost:8065", - botToken: "tok", - fetchImpl: mockFetch, - }); - - await updateMattermostPost(client, "post1", { + const { body } = await updatePostAndCapture({ props: { attachments: [] }, }); - - const body = JSON.parse(calls[0].init?.body as string); expect(body.id).toBe("post1"); expect(body.message).toBeUndefined(); expect(body.props).toEqual({ attachments: [] }); diff --git a/extensions/mattermost/src/mattermost/interactions.test.ts b/extensions/mattermost/src/mattermost/interactions.test.ts index 3f52982cc52..62c7bdb757f 100644 --- a/extensions/mattermost/src/mattermost/interactions.test.ts +++ b/extensions/mattermost/src/mattermost/interactions.test.ts @@ -496,6 +496,104 @@ describe("createMattermostInteractionHandler", () => { return 
res as unknown as ServerResponse & { headers: Record; body: string }; } + function createActionContext(actionId = "approve", channelId = "chan-1") { + const context = { action_id: actionId, __openclaw_channel_id: channelId }; + return { context, token: generateInteractionToken(context, "acct") }; + } + + function createInteractionBody(params: { + context: Record; + token: string; + channelId?: string; + postId?: string; + userId?: string; + userName?: string; + }) { + return { + user_id: params.userId ?? "user-1", + ...(params.userName ? { user_name: params.userName } : {}), + channel_id: params.channelId ?? "chan-1", + post_id: params.postId ?? "post-1", + context: { ...params.context, _token: params.token }, + }; + } + + async function runHandler( + handler: ReturnType, + params: { + body: unknown; + remoteAddress?: string; + headers?: Record; + }, + ) { + const req = createReq({ + remoteAddress: params.remoteAddress, + headers: params.headers, + body: params.body, + }); + const res = createRes(); + await handler(req, res); + return res; + } + + function expectForbiddenResponse( + res: ServerResponse & { body: string }, + expectedMessage: string, + ) { + expect(res.statusCode).toBe(403); + expect(res.body).toContain(expectedMessage); + } + + function expectSuccessfulApprovalUpdate( + res: ServerResponse & { body: string }, + requestLog?: Array<{ path: string; method?: string }>, + ) { + expect(res.statusCode).toBe(200); + expect(res.body).toBe("{}"); + if (requestLog) { + expect(requestLog).toEqual([ + { path: "/posts/post-1", method: undefined }, + { path: "/posts/post-1", method: "PUT" }, + ]); + } + } + + function createActionPost(params?: { + actionId?: string; + actionName?: string; + channelId?: string; + rootId?: string; + }): MattermostPost { + return { + id: "post-1", + channel_id: params?.channelId ?? "chan-1", + ...(params?.rootId ? 
{ root_id: params.rootId } : {}), + message: "Choose", + props: { + attachments: [ + { + actions: [ + { + id: params?.actionId ?? "approve", + name: params?.actionName ?? "Approve", + }, + ], + }, + ], + }, + }; + } + + function createUnusedInteractionHandler() { + return createMattermostInteractionHandler({ + client: { + request: async () => ({ message: "unused" }), + } as unknown as MattermostClient, + botUserId: "bot", + accountId: "acct", + }); + } + async function runApproveInteraction(params?: { actionName?: string; allowedSourceIps?: string[]; @@ -503,8 +601,7 @@ describe("createMattermostInteractionHandler", () => { remoteAddress?: string; headers?: Record; }) { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const requestLog: Array<{ path: string; method?: string }> = []; const handler = createMattermostInteractionHandler({ client: { @@ -513,15 +610,7 @@ describe("createMattermostInteractionHandler", () => { if (init?.method === "PUT") { return { id: "post-1" }; } - return { - channel_id: "chan-1", - message: "Choose", - props: { - attachments: [ - { actions: [{ id: "approve", name: params?.actionName ?? 
"Approve" }] }, - ], - }, - }; + return createActionPost({ actionName: params?.actionName }); }, } as unknown as MattermostClient, botUserId: "bot", @@ -530,50 +619,27 @@ describe("createMattermostInteractionHandler", () => { trustedProxies: params?.trustedProxies, }); - const req = createReq({ + const res = await runHandler(handler, { remoteAddress: params?.remoteAddress, headers: params?.headers, - body: { - user_id: "user-1", - user_name: "alice", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + body: createInteractionBody({ context, token, userName: "alice" }), }); - const res = createRes(); - await handler(req, res); return { res, requestLog }; } async function runInvalidActionRequest(actionId: string) { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const handler = createMattermostInteractionHandler({ client: { - request: async () => ({ - channel_id: "chan-1", - message: "Choose", - props: { - attachments: [{ actions: [{ id: actionId, name: actionId }] }], - }, - }), + request: async () => createActionPost({ actionId, actionName: actionId }), } as unknown as MattermostClient, botUserId: "bot", accountId: "acct", }); - const req = createReq({ - body: { - user_id: "user-1", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + return await runHandler(handler, { + body: createInteractionBody({ context, token }), }); - const res = createRes(); - await handler(req, res); - return res; } it("accepts callback requests from an allowlisted source IP", async () => { @@ -582,12 +648,7 @@ describe("createMattermostInteractionHandler", () => { remoteAddress: "198.51.100.8", }); - expect(res.statusCode).toBe(200); - expect(res.body).toBe("{}"); - expect(requestLog).toEqual([ - { path: "/posts/post-1", method: undefined }, - { path: "/posts/post-1", 
method: "PUT" }, - ]); + expectSuccessfulApprovalUpdate(res, requestLog); }); it("accepts forwarded Mattermost source IPs from a trusted proxy", async () => { @@ -603,8 +664,7 @@ describe("createMattermostInteractionHandler", () => { }); it("rejects callback requests from non-allowlisted source IPs", async () => { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const handler = createMattermostInteractionHandler({ client: { request: async () => { @@ -616,33 +676,17 @@ describe("createMattermostInteractionHandler", () => { allowedSourceIps: ["127.0.0.1"], }); - const req = createReq({ + const res = await runHandler(handler, { remoteAddress: "198.51.100.8", - body: { - user_id: "user-1", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + body: createInteractionBody({ context, token }), }); - const res = createRes(); - - await handler(req, res); - - expect(res.statusCode).toBe(403); - expect(res.body).toContain("Forbidden origin"); + expectForbiddenResponse(res, "Forbidden origin"); }); it("rejects requests with an invalid interaction token", async () => { - const handler = createMattermostInteractionHandler({ - client: { - request: async () => ({ message: "unused" }), - } as unknown as MattermostClient, - botUserId: "bot", - accountId: "acct", - }); + const handler = createUnusedInteractionHandler(); - const req = createReq({ + const res = await runHandler(handler, { body: { user_id: "user-1", channel_id: "chan-1", @@ -650,72 +694,33 @@ describe("createMattermostInteractionHandler", () => { context: { action_id: "approve", _token: "deadbeef" }, }, }); - const res = createRes(); - - await handler(req, res); - - expect(res.statusCode).toBe(403); - expect(res.body).toContain("Invalid token"); + expectForbiddenResponse(res, "Invalid token"); }); it("rejects requests when the signed channel 
does not match the callback payload", async () => { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); - const handler = createMattermostInteractionHandler({ - client: { - request: async () => ({ message: "unused" }), - } as unknown as MattermostClient, - botUserId: "bot", - accountId: "acct", + const { context, token } = createActionContext(); + const handler = createUnusedInteractionHandler(); + + const res = await runHandler(handler, { + body: createInteractionBody({ context, token, channelId: "chan-2" }), }); - - const req = createReq({ - body: { - user_id: "user-1", - channel_id: "chan-2", - post_id: "post-1", - context: { ...context, _token: token }, - }, - }); - const res = createRes(); - - await handler(req, res); - - expect(res.statusCode).toBe(403); - expect(res.body).toContain("Channel mismatch"); + expectForbiddenResponse(res, "Channel mismatch"); }); it("rejects requests when the fetched post does not belong to the callback channel", async () => { - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const handler = createMattermostInteractionHandler({ client: { - request: async () => ({ - channel_id: "chan-9", - message: "Choose", - props: { - attachments: [{ actions: [{ id: "approve", name: "Approve" }] }], - }, - }), + request: async () => createActionPost({ channelId: "chan-9" }), } as unknown as MattermostClient, botUserId: "bot", accountId: "acct", }); - const req = createReq({ - body: { - user_id: "user-1", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + const res = await runHandler(handler, { + body: createInteractionBody({ context, token }), }); - const res = createRes(); - - await handler(req, res); - - expect(res.statusCode).toBe(403); - expect(res.body).toContain("Post/channel 
mismatch"); + expectForbiddenResponse(res, "Post/channel mismatch"); }); it("rejects requests when the action is not present on the fetched post", async () => { @@ -730,12 +735,7 @@ describe("createMattermostInteractionHandler", () => { actionName: "approve", }); - expect(res.statusCode).toBe(200); - expect(res.body).toBe("{}"); - expect(requestLog).toEqual([ - { path: "/posts/post-1", method: undefined }, - { path: "/posts/post-1", method: "PUT" }, - ]); + expectSuccessfulApprovalUpdate(res, requestLog); }); it("forwards fetched post threading metadata to session and button callbacks", async () => { @@ -745,19 +745,10 @@ describe("createMattermostInteractionHandler", () => { enqueueSystemEvent, }, } as unknown as Parameters[0]); - const context = { action_id: "approve", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext(); const resolveSessionKey = vi.fn().mockResolvedValue("session:thread:root-9"); const dispatchButtonClick = vi.fn(); - const fetchedPost: MattermostPost = { - id: "post-1", - channel_id: "chan-1", - root_id: "root-9", - message: "Choose", - props: { - attachments: [{ actions: [{ id: "approve", name: "Approve" }] }], - }, - }; + const fetchedPost = createActionPost({ rootId: "root-9" }); const handler = createMattermostInteractionHandler({ client: { request: async (_path: string, init?: { method?: string }) => @@ -769,19 +760,9 @@ describe("createMattermostInteractionHandler", () => { dispatchButtonClick, }); - const req = createReq({ - body: { - user_id: "user-1", - user_name: "alice", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + const res = await runHandler(handler, { + body: createInteractionBody({ context, token, userName: "alice" }), }); - const res = createRes(); - - await handler(req, res); - expect(res.statusCode).toBe(200); expect(resolveSessionKey).toHaveBeenCalledWith({ channelId: "chan-1", @@ 
-803,8 +784,7 @@ describe("createMattermostInteractionHandler", () => { }); it("lets a custom interaction handler short-circuit generic completion updates", async () => { - const context = { action_id: "mdlprov", __openclaw_channel_id: "chan-1" }; - const token = generateInteractionToken(context, "acct"); + const { context, token } = createActionContext("mdlprov"); const requestLog: Array<{ path: string; method?: string }> = []; const handleInteraction = vi.fn().mockResolvedValue({ ephemeral_text: "Only the original requester can use this picker.", @@ -814,14 +794,10 @@ describe("createMattermostInteractionHandler", () => { client: { request: async (path: string, init?: { method?: string }) => { requestLog.push({ path, method: init?.method }); - return { - id: "post-1", - channel_id: "chan-1", - message: "Choose", - props: { - attachments: [{ actions: [{ id: "mdlprov", name: "Browse providers" }] }], - }, - }; + return createActionPost({ + actionId: "mdlprov", + actionName: "Browse providers", + }); }, } as unknown as MattermostClient, botUserId: "bot", @@ -830,18 +806,14 @@ describe("createMattermostInteractionHandler", () => { dispatchButtonClick, }); - const req = createReq({ - body: { - user_id: "user-2", - user_name: "alice", - channel_id: "chan-1", - post_id: "post-1", - context: { ...context, _token: token }, - }, + const res = await runHandler(handler, { + body: createInteractionBody({ + context, + token, + userId: "user-2", + userName: "alice", + }), }); - const res = createRes(); - - await handler(req, res); expect(res.statusCode).toBe(200); expect(res.body).toBe( diff --git a/extensions/mattermost/src/mattermost/monitor.authz.test.ts b/extensions/mattermost/src/mattermost/monitor.authz.test.ts index 92fd0a3c3f4..68919da7908 100644 --- a/extensions/mattermost/src/mattermost/monitor.authz.test.ts +++ b/extensions/mattermost/src/mattermost/monitor.authz.test.ts @@ -16,6 +16,35 @@ const accountFixture: ResolvedMattermostAccount = { config: {}, }; +function 
authorizeGroupCommand(senderId: string) { + return authorizeMattermostCommandInvocation({ + account: { + ...accountFixture, + config: { + groupPolicy: "allowlist", + allowFrom: ["trusted-user"], + }, + }, + cfg: { + commands: { + useAccessGroups: true, + }, + }, + senderId, + senderName: senderId, + channelId: "chan-1", + channelInfo: { + id: "chan-1", + type: "O", + name: "general", + display_name: "General", + }, + storeAllowFrom: [], + allowTextCommands: true, + hasControlCommand: true, + }); +} + describe("mattermost monitor authz", () => { it("keeps DM allowlist merged with pairing-store entries", () => { const resolved = resolveMattermostEffectiveAllowFromLists({ @@ -72,32 +101,7 @@ describe("mattermost monitor authz", () => { }); it("denies group control commands when the sender is outside the allowlist", () => { - const decision = authorizeMattermostCommandInvocation({ - account: { - ...accountFixture, - config: { - groupPolicy: "allowlist", - allowFrom: ["trusted-user"], - }, - }, - cfg: { - commands: { - useAccessGroups: true, - }, - }, - senderId: "attacker", - senderName: "attacker", - channelId: "chan-1", - channelInfo: { - id: "chan-1", - type: "O", - name: "general", - display_name: "General", - }, - storeAllowFrom: [], - allowTextCommands: true, - hasControlCommand: true, - }); + const decision = authorizeGroupCommand("attacker"); expect(decision).toMatchObject({ ok: false, @@ -107,32 +111,7 @@ describe("mattermost monitor authz", () => { }); it("authorizes group control commands for allowlisted senders", () => { - const decision = authorizeMattermostCommandInvocation({ - account: { - ...accountFixture, - config: { - groupPolicy: "allowlist", - allowFrom: ["trusted-user"], - }, - }, - cfg: { - commands: { - useAccessGroups: true, - }, - }, - senderId: "trusted-user", - senderName: "trusted-user", - channelId: "chan-1", - channelInfo: { - id: "chan-1", - type: "O", - name: "general", - display_name: "General", - }, - storeAllowFrom: [], - 
allowTextCommands: true, - hasControlCommand: true, - }); + const decision = authorizeGroupCommand("trusted-user"); expect(decision).toMatchObject({ ok: true, diff --git a/extensions/mattermost/src/mattermost/reactions.test.ts b/extensions/mattermost/src/mattermost/reactions.test.ts index 0b07c1b497b..2659f2e1a99 100644 --- a/extensions/mattermost/src/mattermost/reactions.test.ts +++ b/extensions/mattermost/src/mattermost/reactions.test.ts @@ -14,6 +14,28 @@ describe("mattermost reactions", () => { resetMattermostReactionBotUserCacheForTests(); }); + async function addReactionWithFetch( + fetchMock: ReturnType, + ) { + return addMattermostReaction({ + cfg: createMattermostTestConfig(), + postId: "POST1", + emojiName: "thumbsup", + fetchImpl: fetchMock as unknown as typeof fetch, + }); + } + + async function removeReactionWithFetch( + fetchMock: ReturnType, + ) { + return removeMattermostReaction({ + cfg: createMattermostTestConfig(), + postId: "POST1", + emojiName: "thumbsup", + fetchImpl: fetchMock as unknown as typeof fetch, + }); + } + it("adds reactions by calling /users/me then POST /reactions", async () => { const fetchMock = createMattermostReactionFetchMock({ mode: "add", @@ -21,12 +43,7 @@ describe("mattermost reactions", () => { emojiName: "thumbsup", }); - const result = await addMattermostReaction({ - cfg: createMattermostTestConfig(), - postId: "POST1", - emojiName: "thumbsup", - fetchImpl: fetchMock as unknown as typeof fetch, - }); + const result = await addReactionWithFetch(fetchMock); expect(result).toEqual({ ok: true }); expect(fetchMock).toHaveBeenCalled(); @@ -41,12 +58,7 @@ describe("mattermost reactions", () => { body: { id: "err", message: "boom" }, }); - const result = await addMattermostReaction({ - cfg: createMattermostTestConfig(), - postId: "POST1", - emojiName: "thumbsup", - fetchImpl: fetchMock as unknown as typeof fetch, - }); + const result = await addReactionWithFetch(fetchMock); expect(result.ok).toBe(false); if (!result.ok) { @@ 
-61,12 +73,7 @@ describe("mattermost reactions", () => { emojiName: "thumbsup", }); - const result = await removeMattermostReaction({ - cfg: createMattermostTestConfig(), - postId: "POST1", - emojiName: "thumbsup", - fetchImpl: fetchMock as unknown as typeof fetch, - }); + const result = await removeReactionWithFetch(fetchMock); expect(result).toEqual({ ok: true }); expect(fetchMock).toHaveBeenCalled(); diff --git a/extensions/mattermost/src/mattermost/slash-commands.test.ts b/extensions/mattermost/src/mattermost/slash-commands.test.ts index 4beaea98ca5..d53c8f99203 100644 --- a/extensions/mattermost/src/mattermost/slash-commands.test.ts +++ b/extensions/mattermost/src/mattermost/slash-commands.test.ts @@ -10,6 +10,25 @@ import { } from "./slash-commands.js"; describe("slash-commands", () => { + async function registerSingleStatusCommand( + request: (path: string, init?: { method?: string }) => Promise, + ) { + const client = { request } as unknown as MattermostClient; + return registerSlashCommands({ + client, + teamId: "team-1", + creatorUserId: "bot-user", + callbackUrl: "http://gateway/callback", + commands: [ + { + trigger: "oc_status", + description: "status", + autoComplete: true, + }, + ], + }); + } + it("parses application/x-www-form-urlencoded payloads", () => { const payload = parseSlashCommandPayload( "token=t1&team_id=team&channel_id=ch1&user_id=u1&command=%2Foc_status&text=now", @@ -101,21 +120,7 @@ describe("slash-commands", () => { } throw new Error(`unexpected request path: ${path}`); }); - const client = { request } as unknown as MattermostClient; - - const result = await registerSlashCommands({ - client, - teamId: "team-1", - creatorUserId: "bot-user", - callbackUrl: "http://gateway/callback", - commands: [ - { - trigger: "oc_status", - description: "status", - autoComplete: true, - }, - ], - }); + const result = await registerSingleStatusCommand(request); expect(result).toHaveLength(1); expect(result[0]?.managed).toBe(false); @@ -144,21 +149,7 
@@ describe("slash-commands", () => { } throw new Error(`unexpected request path: ${path}`); }); - const client = { request } as unknown as MattermostClient; - - const result = await registerSlashCommands({ - client, - teamId: "team-1", - creatorUserId: "bot-user", - callbackUrl: "http://gateway/callback", - commands: [ - { - trigger: "oc_status", - description: "status", - autoComplete: true, - }, - ], - }); + const result = await registerSingleStatusCommand(request); expect(result).toHaveLength(0); expect(request).toHaveBeenCalledTimes(1); diff --git a/extensions/mattermost/src/mattermost/slash-http.test.ts b/extensions/mattermost/src/mattermost/slash-http.test.ts index 92a6babe35c..a89bfc4e33a 100644 --- a/extensions/mattermost/src/mattermost/slash-http.test.ts +++ b/extensions/mattermost/src/mattermost/slash-http.test.ts @@ -58,6 +58,23 @@ const accountFixture: ResolvedMattermostAccount = { config: {}, }; +async function runSlashRequest(params: { + commandTokens: Set; + body: string; + method?: string; +}) { + const handler = createSlashCommandHttpHandler({ + account: accountFixture, + cfg: {} as OpenClawConfig, + runtime: {} as RuntimeEnv, + commandTokens: params.commandTokens, + }); + const req = createRequest({ method: params.method, body: params.body }); + const response = createResponse(); + await handler(req, response.res); + return response; +} + describe("slash-http", () => { it("rejects non-POST methods", async () => { const handler = createSlashCommandHttpHandler({ @@ -93,36 +110,20 @@ describe("slash-http", () => { }); it("fails closed when no command tokens are registered", async () => { - const handler = createSlashCommandHttpHandler({ - account: accountFixture, - cfg: {} as OpenClawConfig, - runtime: {} as RuntimeEnv, + const response = await runSlashRequest({ commandTokens: new Set(), - }); - const req = createRequest({ body: "token=tok1&team_id=t1&channel_id=c1&user_id=u1&command=%2Foc_status&text=", }); - const response = createResponse(); - - 
await handler(req, response.res); expect(response.res.statusCode).toBe(401); expect(response.getBody()).toContain("Unauthorized: invalid command token."); }); it("rejects unknown command tokens", async () => { - const handler = createSlashCommandHttpHandler({ - account: accountFixture, - cfg: {} as OpenClawConfig, - runtime: {} as RuntimeEnv, + const response = await runSlashRequest({ commandTokens: new Set(["known-token"]), - }); - const req = createRequest({ body: "token=unknown&team_id=t1&channel_id=c1&user_id=u1&command=%2Foc_status&text=", }); - const response = createResponse(); - - await handler(req, response.res); expect(response.res.statusCode).toBe(401); expect(response.getBody()).toContain("Unauthorized: invalid command token."); diff --git a/extensions/memory-core/package.json b/extensions/memory-core/package.json index 9f0bc40571d..969bff3e07c 100644 --- a/extensions/memory-core/package.json +++ b/extensions/memory-core/package.json @@ -4,6 +4,9 @@ "private": true, "description": "OpenClaw core memory search plugin", "type": "module", + "devDependencies": { + "openclaw": "workspace:*" + }, "peerDependencies": { "openclaw": ">=2026.3.11" }, diff --git a/extensions/open-prose/skills/prose/alts/arabian-nights.md b/extensions/open-prose/skills/prose/alts/arabian-nights.md index cc0d146664e..c637c883bb6 100644 --- a/extensions/open-prose/skills/prose/alts/arabian-nights.md +++ b/extensions/open-prose/skills/prose/alts/arabian-nights.md @@ -78,146 +78,17 @@ An alternative register for OpenProse that draws from One Thousand and One Night | `prompt` | `command` | What is commanded of the djinn | | `model` | `spirit` | Which spirit answers | -### Unchanged +### Shared appendix -These keywords already work or are too functional to replace sensibly: +Use [shared-appendix.md](./shared-appendix.md) for unchanged keywords and the common comparison pattern. 
-- `**...**` discretion markers — already work -- `until`, `while` — already work -- `map`, `filter`, `reduce`, `pmap` — pipeline operators -- `max` — constraint modifier -- `as` — aliasing -- Model names: `sonnet`, `opus`, `haiku` — already poetic +Recommended Arabian Nights rewrite targets: ---- - -## Side-by-Side Comparison - -### Simple Program - -```prose -# Functional -use "@alice/research" as research -input topic: "What to investigate" - -agent helper: - model: sonnet - -let findings = session: helper - prompt: "Research {topic}" - -output summary = session "Summarize" - context: findings -``` - -```prose -# Nights -conjure "@alice/research" as research -wish topic: "What to investigate" - -djinn helper: - spirit: sonnet - -name findings = tale: helper - command: "Research {topic}" - -gift summary = tale "Summarize" - scroll: findings -``` - -### Parallel Execution - -```prose -# Functional -parallel: - security = session "Check security" - perf = session "Check performance" - style = session "Check style" - -session "Synthesize review" - context: { security, perf, style } -``` - -```prose -# Nights -bazaar: - security = tale "Check security" - perf = tale "Check performance" - style = tale "Check style" - -tale "Synthesize review" - scroll: { security, perf, style } -``` - -### Loop with Condition - -```prose -# Functional -loop until **the code is bug-free** (max: 5): - session "Find and fix bugs" -``` - -```prose -# Nights -telling until **the code is bug-free** (max: 5): - tale "Find and fix bugs" -``` - -### Error Handling - -```prose -# Functional -try: - session "Risky operation" -catch as err: - session "Handle error" - context: err -finally: - session "Cleanup" -``` - -```prose -# Nights -venture: - tale "Risky operation" -should misfortune strike as err: - tale "Handle error" - scroll: err -and so it was: - tale "Cleanup" -``` - -### Choice Block - -```prose -# Functional -choice **the severity level**: - option "Critical": - session "Escalate 
immediately" - option "Minor": - session "Log for later" -``` - -```prose -# Nights -crossroads **the severity level**: - path "Critical": - tale "Escalate immediately" - path "Minor": - tale "Log for later" -``` - -### Conditionals - -```prose -# Functional -if **has security issues**: - session "Fix security" -elif **has performance issues**: - session "Optimize" -else: - session "Approve" -``` +- `session` sample -> `tale` +- `parallel` sample -> `bazaar` +- `loop` sample -> `telling` +- `try/catch/finally` sample -> `venture` / `should misfortune strike` / `and so it was` +- `choice` sample -> `crossroads` / `path` ```prose # Nights diff --git a/extensions/open-prose/skills/prose/alts/homer.md b/extensions/open-prose/skills/prose/alts/homer.md index bc27905cf78..716f2052e34 100644 --- a/extensions/open-prose/skills/prose/alts/homer.md +++ b/extensions/open-prose/skills/prose/alts/homer.md @@ -78,146 +78,17 @@ An alternative register for OpenProse that draws from Greek epic poetry—the Il | `prompt` | `charge` | The quest given | | `model` | `muse` | Which muse inspires | -### Unchanged +### Shared appendix -These keywords already work or are too functional to replace sensibly: +Use [shared-appendix.md](./shared-appendix.md) for unchanged keywords and the common comparison pattern. 
-- `**...**` discretion markers — already work -- `until`, `while` — already work -- `map`, `filter`, `reduce`, `pmap` — pipeline operators -- `max` — constraint modifier -- `as` — aliasing -- Model names: `sonnet`, `opus`, `haiku` — already poetic +Recommended Homeric rewrite targets: ---- - -## Side-by-Side Comparison - -### Simple Program - -```prose -# Functional -use "@alice/research" as research -input topic: "What to investigate" - -agent helper: - model: sonnet - -let findings = session: helper - prompt: "Research {topic}" - -output summary = session "Summarize" - context: findings -``` - -```prose -# Homeric -invoke "@alice/research" as research -omen topic: "What to investigate" - -hero helper: - muse: sonnet - -decree findings = trial: helper - charge: "Research {topic}" - -glory summary = trial "Summarize" - tidings: findings -``` - -### Parallel Execution - -```prose -# Functional -parallel: - security = session "Check security" - perf = session "Check performance" - style = session "Check style" - -session "Synthesize review" - context: { security, perf, style } -``` - -```prose -# Homeric -host: - security = trial "Check security" - perf = trial "Check performance" - style = trial "Check style" - -trial "Synthesize review" - tidings: { security, perf, style } -``` - -### Loop with Condition - -```prose -# Functional -loop until **the code is bug-free** (max: 5): - session "Find and fix bugs" -``` - -```prose -# Homeric -ordeal until **the code is bug-free** (max: 5): - trial "Find and fix bugs" -``` - -### Error Handling - -```prose -# Functional -try: - session "Risky operation" -catch as err: - session "Handle error" - context: err -finally: - session "Cleanup" -``` - -```prose -# Homeric -venture: - trial "Risky operation" -should ruin come as err: - trial "Handle error" - tidings: err -in the end: - trial "Cleanup" -``` - -### Choice Block - -```prose -# Functional -choice **the severity level**: - option "Critical": - session "Escalate 
immediately" - option "Minor": - session "Log for later" -``` - -```prose -# Homeric -crossroads **the severity level**: - path "Critical": - trial "Escalate immediately" - path "Minor": - trial "Log for later" -``` - -### Conditionals - -```prose -# Functional -if **has security issues**: - session "Fix security" -elif **has performance issues**: - session "Optimize" -else: - session "Approve" -``` +- `session` sample -> `trial` +- `parallel` sample -> `host` +- `loop` sample -> `ordeal` +- `try/catch/finally` sample -> `venture` / `should ruin come` / `in the end` +- `choice` sample -> `crossroads` / `path` ```prose # Homeric diff --git a/extensions/open-prose/skills/prose/alts/shared-appendix.md b/extensions/open-prose/skills/prose/alts/shared-appendix.md new file mode 100644 index 00000000000..32a4fcbcd17 --- /dev/null +++ b/extensions/open-prose/skills/prose/alts/shared-appendix.md @@ -0,0 +1,35 @@ +--- +role: reference +summary: Shared appendix for experimental OpenProse alternate registers. +status: draft +requires: prose.md +--- + +# OpenProse Alternate Register Appendix + +Use this appendix with experimental register files such as `arabian-nights.md` and `homer.md`. + +## Unchanged keywords + +These keywords already work or are too functional to replace sensibly: + +- `**...**` discretion markers +- `until`, `while` +- `map`, `filter`, `reduce`, `pmap` +- `max` +- `as` +- model names such as `sonnet`, `opus`, and `haiku` + +## Comparison pattern + +Use the translation map in each register file to rewrite the same functional sample programs: + +- simple program +- parallel execution +- loop with condition +- error handling +- choice block +- conditionals + +The goal is consistency, not one canonical wording. +Keep the functional version intact and rewrite only the register-specific aliases. 
diff --git a/extensions/open-prose/skills/prose/state/sqlite.md b/extensions/open-prose/skills/prose/state/sqlite.md index cfec757567c..352a8705cd5 100644 --- a/extensions/open-prose/skills/prose/state/sqlite.md +++ b/extensions/open-prose/skills/prose/state/sqlite.md @@ -87,71 +87,28 @@ The `agents` and `agent_segments` tables for project-scoped agents live in `.pro ## Responsibility Separation -This section defines **who does what**. This is the contract between the VM and subagents. +The VM/subagent contract matches [postgres.md](./postgres.md#responsibility-separation). -### VM Responsibilities +SQLite-specific differences: -The VM (the orchestrating agent running the .prose program) is responsible for: +- the VM creates `state.db` instead of an `openprose` schema +- subagent confirmation messages point at a local database path, for example `.prose/runs//state.db` +- cleanup is typically `VACUUM` or file deletion rather than dropping schema objects -| Responsibility | Description | -| ------------------------- | -------------------------------------------------------------------------------------------------------- | -| **Database creation** | Create `state.db` and initialize core tables at run start | -| **Program registration** | Store the program source and metadata | -| **Execution tracking** | Update position, status, and timing as statements execute | -| **Subagent spawning** | Spawn sessions via Task tool with database path and instructions | -| **Parallel coordination** | Track branch status, implement join strategies | -| **Loop management** | Track iteration counts, evaluate conditions | -| **Error aggregation** | Record failures, manage retry state | -| **Context preservation** | Maintain sufficient narration in the main conversation thread so execution can be understood and resumed | -| **Completion detection** | Mark the run as complete when finished | +Example return values: -**Critical:** The VM must preserve enough context in its own 
conversation to understand execution state without re-reading the entire database. The database is for coordination and persistence, not a replacement for working memory. - -### Subagent Responsibilities - -Subagents (sessions spawned by the VM) are responsible for: - -| Responsibility | Description | -| ----------------------- | ----------------------------------------------------------------- | -| **Writing own outputs** | Insert/update their binding in the `bindings` table | -| **Memory management** | For persistent agents: read and update their memory record | -| **Segment recording** | For persistent agents: append segment history | -| **Attachment handling** | Write large outputs to `attachments/` directory, store path in DB | -| **Atomic writes** | Use transactions when updating multiple related records | - -**Critical:** Subagents write ONLY to `bindings`, `agents`, and `agent_segments` tables. The VM owns the `execution` table entirely. Completion signaling happens through the substrate (Task tool return), not database updates. - -**Critical:** Subagents must write their outputs directly to the database. The VM does not write subagent outputs—it only reads them after the subagent completes. - -**What subagents return to the VM:** A confirmation message with the binding location—not the full content: - -**Root scope:** - -``` +```text Binding written: research Location: .prose/runs/20260116-143052-a7b3c9/state.db (bindings table, name='research', execution_id=NULL) -Summary: AI safety research covering alignment, robustness, and interpretability with 15 citations. ``` -**Inside block invocation:** - -``` +```text Binding written: result Location: .prose/runs/20260116-143052-a7b3c9/state.db (bindings table, name='result', execution_id=43) Execution ID: 43 -Summary: Processed chunk into 3 sub-parts for recursive processing. ``` -The VM tracks locations, not values. This keeps the VM's context lean and enables arbitrarily large intermediate values. 
- -### Shared Concerns - -| Concern | Who Handles | -| ---------------- | ------------------------------------------------------------------ | -| Schema evolution | Either (use `CREATE TABLE IF NOT EXISTS`, `ALTER TABLE` as needed) | -| Custom tables | Either (prefix with `x_` for extensions) | -| Indexing | Either (add indexes for frequently-queried columns) | -| Cleanup | VM (at run end, optionally vacuum) | +The VM still tracks locations, not full values. --- diff --git a/extensions/slack/src/channel.test.ts b/extensions/slack/src/channel.test.ts index ad6860d6f8d..b846d6e3cd7 100644 --- a/extensions/slack/src/channel.test.ts +++ b/extensions/slack/src/channel.test.ts @@ -137,6 +137,46 @@ describe("slackPlugin outbound", () => { }); }); +describe("slackPlugin agentPrompt", () => { + it("tells agents interactive replies are disabled by default", () => { + const hints = slackPlugin.agentPrompt?.messageToolHints?.({ + cfg: { + channels: { + slack: { + botToken: "xoxb-test", + appToken: "xapp-test", + }, + }, + }, + }); + + expect(hints).toEqual([ + "- Slack interactive replies are disabled. 
If needed, ask to set `channels.slack.capabilities.interactiveReplies=true` (or the same under `channels.slack.accounts..capabilities`).", + ]); + }); + + it("shows Slack interactive reply directives when enabled", () => { + const hints = slackPlugin.agentPrompt?.messageToolHints?.({ + cfg: { + channels: { + slack: { + botToken: "xoxb-test", + appToken: "xapp-test", + capabilities: { interactiveReplies: true }, + }, + }, + }, + }); + + expect(hints).toContain( + "- Slack interactive replies: use `[[slack_buttons: Label:value, Other:other]]` to add action buttons that route clicks back as Slack interaction system events.", + ); + expect(hints).toContain( + "- Slack selects: use `[[slack_select: Placeholder | Label:value, Other:other]]` to add a static select menu that routes the chosen value back as a Slack interaction system event.", + ); + }); +}); + describe("slackPlugin config", () => { it("treats HTTP mode accounts with bot token + signing secret as configured", async () => { const cfg: OpenClawConfig = { diff --git a/extensions/slack/src/channel.ts b/extensions/slack/src/channel.ts index 570ef20ffa1..bd2b640c510 100644 --- a/extensions/slack/src/channel.ts +++ b/extensions/slack/src/channel.ts @@ -29,6 +29,7 @@ import { resolveDefaultSlackAccountId, resolveSlackAccount, resolveSlackReplyToMode, + isSlackInteractiveRepliesEnabled, resolveSlackGroupRequireMention, resolveSlackGroupToolPolicy, buildSlackThreadingToolContext, @@ -146,6 +147,17 @@ export const slackPlugin: ChannelPlugin = { media: true, nativeCommands: true, }, + agentPrompt: { + messageToolHints: ({ cfg, accountId }) => + isSlackInteractiveRepliesEnabled({ cfg, accountId }) + ? 
[ + "- Slack interactive replies: use `[[slack_buttons: Label:value, Other:other]]` to add action buttons that route clicks back as Slack interaction system events.", + "- Slack selects: use `[[slack_select: Placeholder | Label:value, Other:other]]` to add a static select menu that routes the chosen value back as a Slack interaction system event.", + ] + : [ + "- Slack interactive replies are disabled. If needed, ask to set `channels.slack.capabilities.interactiveReplies=true` (or the same under `channels.slack.accounts..capabilities`).", + ], + }, streaming: { blockStreamingCoalesceDefaults: { minChars: 1500, idleMs: 1000 }, }, diff --git a/extensions/tlon/src/urbit/upload.test.ts b/extensions/tlon/src/urbit/upload.test.ts index 1a573a6b359..34dd6186d20 100644 --- a/extensions/tlon/src/urbit/upload.test.ts +++ b/extensions/tlon/src/urbit/upload.test.ts @@ -45,6 +45,27 @@ describe("uploadImageFromUrl", () => { }); } + async function setupSuccessfulUpload(params?: { + sourceUrl?: string; + contentType?: string; + uploadedUrl?: string; + }) { + const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks(); + const sourceUrl = params?.sourceUrl ?? "https://example.com/image.png"; + const contentType = params?.contentType ?? 
"image/png"; + const mockBlob = new Blob(["fake-image"], { type: contentType }); + mockSuccessfulFetch({ + mockFetch, + blob: mockBlob, + finalUrl: sourceUrl, + contentType, + }); + if (params?.uploadedUrl) { + mockUploadFile.mockResolvedValue({ url: params.uploadedUrl }); + } + return { mockBlob, mockUploadFile, uploadImageFromUrl }; + } + beforeEach(() => { vi.clearAllMocks(); }); @@ -54,16 +75,9 @@ describe("uploadImageFromUrl", () => { }); it("fetches image and calls uploadFile, returns uploaded URL", async () => { - const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks(); - - const mockBlob = new Blob(["fake-image"], { type: "image/png" }); - mockSuccessfulFetch({ - mockFetch, - blob: mockBlob, - finalUrl: "https://example.com/image.png", - contentType: "image/png", + const { mockBlob, mockUploadFile, uploadImageFromUrl } = await setupSuccessfulUpload({ + uploadedUrl: "https://memex.tlon.network/uploaded.png", }); - mockUploadFile.mockResolvedValue({ url: "https://memex.tlon.network/uploaded.png" }); const result = await uploadImageFromUrl("https://example.com/image.png"); @@ -95,15 +109,7 @@ describe("uploadImageFromUrl", () => { }); it("returns original URL if upload fails", async () => { - const { mockFetch, mockUploadFile, uploadImageFromUrl } = await loadUploadMocks(); - - const mockBlob = new Blob(["fake-image"], { type: "image/png" }); - mockSuccessfulFetch({ - mockFetch, - blob: mockBlob, - finalUrl: "https://example.com/image.png", - contentType: "image/png", - }); + const { mockUploadFile, uploadImageFromUrl } = await setupSuccessfulUpload(); mockUploadFile.mockRejectedValue(new Error("Upload failed")); const result = await uploadImageFromUrl("https://example.com/image.png"); diff --git a/extensions/voice-call/README.md b/extensions/voice-call/README.md index 9acc9aec987..fe228537ee8 100644 --- a/extensions/voice-call/README.md +++ b/extensions/voice-call/README.md @@ -89,56 +89,18 @@ Notes: - Twilio/Telnyx/Plivo require a 
**publicly reachable** webhook URL. - `mock` is a local dev provider (no network calls). - Telnyx requires `telnyx.publicKey` (or `TELNYX_PUBLIC_KEY`) unless `skipSignatureVerification` is true. -- `tunnel.allowNgrokFreeTierLoopbackBypass: true` allows Twilio webhooks with invalid signatures **only** when `tunnel.provider="ngrok"` and `serve.bind` is loopback (ngrok local agent). Use for local dev only. - -Streaming security defaults: - -- `streaming.preStartTimeoutMs` closes sockets that never send a valid `start` frame. -- `streaming.maxPendingConnections` caps total unauthenticated pre-start sockets. -- `streaming.maxPendingConnectionsPerIp` caps unauthenticated pre-start sockets per source IP. -- `streaming.maxConnections` caps total open media stream sockets (pending + active). +- advanced webhook, streaming, and tunnel notes: `https://docs.openclaw.ai/plugins/voice-call` ## Stale call reaper -Use `staleCallReaperSeconds` to end calls that never receive a terminal webhook -(for example, notify-mode calls that never complete). The default is `0` -(disabled). - -Recommended ranges: - -- **Production:** `120`–`300` seconds for notify-style flows. -- Keep this value **higher than `maxDurationSeconds`** so normal calls can - finish. A good starting point is `maxDurationSeconds + 30–60` seconds. - -Example: - -```json5 -{ - staleCallReaperSeconds: 360, -} -``` +See the plugin docs for recommended ranges and production examples: +`https://docs.openclaw.ai/plugins/voice-call#stale-call-reaper` ## TTS for calls Voice Call uses the core `messages.tts` configuration (OpenAI or ElevenLabs) for -streaming speech on calls. You can override it under the plugin config with the -same shape — overrides deep-merge with `messages.tts`. - -```json5 -{ - tts: { - provider: "openai", - openai: { - voice: "alloy", - }, - }, -} -``` - -Notes: - -- Edge TTS is ignored for voice calls (telephony audio needs PCM; Edge output is unreliable). 
-- Core TTS is used when Twilio media streaming is enabled; otherwise calls fall back to provider native voices. +streaming speech on calls. Override examples and provider caveats live here: +`https://docs.openclaw.ai/plugins/voice-call#tts-for-calls` ## CLI diff --git a/extensions/voice-call/src/manager.restore.test.ts b/extensions/voice-call/src/manager.restore.test.ts index f7f142a16ff..8f76169546f 100644 --- a/extensions/voice-call/src/manager.restore.test.ts +++ b/extensions/voice-call/src/manager.restore.test.ts @@ -9,121 +9,87 @@ import { } from "./manager.test-harness.js"; describe("CallManager verification on restore", () => { - it("skips stale calls reported terminal by provider", async () => { + async function initializeManager(params?: { + callOverrides?: Parameters[0]; + providerResult?: FakeProvider["getCallStatusResult"]; + configureProvider?: (provider: FakeProvider) => void; + configOverrides?: Partial<{ maxDurationSeconds: number }>; + }) { const storePath = createTestStorePath(); - const call = makePersistedCall(); + const call = makePersistedCall(params?.callOverrides); writeCallsToStore(storePath, [call]); const provider = new FakeProvider(); - provider.getCallStatusResult = { status: "completed", isTerminal: true }; + if (params?.providerResult) { + provider.getCallStatusResult = params.providerResult; + } + params?.configureProvider?.(provider); const config = VoiceCallConfigSchema.parse({ enabled: true, provider: "plivo", fromNumber: "+15550000000", + ...params?.configOverrides, }); const manager = new CallManager(config, storePath); await manager.initialize(provider, "https://example.com/voice/webhook"); + return { call, manager }; + } + + it("skips stale calls reported terminal by provider", async () => { + const { manager } = await initializeManager({ + providerResult: { status: "completed", isTerminal: true }, + }); + expect(manager.getActiveCalls()).toHaveLength(0); }); it("keeps calls reported active by provider", async () => { - const 
storePath = createTestStorePath(); - const call = makePersistedCall(); - writeCallsToStore(storePath, [call]); - - const provider = new FakeProvider(); - provider.getCallStatusResult = { status: "in-progress", isTerminal: false }; - - const config = VoiceCallConfigSchema.parse({ - enabled: true, - provider: "plivo", - fromNumber: "+15550000000", + const { call, manager } = await initializeManager({ + providerResult: { status: "in-progress", isTerminal: false }, }); - const manager = new CallManager(config, storePath); - await manager.initialize(provider, "https://example.com/voice/webhook"); expect(manager.getActiveCalls()).toHaveLength(1); expect(manager.getActiveCalls()[0]?.callId).toBe(call.callId); }); it("keeps calls when provider returns unknown (transient error)", async () => { - const storePath = createTestStorePath(); - const call = makePersistedCall(); - writeCallsToStore(storePath, [call]); - - const provider = new FakeProvider(); - provider.getCallStatusResult = { status: "error", isTerminal: false, isUnknown: true }; - - const config = VoiceCallConfigSchema.parse({ - enabled: true, - provider: "plivo", - fromNumber: "+15550000000", + const { manager } = await initializeManager({ + providerResult: { status: "error", isTerminal: false, isUnknown: true }, }); - const manager = new CallManager(config, storePath); - await manager.initialize(provider, "https://example.com/voice/webhook"); expect(manager.getActiveCalls()).toHaveLength(1); }); it("skips calls older than maxDurationSeconds", async () => { - const storePath = createTestStorePath(); - const call = makePersistedCall({ - startedAt: Date.now() - 600_000, - answeredAt: Date.now() - 590_000, + const { manager } = await initializeManager({ + callOverrides: { + startedAt: Date.now() - 600_000, + answeredAt: Date.now() - 590_000, + }, + configOverrides: { maxDurationSeconds: 300 }, }); - writeCallsToStore(storePath, [call]); - - const provider = new FakeProvider(); - - const config = 
VoiceCallConfigSchema.parse({ - enabled: true, - provider: "plivo", - fromNumber: "+15550000000", - maxDurationSeconds: 300, - }); - const manager = new CallManager(config, storePath); - await manager.initialize(provider, "https://example.com/voice/webhook"); expect(manager.getActiveCalls()).toHaveLength(0); }); it("skips calls without providerCallId", async () => { - const storePath = createTestStorePath(); - const call = makePersistedCall({ providerCallId: undefined, state: "initiated" }); - writeCallsToStore(storePath, [call]); - - const provider = new FakeProvider(); - - const config = VoiceCallConfigSchema.parse({ - enabled: true, - provider: "plivo", - fromNumber: "+15550000000", + const { manager } = await initializeManager({ + callOverrides: { providerCallId: undefined, state: "initiated" }, }); - const manager = new CallManager(config, storePath); - await manager.initialize(provider, "https://example.com/voice/webhook"); expect(manager.getActiveCalls()).toHaveLength(0); }); it("keeps call when getCallStatus throws (verification failure)", async () => { - const storePath = createTestStorePath(); - const call = makePersistedCall(); - writeCallsToStore(storePath, [call]); - - const provider = new FakeProvider(); - provider.getCallStatus = async () => { - throw new Error("network failure"); - }; - - const config = VoiceCallConfigSchema.parse({ - enabled: true, - provider: "plivo", - fromNumber: "+15550000000", + const { manager } = await initializeManager({ + configureProvider: (provider) => { + provider.getCallStatus = async () => { + throw new Error("network failure"); + }; + }, }); - const manager = new CallManager(config, storePath); - await manager.initialize(provider, "https://example.com/voice/webhook"); expect(manager.getActiveCalls()).toHaveLength(1); }); diff --git a/extensions/voice-call/src/providers/twilio.test.ts b/extensions/voice-call/src/providers/twilio.test.ts index 0a88bdeae07..4e23783b93a 100644 --- 
a/extensions/voice-call/src/providers/twilio.test.ts +++ b/extensions/voice-call/src/providers/twilio.test.ts @@ -21,6 +21,12 @@ function createContext(rawBody: string, query?: WebhookContext["query"]): Webhoo }; } +function expectStreamingTwiml(body: string) { + expect(body).toContain(STREAM_URL); + expect(body).toContain('"); +} + describe("TwilioProvider", () => { it("returns streaming TwiML for outbound conversation calls before in-progress", () => { const provider = createProvider(); @@ -30,9 +36,8 @@ describe("TwilioProvider", () => { const result = provider.parseWebhookEvent(ctx); - expect(result.providerResponseBody).toContain(STREAM_URL); - expect(result.providerResponseBody).toContain('"); + expect(result.providerResponseBody).toBeDefined(); + expectStreamingTwiml(result.providerResponseBody ?? ""); }); it("returns empty TwiML for status callbacks", () => { @@ -55,9 +60,8 @@ describe("TwilioProvider", () => { const result = provider.parseWebhookEvent(ctx); - expect(result.providerResponseBody).toContain(STREAM_URL); - expect(result.providerResponseBody).toContain('"); + expect(result.providerResponseBody).toBeDefined(); + expectStreamingTwiml(result.providerResponseBody ?? 
""); }); it("returns queue TwiML for second inbound call when first call is active", () => { diff --git a/extensions/zalo/src/monitor.lifecycle.test.ts b/extensions/zalo/src/monitor.lifecycle.test.ts index 6cce789da56..e5fa65e1063 100644 --- a/extensions/zalo/src/monitor.lifecycle.test.ts +++ b/extensions/zalo/src/monitor.lifecycle.test.ts @@ -32,6 +32,41 @@ async function waitForPollingLoopStart(): Promise { await vi.waitFor(() => expect(getUpdatesMock).toHaveBeenCalledTimes(1)); } +const TEST_ACCOUNT = { + accountId: "default", + config: {}, +} as unknown as ResolvedZaloAccount; + +const TEST_CONFIG = {} as OpenClawConfig; + +function createLifecycleRuntime() { + return { + log: vi.fn<(message: string) => void>(), + error: vi.fn<(message: string) => void>(), + }; +} + +async function startLifecycleMonitor( + options: { + useWebhook?: boolean; + webhookSecret?: string; + webhookUrl?: string; + } = {}, +) { + const { monitorZaloProvider } = await import("./monitor.js"); + const abort = new AbortController(); + const runtime = createLifecycleRuntime(); + const run = monitorZaloProvider({ + token: "test-token", + account: TEST_ACCOUNT, + config: TEST_CONFIG, + runtime, + abortSignal: abort.signal, + ...options, + }); + return { abort, runtime, run }; +} + describe("monitorZaloProvider lifecycle", () => { afterEach(() => { vi.clearAllMocks(); @@ -39,26 +74,9 @@ describe("monitorZaloProvider lifecycle", () => { }); it("stays alive in polling mode until abort", async () => { - const { monitorZaloProvider } = await import("./monitor.js"); - const abort = new AbortController(); - const runtime = { - log: vi.fn<(message: string) => void>(), - error: vi.fn<(message: string) => void>(), - }; - const account = { - accountId: "default", - config: {}, - } as unknown as ResolvedZaloAccount; - const config = {} as OpenClawConfig; - let settled = false; - const run = monitorZaloProvider({ - token: "test-token", - account, - config, - runtime, - abortSignal: abort.signal, - 
}).then(() => { + const { abort, runtime, run } = await startLifecycleMonitor(); + const monitoredRun = run.then(() => { settled = true; }); @@ -70,7 +88,7 @@ describe("monitorZaloProvider lifecycle", () => { expect(settled).toBe(false); abort.abort(); - await run; + await monitoredRun; expect(settled).toBe(true); expect(runtime.log).toHaveBeenCalledWith( @@ -84,25 +102,7 @@ describe("monitorZaloProvider lifecycle", () => { result: { url: "https://example.com/hooks/zalo" }, }); - const { monitorZaloProvider } = await import("./monitor.js"); - const abort = new AbortController(); - const runtime = { - log: vi.fn<(message: string) => void>(), - error: vi.fn<(message: string) => void>(), - }; - const account = { - accountId: "default", - config: {}, - } as unknown as ResolvedZaloAccount; - const config = {} as OpenClawConfig; - - const run = monitorZaloProvider({ - token: "test-token", - account, - config, - runtime, - abortSignal: abort.signal, - }); + const { abort, runtime, run } = await startLifecycleMonitor(); await waitForPollingLoopStart(); @@ -120,25 +120,7 @@ describe("monitorZaloProvider lifecycle", () => { const { ZaloApiError } = await import("./api.js"); getWebhookInfoMock.mockRejectedValueOnce(new ZaloApiError("Not Found", 404, "Not Found")); - const { monitorZaloProvider } = await import("./monitor.js"); - const abort = new AbortController(); - const runtime = { - log: vi.fn<(message: string) => void>(), - error: vi.fn<(message: string) => void>(), - }; - const account = { - accountId: "default", - config: {}, - } as unknown as ResolvedZaloAccount; - const config = {} as OpenClawConfig; - - const run = monitorZaloProvider({ - token: "test-token", - account, - config, - runtime, - abortSignal: abort.signal, - }); + const { abort, runtime, run } = await startLifecycleMonitor(); await waitForPollingLoopStart(); @@ -165,29 +147,13 @@ describe("monitorZaloProvider lifecycle", () => { }), ); - const { monitorZaloProvider } = await import("./monitor.js"); - 
const abort = new AbortController(); - const runtime = { - log: vi.fn<(message: string) => void>(), - error: vi.fn<(message: string) => void>(), - }; - const account = { - accountId: "default", - config: {}, - } as unknown as ResolvedZaloAccount; - const config = {} as OpenClawConfig; - let settled = false; - const run = monitorZaloProvider({ - token: "test-token", - account, - config, - runtime, - abortSignal: abort.signal, + const { abort, runtime, run } = await startLifecycleMonitor({ useWebhook: true, webhookUrl: "https://example.com/hooks/zalo", webhookSecret: "supersecret", // pragma: allowlist secret - }).then(() => { + }); + const monitoredRun = run.then(() => { settled = true; }); @@ -202,7 +168,7 @@ describe("monitorZaloProvider lifecycle", () => { expect(registry.httpRoutes).toHaveLength(1); resolveDeleteWebhook?.(); - await run; + await monitoredRun; expect(settled).toBe(true); expect(registry.httpRoutes).toHaveLength(0); diff --git a/extensions/zalouser/src/monitor.group-gating.test.ts b/extensions/zalouser/src/monitor.group-gating.test.ts index f6723cad3d7..ca42edde43a 100644 --- a/extensions/zalouser/src/monitor.group-gating.test.ts +++ b/extensions/zalouser/src/monitor.group-gating.test.ts @@ -187,6 +187,31 @@ function installRuntime(params: { }; } +function installGroupCommandAuthRuntime() { + return installRuntime({ + resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) => + useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed), + }); +} + +async function processGroupControlCommand(params: { + account: ResolvedZalouserAccount; + content?: string; + commandContent?: string; +}) { + await __testing.processMessage({ + message: createGroupMessage({ + content: params.content ?? "/new", + commandContent: params.commandContent ?? 
"/new", + hasAnyMention: true, + wasExplicitlyMentioned: true, + }), + account: params.account, + config: createConfig(), + runtime: createRuntimeEnv(), + }); +} + function createGroupMessage(overrides: Partial = {}): ZaloInboundMessage { return { threadId: "g-1", @@ -229,57 +254,152 @@ describe("zalouser monitor group mention gating", () => { sendSeenZalouserMock.mockClear(); }); - it("skips unmentioned group messages when requireMention=true", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ - commandAuthorized: false, - }); + async function processMessageWithDefaults(params: { + message: ZaloInboundMessage; + account?: ResolvedZalouserAccount; + historyState?: { + historyLimit: number; + groupHistories: Map< + string, + Array<{ sender: string; body: string; timestamp?: number; messageId?: string }> + >; + }; + }) { await __testing.processMessage({ - message: createGroupMessage(), - account: createAccount(), + message: params.message, + account: params.account ?? 
createAccount(), config: createConfig(), runtime: createRuntimeEnv(), + historyState: params.historyState, }); + } - expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); - expect(sendTypingZalouserMock).not.toHaveBeenCalled(); - }); - - it("fails closed when requireMention=true but mention detection is unavailable", async () => { + async function expectSkippedGroupMessage(message?: Partial) { const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ commandAuthorized: false, }); - await __testing.processMessage({ - message: createGroupMessage({ - canResolveExplicitMention: false, - hasAnyMention: false, - wasExplicitlyMentioned: false, - }), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), + await processMessageWithDefaults({ + message: createGroupMessage(message), }); - expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); expect(sendTypingZalouserMock).not.toHaveBeenCalled(); - }); + } - it("dispatches explicitly-mentioned group messages and marks WasMentioned", async () => { + async function expectGroupCommandAuthorizers(params: { + accountConfig: ResolvedZalouserAccount["config"]; + expectedAuthorizers: Array<{ configured: boolean; allowed: boolean }>; + }) { + const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } = + installGroupCommandAuthRuntime(); + await processGroupControlCommand({ + account: { + ...createAccount(), + config: params.accountConfig, + }, + }); + expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); + const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0]; + expect(authCall?.authorizers).toEqual(params.expectedAuthorizers); + } + + async function processOpenDmMessage(params?: { + message?: Partial; + readSessionUpdatedAt?: (input?: { + storePath: string; + sessionKey: string; + }) => number | undefined; + }) { + const runtime = installRuntime({ + commandAuthorized: false, + }); 
+ if (params?.readSessionUpdatedAt) { + runtime.readSessionUpdatedAt.mockImplementation(params.readSessionUpdatedAt); + } + const account = createAccount(); + await processMessageWithDefaults({ + message: createDmMessage(params?.message), + account: { + ...account, + config: { + ...account.config, + dmPolicy: "open", + }, + }, + }); + return runtime; + } + + async function expectDangerousNameMatching(params: { + dangerouslyAllowNameMatching?: boolean; + expectedDispatches: number; + }) { const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ commandAuthorized: false, }); - await __testing.processMessage({ + await processMessageWithDefaults({ message: createGroupMessage({ + threadId: "g-attacker-001", + groupName: "Trusted Team", + senderId: "666", hasAnyMention: true, wasExplicitlyMentioned: true, content: "ping @bot", }), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), + account: { + ...createAccount(), + config: { + ...createAccount().config, + ...(params.dangerouslyAllowNameMatching ? 
{ dangerouslyAllowNameMatching: true } : {}), + groupPolicy: "allowlist", + groupAllowFrom: ["*"], + groups: { + "group:g-trusted-001": { allow: true }, + "Trusted Team": { allow: true }, + }, + }, + }, }); + expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes( + params.expectedDispatches, + ); + return dispatchReplyWithBufferedBlockDispatcher; + } + async function dispatchGroupMessage(params: { + commandAuthorized: boolean; + message: Partial; + }) { + const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ + commandAuthorized: params.commandAuthorized, + }); + await processMessageWithDefaults({ + message: createGroupMessage(params.message), + }); expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; + return dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; + } + + it("skips unmentioned group messages when requireMention=true", async () => { + await expectSkippedGroupMessage(); + }); + + it("fails closed when requireMention=true but mention detection is unavailable", async () => { + await expectSkippedGroupMessage({ + canResolveExplicitMention: false, + hasAnyMention: false, + wasExplicitlyMentioned: false, + }); + }); + + it("dispatches explicitly-mentioned group messages and marks WasMentioned", async () => { + const callArg = await dispatchGroupMessage({ + commandAuthorized: false, + message: { + hasAnyMention: true, + wasExplicitlyMentioned: true, + content: "ping @bot", + }, + }); expect(callArg?.ctx?.WasMentioned).toBe(true); expect(callArg?.ctx?.To).toBe("zalouser:group:g-1"); expect(callArg?.ctx?.OriginatingTo).toBe("zalouser:group:g-1"); @@ -290,22 +410,14 @@ describe("zalouser monitor group mention gating", () => { }); it("allows authorized control commands to bypass mention gating", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ + const callArg = await 
dispatchGroupMessage({ commandAuthorized: true, - }); - await __testing.processMessage({ - message: createGroupMessage({ + message: { content: "/status", hasAnyMention: false, wasExplicitlyMentioned: false, - }), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), + }, }); - - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; expect(callArg?.ctx?.WasMentioned).toBe(true); }); @@ -346,57 +458,30 @@ describe("zalouser monitor group mention gating", () => { }); it("uses commandContent for mention-prefixed control commands", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ + const callArg = await dispatchGroupMessage({ commandAuthorized: true, - }); - await __testing.processMessage({ - message: createGroupMessage({ + message: { content: "@Bot /new", commandContent: "/new", hasAnyMention: true, wasExplicitlyMentioned: true, - }), - account: createAccount(), - config: createConfig(), - runtime: createRuntimeEnv(), + }, }); - - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; expect(callArg?.ctx?.CommandBody).toBe("/new"); expect(callArg?.ctx?.BodyForCommands).toBe("/new"); }); it("allows group control commands when only allowFrom is configured", async () => { - const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } = - installRuntime({ - resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) => - useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed), - }); - await __testing.processMessage({ - message: createGroupMessage({ - content: "/new", - commandContent: "/new", - hasAnyMention: true, - wasExplicitlyMentioned: true, - }), - account: { - ...createAccount(), - config: { - ...createAccount().config, - allowFrom: 
["123"], - }, + await expectGroupCommandAuthorizers({ + accountConfig: { + ...createAccount().config, + allowFrom: ["123"], }, - config: createConfig(), - runtime: createRuntimeEnv(), + expectedAuthorizers: [ + { configured: true, allowed: true }, + { configured: true, allowed: true }, + ], }); - - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0]; - expect(authCall?.authorizers).toEqual([ - { configured: true, allowed: true }, - { configured: true, allowed: true }, - ]); }); it("blocks group messages when sender is not in groupAllowFrom/allowFrom", async () => { @@ -425,123 +510,35 @@ describe("zalouser monitor group mention gating", () => { }); it("does not accept a different group id by matching only the mutable group name by default", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ - commandAuthorized: false, - }); - await __testing.processMessage({ - message: createGroupMessage({ - threadId: "g-attacker-001", - groupName: "Trusted Team", - senderId: "666", - hasAnyMention: true, - wasExplicitlyMentioned: true, - content: "ping @bot", - }), - account: { - ...createAccount(), - config: { - ...createAccount().config, - groupPolicy: "allowlist", - groupAllowFrom: ["*"], - groups: { - "group:g-trusted-001": { allow: true }, - "Trusted Team": { allow: true }, - }, - }, - }, - config: createConfig(), - runtime: createRuntimeEnv(), - }); - - expect(dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled(); + await expectDangerousNameMatching({ expectedDispatches: 0 }); }); it("accepts mutable group-name matches only when dangerouslyAllowNameMatching is enabled", async () => { - const { dispatchReplyWithBufferedBlockDispatcher } = installRuntime({ - commandAuthorized: false, + const dispatchReplyWithBufferedBlockDispatcher = await expectDangerousNameMatching({ + dangerouslyAllowNameMatching: true, + expectedDispatches: 1, 
}); - await __testing.processMessage({ - message: createGroupMessage({ - threadId: "g-attacker-001", - groupName: "Trusted Team", - senderId: "666", - hasAnyMention: true, - wasExplicitlyMentioned: true, - content: "ping @bot", - }), - account: { - ...createAccount(), - config: { - ...createAccount().config, - dangerouslyAllowNameMatching: true, - groupPolicy: "allowlist", - groupAllowFrom: ["*"], - groups: { - "group:g-trusted-001": { allow: true }, - "Trusted Team": { allow: true }, - }, - }, - }, - config: createConfig(), - runtime: createRuntimeEnv(), - }); - - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; expect(callArg?.ctx?.To).toBe("zalouser:group:g-attacker-001"); }); it("allows group control commands when sender is in groupAllowFrom", async () => { - const { dispatchReplyWithBufferedBlockDispatcher, resolveCommandAuthorizedFromAuthorizers } = - installRuntime({ - resolveCommandAuthorizedFromAuthorizers: ({ useAccessGroups, authorizers }) => - useAccessGroups && authorizers.some((entry) => entry.configured && entry.allowed), - }); - await __testing.processMessage({ - message: createGroupMessage({ - content: "/new", - commandContent: "/new", - hasAnyMention: true, - wasExplicitlyMentioned: true, - }), - account: { - ...createAccount(), - config: { - ...createAccount().config, - allowFrom: ["999"], - groupAllowFrom: ["123"], - }, + await expectGroupCommandAuthorizers({ + accountConfig: { + ...createAccount().config, + allowFrom: ["999"], + groupAllowFrom: ["123"], }, - config: createConfig(), - runtime: createRuntimeEnv(), + expectedAuthorizers: [ + { configured: true, allowed: false }, + { configured: true, allowed: true }, + ], }); - - expect(dispatchReplyWithBufferedBlockDispatcher).toHaveBeenCalledTimes(1); - const authCall = resolveCommandAuthorizedFromAuthorizers.mock.calls[0]?.[0]; - expect(authCall?.authorizers).toEqual([ - { configured: true, 
allowed: false }, - { configured: true, allowed: true }, - ]); }); it("routes DM messages with direct peer kind", async () => { const { dispatchReplyWithBufferedBlockDispatcher, resolveAgentRoute, buildAgentSessionKey } = - installRuntime({ - commandAuthorized: false, - }); - const account = createAccount(); - await __testing.processMessage({ - message: createDmMessage(), - account: { - ...account, - config: { - ...account.config, - dmPolicy: "open", - }, - }, - config: createConfig(), - runtime: createRuntimeEnv(), - }); + await processOpenDmMessage(); expect(resolveAgentRoute).toHaveBeenCalledWith( expect.objectContaining({ @@ -559,24 +556,9 @@ describe("zalouser monitor group mention gating", () => { }); it("reuses the legacy DM session key when only the old group-shaped session exists", async () => { - const { dispatchReplyWithBufferedBlockDispatcher, readSessionUpdatedAt } = installRuntime({ - commandAuthorized: false, - }); - readSessionUpdatedAt.mockImplementation((input?: { storePath: string; sessionKey: string }) => - input?.sessionKey === "agent:main:zalouser:group:321" ? 123 : undefined, - ); - const account = createAccount(); - await __testing.processMessage({ - message: createDmMessage(), - account: { - ...account, - config: { - ...account.config, - dmPolicy: "open", - }, - }, - config: createConfig(), - runtime: createRuntimeEnv(), + const { dispatchReplyWithBufferedBlockDispatcher } = await processOpenDmMessage({ + readSessionUpdatedAt: (input?: { storePath: string; sessionKey: string }) => + input?.sessionKey === "agent:main:zalouser:group:321" ? 
123 : undefined, }); const callArg = dispatchReplyWithBufferedBlockDispatcher.mock.calls[0]?.[0]; diff --git a/package.json b/package.json index c63e72f66fa..54d897eb66f 100644 --- a/package.json +++ b/package.json @@ -353,6 +353,7 @@ "@mariozechner/pi-ai": "0.57.1", "@mariozechner/pi-coding-agent": "0.57.1", "@mariozechner/pi-tui": "0.57.1", + "@modelcontextprotocol/sdk": "1.27.1", "@mozilla/readability": "^0.6.0", "@sinclair/typebox": "0.34.48", "@slack/bolt": "^4.6.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index ac32d145c57..aed82644da9 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -60,16 +60,19 @@ importers: version: 1.2.0-beta.3 '@mariozechner/pi-agent-core': specifier: 0.57.1 - version: 0.57.1(ws@8.19.0)(zod@4.3.6) + version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-ai': specifier: 0.57.1 - version: 0.57.1(ws@8.19.0)(zod@4.3.6) + version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-coding-agent': specifier: 0.57.1 - version: 0.57.1(ws@8.19.0)(zod@4.3.6) + version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-tui': specifier: 0.57.1 version: 0.57.1 + '@modelcontextprotocol/sdk': + specifier: 1.27.1 + version: 1.27.1(zod@4.3.6) '@mozilla/readability': specifier: ^0.6.0 version: 0.6.0 @@ -344,9 +347,10 @@ importers: google-auth-library: specifier: ^10.6.1 version: 10.6.1 + devDependencies: openclaw: - specifier: '>=2026.3.11' - version: 2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)) + specifier: workspace:* + version: link:../.. 
extensions/imessage: {} @@ -377,7 +381,7 @@ importers: dependencies: '@mariozechner/pi-agent-core': specifier: 0.57.1 - version: 0.57.1(ws@8.19.0)(zod@4.3.6) + version: 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@matrix-org/matrix-sdk-crypto-nodejs': specifier: ^0.4.0 version: 0.4.0 @@ -404,10 +408,10 @@ importers: version: 4.3.6 extensions/memory-core: - dependencies: + devDependencies: openclaw: - specifier: '>=2026.3.11' - version: 2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)) + specifier: workspace:* + version: link:../.. extensions/memory-lancedb: dependencies: @@ -651,10 +655,6 @@ packages: resolution: {integrity: sha512-t8cl+bPLlHZQD2Sw1a4hSLUybqJZU71+m8znkyeU8CHntFqEp2mMbuLKdHKaAYQ1fAApXMsvzenCAkDzNeeJlw==} engines: {node: '>=20.0.0'} - '@aws-sdk/client-bedrock@3.1007.0': - resolution: {integrity: sha512-49hH8o6ALKkCiBUgg20HkwxNamP1yYA/n8Si73Z438EqhZGpCfScP3FfxVhrfD5o+4bV4Whi9BTzPKCa/PfUww==} - engines: {node: '>=20.0.0'} - '@aws-sdk/client-bedrock@3.1008.0': resolution: {integrity: sha512-mzxO/DplpZZT7AIZUCG7Q78OlaeHeDybYz+ZlWZPaXFjGDJwUv1E3SKskmaaQvTsMeieie0WX7gzueYrCx4YfQ==} engines: {node: '>=20.0.0'} @@ -711,10 +711,6 @@ packages: resolution: {integrity: sha512-dFqh7nfX43B8dO1aPQHOcjC0SnCJ83H3F+1LoCh3X1P7E7N09I+0/taID0asU6GCddfDExqnEvQtDdkuMe5tKQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-ini@3.972.18': - resolution: {integrity: sha512-vthIAXJISZnj2576HeyLBj4WTeX+I7PwWeRkbOa0mVX39K13SCGxCgOFuKj2ytm9qTlLOmXe4cdEnroteFtJfw==} - engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-ini@3.972.19': resolution: {integrity: sha512-pVJVjWqVrPqjpFq7o0mCmeZu1Y0c94OCHSYgivdCD2wfmYVtBbwQErakruhgOD8pcMcx9SCqRw1pzHKR7OGBcA==} engines: {node: '>=20.0.0'} @@ -727,10 +723,6 @@ packages: resolution: {integrity: sha512-gf2E5b7LpKb+JX2oQsRIDxdRZjBFZt2olCGlWCdb3vBERbXIPgm2t1R5mEnwd4j0UEO/Tbg5zN2KJbHXttJqwA==} engines: 
{node: '>=20.0.0'} - '@aws-sdk/credential-provider-login@3.972.18': - resolution: {integrity: sha512-kINzc5BBxdYBkPZ0/i1AMPMOk5b5QaFNbYMElVw5QTX13AKj6jcxnv/YNl9oW9mg+Y08ti19hh01HhyEAxsSJQ==} - engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-login@3.972.19': resolution: {integrity: sha512-jOXdZ1o+CywQKr6gyxgxuUmnGwTTnY2Kxs1PM7fI6AYtDWDnmW/yKXayNqkF8KjP1unflqMWKVbVt5VgmE3L0g==} engines: {node: '>=20.0.0'} @@ -743,10 +735,6 @@ packages: resolution: {integrity: sha512-ZDJa2gd1xiPg/nBDGhUlat02O8obaDEnICBAVS8qieZ0+nDfaB0Z3ec6gjZj27OqFTjnB/Q5a0GwQwb7rMVViw==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-node@3.972.19': - resolution: {integrity: sha512-yDWQ9dFTr+IMxwanFe7+tbN5++q8psZBjlUwOiCXn1EzANoBgtqBwcpYcHaMGtn0Wlfj4NuXdf2JaEx1lz5RaQ==} - engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-node@3.972.20': resolution: {integrity: sha512-0xHca2BnPY0kzjDYPH7vk8YbfdBPpWVS67rtqQMalYDQUCBYS37cZ55K6TuFxCoIyNZgSCFrVKr9PXC5BVvQQw==} engines: {node: '>=20.0.0'} @@ -771,10 +759,6 @@ packages: resolution: {integrity: sha512-wGtte+48xnhnhHMl/MsxzacBPs5A+7JJedjiP452IkHY7vsbYKcvQBqFye8LwdTJVeHtBHv+JFeTscnwepoWGg==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-sso@3.972.18': - resolution: {integrity: sha512-YHYEfj5S2aqInRt5ub8nDOX8vAxgMvd84wm2Y3WVNfFa/53vOv9T7WOAqXI25qjj3uEcV46xxfqdDQk04h5XQA==} - engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-sso@3.972.19': resolution: {integrity: sha512-kVjQsEU3b///q7EZGrUzol9wzwJFKbEzqJKSq82A9ShrUTEO7FNylTtby3sPV19ndADZh1H3FB3+5ZrvKtEEeg==} engines: {node: '>=20.0.0'} @@ -787,10 +771,6 @@ packages: resolution: {integrity: sha512-8aiVJh6fTdl8gcyL+sVNcNwTtWpmoFa1Sh7xlj6Z7L/cZ/tYMEBHq44wTYG8Kt0z/PpGNopD89nbj3FHl9QmTA==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-web-identity@3.972.18': - resolution: {integrity: sha512-OqlEQpJ+J3T5B96qtC1zLLwkBloechP+fezKbCH0sbd2cCc0Ra55XpxWpk/hRj69xAOYtHvoC4orx6eTa4zU7g==} - engines: {node: '>=20.0.0'} - 
'@aws-sdk/credential-provider-web-identity@3.972.19': resolution: {integrity: sha512-BV1BlTFdG4w4tAihxN7iXDBoNcNewXD4q8uZlNQiUrnqxwGWUhKHODIQVSPlQGxXClEj+63m+cqZskw+ESmeZg==} engines: {node: '>=20.0.0'} @@ -875,10 +855,6 @@ packages: resolution: {integrity: sha512-MlGWA8uPaOs5AiTZ5JLM4uuWDm9EEAnm9cqwvqQIc6kEgel/8s1BaOWm9QgUcfc9K8qd7KkC3n43yDbeXOA2tg==} engines: {node: '>=20.0.0'} - '@aws-sdk/nested-clients@3.996.8': - resolution: {integrity: sha512-6HlLm8ciMW8VzfB80kfIx16PBA9lOa9Dl+dmCBi78JDhvGlx3I7Rorwi5PpVRkL31RprXnYna3yBf6UKkD/PqA==} - engines: {node: '>=20.0.0'} - '@aws-sdk/nested-clients@3.996.9': resolution: {integrity: sha512-+RpVtpmQbbtzFOKhMlsRcXM/3f1Z49qTOHaA8gEpHOYruERmog6f2AUtf/oTRLCWjR9H2b3roqryV/hI7QMW8w==} engines: {node: '>=20.0.0'} @@ -903,14 +879,6 @@ packages: resolution: {integrity: sha512-j9BwZZId9sFp+4GPhf6KrwO8Tben2sXibZA8D1vv2I1zBdvkUHcBA2g4pkqIpTRalMTLC0NPkBPX0gERxfy/iA==} engines: {node: '>=20.0.0'} - '@aws-sdk/token-providers@3.1005.0': - resolution: {integrity: sha512-vMxd+ivKqSxU9bHx5vmAlFKDAkjGotFU56IOkDa5DaTu1WWwbcse0yFHEm9I537oVvodaiwMl3VBwgHfzQ2rvw==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/token-providers@3.1007.0': - resolution: {integrity: sha512-kKvVyr53vvVc5k6RbvI6jhafxufxO2SkEw8QeEzJqwOXH/IMY7Cm0IyhnBGdqj80iiIIiIM2jGe7Fn3TIdwdrw==} - engines: {node: '>=20.0.0'} - '@aws-sdk/token-providers@3.1008.0': resolution: {integrity: sha512-TulwlHQBWcJs668kNUDMZHN51DeLrDsYT59Ux4a/nbvr025gM6HjKJJ3LvnZccam7OS/ZKUVkWomCneRQKJbBg==} engines: {node: '>=20.0.0'} @@ -979,15 +947,6 @@ packages: aws-crt: optional: true - '@aws-sdk/util-user-agent-node@3.973.5': - resolution: {integrity: sha512-Dyy38O4GeMk7UQ48RupfHif//gqnOPbq/zlvRssc11E2mClT+aUfc3VS2yD8oLtzqO3RsqQ9I3gOBB4/+HjPOw==} - engines: {node: '>=20.0.0'} - peerDependencies: - aws-crt: '>=1.0.0' - peerDependenciesMeta: - aws-crt: - optional: true - '@aws-sdk/util-user-agent-node@3.973.6': resolution: {integrity: 
sha512-iF7G0prk7AvmOK64FcLvc/fW+Ty1H+vttajL7PvJFReU8urMxfYmynTTuFKDTA76Wgpq3FzTPKwabMQIXQHiXQ==} engines: {node: '>=20.0.0'} @@ -1828,6 +1787,16 @@ packages: '@mistralai/mistralai@1.14.1': resolution: {integrity: sha512-IiLmmZFCCTReQgPAT33r7KQ1nYo5JPdvGkrkZqA8qQ2qB1GHgs5LoP5K2ICyrjnpw2n8oSxMM/VP+liiKcGNlQ==} + '@modelcontextprotocol/sdk@1.27.1': + resolution: {integrity: sha512-sr6GbP+4edBwFndLbM60gf07z0FQ79gaExpnsjMGePXqFcSSb7t6iscpjk9DhFhwd+mTEQrzNafGP8/iGGFYaA==} + engines: {node: '>=18'} + peerDependencies: + '@cfworker/json-schema': ^4.1.1 + zod: ^3.25 || ^4.0 + peerDependenciesMeta: + '@cfworker/json-schema': + optional: true + '@mozilla/readability@0.6.0': resolution: {integrity: sha512-juG5VWh4qAivzTAeMzvY9xs9HY5rAcr2E4I7tiSSCokRFi7XIZCAu92ZkSTsIj1OPceCifL3cpfteP3pDT9/QQ==} engines: {node: '>=14.0.0'} @@ -4271,6 +4240,10 @@ packages: core-util-is@1.0.3: resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} + cors@2.8.6: + resolution: {integrity: sha512-tJtZBBHA6vjIAaF6EnIaq6laBBP9aq/Y3ouVJjEfoHbRBcHBAHYcMh/w8LDrk2PvIMMq8gmopa5D4V8RmbrxGw==} + engines: {node: '>= 0.10'} + croner@10.0.1: resolution: {integrity: sha512-ixNtAJndqh173VQ4KodSdJEI6nuioBWI0V1ITNKhZZsO0pEMoDxz539T4FTTbSZ/xIOSuDnzxLVRqBVSvPNE2g==} engines: {node: '>=18.0'} @@ -4550,6 +4523,14 @@ packages: events-universal@1.0.1: resolution: {integrity: sha512-LUd5euvbMLpwOF8m6ivPCbhQeSiYVNb8Vs0fQ8QjXo0JTkEHpz8pxdQf0gStltaPpw0Cca8b39KxvK9cfKRiAw==} + eventsource-parser@3.0.6: + resolution: {integrity: sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==} + engines: {node: '>=18.0.0'} + + eventsource@3.0.7: + resolution: {integrity: sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==} + engines: {node: '>=18.0.0'} + execa@4.1.0: resolution: {integrity: sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==} 
engines: {node: '>=10'} @@ -4561,6 +4542,12 @@ packages: exponential-backoff@3.1.3: resolution: {integrity: sha512-ZgEeZXj30q+I0EN+CbSSpIyPaJ5HVQD18Z1m+u1FXbAeT94mr1zw50q4q6jiiC447Nl/YTcIYSAftiGqetwXCA==} + express-rate-limit@8.3.1: + resolution: {integrity: sha512-D1dKN+cmyPWuvB+G2SREQDzPY1agpBIcTa9sJxOPMCNeH3gwzhqJRDWCXW3gg0y//+LQ/8j52JbMROWyrKdMdw==} + engines: {node: '>= 16'} + peerDependencies: + express: '>= 4.11' + express@4.22.1: resolution: {integrity: sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==} engines: {node: '>= 0.10.0'} @@ -5058,6 +5045,9 @@ packages: jose@4.15.9: resolution: {integrity: sha512-1vUQX+IdDMVPj4k8kOxgUqlcK518yluMuGZwqlr44FS1ppZB/5GWh4rZG89erpOBOJjU/OBsnCVFfapsRz6nEA==} + jose@6.2.1: + resolution: {integrity: sha512-jUaKr1yrbfaImV7R2TN/b3IcZzsw38/chqMpo2XJ7i2F8AfM/lA4G1goC3JVEwg0H7UldTmSt3P68nt31W7/mw==} + js-stringify@1.0.2: resolution: {integrity: sha512-rtS5ATOo2Q5k1G+DADISilDA6lv79zIiwFd6CcjuIxGKLFm5C+RLImRscVap9k55i+MOZwgliw+NejvkLuGD5g==} @@ -5102,6 +5092,9 @@ packages: json-schema-traverse@1.0.0: resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} + json-schema-typed@8.0.2: + resolution: {integrity: sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA==} + json-schema@0.4.0: resolution: {integrity: sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==} @@ -5689,14 +5682,6 @@ packages: zod: optional: true - openclaw@2026.3.11: - resolution: {integrity: sha512-bxwiBmHPakwfpY5tqC9lrV5TCu5PKf0c1bHNc3nhrb+pqKcPEWV4zOjDVFLQUHr98ihgWA+3pacy4b3LQ8wduQ==} - engines: {node: '>=22.12.0'} - hasBin: true - peerDependencies: - '@napi-rs/canvas': ^0.1.89 - node-llama-cpp: 3.16.2 - opus-decoder@0.7.11: resolution: {integrity: sha512-+e+Jz3vGQLxRTBHs8YJQPRPc1Tr+/aC6coV/DlZylriA29BdHQAYXhvNRKtjftof17OFng0+P4wsFIqQu3a48A==} @@ -5870,6 +5855,10 @@ 
packages: resolution: {integrity: sha512-8OEwKp5juEvb/MjpIc4hjqfgCNysrS94RIOMXYvpYCdm/jglrKEiAYmiumbmGhCvs+IcInsphYDFwqrjr7398w==} hasBin: true + pkce-challenge@5.0.1: + resolution: {integrity: sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ==} + engines: {node: '>=16.20.0'} + playwright-core@1.58.2: resolution: {integrity: sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg==} engines: {node: '>=18'} @@ -6667,10 +6656,6 @@ packages: undici-types@7.18.2: resolution: {integrity: sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==} - undici@7.22.0: - resolution: {integrity: sha512-RqslV2Us5BrllB+JeiZnK4peryVTndy9Dnqq62S3yYRRTj0tFQCwEniUy2167skdGOy3vqRzEvl1Dm4sV2ReDg==} - engines: {node: '>=20.18.1'} - undici@7.24.0: resolution: {integrity: sha512-jxytwMHhsbdpBXxLAcuu0fzlQeXCNnWdDyRHpvWsUl8vd98UwYdl9YTyn8/HcpcJPC3pwUveefsa3zTxyD/ERg==} engines: {node: '>=20.18.1'} @@ -7120,51 +7105,6 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/client-bedrock@3.1007.0': - dependencies: - '@aws-crypto/sha256-browser': 5.2.0 - '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.19 - '@aws-sdk/credential-provider-node': 3.972.19 - '@aws-sdk/middleware-host-header': 3.972.7 - '@aws-sdk/middleware-logger': 3.972.7 - '@aws-sdk/middleware-recursion-detection': 3.972.7 - '@aws-sdk/middleware-user-agent': 3.972.20 - '@aws-sdk/region-config-resolver': 3.972.7 - '@aws-sdk/token-providers': 3.1007.0 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-endpoints': 3.996.4 - '@aws-sdk/util-user-agent-browser': 3.972.7 - '@aws-sdk/util-user-agent-node': 3.973.5 - '@smithy/config-resolver': 4.4.10 - '@smithy/core': 3.23.9 - '@smithy/fetch-http-handler': 5.3.13 - '@smithy/hash-node': 4.2.11 - '@smithy/invalid-dependency': 4.2.11 - '@smithy/middleware-content-length': 4.2.11 - '@smithy/middleware-endpoint': 4.4.23 - '@smithy/middleware-retry': 4.4.40 - 
'@smithy/middleware-serde': 4.2.12 - '@smithy/middleware-stack': 4.2.11 - '@smithy/node-config-provider': 4.3.11 - '@smithy/node-http-handler': 4.4.14 - '@smithy/protocol-http': 5.3.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.11 - '@smithy/util-base64': 4.3.2 - '@smithy/util-body-length-browser': 4.2.2 - '@smithy/util-body-length-node': 4.2.3 - '@smithy/util-defaults-mode-browser': 4.3.39 - '@smithy/util-defaults-mode-node': 4.2.42 - '@smithy/util-endpoints': 3.3.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-retry': 4.2.11 - '@smithy/util-utf8': 4.2.2 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - '@aws-sdk/client-bedrock@3.1008.0': dependencies: '@aws-crypto/sha256-browser': 5.2.0 @@ -7424,25 +7364,6 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-ini@3.972.18': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/credential-provider-env': 3.972.17 - '@aws-sdk/credential-provider-http': 3.972.19 - '@aws-sdk/credential-provider-login': 3.972.18 - '@aws-sdk/credential-provider-process': 3.972.17 - '@aws-sdk/credential-provider-sso': 3.972.18 - '@aws-sdk/credential-provider-web-identity': 3.972.18 - '@aws-sdk/nested-clients': 3.996.8 - '@aws-sdk/types': 3.973.5 - '@smithy/credential-provider-imds': 4.2.11 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - '@aws-sdk/credential-provider-ini@3.972.19': dependencies: '@aws-sdk/core': 3.973.19 @@ -7488,19 +7409,6 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-login@3.972.18': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/nested-clients': 3.996.8 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/protocol-http': 5.3.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - 
aws-crt - '@aws-sdk/credential-provider-login@3.972.19': dependencies: '@aws-sdk/core': 3.973.19 @@ -7548,23 +7456,6 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-node@3.972.19': - dependencies: - '@aws-sdk/credential-provider-env': 3.972.17 - '@aws-sdk/credential-provider-http': 3.972.19 - '@aws-sdk/credential-provider-ini': 3.972.18 - '@aws-sdk/credential-provider-process': 3.972.17 - '@aws-sdk/credential-provider-sso': 3.972.18 - '@aws-sdk/credential-provider-web-identity': 3.972.18 - '@aws-sdk/types': 3.973.5 - '@smithy/credential-provider-imds': 4.2.11 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - '@aws-sdk/credential-provider-node@3.972.20': dependencies: '@aws-sdk/credential-provider-env': 3.972.17 @@ -7635,19 +7526,6 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-sso@3.972.18': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/nested-clients': 3.996.8 - '@aws-sdk/token-providers': 3.1005.0 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - '@aws-sdk/credential-provider-sso@3.972.19': dependencies: '@aws-sdk/core': 3.973.19 @@ -7685,18 +7563,6 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-web-identity@3.972.18': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/nested-clients': 3.996.8 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - '@aws-sdk/credential-provider-web-identity@3.972.19': dependencies: '@aws-sdk/core': 3.973.19 @@ -7961,49 +7827,6 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/nested-clients@3.996.8': - 
dependencies: - '@aws-crypto/sha256-browser': 5.2.0 - '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.19 - '@aws-sdk/middleware-host-header': 3.972.7 - '@aws-sdk/middleware-logger': 3.972.7 - '@aws-sdk/middleware-recursion-detection': 3.972.7 - '@aws-sdk/middleware-user-agent': 3.972.20 - '@aws-sdk/region-config-resolver': 3.972.7 - '@aws-sdk/types': 3.973.5 - '@aws-sdk/util-endpoints': 3.996.4 - '@aws-sdk/util-user-agent-browser': 3.972.7 - '@aws-sdk/util-user-agent-node': 3.973.5 - '@smithy/config-resolver': 4.4.10 - '@smithy/core': 3.23.9 - '@smithy/fetch-http-handler': 5.3.13 - '@smithy/hash-node': 4.2.11 - '@smithy/invalid-dependency': 4.2.11 - '@smithy/middleware-content-length': 4.2.11 - '@smithy/middleware-endpoint': 4.4.23 - '@smithy/middleware-retry': 4.4.40 - '@smithy/middleware-serde': 4.2.12 - '@smithy/middleware-stack': 4.2.11 - '@smithy/node-config-provider': 4.3.11 - '@smithy/node-http-handler': 4.4.14 - '@smithy/protocol-http': 5.3.11 - '@smithy/smithy-client': 4.12.3 - '@smithy/types': 4.13.0 - '@smithy/url-parser': 4.2.11 - '@smithy/util-base64': 4.3.2 - '@smithy/util-body-length-browser': 4.2.2 - '@smithy/util-body-length-node': 4.2.3 - '@smithy/util-defaults-mode-browser': 4.3.39 - '@smithy/util-defaults-mode-node': 4.2.42 - '@smithy/util-endpoints': 3.3.2 - '@smithy/util-middleware': 4.2.11 - '@smithy/util-retry': 4.2.11 - '@smithy/util-utf8': 4.2.2 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - '@aws-sdk/nested-clients@3.996.9': dependencies: '@aws-crypto/sha256-browser': 5.2.0 @@ -8095,30 +7918,6 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/token-providers@3.1005.0': - dependencies: - '@aws-sdk/core': 3.973.19 - '@aws-sdk/nested-clients': 3.996.8 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/token-providers@3.1007.0': - dependencies: - 
'@aws-sdk/core': 3.973.19 - '@aws-sdk/nested-clients': 3.996.8 - '@aws-sdk/types': 3.973.5 - '@smithy/property-provider': 4.2.11 - '@smithy/shared-ini-file-loader': 4.4.6 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - '@aws-sdk/token-providers@3.1008.0': dependencies: '@aws-sdk/core': 3.973.19 @@ -8225,14 +8024,6 @@ snapshots: '@smithy/types': 4.13.0 tslib: 2.8.1 - '@aws-sdk/util-user-agent-node@3.973.5': - dependencies: - '@aws-sdk/middleware-user-agent': 3.972.20 - '@aws-sdk/types': 3.973.5 - '@smithy/node-config-provider': 4.3.11 - '@smithy/types': 4.13.0 - tslib: 2.8.1 - '@aws-sdk/util-user-agent-node@3.973.6': dependencies: '@aws-sdk/middleware-user-agent': 3.972.20 @@ -8645,12 +8436,14 @@ snapshots: optionalDependencies: '@noble/hashes': 2.0.1 - '@google/genai@1.44.0': + '@google/genai@1.44.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))': dependencies: google-auth-library: 10.6.1 p-retry: 4.6.2 protobufjs: 7.5.4 ws: 8.19.0 + optionalDependencies: + '@modelcontextprotocol/sdk': 1.27.1(zod@4.3.6) transitivePeerDependencies: - bufferutil - supports-color @@ -8698,7 +8491,6 @@ snapshots: '@hono/node-server@1.19.10(hono@4.12.7)': dependencies: hono: 4.12.7 - optional: true '@huggingface/jinja@0.5.5': {} @@ -9025,9 +8817,9 @@ snapshots: std-env: 3.10.0 yoctocolors: 2.1.2 - '@mariozechner/pi-agent-core@0.57.1(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-agent-core@0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)': dependencies: - '@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) transitivePeerDependencies: - '@modelcontextprotocol/sdk' - aws-crt @@ -9037,11 +8829,11 @@ snapshots: - ws - zod - '@mariozechner/pi-ai@0.57.1(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-ai@0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)': dependencies: '@anthropic-ai/sdk': 0.73.0(zod@4.3.6) 
'@aws-sdk/client-bedrock-runtime': 3.1004.0 - '@google/genai': 1.44.0 + '@google/genai': 1.44.0(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6)) '@mistralai/mistralai': 1.14.1 '@sinclair/typebox': 0.34.48 ajv: 8.18.0 @@ -9061,11 +8853,11 @@ snapshots: - ws - zod - '@mariozechner/pi-coding-agent@0.57.1(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-coding-agent@0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6)': dependencies: '@mariozechner/jiti': 2.6.5 - '@mariozechner/pi-agent-core': 0.57.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-agent-core': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.57.1(@modelcontextprotocol/sdk@1.27.1(zod@4.3.6))(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-tui': 0.57.1 '@silvia-odwyer/photon-node': 0.3.4 chalk: 5.6.2 @@ -9141,6 +8933,28 @@ snapshots: - bufferutil - utf-8-validate + '@modelcontextprotocol/sdk@1.27.1(zod@4.3.6)': + dependencies: + '@hono/node-server': 1.19.10(hono@4.12.7) + ajv: 8.18.0 + ajv-formats: 3.0.1(ajv@8.18.0) + content-type: 1.0.5 + cors: 2.8.6 + cross-spawn: 7.0.6 + eventsource: 3.0.7 + eventsource-parser: 3.0.6 + express: 5.2.1 + express-rate-limit: 8.3.1(express@5.2.1) + hono: 4.12.7 + jose: 6.2.1 + json-schema-typed: 8.0.2 + pkce-challenge: 5.0.1 + raw-body: 3.0.2 + zod: 4.3.6 + zod-to-json-schema: 3.25.1(zod@4.3.6) + transitivePeerDependencies: + - supports-color + '@mozilla/readability@0.6.0': {} '@napi-rs/canvas-android-arm64@0.1.95': @@ -11916,6 +11730,11 @@ snapshots: core-util-is@1.0.3: {} + cors@2.8.6: + dependencies: + object-assign: 4.1.1 + vary: 1.1.2 + croner@10.0.1: {} cross-spawn@7.0.6: @@ -12167,6 +11986,12 @@ snapshots: transitivePeerDependencies: - bare-abort-controller + eventsource-parser@3.0.6: {} + + eventsource@3.0.7: + dependencies: + eventsource-parser: 3.0.6 + execa@4.1.0: dependencies: cross-spawn: 7.0.6 @@ -12183,6 +12008,11 @@ snapshots: exponential-backoff@3.1.3: 
{} + express-rate-limit@8.3.1(express@5.2.1): + dependencies: + express: 5.2.1 + ip-address: 10.1.0 + express@4.22.1: dependencies: accepts: 1.3.8 @@ -12826,6 +12656,8 @@ snapshots: jose@4.15.9: {} + jose@6.2.1: {} + js-stringify@1.0.2: {} js-tokens@10.0.0: {} @@ -12893,6 +12725,8 @@ snapshots: json-schema-traverse@1.0.0: {} + json-schema-typed@8.0.2: {} + json-schema@0.4.0: {} json-stringify-safe@5.0.1: {} @@ -13497,81 +13331,6 @@ snapshots: ws: 8.19.0 zod: 4.3.6 - openclaw@2026.3.11(@discordjs/opus@0.10.0)(@napi-rs/canvas@0.1.95)(@types/express@5.0.6)(audio-decode@2.2.3)(node-llama-cpp@3.16.2(typescript@5.9.3)): - dependencies: - '@agentclientprotocol/sdk': 0.16.1(zod@4.3.6) - '@aws-sdk/client-bedrock': 3.1007.0 - '@buape/carbon': 0.0.0-beta-20260216184201(@discordjs/opus@0.10.0)(hono@4.12.7)(opusscript@0.1.1) - '@clack/prompts': 1.1.0 - '@discordjs/voice': 0.19.1(@discordjs/opus@0.10.0)(opusscript@0.1.1) - '@grammyjs/runner': 2.0.3(grammy@1.41.1) - '@grammyjs/transformer-throttler': 1.2.1(grammy@1.41.1) - '@homebridge/ciao': 1.3.5 - '@larksuiteoapi/node-sdk': 1.59.0 - '@line/bot-sdk': 10.6.0 - '@lydell/node-pty': 1.2.0-beta.3 - '@mariozechner/pi-agent-core': 0.57.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-ai': 0.57.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-coding-agent': 0.57.1(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-tui': 0.57.1 - '@mozilla/readability': 0.6.0 - '@napi-rs/canvas': 0.1.95 - '@sinclair/typebox': 0.34.48 - '@slack/bolt': 4.6.0(@types/express@5.0.6) - '@slack/web-api': 7.14.1 - '@whiskeysockets/baileys': 7.0.0-rc.9(audio-decode@2.2.3)(sharp@0.34.5) - ajv: 8.18.0 - chalk: 5.6.2 - chokidar: 5.0.0 - cli-highlight: 2.1.11 - commander: 14.0.3 - croner: 10.0.1 - discord-api-types: 0.38.42 - dotenv: 17.3.1 - express: 5.2.1 - file-type: 21.3.1 - grammy: 1.41.1 - hono: 4.12.7 - https-proxy-agent: 8.0.0 - ipaddr.js: 2.3.0 - jiti: 2.6.1 - json5: 2.2.3 - jszip: 3.10.1 - linkedom: 0.18.12 - long: 5.3.2 - markdown-it: 14.1.1 - node-edge-tts: 1.2.10 - 
node-llama-cpp: 3.16.2(typescript@5.9.3) - opusscript: 0.1.1 - osc-progress: 0.3.0 - pdfjs-dist: 5.5.207 - playwright-core: 1.58.2 - qrcode-terminal: 0.12.0 - sharp: 0.34.5 - sqlite-vec: 0.1.7-alpha.2 - tar: 7.5.11 - tslog: 4.10.2 - undici: 7.22.0 - ws: 8.19.0 - yaml: 2.8.2 - zod: 4.3.6 - transitivePeerDependencies: - - '@discordjs/opus' - - '@modelcontextprotocol/sdk' - - '@types/express' - - audio-decode - - aws-crt - - bufferutil - - canvas - - debug - - encoding - - ffmpeg-static - - jimp - - link-preview-js - - node-opus - - supports-color - - utf-8-validate - opus-decoder@0.7.11: dependencies: '@wasm-audio-decoders/common': 9.0.7 @@ -13784,6 +13543,8 @@ snapshots: sonic-boom: 4.2.1 thread-stream: 3.1.0 + pkce-challenge@5.0.1: {} + playwright-core@1.58.2: {} playwright@1.58.2: @@ -14725,8 +14486,6 @@ snapshots: undici-types@7.18.2: {} - undici@7.22.0: {} - undici@7.24.0: {} unist-util-is@6.0.1: diff --git a/scripts/ci-changed-scope.mjs b/scripts/ci-changed-scope.mjs index a4018b30a2c..c5ed28319b1 100644 --- a/scripts/ci-changed-scope.mjs +++ b/scripts/ci-changed-scope.mjs @@ -5,6 +5,7 @@ import { appendFileSync } from "node:fs"; const DOCS_PATH_RE = /^(docs\/|.*\.mdx?$)/; const SKILLS_PYTHON_SCOPE_RE = /^skills\//; +const CI_WORKFLOW_SCOPE_RE = /^\.github\/workflows\/ci\.yml$/; const MACOS_PROTOCOL_GEN_RE = /^(apps\/macos\/Sources\/OpenClawProtocol\/|apps\/shared\/OpenClawKit\/Sources\/OpenClawProtocol\/)/; const MACOS_NATIVE_RE = /^(apps\/macos\/|apps\/ios\/|apps\/shared\/|Swabble\/)/; @@ -55,6 +56,12 @@ export function detectChangedScope(changedPaths) { runSkillsPython = true; } + if (CI_WORKFLOW_SCOPE_RE.test(path)) { + runMacos = true; + runAndroid = true; + runSkillsPython = true; + } + if (!MACOS_PROTOCOL_GEN_RE.test(path) && MACOS_NATIVE_RE.test(path)) { runMacos = true; } diff --git a/scripts/test-parallel.mjs b/scripts/test-parallel.mjs index ca7636394bb..1716f724bff 100644 --- a/scripts/test-parallel.mjs +++ b/scripts/test-parallel.mjs @@ -104,11 
+104,11 @@ const hostMemoryGiB = Math.floor(os.totalmem() / 1024 ** 3); const highMemLocalHost = !isCI && hostMemoryGiB >= 96; const lowMemLocalHost = !isCI && hostMemoryGiB < 64; const nodeMajor = Number.parseInt(process.versions.node.split(".")[0] ?? "", 10); -// vmForks is a big win for transform/import heavy suites, but Node 24+ -// regressed with Vitest's vm runtime in this repo, and low-memory local hosts -// are more likely to hit per-worker V8 heap ceilings. Keep it opt-out via -// OPENCLAW_TEST_VM_FORKS=0, and let users force-enable with =1. -const supportsVmForks = Number.isFinite(nodeMajor) ? nodeMajor < 24 : true; +// vmForks is a big win for transform/import heavy suites. Node 24 is stable again +// for the default unit-fast lane after moving the known flaky files to fork-only +// isolation, but Node 25+ still falls back to process forks until re-validated. +// Keep it opt-out via OPENCLAW_TEST_VM_FORKS=0, and let users force-enable with =1. +const supportsVmForks = Number.isFinite(nodeMajor) ? 
nodeMajor <= 24 : true; const useVmForks = process.env.OPENCLAW_TEST_VM_FORKS === "1" || (process.env.OPENCLAW_TEST_VM_FORKS !== "0" && !isWindows && supportsVmForks && !lowMemLocalHost); diff --git a/src/acp/translator.prompt-prefix.test.ts b/src/acp/translator.prompt-prefix.test.ts index 38c186519c0..9d53e3aa103 100644 --- a/src/acp/translator.prompt-prefix.test.ts +++ b/src/acp/translator.prompt-prefix.test.ts @@ -7,7 +7,52 @@ import { createInMemorySessionStore } from "./session.js"; import { AcpGatewayAgent } from "./translator.js"; import { createAcpConnection, createAcpGateway } from "./translator.test-helpers.js"; +const TEST_SESSION_ID = "session-1"; +const TEST_SESSION_KEY = "agent:main:main"; +const TEST_PROMPT = { + sessionId: TEST_SESSION_ID, + prompt: [{ type: "text", text: "hello" }], + _meta: {}, +} as unknown as PromptRequest; + describe("acp prompt cwd prefix", () => { + const createStopAfterSendSpy = () => + vi.fn(async (method: string) => { + if (method === "chat.send") { + throw new Error("stop-after-send"); + } + return {}; + }); + + async function runPromptAndCaptureRequest( + options: { + cwd?: string; + prefixCwd?: boolean; + provenanceMode?: "meta" | "meta+receipt"; + } = {}, + ) { + const sessionStore = createInMemorySessionStore(); + sessionStore.createSession({ + sessionId: TEST_SESSION_ID, + sessionKey: TEST_SESSION_KEY, + cwd: options.cwd ?? 
path.join(os.homedir(), "openclaw-test"), + }); + + const requestSpy = createStopAfterSendSpy(); + const agent = new AcpGatewayAgent( + createAcpConnection(), + createAcpGateway(requestSpy as unknown as GatewayClient["request"]), + { + sessionStore, + prefixCwd: options.prefixCwd, + provenanceMode: options.provenanceMode, + }, + ); + + await expect(agent.prompt(TEST_PROMPT)).rejects.toThrow("stop-after-send"); + return requestSpy; + } + async function runPromptWithCwd(cwd: string) { const pinnedHome = os.homedir(); const previousOpenClawHome = process.env.OPENCLAW_HOME; @@ -15,37 +60,8 @@ describe("acp prompt cwd prefix", () => { delete process.env.OPENCLAW_HOME; process.env.HOME = pinnedHome; - const sessionStore = createInMemorySessionStore(); - sessionStore.createSession({ - sessionId: "session-1", - sessionKey: "agent:main:main", - cwd, - }); - - const requestSpy = vi.fn(async (method: string) => { - if (method === "chat.send") { - throw new Error("stop-after-send"); - } - return {}; - }); - const agent = new AcpGatewayAgent( - createAcpConnection(), - createAcpGateway(requestSpy as unknown as GatewayClient["request"]), - { - sessionStore, - prefixCwd: true, - }, - ); - try { - await expect( - agent.prompt({ - sessionId: "session-1", - prompt: [{ type: "text", text: "hello" }], - _meta: {}, - } as unknown as PromptRequest), - ).rejects.toThrow("stop-after-send"); - return requestSpy; + return await runPromptAndCaptureRequest({ cwd, prefixCwd: true }); } finally { if (previousOpenClawHome === undefined) { delete process.env.OPENCLAW_HOME; @@ -83,42 +99,13 @@ describe("acp prompt cwd prefix", () => { }); it("injects system provenance metadata when enabled", async () => { - const sessionStore = createInMemorySessionStore(); - sessionStore.createSession({ - sessionId: "session-1", - sessionKey: "agent:main:main", - cwd: path.join(os.homedir(), "openclaw-test"), - }); - - const requestSpy = vi.fn(async (method: string) => { - if (method === "chat.send") { - throw 
new Error("stop-after-send"); - } - return {}; - }); - const agent = new AcpGatewayAgent( - createAcpConnection(), - createAcpGateway(requestSpy as unknown as GatewayClient["request"]), - { - sessionStore, - provenanceMode: "meta", - }, - ); - - await expect( - agent.prompt({ - sessionId: "session-1", - prompt: [{ type: "text", text: "hello" }], - _meta: {}, - } as unknown as PromptRequest), - ).rejects.toThrow("stop-after-send"); - + const requestSpy = await runPromptAndCaptureRequest({ provenanceMode: "meta" }); expect(requestSpy).toHaveBeenCalledWith( "chat.send", expect.objectContaining({ systemInputProvenance: { kind: "external_user", - originSessionId: "session-1", + originSessionId: TEST_SESSION_ID, sourceChannel: "acp", sourceTool: "openclaw_acp", }, @@ -129,42 +116,13 @@ describe("acp prompt cwd prefix", () => { }); it("injects a system provenance receipt when requested", async () => { - const sessionStore = createInMemorySessionStore(); - sessionStore.createSession({ - sessionId: "session-1", - sessionKey: "agent:main:main", - cwd: path.join(os.homedir(), "openclaw-test"), - }); - - const requestSpy = vi.fn(async (method: string) => { - if (method === "chat.send") { - throw new Error("stop-after-send"); - } - return {}; - }); - const agent = new AcpGatewayAgent( - createAcpConnection(), - createAcpGateway(requestSpy as unknown as GatewayClient["request"]), - { - sessionStore, - provenanceMode: "meta+receipt", - }, - ); - - await expect( - agent.prompt({ - sessionId: "session-1", - prompt: [{ type: "text", text: "hello" }], - _meta: {}, - } as unknown as PromptRequest), - ).rejects.toThrow("stop-after-send"); - + const requestSpy = await runPromptAndCaptureRequest({ provenanceMode: "meta+receipt" }); expect(requestSpy).toHaveBeenCalledWith( "chat.send", expect.objectContaining({ systemInputProvenance: { kind: "external_user", - originSessionId: "session-1", + originSessionId: TEST_SESSION_ID, sourceChannel: "acp", sourceTool: "openclaw_acp", }, @@ -182,14 
+140,14 @@ describe("acp prompt cwd prefix", () => { expect(requestSpy).toHaveBeenCalledWith( "chat.send", expect.objectContaining({ - systemProvenanceReceipt: expect.stringContaining("originSessionId=session-1"), + systemProvenanceReceipt: expect.stringContaining(`originSessionId=${TEST_SESSION_ID}`), }), { expectFinal: true }, ); expect(requestSpy).toHaveBeenCalledWith( "chat.send", expect.objectContaining({ - systemProvenanceReceipt: expect.stringContaining("targetSession=agent:main:main"), + systemProvenanceReceipt: expect.stringContaining(`targetSession=${TEST_SESSION_KEY}`), }), { expectFinal: true }, ); diff --git a/src/agents/bash-tools.exec-host-gateway.ts b/src/agents/bash-tools.exec-host-gateway.ts index 6b43fbe8663..ac6ed57aa72 100644 --- a/src/agents/bash-tools.exec-host-gateway.ts +++ b/src/agents/bash-tools.exec-host-gateway.ts @@ -1,10 +1,5 @@ import type { AgentToolResult } from "@mariozechner/pi-agent-core"; -import { loadConfig } from "../config/config.js"; import { buildExecApprovalUnavailableReplyPayload } from "../infra/exec-approval-reply.js"; -import { - hasConfiguredExecApprovalDmRoute, - resolveExecApprovalInitiatingSurfaceState, -} from "../infra/exec-approval-surface.js"; import { addAllowlistEntry, type ExecAsk, @@ -26,7 +21,7 @@ import { registerExecApprovalRequestForHostOrThrow, } from "./bash-tools.exec-approval-request.js"; import { - createDefaultExecApprovalRequestContext, + createAndRegisterDefaultExecApprovalRequest, resolveBaseExecApprovalDecision, resolveApprovalDecisionOrUndefined, resolveExecHostApprovalContext, @@ -149,52 +144,36 @@ export async function processGatewayAllowlist( approvalId, approvalSlug, warningText, - expiresAtMs: defaultExpiresAtMs, - preResolvedDecision: defaultPreResolvedDecision, - } = createDefaultExecApprovalRequestContext({ + expiresAtMs, + preResolvedDecision, + initiatingSurface, + sentApproverDms, + unavailableReason, + } = await createAndRegisterDefaultExecApprovalRequest({ warnings: 
params.warnings, approvalRunningNoticeMs: params.approvalRunningNoticeMs, createApprovalSlug, + turnSourceChannel: params.turnSourceChannel, + turnSourceAccountId: params.turnSourceAccountId, + register: async (approvalId) => + await registerExecApprovalRequestForHostOrThrow({ + approvalId, + command: params.command, + workdir: params.workdir, + host: "gateway", + security: hostSecurity, + ask: hostAsk, + ...buildExecApprovalRequesterContext({ + agentId: params.agentId, + sessionKey: params.sessionKey, + }), + resolvedPath: allowlistEval.segments[0]?.resolution?.resolvedPath, + ...buildExecApprovalTurnSourceContext(params), + }), }); const resolvedPath = allowlistEval.segments[0]?.resolution?.resolvedPath; const effectiveTimeout = typeof params.timeoutSec === "number" ? params.timeoutSec : params.defaultTimeoutSec; - let expiresAtMs = defaultExpiresAtMs; - let preResolvedDecision = defaultPreResolvedDecision; - - // Register first so the returned approval ID is actionable immediately. - const registration = await registerExecApprovalRequestForHostOrThrow({ - approvalId, - command: params.command, - workdir: params.workdir, - host: "gateway", - security: hostSecurity, - ask: hostAsk, - ...buildExecApprovalRequesterContext({ - agentId: params.agentId, - sessionKey: params.sessionKey, - }), - resolvedPath, - ...buildExecApprovalTurnSourceContext(params), - }); - expiresAtMs = registration.expiresAtMs; - preResolvedDecision = registration.finalDecision; - const initiatingSurface = resolveExecApprovalInitiatingSurfaceState({ - channel: params.turnSourceChannel, - accountId: params.turnSourceAccountId, - }); - const cfg = loadConfig(); - const sentApproverDms = - (initiatingSurface.kind === "disabled" || initiatingSurface.kind === "unsupported") && - hasConfiguredExecApprovalDmRoute(cfg); - const unavailableReason = - preResolvedDecision === null - ? "no-approval-route" - : initiatingSurface.kind === "disabled" - ? 
"initiating-platform-disabled" - : initiatingSurface.kind === "unsupported" - ? "initiating-platform-unsupported" - : null; void (async () => { const decision = await resolveApprovalDecisionOrUndefined({ diff --git a/src/agents/bash-tools.exec-host-node.ts b/src/agents/bash-tools.exec-host-node.ts index c3a23197f0a..6f5fc25f966 100644 --- a/src/agents/bash-tools.exec-host-node.ts +++ b/src/agents/bash-tools.exec-host-node.ts @@ -1,11 +1,6 @@ import crypto from "node:crypto"; import type { AgentToolResult } from "@mariozechner/pi-agent-core"; -import { loadConfig } from "../config/config.js"; import { buildExecApprovalUnavailableReplyPayload } from "../infra/exec-approval-reply.js"; -import { - hasConfiguredExecApprovalDmRoute, - resolveExecApprovalInitiatingSurfaceState, -} from "../infra/exec-approval-surface.js"; import { type ExecApprovalsFile, type ExecAsk, @@ -25,7 +20,7 @@ import { registerExecApprovalRequestForHostOrThrow, } from "./bash-tools.exec-approval-request.js"; import { - createDefaultExecApprovalRequestContext, + createAndRegisterDefaultExecApprovalRequest, resolveBaseExecApprovalDecision, resolveApprovalDecisionOrUndefined, resolveExecHostApprovalContext, @@ -225,50 +220,34 @@ export async function executeNodeHostCommand( approvalId, approvalSlug, warningText, - expiresAtMs: defaultExpiresAtMs, - preResolvedDecision: defaultPreResolvedDecision, - } = createDefaultExecApprovalRequestContext({ + expiresAtMs, + preResolvedDecision, + initiatingSurface, + sentApproverDms, + unavailableReason, + } = await createAndRegisterDefaultExecApprovalRequest({ warnings: params.warnings, approvalRunningNoticeMs: params.approvalRunningNoticeMs, createApprovalSlug, + turnSourceChannel: params.turnSourceChannel, + turnSourceAccountId: params.turnSourceAccountId, + register: async (approvalId) => + await registerExecApprovalRequestForHostOrThrow({ + approvalId, + systemRunPlan: prepared.plan, + env: nodeEnv, + workdir: runCwd, + host: "node", + nodeId, + security: 
hostSecurity, + ask: hostAsk, + ...buildExecApprovalRequesterContext({ + agentId: runAgentId, + sessionKey: runSessionKey, + }), + ...buildExecApprovalTurnSourceContext(params), + }), }); - let expiresAtMs = defaultExpiresAtMs; - let preResolvedDecision = defaultPreResolvedDecision; - - // Register first so the returned approval ID is actionable immediately. - const registration = await registerExecApprovalRequestForHostOrThrow({ - approvalId, - systemRunPlan: prepared.plan, - env: nodeEnv, - workdir: runCwd, - host: "node", - nodeId, - security: hostSecurity, - ask: hostAsk, - ...buildExecApprovalRequesterContext({ - agentId: runAgentId, - sessionKey: runSessionKey, - }), - ...buildExecApprovalTurnSourceContext(params), - }); - expiresAtMs = registration.expiresAtMs; - preResolvedDecision = registration.finalDecision; - const initiatingSurface = resolveExecApprovalInitiatingSurfaceState({ - channel: params.turnSourceChannel, - accountId: params.turnSourceAccountId, - }); - const cfg = loadConfig(); - const sentApproverDms = - (initiatingSurface.kind === "disabled" || initiatingSurface.kind === "unsupported") && - hasConfiguredExecApprovalDmRoute(cfg); - const unavailableReason = - preResolvedDecision === null - ? "no-approval-route" - : initiatingSurface.kind === "disabled" - ? "initiating-platform-disabled" - : initiatingSurface.kind === "unsupported" - ? 
"initiating-platform-unsupported" - : null; void (async () => { const decision = await resolveApprovalDecisionOrUndefined({ diff --git a/src/agents/bash-tools.exec-host-shared.ts b/src/agents/bash-tools.exec-host-shared.ts index c24e0a2f1fa..e62bc8d484a 100644 --- a/src/agents/bash-tools.exec-host-shared.ts +++ b/src/agents/bash-tools.exec-host-shared.ts @@ -1,4 +1,10 @@ import crypto from "node:crypto"; +import { loadConfig } from "../config/config.js"; +import { + hasConfiguredExecApprovalDmRoute, + type ExecApprovalInitiatingSurfaceState, + resolveExecApprovalInitiatingSurfaceState, +} from "../infra/exec-approval-surface.js"; import { maxAsk, minSecurity, @@ -6,7 +12,10 @@ import { type ExecAsk, type ExecSecurity, } from "../infra/exec-approvals.js"; -import { resolveRegisteredExecApprovalDecision } from "./bash-tools.exec-approval-request.js"; +import { + type ExecApprovalRegistration, + resolveRegisteredExecApprovalDecision, +} from "./bash-tools.exec-approval-request.js"; import { DEFAULT_APPROVAL_TIMEOUT_MS } from "./bash-tools.exec-runtime.js"; type ResolvedExecApprovals = ReturnType; @@ -28,6 +37,22 @@ export type ExecApprovalRequestState = ExecApprovalPendingState & { noticeSeconds: number; }; +export type ExecApprovalUnavailableReason = + | "no-approval-route" + | "initiating-platform-disabled" + | "initiating-platform-unsupported"; + +export type RegisteredExecApprovalRequestContext = { + approvalId: string; + approvalSlug: string; + warningText: string; + expiresAtMs: number; + preResolvedDecision: string | null | undefined; + initiatingSurface: ExecApprovalInitiatingSurfaceState; + sentApproverDms: boolean; + unavailableReason: ExecApprovalUnavailableReason | null; +}; + export function createExecApprovalPendingState(params: { warnings: string[]; timeoutMs: number; @@ -158,3 +183,77 @@ export async function resolveApprovalDecisionOrUndefined(params: { return undefined; } } + +export function resolveExecApprovalUnavailableState(params: { + 
turnSourceChannel?: string; + turnSourceAccountId?: string; + preResolvedDecision: string | null | undefined; +}): { + initiatingSurface: ExecApprovalInitiatingSurfaceState; + sentApproverDms: boolean; + unavailableReason: ExecApprovalUnavailableReason | null; +} { + const initiatingSurface = resolveExecApprovalInitiatingSurfaceState({ + channel: params.turnSourceChannel, + accountId: params.turnSourceAccountId, + }); + const sentApproverDms = + (initiatingSurface.kind === "disabled" || initiatingSurface.kind === "unsupported") && + hasConfiguredExecApprovalDmRoute(loadConfig()); + const unavailableReason = + params.preResolvedDecision === null + ? "no-approval-route" + : initiatingSurface.kind === "disabled" + ? "initiating-platform-disabled" + : initiatingSurface.kind === "unsupported" + ? "initiating-platform-unsupported" + : null; + return { + initiatingSurface, + sentApproverDms, + unavailableReason, + }; +} + +export async function createAndRegisterDefaultExecApprovalRequest(params: { + warnings: string[]; + approvalRunningNoticeMs: number; + createApprovalSlug: (approvalId: string) => string; + turnSourceChannel?: string; + turnSourceAccountId?: string; + register: (approvalId: string) => Promise; +}): Promise { + const { + approvalId, + approvalSlug, + warningText, + expiresAtMs: defaultExpiresAtMs, + preResolvedDecision: defaultPreResolvedDecision, + } = createDefaultExecApprovalRequestContext({ + warnings: params.warnings, + approvalRunningNoticeMs: params.approvalRunningNoticeMs, + createApprovalSlug: params.createApprovalSlug, + }); + const registration = await params.register(approvalId); + const preResolvedDecision = registration.finalDecision; + const { initiatingSurface, sentApproverDms, unavailableReason } = + resolveExecApprovalUnavailableState({ + turnSourceChannel: params.turnSourceChannel, + turnSourceAccountId: params.turnSourceAccountId, + preResolvedDecision, + }); + + return { + approvalId, + approvalSlug, + warningText, + expiresAtMs: 
registration.expiresAtMs ?? defaultExpiresAtMs, + preResolvedDecision: + registration.finalDecision === undefined + ? defaultPreResolvedDecision + : registration.finalDecision, + initiatingSurface, + sentApproverDms, + unavailableReason, + }; +} diff --git a/src/agents/bash-tools.exec.approval-id.test.ts b/src/agents/bash-tools.exec.approval-id.test.ts index cc94f83d665..211d8e3dcaa 100644 --- a/src/agents/bash-tools.exec.approval-id.test.ts +++ b/src/agents/bash-tools.exec.approval-id.test.ts @@ -43,6 +43,162 @@ function buildPreparedSystemRunPayload(rawInvokeParams: unknown) { return buildSystemRunPreparePayload(params); } +function getTestConfigPath() { + return path.join(process.env.HOME ?? "", ".openclaw", "openclaw.json"); +} + +async function writeOpenClawConfig(config: Record, pretty = false) { + const configPath = getTestConfigPath(); + await fs.mkdir(path.dirname(configPath), { recursive: true }); + await fs.writeFile(configPath, JSON.stringify(config, null, pretty ? 2 : undefined)); +} + +async function writeExecApprovalsConfig(config: Record) { + const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json"); + await fs.mkdir(path.dirname(approvalsPath), { recursive: true }); + await fs.writeFile(approvalsPath, JSON.stringify(config, null, 2)); +} + +function acceptedApprovalResponse(params: unknown) { + return { status: "accepted", id: (params as { id?: string })?.id }; +} + +function getResultText(result: { content: Array<{ type?: string; text?: string }> }) { + return result.content.find((part) => part.type === "text")?.text ?? 
""; +} + +function expectPendingApprovalText( + result: { + details: { status?: string }; + content: Array<{ type?: string; text?: string }>; + }, + options: { + command: string; + host: "gateway" | "node"; + nodeId?: string; + interactive?: boolean; + }, +) { + expect(result.details.status).toBe("approval-pending"); + const details = result.details as { approvalId: string; approvalSlug: string }; + const pendingText = getResultText(result); + expect(pendingText).toContain( + `Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`, + ); + expect(pendingText).toContain(`full ${details.approvalId}`); + expect(pendingText).toContain(`Host: ${options.host}`); + if (options.nodeId) { + expect(pendingText).toContain(`Node: ${options.nodeId}`); + } + expect(pendingText).toContain(`CWD: ${process.cwd()}`); + expect(pendingText).toContain("Command:\n```sh\n"); + expect(pendingText).toContain(options.command); + if (options.interactive) { + expect(pendingText).toContain("Mode: foreground (interactive approvals available)."); + expect(pendingText).toContain("Background mode requires pre-approved policy"); + } + return details; +} + +function expectPendingCommandText( + result: { + details: { status?: string }; + content: Array<{ type?: string; text?: string }>; + }, + command: string, +) { + expect(result.details.status).toBe("approval-pending"); + const text = getResultText(result); + expect(text).toContain("Command:\n```sh\n"); + expect(text).toContain(command); +} + +function mockGatewayOkCalls(calls: string[]) { + vi.mocked(callGatewayTool).mockImplementation(async (method) => { + calls.push(method); + return { ok: true }; + }); +} + +function createElevatedAllowlistExecTool() { + return createExecTool({ + ask: "on-miss", + security: "allowlist", + approvalRunningNoticeMs: 0, + elevated: { enabled: true, allowed: true, defaultLevel: "ask" }, + }); +} + +async function expectGatewayExecWithoutApproval(options: { + config: Record; + command: string; + 
ask?: "always" | "on-miss" | "off"; +}) { + await writeExecApprovalsConfig(options.config); + const calls: string[] = []; + mockGatewayOkCalls(calls); + + const tool = createExecTool({ + host: "gateway", + ask: options.ask, + security: "full", + approvalRunningNoticeMs: 0, + }); + + const result = await tool.execute("call-no-approval", { command: options.command }); + expect(result.details.status).toBe("completed"); + expect(calls).not.toContain("exec.approval.request"); + expect(calls).not.toContain("exec.approval.waitDecision"); +} + +function mockAcceptedApprovalFlow(options: { + onAgent?: (params: Record) => void; + onNodeInvoke?: (params: unknown) => unknown; +}) { + vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { + if (method === "exec.approval.request") { + return acceptedApprovalResponse(params); + } + if (method === "exec.approval.waitDecision") { + return { decision: "allow-once" }; + } + if (method === "agent" && options.onAgent) { + options.onAgent(params as Record); + return { status: "ok" }; + } + if (method === "node.invoke" && options.onNodeInvoke) { + return await options.onNodeInvoke(params); + } + return { ok: true }; + }); +} + +function mockPendingApprovalRegistration() { + vi.mocked(callGatewayTool).mockImplementation(async (method) => { + if (method === "exec.approval.request") { + return { status: "accepted", id: "approval-id" }; + } + if (method === "exec.approval.waitDecision") { + return { decision: null }; + } + return { ok: true }; + }); +} + +function expectApprovalUnavailableText(result: { + details: { status?: string }; + content: Array<{ type?: string; text?: string }>; +}) { + expect(result.details.status).toBe("approval-unavailable"); + const text = result.content.find((part) => part.type === "text")?.text ?? 
""; + expect(text).not.toContain("/approve"); + expect(text).not.toContain("npm view diver name version description"); + expect(text).not.toContain("Pending command:"); + expect(text).not.toContain("Host:"); + expect(text).not.toContain("CWD:"); + return text; +} + describe("exec approvals", () => { let previousHome: string | undefined; let previousUserProfile: string | undefined; @@ -81,18 +237,11 @@ describe("exec approvals", () => { let invokeParams: unknown; let agentParams: unknown; - vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { - if (method === "exec.approval.request") { - return { status: "accepted", id: (params as { id?: string })?.id }; - } - if (method === "exec.approval.waitDecision") { - return { decision: "allow-once" }; - } - if (method === "agent") { + mockAcceptedApprovalFlow({ + onAgent: (params) => { agentParams = params; - return { status: "ok" }; - } - if (method === "node.invoke") { + }, + onNodeInvoke: (params) => { const invoke = params as { command?: string }; if (invoke.command === "system.run.prepare") { return buildPreparedSystemRunPayload(params); @@ -101,8 +250,7 @@ describe("exec approvals", () => { invokeParams = params; return { payload: { success: true, stdout: "ok" } }; } - } - return { ok: true }; + }, }); const tool = createExecTool({ @@ -113,19 +261,12 @@ describe("exec approvals", () => { }); const result = await tool.execute("call1", { command: "ls -la" }); - expect(result.details.status).toBe("approval-pending"); - const details = result.details as { approvalId: string; approvalSlug: string }; - const pendingText = result.content.find((part) => part.type === "text")?.text ?? 
""; - expect(pendingText).toContain( - `Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`, - ); - expect(pendingText).toContain(`full ${details.approvalId}`); - expect(pendingText).toContain("Host: node"); - expect(pendingText).toContain("Node: node-1"); - expect(pendingText).toContain(`CWD: ${process.cwd()}`); - expect(pendingText).toContain("Command:\n```sh\nls -la\n```"); - expect(pendingText).toContain("Mode: foreground (interactive approvals available)."); - expect(pendingText).toContain("Background mode requires pre-approved policy"); + const details = expectPendingApprovalText(result, { + command: "ls -la", + host: "node", + nodeId: "node-1", + interactive: true, + }); const approvalId = details.approvalId; await expect @@ -214,74 +355,28 @@ describe("exec approvals", () => { }); it("uses exec-approvals ask=off to suppress gateway prompts", async () => { - const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json"); - await fs.mkdir(path.dirname(approvalsPath), { recursive: true }); - await fs.writeFile( - approvalsPath, - JSON.stringify( - { - version: 1, - defaults: { security: "full", ask: "off", askFallback: "full" }, - agents: { - main: { security: "full", ask: "off", askFallback: "full" }, - }, + await expectGatewayExecWithoutApproval({ + config: { + version: 1, + defaults: { security: "full", ask: "off", askFallback: "full" }, + agents: { + main: { security: "full", ask: "off", askFallback: "full" }, }, - null, - 2, - ), - ); - - const calls: string[] = []; - vi.mocked(callGatewayTool).mockImplementation(async (method) => { - calls.push(method); - return { ok: true }; - }); - - const tool = createExecTool({ - host: "gateway", + }, + command: "echo ok", ask: "on-miss", - security: "full", - approvalRunningNoticeMs: 0, }); - - const result = await tool.execute("call3b", { command: "echo ok" }); - expect(result.details.status).toBe("completed"); - 
expect(calls).not.toContain("exec.approval.request"); - expect(calls).not.toContain("exec.approval.waitDecision"); }); it("inherits ask=off from exec-approvals defaults when tool ask is unset", async () => { - const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json"); - await fs.mkdir(path.dirname(approvalsPath), { recursive: true }); - await fs.writeFile( - approvalsPath, - JSON.stringify( - { - version: 1, - defaults: { security: "full", ask: "off", askFallback: "full" }, - agents: {}, - }, - null, - 2, - ), - ); - - const calls: string[] = []; - vi.mocked(callGatewayTool).mockImplementation(async (method) => { - calls.push(method); - return { ok: true }; + await expectGatewayExecWithoutApproval({ + config: { + version: 1, + defaults: { security: "full", ask: "off", askFallback: "full" }, + agents: {}, + }, + command: "echo ok", }); - - const tool = createExecTool({ - host: "gateway", - security: "full", - approvalRunningNoticeMs: 0, - }); - - const result = await tool.execute("call3c", { command: "echo ok" }); - expect(result.details.status).toBe("completed"); - expect(calls).not.toContain("exec.approval.request"); - expect(calls).not.toContain("exec.approval.waitDecision"); }); it("requires approval for elevated ask when allowlist misses", async () => { @@ -296,7 +391,7 @@ describe("exec approvals", () => { if (method === "exec.approval.request") { resolveApproval?.(); // Return registration confirmation - return { status: "accepted", id: (params as { id?: string })?.id }; + return acceptedApprovalResponse(params); } if (method === "exec.approval.waitDecision") { return { decision: "deny" }; @@ -304,24 +399,10 @@ describe("exec approvals", () => { return { ok: true }; }); - const tool = createExecTool({ - ask: "on-miss", - security: "allowlist", - approvalRunningNoticeMs: 0, - elevated: { enabled: true, allowed: true, defaultLevel: "ask" }, - }); + const tool = createElevatedAllowlistExecTool(); const result = await 
tool.execute("call4", { command: "echo ok", elevated: true }); - expect(result.details.status).toBe("approval-pending"); - const details = result.details as { approvalId: string; approvalSlug: string }; - const pendingText = result.content.find((part) => part.type === "text")?.text ?? ""; - expect(pendingText).toContain( - `Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`, - ); - expect(pendingText).toContain(`full ${details.approvalId}`); - expect(pendingText).toContain("Host: gateway"); - expect(pendingText).toContain(`CWD: ${process.cwd()}`); - expect(pendingText).toContain("Command:\n```sh\necho ok\n```"); + expectPendingApprovalText(result, { command: "echo ok", host: "gateway" }); await approvalSeen; expect(calls).toContain("exec.approval.request"); expect(calls).toContain("exec.approval.waitDecision"); @@ -330,18 +411,10 @@ describe("exec approvals", () => { it("starts a direct agent follow-up after approved gateway exec completes", async () => { const agentCalls: Array> = []; - vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { - if (method === "exec.approval.request") { - return { status: "accepted", id: (params as { id?: string })?.id }; - } - if (method === "exec.approval.waitDecision") { - return { decision: "allow-once" }; - } - if (method === "agent") { - agentCalls.push(params as Record); - return { status: "ok" }; - } - return { ok: true }; + mockAcceptedApprovalFlow({ + onAgent: (params) => { + agentCalls.push(params); + }, }); const tool = createExecTool({ @@ -388,7 +461,7 @@ describe("exec approvals", () => { if (typeof request.id === "string") { requestIds.push(request.id); } - return { status: "accepted", id: request.id }; + return acceptedApprovalResponse(request); } if (method === "exec.approval.waitDecision") { const wait = params as { id?: string }; @@ -400,12 +473,7 @@ describe("exec approvals", () => { return { ok: true }; }); - const tool = createExecTool({ - ask: "on-miss", - 
security: "allowlist", - approvalRunningNoticeMs: 0, - elevated: { enabled: true, allowed: true, defaultLevel: "ask" }, - }); + const tool = createElevatedAllowlistExecTool(); const first = await tool.execute("call-seq-1", { command: "npm view diver --json", @@ -429,7 +497,7 @@ describe("exec approvals", () => { vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => { calls.push(method); if (method === "exec.approval.request") { - return { status: "accepted", id: (params as { id?: string })?.id }; + return acceptedApprovalResponse(params); } if (method === "exec.approval.waitDecision") { return { decision: "deny" }; @@ -448,11 +516,7 @@ describe("exec approvals", () => { command: "npm view diver --json | jq .name && brew outdated", }); - expect(result.details.status).toBe("approval-pending"); - const pendingText = result.content.find((part) => part.type === "text")?.text ?? ""; - expect(pendingText).toContain( - "Command:\n```sh\nnpm view diver --json | jq .name && brew outdated\n```", - ); + expectPendingCommandText(result, "npm view diver --json | jq .name && brew outdated"); expect(calls).toContain("exec.approval.request"); }); @@ -480,11 +544,7 @@ describe("exec approvals", () => { command: "npm view diver --json | jq .name && brew outdated", }); - expect(result.details.status).toBe("approval-pending"); - const pendingText = result.content.find((part) => part.type === "text")?.text ?? ""; - expect(pendingText).toContain( - "Command:\n```sh\nnpm view diver --json | jq .name && brew outdated\n```", - ); + expectPendingCommandText(result, "npm view diver --json | jq .name && brew outdated"); expect(calls).toContain("exec.approval.request"); }); @@ -551,30 +611,17 @@ describe("exec approvals", () => { }); it("returns an unavailable approval message instead of a local /approve prompt when discord exec approvals are disabled", async () => { - const configPath = path.join(process.env.HOME ?? 
"", ".openclaw", "openclaw.json"); - await fs.mkdir(path.dirname(configPath), { recursive: true }); - await fs.writeFile( - configPath, - JSON.stringify({ - channels: { - discord: { - enabled: true, - execApprovals: { enabled: false }, - }, + await writeOpenClawConfig({ + channels: { + discord: { + enabled: true, + execApprovals: { enabled: false }, }, - }), - ); - - vi.mocked(callGatewayTool).mockImplementation(async (method) => { - if (method === "exec.approval.request") { - return { status: "accepted", id: "approval-id" }; - } - if (method === "exec.approval.waitDecision") { - return { decision: null }; - } - return { ok: true }; + }, }); + mockPendingApprovalRegistration(); + const tool = createExecTool({ host: "gateway", ask: "always", @@ -588,49 +635,29 @@ describe("exec approvals", () => { command: "npm view diver name version description", }); - expect(result.details.status).toBe("approval-unavailable"); - const text = result.content.find((part) => part.type === "text")?.text ?? ""; + const text = expectApprovalUnavailableText(result); expect(text).toContain("chat exec approvals are not enabled on Discord"); expect(text).toContain("Web UI or terminal UI"); - expect(text).not.toContain("/approve"); - expect(text).not.toContain("npm view diver name version description"); - expect(text).not.toContain("Pending command:"); - expect(text).not.toContain("Host:"); - expect(text).not.toContain("CWD:"); }); it("tells Telegram users that allowed approvers were DMed when Telegram approvals are disabled but Discord DM approvals are enabled", async () => { - const configPath = path.join(process.env.HOME ?? 
"", ".openclaw", "openclaw.json"); - await fs.mkdir(path.dirname(configPath), { recursive: true }); - await fs.writeFile( - configPath, - JSON.stringify( - { - channels: { - telegram: { - enabled: true, - execApprovals: { enabled: false }, - }, - discord: { - enabled: true, - execApprovals: { enabled: true, approvers: ["123"], target: "dm" }, - }, + await writeOpenClawConfig( + { + channels: { + telegram: { + enabled: true, + execApprovals: { enabled: false }, + }, + discord: { + enabled: true, + execApprovals: { enabled: true, approvers: ["123"], target: "dm" }, }, }, - null, - 2, - ), + }, + true, ); - vi.mocked(callGatewayTool).mockImplementation(async (method) => { - if (method === "exec.approval.request") { - return { status: "accepted", id: "approval-id" }; - } - if (method === "exec.approval.waitDecision") { - return { decision: null }; - } - return { ok: true }; - }); + mockPendingApprovalRegistration(); const tool = createExecTool({ host: "gateway", @@ -645,14 +672,8 @@ describe("exec approvals", () => { command: "npm view diver name version description", }); - expect(result.details.status).toBe("approval-unavailable"); - const text = result.content.find((part) => part.type === "text")?.text ?? ""; + const text = expectApprovalUnavailableText(result); expect(text).toContain("Approval required. 
I sent the allowed approvers DMs."); - expect(text).not.toContain("/approve"); - expect(text).not.toContain("npm view diver name version description"); - expect(text).not.toContain("Pending command:"); - expect(text).not.toContain("Host:"); - expect(text).not.toContain("CWD:"); }); it("denies node obfuscated command when approval request times out", async () => { diff --git a/src/agents/model-fallback.probe.test.ts b/src/agents/model-fallback.probe.test.ts index d08bd0d4beb..3969416cd38 100644 --- a/src/agents/model-fallback.probe.test.ts +++ b/src/agents/model-fallback.probe.test.ts @@ -46,6 +46,20 @@ function expectFallbackUsed( expect(result.attempts[0]?.reason).toBe("rate_limit"); } +function expectPrimarySkippedForReason( + result: { result: unknown; attempts: Array<{ reason?: string }> }, + run: { + (...args: unknown[]): unknown; + mock: { calls: unknown[][] }; + }, + reason: string, +) { + expect(result.result).toBe("ok"); + expect(run).toHaveBeenCalledTimes(1); + expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5"); + expect(result.attempts[0]?.reason).toBe(reason); +} + function expectPrimaryProbeSuccess( result: { result: unknown }, run: { @@ -183,11 +197,7 @@ describe("runWithModelFallback – probe logic", () => { const run = vi.fn().mockResolvedValue("ok"); const result = await runPrimaryCandidate(cfg, run); - - expect(result.result).toBe("ok"); - expect(run).toHaveBeenCalledTimes(1); - expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5"); - expect(result.attempts[0]?.reason).toBe("billing"); + expectPrimarySkippedForReason(result, run, "billing"); }); it("probes primary model when within 2-min margin of cooldown expiry", async () => { @@ -540,10 +550,6 @@ describe("runWithModelFallback – probe logic", () => { const run = vi.fn().mockResolvedValue("ok"); const result = await runPrimaryCandidate(cfg, run); - - expect(result.result).toBe("ok"); - expect(run).toHaveBeenCalledTimes(1); - expect(run).toHaveBeenCalledWith("anthropic", 
"claude-haiku-3-5"); - expect(result.attempts[0]?.reason).toBe("billing"); + expectPrimarySkippedForReason(result, run, "billing"); }); }); diff --git a/src/agents/model-selection.test.ts b/src/agents/model-selection.test.ts index 63aef63561c..35ac52dcf26 100644 --- a/src/agents/model-selection.test.ts +++ b/src/agents/model-selection.test.ts @@ -80,131 +80,121 @@ describe("model-selection", () => { }); describe("parseModelRef", () => { - it("should parse full model refs", () => { - expect(parseModelRef("anthropic/claude-3-5-sonnet", "openai")).toEqual({ - provider: "anthropic", - model: "claude-3-5-sonnet", - }); + const expectParsedModelVariants = ( + variants: string[], + defaultProvider: string, + expected: { provider: string; model: string }, + ) => { + for (const raw of variants) { + expect(parseModelRef(raw, defaultProvider), raw).toEqual(expected); + } + }; + + it.each([ + { + name: "parses explicit provider/model refs", + variants: ["anthropic/claude-3-5-sonnet"], + defaultProvider: "openai", + expected: { provider: "anthropic", model: "claude-3-5-sonnet" }, + }, + { + name: "uses the default provider when omitted", + variants: ["claude-3-5-sonnet"], + defaultProvider: "anthropic", + expected: { provider: "anthropic", model: "claude-3-5-sonnet" }, + }, + { + name: "preserves nested model ids after the provider prefix", + variants: ["nvidia/moonshotai/kimi-k2.5"], + defaultProvider: "anthropic", + expected: { provider: "nvidia", model: "moonshotai/kimi-k2.5" }, + }, + { + name: "normalizes anthropic shorthand aliases", + variants: ["anthropic/opus-4.6", "opus-4.6", " anthropic / opus-4.6 "], + defaultProvider: "anthropic", + expected: { provider: "anthropic", model: "claude-opus-4-6" }, + }, + { + name: "normalizes anthropic sonnet aliases", + variants: ["anthropic/sonnet-4.6", "sonnet-4.6"], + defaultProvider: "anthropic", + expected: { provider: "anthropic", model: "claude-sonnet-4-6" }, + }, + { + name: "normalizes deprecated google flash preview ids", + 
variants: ["google/gemini-3.1-flash-preview", "gemini-3.1-flash-preview"], + defaultProvider: "google", + expected: { provider: "google", model: "gemini-3-flash-preview" }, + }, + { + name: "normalizes gemini 3.1 flash-lite ids", + variants: ["google/gemini-3.1-flash-lite", "gemini-3.1-flash-lite"], + defaultProvider: "google", + expected: { provider: "google", model: "gemini-3.1-flash-lite-preview" }, + }, + { + name: "keeps OpenAI codex refs on the openai provider", + variants: ["openai/gpt-5.3-codex", "gpt-5.3-codex"], + defaultProvider: "openai", + expected: { provider: "openai", model: "gpt-5.3-codex" }, + }, + { + name: "preserves openrouter native model prefixes", + variants: ["openrouter/aurora-alpha"], + defaultProvider: "openai", + expected: { provider: "openrouter", model: "openrouter/aurora-alpha" }, + }, + { + name: "passes through openrouter upstream provider ids", + variants: ["openrouter/anthropic/claude-sonnet-4-5"], + defaultProvider: "openai", + expected: { provider: "openrouter", model: "anthropic/claude-sonnet-4-5" }, + }, + { + name: "normalizes Vercel Claude shorthand to anthropic-prefixed model ids", + variants: ["vercel-ai-gateway/claude-opus-4.6"], + defaultProvider: "openai", + expected: { provider: "vercel-ai-gateway", model: "anthropic/claude-opus-4.6" }, + }, + { + name: "normalizes Vercel Anthropic aliases without double-prefixing", + variants: ["vercel-ai-gateway/opus-4.6"], + defaultProvider: "openai", + expected: { provider: "vercel-ai-gateway", model: "anthropic/claude-opus-4-6" }, + }, + { + name: "keeps already-prefixed Vercel Anthropic models unchanged", + variants: ["vercel-ai-gateway/anthropic/claude-opus-4.6"], + defaultProvider: "openai", + expected: { provider: "vercel-ai-gateway", model: "anthropic/claude-opus-4.6" }, + }, + { + name: "passes through non-Claude Vercel model ids unchanged", + variants: ["vercel-ai-gateway/openai/gpt-5.2"], + defaultProvider: "openai", + expected: { provider: "vercel-ai-gateway", model: 
"openai/gpt-5.2" }, + }, + { + name: "keeps already-suffixed codex variants unchanged", + variants: ["openai/gpt-5.3-codex-codex"], + defaultProvider: "anthropic", + expected: { provider: "openai", model: "gpt-5.3-codex-codex" }, + }, + ])("$name", ({ variants, defaultProvider, expected }) => { + expectParsedModelVariants(variants, defaultProvider, expected); }); - it("preserves nested model ids after provider prefix", () => { - expect(parseModelRef("nvidia/moonshotai/kimi-k2.5", "anthropic")).toEqual({ - provider: "nvidia", - model: "moonshotai/kimi-k2.5", - }); + it("round-trips normalized refs through modelKey", () => { + const parsed = parseModelRef(" opus-4.6 ", "anthropic"); + expect(parsed).toEqual({ provider: "anthropic", model: "claude-opus-4-6" }); + expect(modelKey(parsed?.provider ?? "", parsed?.model ?? "")).toBe( + "anthropic/claude-opus-4-6", + ); }); - it("normalizes anthropic alias refs to canonical model ids", () => { - expect(parseModelRef("anthropic/opus-4.6", "openai")).toEqual({ - provider: "anthropic", - model: "claude-opus-4-6", - }); - expect(parseModelRef("opus-4.6", "anthropic")).toEqual({ - provider: "anthropic", - model: "claude-opus-4-6", - }); - expect(parseModelRef("anthropic/sonnet-4.6", "openai")).toEqual({ - provider: "anthropic", - model: "claude-sonnet-4-6", - }); - expect(parseModelRef("sonnet-4.6", "anthropic")).toEqual({ - provider: "anthropic", - model: "claude-sonnet-4-6", - }); - }); - - it("should use default provider if none specified", () => { - expect(parseModelRef("claude-3-5-sonnet", "anthropic")).toEqual({ - provider: "anthropic", - model: "claude-3-5-sonnet", - }); - }); - - it("normalizes deprecated google flash preview ids to the working model id", () => { - expect(parseModelRef("google/gemini-3.1-flash-preview", "openai")).toEqual({ - provider: "google", - model: "gemini-3-flash-preview", - }); - expect(parseModelRef("gemini-3.1-flash-preview", "google")).toEqual({ - provider: "google", - model: 
"gemini-3-flash-preview", - }); - }); - - it("normalizes gemini 3.1 flash-lite to the preview model id", () => { - expect(parseModelRef("google/gemini-3.1-flash-lite", "openai")).toEqual({ - provider: "google", - model: "gemini-3.1-flash-lite-preview", - }); - expect(parseModelRef("gemini-3.1-flash-lite", "google")).toEqual({ - provider: "google", - model: "gemini-3.1-flash-lite-preview", - }); - }); - - it("keeps openai gpt-5.3 codex refs on the openai provider", () => { - expect(parseModelRef("openai/gpt-5.3-codex", "anthropic")).toEqual({ - provider: "openai", - model: "gpt-5.3-codex", - }); - expect(parseModelRef("gpt-5.3-codex", "openai")).toEqual({ - provider: "openai", - model: "gpt-5.3-codex", - }); - expect(parseModelRef("openai/gpt-5.3-codex-codex", "anthropic")).toEqual({ - provider: "openai", - model: "gpt-5.3-codex-codex", - }); - }); - - it("should return null for empty strings", () => { - expect(parseModelRef("", "anthropic")).toBeNull(); - expect(parseModelRef(" ", "anthropic")).toBeNull(); - }); - - it("should preserve openrouter/ prefix for native models", () => { - expect(parseModelRef("openrouter/aurora-alpha", "openai")).toEqual({ - provider: "openrouter", - model: "openrouter/aurora-alpha", - }); - }); - - it("should pass through openrouter external provider models as-is", () => { - expect(parseModelRef("openrouter/anthropic/claude-sonnet-4-5", "openai")).toEqual({ - provider: "openrouter", - model: "anthropic/claude-sonnet-4-5", - }); - }); - - it("normalizes Vercel Claude shorthand to anthropic-prefixed model ids", () => { - expect(parseModelRef("vercel-ai-gateway/claude-opus-4.6", "openai")).toEqual({ - provider: "vercel-ai-gateway", - model: "anthropic/claude-opus-4.6", - }); - expect(parseModelRef("vercel-ai-gateway/opus-4.6", "openai")).toEqual({ - provider: "vercel-ai-gateway", - model: "anthropic/claude-opus-4-6", - }); - }); - - it("keeps already-prefixed Vercel Anthropic models unchanged", () => { - 
expect(parseModelRef("vercel-ai-gateway/anthropic/claude-opus-4.6", "openai")).toEqual({ - provider: "vercel-ai-gateway", - model: "anthropic/claude-opus-4.6", - }); - }); - - it("passes through non-Claude Vercel model ids unchanged", () => { - expect(parseModelRef("vercel-ai-gateway/openai/gpt-5.2", "openai")).toEqual({ - provider: "vercel-ai-gateway", - model: "openai/gpt-5.2", - }); - }); - - it("should handle invalid slash usage", () => { - expect(parseModelRef("/", "anthropic")).toBeNull(); - expect(parseModelRef("anthropic/", "anthropic")).toBeNull(); - expect(parseModelRef("/model", "anthropic")).toBeNull(); + it.each(["", " ", "/", "anthropic/", "/model"])("returns null for invalid ref %j", (raw) => { + expect(parseModelRef(raw, "anthropic")).toBeNull(); }); }); diff --git a/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts b/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts index 1d214e2cc1a..36944d67601 100644 --- a/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts +++ b/src/agents/models-config.fills-missing-provider-apikey-from-env-var.test.ts @@ -113,6 +113,92 @@ function createMoonshotConfig(overrides: { }; } +function createOpenAiConfigWithResolvedApiKey(mergeMode = false): OpenClawConfig { + return { + models: { + ...(mergeMode ? 
{ mode: "merge" as const } : {}), + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; simulates resolved ${OPENAI_API_KEY} + api: "openai-completions", + models: [ + { + id: "gpt-4.1", + name: "GPT-4.1", + input: ["text"], + reasoning: false, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 128000, + maxTokens: 16384, + }, + ], + }, + }, + }, + }; +} + +async function expectOpenAiEnvMarkerApiKey(options?: { seedMergedProvider?: boolean }) { + await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => { + await withTempHome(async () => { + if (options?.seedMergedProvider) { + await writeAgentModelsJson({ + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret + api: "openai-completions", + models: [{ id: "gpt-4.1", name: "GPT-4.1", input: ["text"] }], + }, + }, + }); + } + + await ensureOpenClawModelsJson( + createOpenAiConfigWithResolvedApiKey(options?.seedMergedProvider), + ); + const result = await readGeneratedModelsJson<{ + providers: Record; + }>(); + expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret + }); + }); +} + +async function expectMoonshotTokenLimits(params: { + contextWindow: number; + maxTokens: number; + expectedContextWindow: number; + expectedMaxTokens: number; +}) { + await withTempHome(async () => { + await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => { + await ensureOpenClawModelsJson( + createMoonshotConfig({ + contextWindow: params.contextWindow, + maxTokens: params.maxTokens, + }), + ); + const parsed = await readGeneratedModelsJson<{ + providers: Record< + string, + { + models?: Array<{ + id: string; + contextWindow?: number; + maxTokens?: number; + }>; + } + >; + }>(); + const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5"); + 
expect(kimi?.contextWindow).toBe(params.expectedContextWindow); + expect(kimi?.maxTokens).toBe(params.expectedMaxTokens); + }); + }); +} + describe("models-config", () => { it("keeps anthropic api defaults when model entries omit api", async () => { await withTempHome(async () => { @@ -444,131 +530,28 @@ describe("models-config", () => { }); it("does not persist resolved env var value as plaintext in models.json", async () => { - await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => { - await withTempHome(async () => { - const cfg: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; already resolved by loadConfig - api: "openai-completions", - models: [ - { - id: "gpt-4.1", - name: "GPT-4.1", - input: ["text"], - reasoning: false, - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 128000, - maxTokens: 16384, - }, - ], - }, - }, - }, - }; - await ensureOpenClawModelsJson(cfg); - const result = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); - }); - }); + await expectOpenAiEnvMarkerApiKey(); }); it("replaces stale merged apiKey when config key normalizes to a known env marker", async () => { - await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => { - await withTempHome(async () => { - await writeAgentModelsJson({ - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret - api: "openai-completions", - models: [{ id: "gpt-4.1", name: "GPT-4.1", input: ["text"] }], - }, - }, - }); - const cfg: OpenClawConfig = { - models: { - mode: "merge", - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; simulates resolved ${OPENAI_API_KEY} - api: 
"openai-completions", - models: [ - { - id: "gpt-4.1", - name: "GPT-4.1", - input: ["text"], - reasoning: false, - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 128000, - maxTokens: 16384, - }, - ], - }, - }, - }, - }; - await ensureOpenClawModelsJson(cfg); - const result = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret - }); - }); + await expectOpenAiEnvMarkerApiKey({ seedMergedProvider: true }); }); it("preserves explicit larger token limits when they exceed implicit catalog defaults", async () => { - await withTempHome(async () => { - await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => { - const cfg = createMoonshotConfig({ contextWindow: 350000, maxTokens: 16384 }); - - await ensureOpenClawModelsJson(cfg); - const parsed = await readGeneratedModelsJson<{ - providers: Record< - string, - { - models?: Array<{ - id: string; - contextWindow?: number; - maxTokens?: number; - }>; - } - >; - }>(); - const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5"); - expect(kimi?.contextWindow).toBe(350000); - expect(kimi?.maxTokens).toBe(16384); - }); + await expectMoonshotTokenLimits({ + contextWindow: 350000, + maxTokens: 16384, + expectedContextWindow: 350000, + expectedMaxTokens: 16384, }); }); it("falls back to implicit token limits when explicit values are invalid", async () => { - await withTempHome(async () => { - await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => { - const cfg = createMoonshotConfig({ contextWindow: 0, maxTokens: -1 }); - - await ensureOpenClawModelsJson(cfg); - const parsed = await readGeneratedModelsJson<{ - providers: Record< - string, - { - models?: Array<{ - id: string; - contextWindow?: number; - maxTokens?: number; - }>; - } - >; - }>(); - const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5"); - 
expect(kimi?.contextWindow).toBe(256000); - expect(kimi?.maxTokens).toBe(8192); - }); + await expectMoonshotTokenLimits({ + contextWindow: 0, + maxTokens: -1, + expectedContextWindow: 256000, + expectedMaxTokens: 8192, }); }); }); diff --git a/src/agents/models-config.normalizes-gemini-3-ids-preview-google-providers.test.ts b/src/agents/models-config.normalizes-gemini-3-ids-preview-google-providers.test.ts index 8414fb10d08..890be151c6f 100644 --- a/src/agents/models-config.normalizes-gemini-3-ids-preview-google-providers.test.ts +++ b/src/agents/models-config.normalizes-gemini-3-ids-preview-google-providers.test.ts @@ -1,91 +1,82 @@ import { describe, expect, it } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; +import type { ModelDefinitionConfig } from "../config/types.models.js"; import { installModelsConfigTestHooks, withModelsTempHome } from "./models-config.e2e-harness.js"; import { ensureOpenClawModelsJson } from "./models-config.js"; import { readGeneratedModelsJson } from "./models-config.test-utils.js"; +function createGoogleModelsConfig(models: ModelDefinitionConfig[]): OpenClawConfig { + return { + models: { + providers: { + google: { + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + apiKey: "GEMINI_KEY", // pragma: allowlist secret + api: "google-generative-ai", + models, + }, + }, + }, + }; +} + +async function expectGeneratedGoogleModelIds(ids: string[]) { + const parsed = await readGeneratedModelsJson<{ + providers: Record }>; + }>(); + expect(parsed.providers.google?.models?.map((model) => model.id)).toEqual(ids); +} + describe("models-config", () => { installModelsConfigTestHooks(); it("normalizes gemini 3 ids to preview for google providers", async () => { await withModelsTempHome(async () => { - const cfg: OpenClawConfig = { - models: { - providers: { - google: { - baseUrl: "https://generativelanguage.googleapis.com/v1beta", - apiKey: "GEMINI_KEY", // pragma: allowlist secret - api: 
"google-generative-ai", - models: [ - { - id: "gemini-3-pro", - name: "Gemini 3 Pro", - api: "google-generative-ai", - reasoning: true, - input: ["text", "image"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 1048576, - maxTokens: 65536, - }, - { - id: "gemini-3-flash", - name: "Gemini 3 Flash", - api: "google-generative-ai", - reasoning: false, - input: ["text", "image"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 1048576, - maxTokens: 65536, - }, - ], - }, - }, + const cfg = createGoogleModelsConfig([ + { + id: "gemini-3-pro", + name: "Gemini 3 Pro", + api: "google-generative-ai", + reasoning: true, + input: ["text", "image"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 1048576, + maxTokens: 65536, }, - }; + { + id: "gemini-3-flash", + name: "Gemini 3 Flash", + api: "google-generative-ai", + reasoning: false, + input: ["text", "image"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 1048576, + maxTokens: 65536, + }, + ]); await ensureOpenClawModelsJson(cfg); - - const parsed = await readGeneratedModelsJson<{ - providers: Record }>; - }>(); - const ids = parsed.providers.google?.models?.map((model) => model.id); - expect(ids).toEqual(["gemini-3-pro-preview", "gemini-3-flash-preview"]); + await expectGeneratedGoogleModelIds(["gemini-3-pro-preview", "gemini-3-flash-preview"]); }); }); it("normalizes the deprecated google flash preview id to the working preview id", async () => { await withModelsTempHome(async () => { - const cfg: OpenClawConfig = { - models: { - providers: { - google: { - baseUrl: "https://generativelanguage.googleapis.com/v1beta", - apiKey: "GEMINI_KEY", // pragma: allowlist secret - api: "google-generative-ai", - models: [ - { - id: "gemini-3.1-flash-preview", - name: "Gemini 3.1 Flash Preview", - api: "google-generative-ai", - reasoning: false, - input: ["text", "image"], - cost: { input: 0, output: 0, 
cacheRead: 0, cacheWrite: 0 }, - contextWindow: 1048576, - maxTokens: 65536, - }, - ], - }, - }, + const cfg = createGoogleModelsConfig([ + { + id: "gemini-3.1-flash-preview", + name: "Gemini 3.1 Flash Preview", + api: "google-generative-ai", + reasoning: false, + input: ["text", "image"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 1048576, + maxTokens: 65536, }, - }; + ]); await ensureOpenClawModelsJson(cfg); - - const parsed = await readGeneratedModelsJson<{ - providers: Record }>; - }>(); - const ids = parsed.providers.google?.models?.map((model) => model.id); - expect(ids).toEqual(["gemini-3-flash-preview"]); + await expectGeneratedGoogleModelIds(["gemini-3-flash-preview"]); }); }); }); diff --git a/src/agents/models-config.runtime-source-snapshot.test.ts b/src/agents/models-config.runtime-source-snapshot.test.ts index cc033fb56a6..a80ac010e86 100644 --- a/src/agents/models-config.runtime-source-snapshot.test.ts +++ b/src/agents/models-config.runtime-source-snapshot.test.ts @@ -16,47 +16,137 @@ import { readGeneratedModelsJson } from "./models-config.test-utils.js"; installModelsConfigTestHooks(); +function createOpenAiApiKeySourceConfig(): OpenClawConfig { + return { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret + api: "openai-completions" as const, + models: [], + }, + }, + }, + }; +} + +function createOpenAiApiKeyRuntimeConfig(): OpenClawConfig { + return { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + apiKey: "sk-runtime-resolved", // pragma: allowlist secret + api: "openai-completions" as const, + models: [], + }, + }, + }, + }; +} + +function createOpenAiHeaderSourceConfig(): OpenClawConfig { + return { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions" as const, + headers: { + Authorization: { + 
source: "env", + provider: "default", + id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret + }, + "X-Tenant-Token": { + source: "file", + provider: "vault", + id: "/providers/openai/tenantToken", + }, + }, + models: [], + }, + }, + }, + }; +} + +function createOpenAiHeaderRuntimeConfig(): OpenClawConfig { + return { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions" as const, + headers: { + Authorization: "Bearer runtime-openai-token", + "X-Tenant-Token": "runtime-tenant-token", + }, + models: [], + }, + }, + }, + }; +} + +function withGatewayTokenMode(config: OpenClawConfig): OpenClawConfig { + return { + ...config, + gateway: { + auth: { + mode: "token", + }, + }, + }; +} + +async function withGeneratedModelsFromRuntimeSource( + params: { + sourceConfig: OpenClawConfig; + runtimeConfig: OpenClawConfig; + candidateConfig?: OpenClawConfig; + }, + runAssertions: () => Promise, +) { + await withTempHome(async () => { + try { + setRuntimeConfigSnapshot(params.runtimeConfig, params.sourceConfig); + await ensureOpenClawModelsJson(params.candidateConfig ?? 
loadConfig()); + await runAssertions(); + } finally { + clearRuntimeConfigSnapshot(); + clearConfigCache(); + } + }); +} + +async function expectGeneratedProviderApiKey(providerId: string, expected: string) { + const parsed = await readGeneratedModelsJson<{ + providers: Record; + }>(); + expect(parsed.providers[providerId]?.apiKey).toBe(expected); +} + +async function expectGeneratedOpenAiHeaderMarkers() { + const parsed = await readGeneratedModelsJson<{ + providers: Record }>; + }>(); + expect(parsed.providers.openai?.headers?.Authorization).toBe( + "secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret + ); + expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER); +} + describe("models-config runtime source snapshot", () => { it("uses runtime source snapshot markers when passed the active runtime config", async () => { - await withTempHome(async () => { - const sourceConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret - api: "openai-completions" as const, - models: [], - }, - }, - }, - }; - const runtimeConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "sk-runtime-resolved", // pragma: allowlist secret - api: "openai-completions" as const, - models: [], - }, - }, - }, - }; - - try { - setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); - await ensureOpenClawModelsJson(loadConfig()); - - const parsed = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret - } finally { - clearRuntimeConfigSnapshot(); - clearConfigCache(); - } - }); + await withGeneratedModelsFromRuntimeSource( + { + sourceConfig: createOpenAiApiKeySourceConfig(), + runtimeConfig: createOpenAiApiKeyRuntimeConfig(), + }, + async () => 
expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"), // pragma: allowlist secret + ); }); it("uses non-env marker from runtime source snapshot for file refs", async () => { @@ -103,30 +193,8 @@ describe("models-config runtime source snapshot", () => { it("projects cloned runtime configs onto source snapshot when preserving provider auth", async () => { await withTempHome(async () => { - const sourceConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret - api: "openai-completions" as const, - models: [], - }, - }, - }, - }; - const runtimeConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "sk-runtime-resolved", // pragma: allowlist secret - api: "openai-completions" as const, - models: [], - }, - }, - }, - }; + const sourceConfig = createOpenAiApiKeySourceConfig(); + const runtimeConfig = createOpenAiApiKeyRuntimeConfig(); const clonedRuntimeConfig: OpenClawConfig = { ...runtimeConfig, agents: { @@ -139,11 +207,7 @@ describe("models-config runtime source snapshot", () => { try { setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); await ensureOpenClawModelsJson(clonedRuntimeConfig); - - const parsed = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret + await expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"); // pragma: allowlist secret } finally { clearRuntimeConfigSnapshot(); clearConfigCache(); @@ -152,121 +216,27 @@ describe("models-config runtime source snapshot", () => { }); it("uses header markers from runtime source snapshot instead of resolved runtime values", async () => { - await withTempHome(async () => { - const sourceConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - 
api: "openai-completions" as const, - headers: { - Authorization: { - source: "env", - provider: "default", - id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret - }, - "X-Tenant-Token": { - source: "file", - provider: "vault", - id: "/providers/openai/tenantToken", - }, - }, - models: [], - }, - }, - }, - }; - const runtimeConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions" as const, - headers: { - Authorization: "Bearer runtime-openai-token", - "X-Tenant-Token": "runtime-tenant-token", - }, - models: [], - }, - }, - }, - }; - - try { - setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); - await ensureOpenClawModelsJson(loadConfig()); - - const parsed = await readGeneratedModelsJson<{ - providers: Record }>; - }>(); - expect(parsed.providers.openai?.headers?.Authorization).toBe( - "secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret - ); - expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER); - } finally { - clearRuntimeConfigSnapshot(); - clearConfigCache(); - } - }); + await withGeneratedModelsFromRuntimeSource( + { + sourceConfig: createOpenAiHeaderSourceConfig(), + runtimeConfig: createOpenAiHeaderRuntimeConfig(), + }, + expectGeneratedOpenAiHeaderMarkers, + ); }); it("keeps source markers when runtime projection is skipped for incompatible top-level shape", async () => { await withTempHome(async () => { - const sourceConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret - api: "openai-completions" as const, - models: [], - }, - }, - }, - gateway: { - auth: { - mode: "token", - }, - }, - }; - const runtimeConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "sk-runtime-resolved", // pragma: allowlist secret - 
api: "openai-completions" as const, - models: [], - }, - }, - }, - gateway: { - auth: { - mode: "token", - }, - }, - }; + const sourceConfig = withGatewayTokenMode(createOpenAiApiKeySourceConfig()); + const runtimeConfig = withGatewayTokenMode(createOpenAiApiKeyRuntimeConfig()); const incompatibleCandidate: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - apiKey: "sk-runtime-resolved", // pragma: allowlist secret - api: "openai-completions" as const, - models: [], - }, - }, - }, + ...createOpenAiApiKeyRuntimeConfig(), }; try { setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); await ensureOpenClawModelsJson(incompatibleCandidate); - - const parsed = await readGeneratedModelsJson<{ - providers: Record; - }>(); - expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret + await expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"); // pragma: allowlist secret } finally { clearRuntimeConfigSnapshot(); clearConfigCache(); @@ -276,81 +246,16 @@ describe("models-config runtime source snapshot", () => { it("keeps source header markers when runtime projection is skipped for incompatible top-level shape", async () => { await withTempHome(async () => { - const sourceConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions" as const, - headers: { - Authorization: { - source: "env", - provider: "default", - id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret - }, - "X-Tenant-Token": { - source: "file", - provider: "vault", - id: "/providers/openai/tenantToken", - }, - }, - models: [], - }, - }, - }, - gateway: { - auth: { - mode: "token", - }, - }, - }; - const runtimeConfig: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions" as const, - headers: { - Authorization: "Bearer runtime-openai-token", - "X-Tenant-Token": 
"runtime-tenant-token", - }, - models: [], - }, - }, - }, - gateway: { - auth: { - mode: "token", - }, - }, - }; + const sourceConfig = withGatewayTokenMode(createOpenAiHeaderSourceConfig()); + const runtimeConfig = withGatewayTokenMode(createOpenAiHeaderRuntimeConfig()); const incompatibleCandidate: OpenClawConfig = { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions" as const, - headers: { - Authorization: "Bearer runtime-openai-token", - "X-Tenant-Token": "runtime-tenant-token", - }, - models: [], - }, - }, - }, + ...createOpenAiHeaderRuntimeConfig(), }; try { setRuntimeConfigSnapshot(runtimeConfig, sourceConfig); await ensureOpenClawModelsJson(incompatibleCandidate); - - const parsed = await readGeneratedModelsJson<{ - providers: Record }>; - }>(); - expect(parsed.providers.openai?.headers?.Authorization).toBe( - "secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret - ); - expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER); + await expectGeneratedOpenAiHeaderMarkers(); } finally { clearRuntimeConfigSnapshot(); clearConfigCache(); diff --git a/src/agents/ollama-models.test.ts b/src/agents/ollama-models.test.ts index 7877d40bdf9..d7b7d066c6f 100644 --- a/src/agents/ollama-models.test.ts +++ b/src/agents/ollama-models.test.ts @@ -1,31 +1,11 @@ import { afterEach, describe, expect, it, vi } from "vitest"; +import { jsonResponse, requestBodyText, requestUrl } from "../test-helpers/http.js"; import { enrichOllamaModelsWithContext, resolveOllamaApiBase, type OllamaTagModel, } from "./ollama-models.js"; -function jsonResponse(body: unknown, status = 200): Response { - return new Response(JSON.stringify(body), { - status, - headers: { "Content-Type": "application/json" }, - }); -} - -function requestUrl(input: string | URL | Request): string { - if (typeof input === "string") { - return input; - } - if (input instanceof URL) { - return input.toString(); - } - 
return input.url; -} - -function requestBody(body: BodyInit | null | undefined): string { - return typeof body === "string" ? body : "{}"; -} - describe("ollama-models", () => { afterEach(() => { vi.unstubAllGlobals(); @@ -43,7 +23,7 @@ describe("ollama-models", () => { if (!url.endsWith("/api/show")) { throw new Error(`Unexpected fetch: ${url}`); } - const body = JSON.parse(requestBody(init?.body)) as { name?: string }; + const body = JSON.parse(requestBodyText(init?.body)) as { name?: string }; if (body.name === "llama3:8b") { return jsonResponse({ model_info: { "llama.context_length": 65536 } }); } diff --git a/src/agents/ollama-stream.test.ts b/src/agents/ollama-stream.test.ts index 2af5e490c7f..ded8064ea19 100644 --- a/src/agents/ollama-stream.test.ts +++ b/src/agents/ollama-stream.test.ts @@ -106,7 +106,7 @@ describe("buildAssistantMessage", () => { expect(result.usage.totalTokens).toBe(15); }); - it("falls back to thinking when content is empty", () => { + it("drops thinking-only output when content is empty", () => { const response = { model: "qwen3:32b", created_at: "2026-01-01T00:00:00Z", @@ -119,10 +119,10 @@ describe("buildAssistantMessage", () => { }; const result = buildAssistantMessage(response, modelInfo); expect(result.stopReason).toBe("stop"); - expect(result.content).toEqual([{ type: "text", text: "Thinking output" }]); + expect(result.content).toEqual([]); }); - it("falls back to reasoning when content and thinking are empty", () => { + it("drops reasoning-only output when content and thinking are empty", () => { const response = { model: "qwen3:32b", created_at: "2026-01-01T00:00:00Z", @@ -135,7 +135,7 @@ describe("buildAssistantMessage", () => { }; const result = buildAssistantMessage(response, modelInfo); expect(result.stopReason).toBe("stop"); - expect(result.content).toEqual([{ type: "text", text: "Reasoning output" }]); + expect(result.content).toEqual([]); }); it("builds response with tool calls", () => { @@ -203,6 +203,20 @@ function 
mockNdjsonReader(lines: string[]): ReadableStreamDefaultReader; } +async function expectDoneEventContent(lines: string[], expectedContent: unknown) { + await withMockNdjsonFetch(lines, async () => { + const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); + const events = await collectStreamEvents(stream); + + const doneEvent = events.at(-1); + if (!doneEvent || doneEvent.type !== "done") { + throw new Error("Expected done event"); + } + + expect(doneEvent.message.content).toEqual(expectedContent); + }); +} + describe("parseNdjsonStream", () => { it("parses text-only streaming chunks", async () => { const reader = mockNdjsonReader([ @@ -485,89 +499,49 @@ describe("createOllamaStreamFn", () => { ); }); - it("accumulates thinking chunks when content is empty", async () => { - await withMockNdjsonFetch( + it("drops thinking chunks when no final content is emitted", async () => { + await expectDoneEventContent( [ '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":"reasoned"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":" output"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}', ], - async () => { - const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); - const events = await collectStreamEvents(stream); - - const doneEvent = events.at(-1); - if (!doneEvent || doneEvent.type !== "done") { - throw new Error("Expected done event"); - } - - expect(doneEvent.message.content).toEqual([{ type: "text", text: "reasoned output" }]); - }, + [], ); }); it("prefers streamed content over earlier thinking chunks", async () => { - await withMockNdjsonFetch( + await expectDoneEventContent( [ '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":"internal"},"done":false}', 
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"final"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":" answer"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}', ], - async () => { - const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); - const events = await collectStreamEvents(stream); - - const doneEvent = events.at(-1); - if (!doneEvent || doneEvent.type !== "done") { - throw new Error("Expected done event"); - } - - expect(doneEvent.message.content).toEqual([{ type: "text", text: "final answer" }]); - }, + [{ type: "text", text: "final answer" }], ); }); - it("accumulates reasoning chunks when thinking is absent", async () => { - await withMockNdjsonFetch( + it("drops reasoning chunks when no final content is emitted", async () => { + await expectDoneEventContent( [ '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"reasoned"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":" output"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}', ], - async () => { - const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); - const events = await collectStreamEvents(stream); - - const doneEvent = events.at(-1); - if (!doneEvent || doneEvent.type !== "done") { - throw new Error("Expected done event"); - } - - expect(doneEvent.message.content).toEqual([{ type: "text", text: "reasoned output" }]); - }, + [], ); }); it("prefers streamed content over earlier reasoning chunks", async () => { - await withMockNdjsonFetch( + await expectDoneEventContent( [ '{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"internal"},"done":false}', 
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"final"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":" answer"},"done":false}', '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}', ], - async () => { - const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" }); - const events = await collectStreamEvents(stream); - - const doneEvent = events.at(-1); - if (!doneEvent || doneEvent.type !== "done") { - throw new Error("Expected done event"); - } - - expect(doneEvent.message.content).toEqual([{ type: "text", text: "final answer" }]); - }, + [{ type: "text", text: "final answer" }], ); }); }); diff --git a/src/agents/ollama-stream.ts b/src/agents/ollama-stream.ts index 9d23852bb31..70a2ef33cf1 100644 --- a/src/agents/ollama-stream.ts +++ b/src/agents/ollama-stream.ts @@ -340,10 +340,9 @@ export function buildAssistantMessage( ): AssistantMessage { const content: (TextContent | ToolCall)[] = []; - // Ollama-native reasoning models may emit their answer in `thinking` or - // `reasoning` with an empty `content`. Fall back so replies are not dropped. - const text = - response.message.content || response.message.thinking || response.message.reasoning || ""; + // Native Ollama reasoning fields are internal model output. The reply text + // must come from `content`; reasoning visibility is controlled elsewhere. 
+ const text = response.message.content || ""; if (text) { content.push({ type: "text", text }); } @@ -497,20 +496,12 @@ export function createOllamaStreamFn( const reader = response.body.getReader(); let accumulatedContent = ""; - let fallbackContent = ""; - let sawContent = false; const accumulatedToolCalls: OllamaToolCall[] = []; let finalResponse: OllamaChatResponse | undefined; for await (const chunk of parseNdjsonStream(reader)) { if (chunk.message?.content) { - sawContent = true; accumulatedContent += chunk.message.content; - } else if (!sawContent && chunk.message?.thinking) { - fallbackContent += chunk.message.thinking; - } else if (!sawContent && chunk.message?.reasoning) { - // Backward compatibility for older/native variants that still use reasoning. - fallbackContent += chunk.message.reasoning; } // Ollama sends tool_calls in intermediate (done:false) chunks, @@ -529,7 +520,7 @@ export function createOllamaStreamFn( throw new Error("Ollama API stream ended without a final response"); } - finalResponse.message.content = accumulatedContent || fallbackContent; + finalResponse.message.content = accumulatedContent; if (accumulatedToolCalls.length > 0) { finalResponse.message.tool_calls = accumulatedToolCalls; } diff --git a/src/agents/openclaw-tools.session-status.test.ts b/src/agents/openclaw-tools.session-status.test.ts index 8b2d9fc467f..0bc079d4ced 100644 --- a/src/agents/openclaw-tools.session-status.test.ts +++ b/src/agents/openclaw-tools.session-status.test.ts @@ -115,6 +115,50 @@ function resetSessionStore(store: Record) { mockConfig = createMockConfig(); } +function installSandboxedSessionStatusConfig() { + mockConfig = { + session: { mainKey: "main", scope: "per-sender" }, + tools: { + sessions: { visibility: "all" }, + agentToAgent: { enabled: true, allow: ["*"] }, + }, + agents: { + defaults: { + model: { primary: "anthropic/claude-opus-4-5" }, + models: {}, + sandbox: { sessionToolsVisibility: "spawned" }, + }, + }, + }; +} + +function 
mockSpawnedSessionList( + resolveSessions: (spawnedBy: string | undefined) => Array>, +) { + callGatewayMock.mockImplementation(async (opts: unknown) => { + const request = opts as { method?: string; params?: Record }; + if (request.method === "sessions.list") { + return { sessions: resolveSessions(request.params?.spawnedBy as string | undefined) }; + } + return {}; + }); +} + +function expectSpawnedSessionLookupCalls(spawnedBy: string) { + const expectedCall = { + method: "sessions.list", + params: { + includeGlobal: false, + includeUnknown: false, + limit: 500, + spawnedBy, + }, + }; + expect(callGatewayMock).toHaveBeenCalledTimes(2); + expect(callGatewayMock).toHaveBeenNthCalledWith(1, expectedCall); + expect(callGatewayMock).toHaveBeenNthCalledWith(2, expectedCall); +} + function getSessionStatusTool(agentSessionKey = "main", options?: { sandboxed?: boolean }) { const tool = createOpenClawTools({ agentSessionKey, @@ -242,27 +286,8 @@ describe("session_status tool", () => { updatedAt: 10, }, }); - mockConfig = { - session: { mainKey: "main", scope: "per-sender" }, - tools: { - sessions: { visibility: "all" }, - agentToAgent: { enabled: true, allow: ["*"] }, - }, - agents: { - defaults: { - model: { primary: "anthropic/claude-opus-4-5" }, - models: {}, - sandbox: { sessionToolsVisibility: "spawned" }, - }, - }, - }; - callGatewayMock.mockImplementation(async (opts: unknown) => { - const request = opts as { method?: string; params?: Record }; - if (request.method === "sessions.list") { - return { sessions: [] }; - } - return {}; - }); + installSandboxedSessionStatusConfig(); + mockSpawnedSessionList(() => []); const tool = getSessionStatusTool("agent:main:subagent:child", { sandboxed: true, @@ -284,25 +309,7 @@ describe("session_status tool", () => { expect(loadSessionStoreMock).not.toHaveBeenCalled(); expect(updateSessionStoreMock).not.toHaveBeenCalled(); - expect(callGatewayMock).toHaveBeenCalledTimes(2); - expect(callGatewayMock).toHaveBeenNthCalledWith(1, { - 
method: "sessions.list", - params: { - includeGlobal: false, - includeUnknown: false, - limit: 500, - spawnedBy: "agent:main:subagent:child", - }, - }); - expect(callGatewayMock).toHaveBeenNthCalledWith(2, { - method: "sessions.list", - params: { - includeGlobal: false, - includeUnknown: false, - limit: 500, - spawnedBy: "agent:main:subagent:child", - }, - }); + expectSpawnedSessionLookupCalls("agent:main:subagent:child"); }); it("keeps legacy main requester keys for sandboxed session tree checks", async () => { @@ -316,30 +323,10 @@ describe("session_status tool", () => { updatedAt: 20, }, }); - mockConfig = { - session: { mainKey: "main", scope: "per-sender" }, - tools: { - sessions: { visibility: "all" }, - agentToAgent: { enabled: true, allow: ["*"] }, - }, - agents: { - defaults: { - model: { primary: "anthropic/claude-opus-4-5" }, - models: {}, - sandbox: { sessionToolsVisibility: "spawned" }, - }, - }, - }; - callGatewayMock.mockImplementation(async (opts: unknown) => { - const request = opts as { method?: string; params?: Record }; - if (request.method === "sessions.list") { - return { - sessions: - request.params?.spawnedBy === "main" ? [{ key: "agent:main:subagent:child" }] : [], - }; - } - return {}; - }); + installSandboxedSessionStatusConfig(); + mockSpawnedSessionList((spawnedBy) => + spawnedBy === "main" ? 
[{ key: "agent:main:subagent:child" }] : [], + ); const tool = getSessionStatusTool("main", { sandboxed: true, @@ -357,25 +344,7 @@ describe("session_status tool", () => { expect(childDetails.ok).toBe(true); expect(childDetails.sessionKey).toBe("agent:main:subagent:child"); - expect(callGatewayMock).toHaveBeenCalledTimes(2); - expect(callGatewayMock).toHaveBeenNthCalledWith(1, { - method: "sessions.list", - params: { - includeGlobal: false, - includeUnknown: false, - limit: 500, - spawnedBy: "main", - }, - }); - expect(callGatewayMock).toHaveBeenNthCalledWith(2, { - method: "sessions.list", - params: { - includeGlobal: false, - includeUnknown: false, - limit: 500, - spawnedBy: "main", - }, - }); + expectSpawnedSessionLookupCalls("main"); }); it("scopes bare session keys to the requester agent", async () => { diff --git a/src/agents/openclaw-tools.subagents.scope.test.ts b/src/agents/openclaw-tools.subagents.scope.test.ts index c985f1712e1..fc233015064 100644 --- a/src/agents/openclaw-tools.subagents.scope.test.ts +++ b/src/agents/openclaw-tools.subagents.scope.test.ts @@ -17,6 +17,63 @@ function writeStore(storePath: string, store: Record) { fs.writeFileSync(storePath, JSON.stringify(store, null, 2), "utf-8"); } +function seedLeafOwnedChildSession(storePath: string, leafKey = "agent:main:subagent:leaf") { + const childKey = `${leafKey}:subagent:child`; + writeStore(storePath, { + [leafKey]: { + sessionId: "leaf-session", + updatedAt: Date.now(), + spawnedBy: "agent:main:main", + subagentRole: "leaf", + subagentControlScope: "none", + }, + [childKey]: { + sessionId: "child-session", + updatedAt: Date.now(), + spawnedBy: leafKey, + subagentRole: "leaf", + subagentControlScope: "none", + }, + }); + + addSubagentRunForTests({ + runId: "run-child", + childSessionKey: childKey, + controllerSessionKey: leafKey, + requesterSessionKey: leafKey, + requesterDisplayKey: leafKey, + task: "impossible child", + cleanup: "keep", + createdAt: Date.now() - 30_000, + startedAt: 
Date.now() - 30_000, + }); + + return { + childKey, + tool: createSubagentsTool({ agentSessionKey: leafKey }), + }; +} + +async function expectLeafSubagentControlForbidden(params: { + storePath: string; + action: "kill" | "steer"; + callId: string; + message?: string; +}) { + const { childKey, tool } = seedLeafOwnedChildSession(params.storePath); + const result = await tool.execute(params.callId, { + action: params.action, + target: childKey, + ...(params.message ? { message: params.message } : {}), + }); + + expect(result.details).toMatchObject({ + status: "forbidden", + error: "Leaf subagents cannot control other sessions.", + }); + expect(callGatewayMock).not.toHaveBeenCalled(); +} + describe("openclaw-tools: subagents scope isolation", () => { let storePath = ""; @@ -151,95 +208,19 @@ describe("openclaw-tools: subagents scope isolation", () => { }); it("leaf subagents cannot kill even explicitly-owned child sessions", async () => { - const leafKey = "agent:main:subagent:leaf"; - const childKey = `${leafKey}:subagent:child`; - - writeStore(storePath, { - [leafKey]: { - sessionId: "leaf-session", - updatedAt: Date.now(), - spawnedBy: "agent:main:main", - subagentRole: "leaf", - subagentControlScope: "none", - }, - [childKey]: { - sessionId: "child-session", - updatedAt: Date.now(), - spawnedBy: leafKey, - subagentRole: "leaf", - subagentControlScope: "none", - }, - }); - - addSubagentRunForTests({ - runId: "run-child", - childSessionKey: childKey, - controllerSessionKey: leafKey, - requesterSessionKey: leafKey, - requesterDisplayKey: leafKey, - task: "impossible child", - cleanup: "keep", - createdAt: Date.now() - 30_000, - startedAt: Date.now() - 30_000, - }); - - const tool = createSubagentsTool({ agentSessionKey: leafKey }); - const result = await tool.execute("call-leaf-kill", { + await expectLeafSubagentControlForbidden({ + storePath, action: "kill", - target: childKey, + callId: "call-leaf-kill", }); - - expect(result.details).toMatchObject({ - status: 
"forbidden", - error: "Leaf subagents cannot control other sessions.", - }); - expect(callGatewayMock).not.toHaveBeenCalled(); }); it("leaf subagents cannot steer even explicitly-owned child sessions", async () => { - const leafKey = "agent:main:subagent:leaf"; - const childKey = `${leafKey}:subagent:child`; - - writeStore(storePath, { - [leafKey]: { - sessionId: "leaf-session", - updatedAt: Date.now(), - spawnedBy: "agent:main:main", - subagentRole: "leaf", - subagentControlScope: "none", - }, - [childKey]: { - sessionId: "child-session", - updatedAt: Date.now(), - spawnedBy: leafKey, - subagentRole: "leaf", - subagentControlScope: "none", - }, - }); - - addSubagentRunForTests({ - runId: "run-child", - childSessionKey: childKey, - controllerSessionKey: leafKey, - requesterSessionKey: leafKey, - requesterDisplayKey: leafKey, - task: "impossible child", - cleanup: "keep", - createdAt: Date.now() - 30_000, - startedAt: Date.now() - 30_000, - }); - - const tool = createSubagentsTool({ agentSessionKey: leafKey }); - const result = await tool.execute("call-leaf-steer", { + await expectLeafSubagentControlForbidden({ + storePath, action: "steer", - target: childKey, + callId: "call-leaf-steer", message: "continue", }); - - expect(result.details).toMatchObject({ - status: "forbidden", - error: "Leaf subagents cannot control other sessions.", - }); - expect(callGatewayMock).not.toHaveBeenCalled(); }); }); diff --git a/src/agents/openclaw-tools.ts b/src/agents/openclaw-tools.ts index 58b3570eb89..ea12b5121d8 100644 --- a/src/agents/openclaw-tools.ts +++ b/src/agents/openclaw-tools.ts @@ -174,15 +174,18 @@ export function createOpenClawTools( createSessionsListTool({ agentSessionKey: options?.agentSessionKey, sandboxed: options?.sandboxed, + config: options?.config, }), createSessionsHistoryTool({ agentSessionKey: options?.agentSessionKey, sandboxed: options?.sandboxed, + config: options?.config, }), createSessionsSendTool({ agentSessionKey: options?.agentSessionKey, 
agentChannel: options?.agentChannel, sandboxed: options?.sandboxed, + config: options?.config, }), createSessionsYieldTool({ sessionId: options?.sessionId, diff --git a/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts b/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts index 3cbefadbce8..e8578c7feb2 100644 --- a/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts +++ b/src/agents/pi-embedded-helpers.isbillingerrormessage.test.ts @@ -45,98 +45,117 @@ const GROQ_TOO_MANY_REQUESTS_MESSAGE = const GROQ_SERVICE_UNAVAILABLE_MESSAGE = "503 Service Unavailable: The server is temporarily unable to handle the request due to overloading or maintenance."; // pragma: allowlist secret +function expectMessageMatches( + matcher: (message: string) => boolean, + samples: readonly string[], + expected: boolean, +) { + for (const sample of samples) { + expect(matcher(sample), sample).toBe(expected); + } +} + describe("isAuthPermanentErrorMessage", () => { - it("matches permanent auth failure patterns", () => { - const samples = [ - "invalid_api_key", - "api key revoked", - "api key deactivated", - "key has been disabled", - "key has been revoked", - "account has been deactivated", - "could not authenticate api key", - "could not validate credentials", - "API_KEY_REVOKED", - "api_key_deleted", - ]; - for (const sample of samples) { - expect(isAuthPermanentErrorMessage(sample)).toBe(true); - } - }); - it("does not match transient auth errors", () => { - const samples = [ - "unauthorized", - "invalid token", - "authentication failed", - "forbidden", - "access denied", - "token has expired", - ]; - for (const sample of samples) { - expect(isAuthPermanentErrorMessage(sample)).toBe(false); - } + it.each([ + { + name: "matches permanent auth failure patterns", + samples: [ + "invalid_api_key", + "api key revoked", + "api key deactivated", + "key has been disabled", + "key has been revoked", + "account has been deactivated", + "could not authenticate api key", 
+ "could not validate credentials", + "API_KEY_REVOKED", + "api_key_deleted", + ], + expected: true, + }, + { + name: "does not match transient auth errors", + samples: [ + "unauthorized", + "invalid token", + "authentication failed", + "forbidden", + "access denied", + "token has expired", + ], + expected: false, + }, + ])("$name", ({ samples, expected }) => { + expectMessageMatches(isAuthPermanentErrorMessage, samples, expected); }); }); describe("isAuthErrorMessage", () => { - it("matches credential validation errors", () => { - const samples = [ - 'No credentials found for profile "anthropic:default".', - "No API key found for profile openai.", - ]; - for (const sample of samples) { - expect(isAuthErrorMessage(sample)).toBe(true); - } - }); - it("matches OAuth refresh failures", () => { - const samples = [ - "OAuth token refresh failed for anthropic: Failed to refresh OAuth token for anthropic. Please try again or re-authenticate.", - "Please re-authenticate to continue.", - ]; - for (const sample of samples) { - expect(isAuthErrorMessage(sample)).toBe(true); - } + it.each([ + 'No credentials found for profile "anthropic:default".', + "No API key found for profile openai.", + "OAuth token refresh failed for anthropic: Failed to refresh OAuth token for anthropic. Please try again or re-authenticate.", + "Please re-authenticate to continue.", + ])("matches auth errors for %j", (sample) => { + expect(isAuthErrorMessage(sample)).toBe(true); }); }); describe("isBillingErrorMessage", () => { - it("matches credit / payment failures", () => { - const samples = [ - "Your credit balance is too low to access the Anthropic API.", - "insufficient credits", - "Payment Required", - "HTTP 402 Payment Required", - "plans & billing", - // Venice returns "Insufficient USD or Diem balance" which has extra words - // between "insufficient" and "balance" - "Insufficient USD or Diem balance to complete request. 
Visit https://venice.ai/settings/api to add credits.", - // OpenRouter returns "requires more credits" for underfunded accounts - "This model requires more credits to use", - "This endpoint require more credits", - ]; - for (const sample of samples) { - expect(isBillingErrorMessage(sample)).toBe(true); - } - }); - it("does not false-positive on issue IDs or text containing 402", () => { - const falsePositives = [ - "Fixed issue CHE-402 in the latest release", - "See ticket #402 for details", - "ISSUE-402 has been resolved", - "Room 402 is available", - "Error code 403 was returned, not 402-related", - "The building at 402 Main Street", - "processed 402 records", - "402 items found in the database", - "port 402 is open", - "Use a 402 stainless bolt", - "Book a 402 room", - "There is a 402 near me", - ]; - for (const sample of falsePositives) { - expect(isBillingErrorMessage(sample)).toBe(false); - } + it.each([ + { + name: "matches credit and payment failures", + samples: [ + "Your credit balance is too low to access the Anthropic API.", + "insufficient credits", + "Payment Required", + "HTTP 402 Payment Required", + "plans & billing", + "Insufficient USD or Diem balance to complete request. 
Visit https://venice.ai/settings/api to add credits.", + "This model requires more credits to use", + "This endpoint require more credits", + ], + expected: true, + }, + { + name: "does not false-positive on issue ids and numeric references", + samples: [ + "Fixed issue CHE-402 in the latest release", + "See ticket #402 for details", + "ISSUE-402 has been resolved", + "Room 402 is available", + "Error code 403 was returned, not 402-related", + "The building at 402 Main Street", + "processed 402 records", + "402 items found in the database", + "port 402 is open", + "Use a 402 stainless bolt", + "Book a 402 room", + "There is a 402 near me", + ], + expected: false, + }, + { + name: "still matches real HTTP 402 billing errors", + samples: [ + "HTTP 402 Payment Required", + "status: 402", + "error code 402", + "http 402", + "status=402 payment required", + "got a 402 from the API", + "returned 402", + "received a 402 response", + '{"status":402,"type":"error"}', + '{"code":402,"message":"payment required"}', + '{"error":{"code":402,"message":"billing hard limit reached"}}', + ], + expected: true, + }, + ])("$name", ({ samples, expected }) => { + expectMessageMatches(isBillingErrorMessage, samples, expected); }); + it("does not false-positive on long assistant responses mentioning billing keywords", () => { // Simulate a multi-paragraph assistant response that mentions billing terms const longResponse = @@ -176,37 +195,27 @@ describe("isBillingErrorMessage", () => { expect(longNonError.length).toBeGreaterThan(512); expect(isBillingErrorMessage(longNonError)).toBe(false); }); - it("still matches real HTTP 402 billing errors", () => { - const realErrors = [ - "HTTP 402 Payment Required", - "status: 402", - "error code 402", - "http 402", - "status=402 payment required", - "got a 402 from the API", - "returned 402", - "received a 402 response", - '{"status":402,"type":"error"}', - '{"code":402,"message":"payment required"}', - '{"error":{"code":402,"message":"billing hard 
limit reached"}}', - ]; - for (const sample of realErrors) { - expect(isBillingErrorMessage(sample)).toBe(true); - } + + it("prefers billing when API-key and 402 hints both appear", () => { + const sample = + "402 Payment Required: The account associated with this API key has reached its maximum allowed monthly spending limit."; + expect(isBillingErrorMessage(sample)).toBe(true); + expect(classifyFailoverReason(sample)).toBe("billing"); }); }); describe("isCloudCodeAssistFormatError", () => { it("matches format errors", () => { - const samples = [ - "INVALID_REQUEST_ERROR: string should match pattern", - "messages.1.content.1.tool_use.id", - "tool_use.id should match pattern", - "invalid request format", - ]; - for (const sample of samples) { - expect(isCloudCodeAssistFormatError(sample)).toBe(true); - } + expectMessageMatches( + isCloudCodeAssistFormatError, + [ + "INVALID_REQUEST_ERROR: string should match pattern", + "messages.1.content.1.tool_use.id", + "tool_use.id should match pattern", + "invalid request format", + ], + true, + ); }); }); @@ -238,20 +247,24 @@ describe("isCloudflareOrHtmlErrorPage", () => { }); describe("isCompactionFailureError", () => { - it("matches compaction overflow failures", () => { - const samples = [ - 'Context overflow: Summarization failed: 400 {"message":"prompt is too long"}', - "auto-compaction failed due to context overflow", - "Compaction failed: prompt is too long", - "Summarization failed: context window exceeded for this request", - ]; - for (const sample of samples) { - expect(isCompactionFailureError(sample)).toBe(true); - } - }); - it("ignores non-compaction overflow errors", () => { - expect(isCompactionFailureError("Context overflow: prompt too large")).toBe(false); - expect(isCompactionFailureError("rate limit exceeded")).toBe(false); + it.each([ + { + name: "matches compaction overflow failures", + samples: [ + 'Context overflow: Summarization failed: 400 {"message":"prompt is too long"}', + "auto-compaction failed 
due to context overflow", + "Compaction failed: prompt is too long", + "Summarization failed: context window exceeded for this request", + ], + expected: true, + }, + { + name: "ignores non-compaction overflow errors", + samples: ["Context overflow: prompt too large", "rate limit exceeded"], + expected: false, + }, + ])("$name", ({ samples, expected }) => { + expectMessageMatches(isCompactionFailureError, samples, expected); }); }); @@ -506,6 +519,10 @@ describe("isTransientHttpError", () => { }); describe("classifyFailoverReasonFromHttpStatus", () => { + it("treats HTTP 401 permanent auth failures as auth_permanent", () => { + expect(classifyFailoverReasonFromHttpStatus(401, "invalid_api_key")).toBe("auth_permanent"); + }); + it("treats HTTP 422 as format error", () => { expect(classifyFailoverReasonFromHttpStatus(422)).toBe("format"); expect(classifyFailoverReasonFromHttpStatus(422, "check open ai req parameter error")).toBe( @@ -518,6 +535,10 @@ describe("classifyFailoverReasonFromHttpStatus", () => { expect(classifyFailoverReasonFromHttpStatus(422, "insufficient credits")).toBe("billing"); }); + it("treats HTTP 400 insufficient-quota payloads as billing instead of format", () => { + expect(classifyFailoverReasonFromHttpStatus(400, INSUFFICIENT_QUOTA_PAYLOAD)).toBe("billing"); + }); + it("treats HTTP 499 as transient for structured errors", () => { expect(classifyFailoverReasonFromHttpStatus(499)).toBe("timeout"); expect(classifyFailoverReasonFromHttpStatus(499, "499 Client Closed Request")).toBe("timeout"); diff --git a/src/agents/pi-embedded-runner.e2e.test.ts b/src/agents/pi-embedded-runner.e2e.test.ts index 31056f6ffe1..5c7722b5d16 100644 --- a/src/agents/pi-embedded-runner.e2e.test.ts +++ b/src/agents/pi-embedded-runner.e2e.test.ts @@ -1,9 +1,14 @@ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import "./test-helpers/fast-coding-tools.js"; import { afterAll, beforeAll, describe, expect, it, vi } from "vitest"; 
-import type { OpenClawConfig } from "../config/config.js"; +import { + cleanupEmbeddedPiRunnerTestWorkspace, + createEmbeddedPiRunnerOpenAiConfig, + createEmbeddedPiRunnerTestWorkspace, + type EmbeddedPiRunnerTestWorkspace, + immediateEnqueue, +} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js"; function createMockUsage(input: number, output: number) { return { @@ -88,7 +93,7 @@ vi.mock("@mariozechner/pi-ai", async () => { let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent; let SessionManager: typeof import("@mariozechner/pi-coding-agent").SessionManager; -let tempRoot: string | undefined; +let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined; let agentDir: string; let workspaceDir: string; let sessionCounter = 0; @@ -98,50 +103,21 @@ beforeAll(async () => { vi.useRealTimers(); ({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js")); ({ SessionManager } = await import("@mariozechner/pi-coding-agent")); - tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-embedded-agent-")); - agentDir = path.join(tempRoot, "agent"); - workspaceDir = path.join(tempRoot, "workspace"); - await fs.mkdir(agentDir, { recursive: true }); - await fs.mkdir(workspaceDir, { recursive: true }); + e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-embedded-agent-"); + ({ agentDir, workspaceDir } = e2eWorkspace); }, 180_000); afterAll(async () => { - if (!tempRoot) { - return; - } - await fs.rm(tempRoot, { recursive: true, force: true }); - tempRoot = undefined; + await cleanupEmbeddedPiRunnerTestWorkspace(e2eWorkspace); + e2eWorkspace = undefined; }); -const makeOpenAiConfig = (modelIds: string[]) => - ({ - models: { - providers: { - openai: { - api: "openai-responses", - apiKey: "sk-test", - baseUrl: "https://example.com", - models: modelIds.map((id) => ({ - id, - name: `Mock ${id}`, - reasoning: false, - input: ["text"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 
16_000, - maxTokens: 2048, - })), - }, - }, - }, - }) satisfies OpenClawConfig; - const nextSessionFile = () => { sessionCounter += 1; return path.join(workspaceDir, `session-${sessionCounter}.jsonl`); }; const nextRunId = (prefix = "run-embedded-test") => `${prefix}-${++runCounter}`; const nextSessionKey = () => `agent:test:embedded:${nextRunId("session-key")}`; -const immediateEnqueue = async (task: () => Promise) => task(); const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string) => { const sessionFile = nextSessionFile(); @@ -152,7 +128,7 @@ const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string timestamp: Date.now(), }); - const cfg = makeOpenAiConfig(["mock-1"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-1"]); return await runEmbeddedPiAgent({ sessionId: "session:test", sessionKey, @@ -197,7 +173,7 @@ const readSessionMessages = async (sessionFile: string) => { }; const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessionKey: string) => { - const cfg = makeOpenAiConfig(["mock-error"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]); await runEmbeddedPiAgent({ sessionId: "session:test", sessionKey, @@ -217,7 +193,7 @@ const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessi describe("runEmbeddedPiAgent", () => { it("handles prompt error paths without dropping user state", async () => { const sessionFile = nextSessionFile(); - const cfg = makeOpenAiConfig(["mock-error"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]); const sessionKey = nextSessionKey(); const result = await runEmbeddedPiAgent({ sessionId: "session:test", diff --git a/src/agents/pi-embedded-runner.sessions-yield.e2e.test.ts b/src/agents/pi-embedded-runner.sessions-yield.e2e.test.ts index 18f439cd01f..d91cf63539b 100644 --- a/src/agents/pi-embedded-runner.sessions-yield.e2e.test.ts +++ b/src/agents/pi-embedded-runner.sessions-yield.e2e.test.ts @@ 
-8,12 +8,17 @@ * Follows the same pattern as pi-embedded-runner.e2e.test.ts. */ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import "./test-helpers/fast-coding-tools.js"; import { afterAll, beforeAll, describe, expect, it, vi } from "vitest"; -import type { OpenClawConfig } from "../config/config.js"; import { isEmbeddedPiRunActive, queueEmbeddedPiMessage } from "./pi-embedded-runner/runs.js"; +import { + cleanupEmbeddedPiRunnerTestWorkspace, + createEmbeddedPiRunnerOpenAiConfig, + createEmbeddedPiRunnerTestWorkspace, + type EmbeddedPiRunnerTestWorkspace, + immediateEnqueue, +} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js"; function createMockUsage(input: number, output: number) { return { @@ -126,7 +131,7 @@ vi.mock("@mariozechner/pi-ai", async () => { }); let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent; -let tempRoot: string | undefined; +let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined; let agentDir: string; let workspaceDir: string; @@ -136,45 +141,15 @@ beforeAll(async () => { responsePlan = []; observedContexts = []; ({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js")); - tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-yield-e2e-")); - agentDir = path.join(tempRoot, "agent"); - workspaceDir = path.join(tempRoot, "workspace"); - await fs.mkdir(agentDir, { recursive: true }); - await fs.mkdir(workspaceDir, { recursive: true }); + e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-yield-e2e-"); + ({ agentDir, workspaceDir } = e2eWorkspace); }, 180_000); afterAll(async () => { - if (!tempRoot) { - return; - } - await fs.rm(tempRoot, { recursive: true, force: true }); - tempRoot = undefined; + await cleanupEmbeddedPiRunnerTestWorkspace(e2eWorkspace); + e2eWorkspace = undefined; }); -const makeConfig = (modelIds: string[]) => - ({ - models: { - providers: { - openai: { - api: "openai-responses", - apiKey: 
"sk-test", - baseUrl: "https://example.com", - models: modelIds.map((id) => ({ - id, - name: `Mock ${id}`, - reasoning: false, - input: ["text"], - cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: 16_000, - maxTokens: 2048, - })), - }, - }, - }, - }) satisfies OpenClawConfig; - -const immediateEnqueue = async (task: () => Promise) => task(); - const readSessionMessages = async (sessionFile: string) => { const raw = await fs.readFile(sessionFile, "utf-8"); return raw @@ -205,7 +180,7 @@ describe("sessions_yield e2e", () => { const sessionId = "yield-e2e-parent"; const sessionFile = path.join(workspaceDir, "session-yield-e2e.jsonl"); - const cfg = makeConfig(["mock-yield"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-yield"]); const result = await runEmbeddedPiAgent({ sessionId, @@ -304,7 +279,7 @@ describe("sessions_yield e2e", () => { const sessionId = "yield-e2e-abort"; const sessionFile = path.join(workspaceDir, "session-yield-abort.jsonl"); - const cfg = makeConfig(["mock-yield-abort"]); + const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-yield-abort"]); const result = await runEmbeddedPiAgent({ sessionId, diff --git a/src/agents/pi-embedded-runner/anthropic-stream-wrappers.ts b/src/agents/pi-embedded-runner/anthropic-stream-wrappers.ts index efed941762d..19b5701eaaa 100644 --- a/src/agents/pi-embedded-runner/anthropic-stream-wrappers.ts +++ b/src/agents/pi-embedded-runner/anthropic-stream-wrappers.ts @@ -7,6 +7,7 @@ import { usesOpenAiStringModeAnthropicToolChoice, } from "../provider-capabilities.js"; import { log } from "./logger.js"; +import { streamWithPayloadPatch } from "./stream-payload-utils.js"; const ANTHROPIC_CONTEXT_1M_BETA = "context-1m-2025-08-07"; const ANTHROPIC_1M_MODEL_PREFIXES = ["claude-opus-4", "claude-sonnet-4"] as const; @@ -341,18 +342,10 @@ export function createAnthropicFastModeWrapper( return underlying(model, context, options); } - const originalOnPayload = options?.onPayload; - return 
underlying(model, context, { - ...options, - onPayload: (payload) => { - if (payload && typeof payload === "object") { - const payloadObj = payload as Record; - if (payloadObj.service_tier === undefined) { - payloadObj.service_tier = serviceTier; - } - } - return originalOnPayload?.(payload, model); - }, + return streamWithPayloadPatch(underlying, model, context, options, (payloadObj) => { + if (payloadObj.service_tier === undefined) { + payloadObj.service_tier = serviceTier; + } }); }; } diff --git a/src/agents/pi-embedded-runner/compact.hooks.test.ts b/src/agents/pi-embedded-runner/compact.hooks.test.ts index e3ef243b429..a35060173ff 100644 --- a/src/agents/pi-embedded-runner/compact.hooks.test.ts +++ b/src/agents/pi-embedded-runner/compact.hooks.test.ts @@ -278,6 +278,7 @@ vi.mock("../../config/channel-capabilities.js", () => ({ })); vi.mock("../../utils/message-channel.js", () => ({ + INTERNAL_MESSAGE_CHANNEL: "webchat", normalizeMessageChannel: vi.fn(() => undefined), })); @@ -375,6 +376,16 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { unregisterApiProviders(getCustomApiRegistrySourceId("ollama")); }); + async function runDirectCompaction(customInstructions = "focus on decisions") { + return await compactEmbeddedPiSessionDirect({ + sessionId: "session-1", + sessionKey: "agent:main:session-1", + sessionFile: "/tmp/session.jsonl", + workspaceDir: "/tmp", + customInstructions, + }); + } + it("bootstraps runtime plugins with the resolved workspace", async () => { await compactEmbeddedPiSessionDirect({ sessionId: "session-1", @@ -472,13 +483,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { hookRunner.hasHooks.mockReturnValue(true); sanitizeSessionHistoryMock.mockResolvedValue([]); - const result = await compactEmbeddedPiSessionDirect({ - sessionId: "session-1", - sessionKey: "agent:main:session-1", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - customInstructions: "focus on decisions", - }); + const result = await 
runDirectCompaction(); expect(result.ok).toBe(true); const beforeContext = sessionHook("compact:before")?.context; @@ -528,13 +533,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => { details: { ok: true }, }); - const result = await compactEmbeddedPiSessionDirect({ - sessionId: "session-1", - sessionKey: "agent:main:session-1", - sessionFile: "/tmp/session.jsonl", - workspaceDir: "/tmp", - customInstructions: "focus on decisions", - }); + const result = await runDirectCompaction(); expect(result).toMatchObject({ ok: true, diff --git a/src/agents/pi-embedded-runner/lanes.test.ts b/src/agents/pi-embedded-runner/lanes.test.ts new file mode 100644 index 00000000000..f3625ddc6ec --- /dev/null +++ b/src/agents/pi-embedded-runner/lanes.test.ts @@ -0,0 +1,44 @@ +import { describe, expect, it } from "vitest"; +import { CommandLane } from "../../process/lanes.js"; +import { resolveGlobalLane, resolveSessionLane } from "./lanes.js"; + +describe("resolveGlobalLane", () => { + it("defaults to main lane when no lane is provided", () => { + expect(resolveGlobalLane()).toBe(CommandLane.Main); + expect(resolveGlobalLane("")).toBe(CommandLane.Main); + expect(resolveGlobalLane(" ")).toBe(CommandLane.Main); + }); + + it("maps cron lane to nested lane to prevent deadlocks", () => { + // When cron jobs trigger nested agent runs, the outer execution holds + // the cron lane slot. Inner work must use a separate lane to avoid + // deadlock. 
See: https://github.com/openclaw/openclaw/issues/44805 + expect(resolveGlobalLane("cron")).toBe(CommandLane.Nested); + expect(resolveGlobalLane(" cron ")).toBe(CommandLane.Nested); + }); + + it("preserves other lanes as-is", () => { + expect(resolveGlobalLane("main")).toBe(CommandLane.Main); + expect(resolveGlobalLane("subagent")).toBe(CommandLane.Subagent); + expect(resolveGlobalLane("nested")).toBe(CommandLane.Nested); + expect(resolveGlobalLane("custom-lane")).toBe("custom-lane"); + expect(resolveGlobalLane(" custom ")).toBe("custom"); + }); +}); + +describe("resolveSessionLane", () => { + it("defaults to main lane and prefixes with session:", () => { + expect(resolveSessionLane("")).toBe("session:main"); + expect(resolveSessionLane(" ")).toBe("session:main"); + }); + + it("adds session: prefix if not present", () => { + expect(resolveSessionLane("abc123")).toBe("session:abc123"); + expect(resolveSessionLane(" xyz ")).toBe("session:xyz"); + }); + + it("preserves existing session: prefix", () => { + expect(resolveSessionLane("session:abc")).toBe("session:abc"); + expect(resolveSessionLane("session:main")).toBe("session:main"); + }); +}); diff --git a/src/agents/pi-embedded-runner/lanes.ts b/src/agents/pi-embedded-runner/lanes.ts index 81b742ded9f..57ffd1b4255 100644 --- a/src/agents/pi-embedded-runner/lanes.ts +++ b/src/agents/pi-embedded-runner/lanes.ts @@ -7,6 +7,10 @@ export function resolveSessionLane(key: string) { export function resolveGlobalLane(lane?: string) { const cleaned = lane?.trim(); + // Cron jobs hold the cron lane slot; inner operations must use nested to avoid deadlock. + if (cleaned === CommandLane.Cron) { + return CommandLane.Nested; + } return cleaned ? 
cleaned : CommandLane.Main; } diff --git a/src/agents/pi-embedded-runner/openai-stream-wrappers.ts b/src/agents/pi-embedded-runner/openai-stream-wrappers.ts index d0b483e83ec..8542f329cbe 100644 --- a/src/agents/pi-embedded-runner/openai-stream-wrappers.ts +++ b/src/agents/pi-embedded-runner/openai-stream-wrappers.ts @@ -2,6 +2,7 @@ import type { StreamFn } from "@mariozechner/pi-agent-core"; import type { SimpleStreamOptions } from "@mariozechner/pi-ai"; import { streamSimple } from "@mariozechner/pi-ai"; import { log } from "./logger.js"; +import { streamWithPayloadPatch } from "./stream-payload-utils.js"; type OpenAIServiceTier = "auto" | "default" | "flex" | "priority"; type OpenAIReasoningEffort = "low" | "medium" | "high"; @@ -325,18 +326,10 @@ export function createOpenAIServiceTierWrapper( ) { return underlying(model, context, options); } - const originalOnPayload = options?.onPayload; - return underlying(model, context, { - ...options, - onPayload: (payload) => { - if (payload && typeof payload === "object") { - const payloadObj = payload as Record; - if (payloadObj.service_tier === undefined) { - payloadObj.service_tier = serviceTier; - } - } - return originalOnPayload?.(payload, model); - }, + return streamWithPayloadPatch(underlying, model, context, options, (payloadObj) => { + if (payloadObj.service_tier === undefined) { + payloadObj.service_tier = serviceTier; + } }); }; } diff --git a/src/agents/pi-embedded-runner/run/attempt.spawn-workspace.test.ts b/src/agents/pi-embedded-runner/run/attempt.spawn-workspace.test.ts index c18d439e632..53edfbbc6cc 100644 --- a/src/agents/pi-embedded-runner/run/attempt.spawn-workspace.test.ts +++ b/src/agents/pi-embedded-runner/run/attempt.spawn-workspace.test.ts @@ -249,6 +249,72 @@ function createSubscriptionMock() { }; } +function resetEmbeddedAttemptHarness( + params: { + includeSpawnSubagent?: boolean; + subscribeImpl?: () => ReturnType; + sessionMessages?: AgentMessage[]; + } = {}, +) { + if 
(params.includeSpawnSubagent) { + hoisted.spawnSubagentDirectMock.mockReset().mockResolvedValue({ + status: "accepted", + childSessionKey: "agent:main:subagent:child", + runId: "run-child", + }); + } + hoisted.createAgentSessionMock.mockReset(); + hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager); + hoisted.resolveSandboxContextMock.mockReset(); + hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({ + release: async () => {}, + }); + hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null); + hoisted.sessionManager.branch.mockReset(); + hoisted.sessionManager.resetLeaf.mockReset(); + hoisted.sessionManager.buildSessionContext + .mockReset() + .mockReturnValue({ messages: params.sessionMessages ?? [] }); + hoisted.sessionManager.appendCustomEntry.mockReset(); + if (params.subscribeImpl) { + hoisted.subscribeEmbeddedPiSessionMock.mockReset().mockImplementation(params.subscribeImpl); + } +} + +async function cleanupTempPaths(tempPaths: string[]) { + while (tempPaths.length > 0) { + const target = tempPaths.pop(); + if (target) { + await fs.rm(target, { recursive: true, force: true }); + } + } +} + +function createDefaultEmbeddedSession(): MutableSession { + const session: MutableSession = { + sessionId: "embedded-session", + messages: [], + isCompacting: false, + isStreaming: false, + agent: { + replaceMessages: (messages: unknown[]) => { + session.messages = [...messages]; + }, + }, + prompt: async () => { + session.messages = [ + ...session.messages, + { role: "assistant", content: "done", timestamp: 2 }, + ]; + }, + abort: async () => {}, + dispose: () => {}, + steer: async () => {}, + }; + + return session; +} + const testModel = { api: "openai-completions", provider: "openai", @@ -269,32 +335,14 @@ describe("runEmbeddedAttempt sessions_spawn workspace inheritance", () => { const tempPaths: string[] = []; beforeEach(() => { - hoisted.spawnSubagentDirectMock.mockReset().mockResolvedValue({ - status: 
"accepted", - childSessionKey: "agent:main:subagent:child", - runId: "run-child", + resetEmbeddedAttemptHarness({ + includeSpawnSubagent: true, + subscribeImpl: createSubscriptionMock, }); - hoisted.createAgentSessionMock.mockReset(); - hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager); - hoisted.resolveSandboxContextMock.mockReset(); - hoisted.subscribeEmbeddedPiSessionMock.mockReset().mockImplementation(createSubscriptionMock); - hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({ - release: async () => {}, - }); - hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null); - hoisted.sessionManager.branch.mockReset(); - hoisted.sessionManager.resetLeaf.mockReset(); - hoisted.sessionManager.buildSessionContext.mockReset().mockReturnValue({ messages: [] }); - hoisted.sessionManager.appendCustomEntry.mockReset(); }); afterEach(async () => { - while (tempPaths.length > 0) { - const target = tempPaths.pop(); - if (target) { - await fs.rm(target, { recursive: true, force: true }); - } - } + await cleanupTempPaths(tempPaths); }); it("passes the real workspace to sessions_spawn when workspaceAccess is ro", async () => { @@ -394,26 +442,11 @@ describe("runEmbeddedAttempt cache-ttl tracking after compaction", () => { const tempPaths: string[] = []; beforeEach(() => { - hoisted.createAgentSessionMock.mockReset(); - hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager); - hoisted.resolveSandboxContextMock.mockReset(); - hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({ - release: async () => {}, - }); - hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null); - hoisted.sessionManager.branch.mockReset(); - hoisted.sessionManager.resetLeaf.mockReset(); - hoisted.sessionManager.buildSessionContext.mockReset().mockReturnValue({ messages: [] }); - hoisted.sessionManager.appendCustomEntry.mockReset(); + resetEmbeddedAttemptHarness(); }); afterEach(async () => { 
- while (tempPaths.length > 0) { - const target = tempPaths.pop(); - if (target) { - await fs.rm(target, { recursive: true, force: true }); - } - } + await cleanupTempPaths(tempPaths); }); async function runAttemptWithCacheTtl(compactionCount: number) { @@ -428,30 +461,9 @@ describe("runEmbeddedAttempt cache-ttl tracking after compaction", () => { getCompactionCount: () => compactionCount, })); - hoisted.createAgentSessionMock.mockImplementation(async () => { - const session: MutableSession = { - sessionId: "embedded-session", - messages: [], - isCompacting: false, - isStreaming: false, - agent: { - replaceMessages: (messages: unknown[]) => { - session.messages = [...messages]; - }, - }, - prompt: async () => { - session.messages = [ - ...session.messages, - { role: "assistant", content: "done", timestamp: 2 }, - ]; - }, - abort: async () => {}, - dispose: () => {}, - steer: async () => {}, - }; - - return { session }; - }); + hoisted.createAgentSessionMock.mockImplementation(async () => ({ + session: createDefaultEmbeddedSession(), + })); return await runEmbeddedAttempt({ sessionId: "embedded-session", @@ -591,30 +603,9 @@ describe("runEmbeddedAttempt context engine sessionKey forwarding", () => { .mockReset() .mockReturnValue({ messages: seedMessages }); - hoisted.createAgentSessionMock.mockImplementation(async () => { - const session: MutableSession = { - sessionId: "embedded-session", - messages: [], - isCompacting: false, - isStreaming: false, - agent: { - replaceMessages: (messages: unknown[]) => { - session.messages = [...messages]; - }, - }, - prompt: async () => { - session.messages = [ - ...session.messages, - { role: "assistant", content: "done", timestamp: 2 }, - ]; - }, - abort: async () => {}, - dispose: () => {}, - steer: async () => {}, - }; - - return { session }; - }); + hoisted.createAgentSessionMock.mockImplementation(async () => ({ + session: createDefaultEmbeddedSession(), + })); return await runEmbeddedAttempt({ sessionId: "embedded-session", 
diff --git a/src/agents/pi-embedded-runner/stream-payload-utils.ts b/src/agents/pi-embedded-runner/stream-payload-utils.ts new file mode 100644 index 00000000000..580bf5b1391 --- /dev/null +++ b/src/agents/pi-embedded-runner/stream-payload-utils.ts @@ -0,0 +1,20 @@ +import type { StreamFn } from "@mariozechner/pi-agent-core"; + +export function streamWithPayloadPatch( + underlying: StreamFn, + model: Parameters<StreamFn>[0], + context: Parameters<StreamFn>[1], + options: Parameters<StreamFn>[2], + patchPayload: (payload: Record<string, unknown>) => void, +) { + const originalOnPayload = options?.onPayload; + return underlying(model, context, { + ...options, + onPayload: (payload) => { + if (payload && typeof payload === "object") { + patchPayload(payload as Record<string, unknown>); + } + return originalOnPayload?.(payload, model); + }, + }); +} diff --git a/src/agents/sandbox/fs-bridge-mutation-helper.test.ts b/src/agents/sandbox/fs-bridge-mutation-helper.test.ts index 57f22cc84b6..973c81341d1 100644 --- a/src/agents/sandbox/fs-bridge-mutation-helper.test.ts +++ b/src/agents/sandbox/fs-bridge-mutation-helper.test.ts @@ -1,22 +1,13 @@ import { spawnSync } from "node:child_process"; import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import { describe, expect, it } from "vitest"; +import { withTempDir } from "../../test-helpers/temp-dir.js"; import { buildPinnedWritePlan, SANDBOX_PINNED_MUTATION_PYTHON, } from "./fs-bridge-mutation-helper.js"; -async function withTempRoot<T>(prefix: string, run: (root: string) => Promise<T>): Promise<T> { - const root = await fs.mkdtemp(path.join(os.tmpdir(), prefix)); - try { - return await run(root); - } finally { - await fs.rm(root, { recursive: true, force: true }); - } -} - function runMutation(args: string[], input?: string) { return spawnSync("python3", ["-c", SANDBOX_PINNED_MUTATION_PYTHON, ...args], { input, @@ -56,7 +47,7 @@ function runWritePlan(args: string[], input?: string) { describe("sandbox pinned mutation helper", () => { it("writes through a 
pinned directory fd", async () => { - await withTempRoot("openclaw-mutation-helper-", async (root) => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { const workspace = path.join(root, "workspace"); await fs.mkdir(workspace, { recursive: true }); @@ -72,7 +63,7 @@ describe("sandbox pinned mutation helper", () => { it.runIf(process.platform !== "win32")( "preserves stdin payload bytes when the pinned write plan runs through sh", async () => { - await withTempRoot("openclaw-mutation-helper-", async (root) => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { const workspace = path.join(root, "workspace"); await fs.mkdir(workspace, { recursive: true }); @@ -92,7 +83,7 @@ describe("sandbox pinned mutation helper", () => { it.runIf(process.platform !== "win32")( "rejects symlink-parent writes instead of materializing a temp file outside the mount", async () => { - await withTempRoot("openclaw-mutation-helper-", async (root) => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { const workspace = path.join(root, "workspace"); const outside = path.join(root, "outside"); await fs.mkdir(workspace, { recursive: true }); @@ -108,7 +99,7 @@ describe("sandbox pinned mutation helper", () => { ); it.runIf(process.platform !== "win32")("rejects symlink segments during mkdirp", async () => { - await withTempRoot("openclaw-mutation-helper-", async (root) => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { const workspace = path.join(root, "workspace"); const outside = path.join(root, "outside"); await fs.mkdir(workspace, { recursive: true }); @@ -123,7 +114,7 @@ describe("sandbox pinned mutation helper", () => { }); it.runIf(process.platform !== "win32")("remove unlinks the symlink itself", async () => { - await withTempRoot("openclaw-mutation-helper-", async (root) => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { const 
workspace = path.join(root, "workspace"); const outside = path.join(root, "outside"); await fs.mkdir(workspace, { recursive: true }); @@ -144,7 +135,7 @@ describe("sandbox pinned mutation helper", () => { it.runIf(process.platform !== "win32")( "rejects symlink destination parents during rename", async () => { - await withTempRoot("openclaw-mutation-helper-", async (root) => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { const workspace = path.join(root, "workspace"); const outside = path.join(root, "outside"); await fs.mkdir(workspace, { recursive: true }); @@ -175,7 +166,7 @@ describe("sandbox pinned mutation helper", () => { it.runIf(process.platform !== "win32")( "copies directories across different mount roots during rename fallback", async () => { - await withTempRoot("openclaw-mutation-helper-", async (root) => { + await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => { const sourceRoot = path.join(root, "source"); const destRoot = path.join(root, "dest"); await fs.mkdir(path.join(sourceRoot, "dir", "nested"), { recursive: true }); diff --git a/src/agents/test-helpers/pi-embedded-runner-e2e-fixtures.ts b/src/agents/test-helpers/pi-embedded-runner-e2e-fixtures.ts new file mode 100644 index 00000000000..1d987c44d1a --- /dev/null +++ b/src/agents/test-helpers/pi-embedded-runner-e2e-fixtures.ts @@ -0,0 +1,57 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import type { OpenClawConfig } from "../../config/config.js"; + +export type EmbeddedPiRunnerTestWorkspace = { + tempRoot: string; + agentDir: string; + workspaceDir: string; +}; + +export async function createEmbeddedPiRunnerTestWorkspace( + prefix: string, +): Promise<EmbeddedPiRunnerTestWorkspace> { + const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), prefix)); + const agentDir = path.join(tempRoot, "agent"); + const workspaceDir = path.join(tempRoot, "workspace"); + await fs.mkdir(agentDir, { recursive: true }); + await 
fs.mkdir(workspaceDir, { recursive: true }); + return { tempRoot, agentDir, workspaceDir }; +} + +export async function cleanupEmbeddedPiRunnerTestWorkspace( + workspace: EmbeddedPiRunnerTestWorkspace | undefined, +): Promise<void> { + if (!workspace) { + return; + } + await fs.rm(workspace.tempRoot, { recursive: true, force: true }); +} + +export function createEmbeddedPiRunnerOpenAiConfig(modelIds: string[]): OpenClawConfig { + return { + models: { + providers: { + openai: { + api: "openai-responses", + apiKey: "sk-test", + baseUrl: "https://example.com", + models: modelIds.map((id) => ({ + id, + name: `Mock ${id}`, + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 16_000, + maxTokens: 2048, + })), + }, + }, + }, + }; +} + +export async function immediateEnqueue<T>(task: () => Promise<T>): Promise<T> { + return await task(); +} diff --git a/src/agents/tools/sessions-history-tool.ts b/src/agents/tools/sessions-history-tool.ts index 3d5deeadcdb..a3e8d4d9461 100644 --- a/src/agents/tools/sessions-history-tool.ts +++ b/src/agents/tools/sessions-history-tool.ts @@ -1,5 +1,5 @@ import { Type } from "@sinclair/typebox"; -import { loadConfig } from "../../config/config.js"; +import { type OpenClawConfig, loadConfig } from "../../config/config.js"; import { callGateway } from "../../gateway/call.js"; import { capArrayByJsonBytes } from "../../gateway/session-utils.fs.js"; import { jsonUtf8Bytes } from "../../infra/json-utf8-bytes.js"; @@ -169,6 +169,7 @@ function enforceSessionsHistoryHardCap(params: { export function createSessionsHistoryTool(opts?: { agentSessionKey?: string; sandboxed?: boolean; + config?: OpenClawConfig; }): AnyAgentTool { return { label: "Session History", @@ -180,7 +181,7 @@ export function createSessionsHistoryTool(opts?: { const sessionKeyParam = readStringParam(params, "sessionKey", { required: true, }); - const cfg = loadConfig(); + const cfg = opts?.config ?? 
loadConfig(); const { mainKey, alias, effectiveRequesterKey, restrictToSpawned } = resolveSandboxedSessionToolContext({ cfg, diff --git a/src/agents/tools/sessions-list-tool.ts b/src/agents/tools/sessions-list-tool.ts index 0cba87e5653..ff3f56212d2 100644 --- a/src/agents/tools/sessions-list-tool.ts +++ b/src/agents/tools/sessions-list-tool.ts @@ -1,6 +1,6 @@ import path from "node:path"; import { Type } from "@sinclair/typebox"; -import { loadConfig } from "../../config/config.js"; +import { type OpenClawConfig, loadConfig } from "../../config/config.js"; import { resolveSessionFilePath, resolveSessionFilePathOptions, @@ -33,6 +33,7 @@ const SessionsListToolSchema = Type.Object({ export function createSessionsListTool(opts?: { agentSessionKey?: string; sandboxed?: boolean; + config?: OpenClawConfig; }): AnyAgentTool { return { label: "Sessions", @@ -41,7 +42,7 @@ export function createSessionsListTool(opts?: { parameters: SessionsListToolSchema, execute: async (_toolCallId, args) => { const params = args as Record; - const cfg = loadConfig(); + const cfg = opts?.config ?? 
loadConfig(); const { mainKey, alias, requesterInternalKey, restrictToSpawned } = resolveSandboxedSessionToolContext({ cfg, diff --git a/src/agents/tools/sessions-send-tool.ts b/src/agents/tools/sessions-send-tool.ts index 82eff0adf7a..d9ad6e6b907 100644 --- a/src/agents/tools/sessions-send-tool.ts +++ b/src/agents/tools/sessions-send-tool.ts @@ -1,6 +1,6 @@ import crypto from "node:crypto"; import { Type } from "@sinclair/typebox"; -import { loadConfig } from "../../config/config.js"; +import { type OpenClawConfig, loadConfig } from "../../config/config.js"; import { callGateway } from "../../gateway/call.js"; import { normalizeAgentId, resolveAgentIdFromSessionKey } from "../../routing/session-key.js"; import { SESSION_LABEL_MAX_LENGTH } from "../../sessions/session-label.js"; @@ -36,6 +36,7 @@ export function createSessionsSendTool(opts?: { agentSessionKey?: string; agentChannel?: GatewayMessageChannel; sandboxed?: boolean; + config?: OpenClawConfig; }): AnyAgentTool { return { label: "Session Send", @@ -46,7 +47,7 @@ export function createSessionsSendTool(opts?: { execute: async (_toolCallId, args) => { const params = args as Record; const message = readStringParam(params, "message", { required: true }); - const cfg = loadConfig(); + const cfg = opts?.config ?? 
loadConfig(); const { mainKey, alias, effectiveRequesterKey, restrictToSpawned } = resolveSandboxedSessionToolContext({ cfg, diff --git a/src/auto-reply/reply/normalize-reply.ts b/src/auto-reply/reply/normalize-reply.ts index 9aafb66bd34..793cbcc326f 100644 --- a/src/auto-reply/reply/normalize-reply.ts +++ b/src/auto-reply/reply/normalize-reply.ts @@ -12,11 +12,13 @@ import { resolveResponsePrefixTemplate, type ResponsePrefixContext, } from "./response-prefix-template.js"; +import { hasSlackDirectives, parseSlackDirectives } from "./slack-directives.js"; export type NormalizeReplySkipReason = "empty" | "silent" | "heartbeat"; export type NormalizeReplyOptions = { responsePrefix?: string; + enableSlackInteractiveReplies?: boolean; /** Context for template variable interpolation in responsePrefix */ responsePrefixContext?: ResponsePrefixContext; onHeartbeatStrip?: () => void; @@ -105,5 +107,10 @@ export function normalizeReplyPayload( text = `${effectivePrefix} ${text}`; } - return { ...enrichedPayload, text }; + enrichedPayload = { ...enrichedPayload, text }; + if (opts.enableSlackInteractiveReplies && text && hasSlackDirectives(text)) { + enrichedPayload = parseSlackDirectives(enrichedPayload); + } + + return enrichedPayload; } diff --git a/src/auto-reply/reply/reply-dispatcher.ts b/src/auto-reply/reply/reply-dispatcher.ts index 7272a3081a2..d212245ef59 100644 --- a/src/auto-reply/reply/reply-dispatcher.ts +++ b/src/auto-reply/reply/reply-dispatcher.ts @@ -43,6 +43,7 @@ function getHumanDelay(config: HumanDelayConfig | undefined): number { export type ReplyDispatcherOptions = { deliver: ReplyDispatchDeliverer; responsePrefix?: string; + enableSlackInteractiveReplies?: boolean; /** Static context for response prefix template interpolation. */ responsePrefixContext?: ResponsePrefixContext; /** Dynamic context provider for response prefix template interpolation. 
@@ -84,7 +85,11 @@ export type ReplyDispatcher = { type NormalizeReplyPayloadInternalOptions = Pick< ReplyDispatcherOptions, - "responsePrefix" | "responsePrefixContext" | "responsePrefixContextProvider" | "onHeartbeatStrip" + | "responsePrefix" + | "enableSlackInteractiveReplies" + | "responsePrefixContext" + | "responsePrefixContextProvider" + | "onHeartbeatStrip" > & { onSkip?: (reason: NormalizeReplySkipReason) => void; }; @@ -98,6 +103,7 @@ function normalizeReplyPayloadInternal( return normalizeReplyPayload(payload, { responsePrefix: opts.responsePrefix, + enableSlackInteractiveReplies: opts.enableSlackInteractiveReplies, responsePrefixContext: prefixContext, onHeartbeatStrip: opts.onHeartbeatStrip, onSkip: opts.onSkip, @@ -129,6 +135,7 @@ export function createReplyDispatcher(options: ReplyDispatcherOptions): ReplyDis const enqueue = (kind: ReplyDispatchKind, payload: ReplyPayload) => { const normalized = normalizeReplyPayloadInternal(payload, { responsePrefix: options.responsePrefix, + enableSlackInteractiveReplies: options.enableSlackInteractiveReplies, responsePrefixContext: options.responsePrefixContext, responsePrefixContextProvider: options.responsePrefixContextProvider, onHeartbeatStrip: options.onHeartbeatStrip, diff --git a/src/auto-reply/reply/reply-flow.test.ts b/src/auto-reply/reply/reply-flow.test.ts index d0fd692c2e1..d7efa640b1c 100644 --- a/src/auto-reply/reply/reply-flow.test.ts +++ b/src/auto-reply/reply/reply-flow.test.ts @@ -16,6 +16,7 @@ import { } from "./queue.js"; import { createReplyDispatcher } from "./reply-dispatcher.js"; import { createReplyToModeFilter, resolveReplyToMode } from "./reply-threading.js"; +import { parseSlackDirectives, hasSlackDirectives } from "./slack-directives.js"; describe("normalizeInboundTextNewlines", () => { it("normalizes real newlines and preserves literal backslash-n sequences", () => { @@ -196,6 +197,8 @@ describe("inbound context contract (providers + extensions)", () => { const getLineData = 
(result: ReturnType) => (result.channelData?.line as Record | undefined) ?? {}; +const getSlackData = (result: ReturnType) => + (result.channelData?.slack as Record | undefined) ?? {}; describe("hasLineDirectives", () => { it("matches expected detection across directive patterns", () => { @@ -219,6 +222,24 @@ describe("hasLineDirectives", () => { }); }); +describe("hasSlackDirectives", () => { + it("matches expected detection across Slack directive patterns", () => { + const cases: Array<{ text: string; expected: boolean }> = [ + { text: "Pick one [[slack_buttons: Approve:approve, Reject:reject]]", expected: true }, + { + text: "[[slack_select: Choose a project | Alpha:alpha, Beta:beta]]", + expected: true, + }, + { text: "Just regular text", expected: false }, + { text: "[[buttons: Menu | Choose | A:a]]", expected: false }, + ]; + + for (const testCase of cases) { + expect(hasSlackDirectives(testCase.text)).toBe(testCase.expected); + } + }); +}); + describe("parseLineDirectives", () => { describe("quick_replies", () => { it("parses quick replies variants", () => { @@ -579,6 +600,279 @@ describe("parseLineDirectives", () => { }); }); +describe("parseSlackDirectives", () => { + it("builds section and button blocks from slack_buttons directives", () => { + const result = parseSlackDirectives({ + text: "Choose an action [[slack_buttons: Approve:approve, Reject:reject]]", + }); + + expect(result.text).toBe("Choose an action"); + expect(getSlackData(result).blocks).toEqual([ + { + type: "section", + text: { + type: "mrkdwn", + text: "Choose an action", + }, + }, + { + type: "actions", + block_id: "openclaw_reply_buttons_1", + elements: [ + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Approve", + emoji: true, + }, + value: "reply_1_approve", + }, + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Reject", + emoji: true, + }, + value: "reply_2_reject", + }, + ], + }, + 
]); + }); + + it("builds static select blocks from slack_select directives", () => { + const result = parseSlackDirectives({ + text: "[[slack_select: Choose a project | Alpha:alpha, Beta:beta]]", + }); + + expect(result.text).toBeUndefined(); + expect(getSlackData(result).blocks).toEqual([ + { + type: "actions", + block_id: "openclaw_reply_select_1", + elements: [ + { + type: "static_select", + action_id: "openclaw:reply_select", + placeholder: { + type: "plain_text", + text: "Choose a project", + emoji: true, + }, + options: [ + { + text: { + type: "plain_text", + text: "Alpha", + emoji: true, + }, + value: "reply_1_alpha", + }, + { + text: { + type: "plain_text", + text: "Beta", + emoji: true, + }, + value: "reply_2_beta", + }, + ], + }, + ], + }, + ]); + }); + + it("appends Slack interactive blocks to existing slack blocks", () => { + const result = parseSlackDirectives({ + text: "Act now [[slack_buttons: Retry:retry]]", + channelData: { + slack: { + blocks: [{ type: "divider" }], + }, + }, + }); + + expect(result.text).toBe("Act now"); + expect(getSlackData(result).blocks).toEqual([ + { type: "divider" }, + { + type: "section", + text: { + type: "mrkdwn", + text: "Act now", + }, + }, + { + type: "actions", + block_id: "openclaw_reply_buttons_1", + elements: [ + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Retry", + emoji: true, + }, + value: "reply_1_retry", + }, + ], + }, + ]); + }); + + it("preserves authored order for mixed Slack directives", () => { + const result = parseSlackDirectives({ + text: "[[slack_select: Pick one | Alpha:alpha]] then [[slack_buttons: Retry:retry]]", + }); + + expect(getSlackData(result).blocks).toEqual([ + { + type: "actions", + block_id: "openclaw_reply_select_1", + elements: [ + { + type: "static_select", + action_id: "openclaw:reply_select", + placeholder: { + type: "plain_text", + text: "Pick one", + emoji: true, + }, + options: [ + { + text: { + type: "plain_text", + text: 
"Alpha", + emoji: true, + }, + value: "reply_1_alpha", + }, + ], + }, + ], + }, + { + type: "section", + text: { + type: "mrkdwn", + text: "then", + }, + }, + { + type: "actions", + block_id: "openclaw_reply_buttons_1", + elements: [ + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Retry", + emoji: true, + }, + value: "reply_1_retry", + }, + ], + }, + ]); + }); + + it("truncates Slack interactive reply strings to safe Block Kit limits", () => { + const long = "x".repeat(120); + const result = parseSlackDirectives({ + text: `${"y".repeat(3100)} [[slack_select: ${long} | ${long}:${long}]] [[slack_buttons: ${long}:${long}]]`, + }); + + const blocks = getSlackData(result).blocks as Array>; + expect(blocks).toHaveLength(3); + expect(((blocks[0]?.text as { text?: string })?.text ?? "").length).toBeLessThanOrEqual(3000); + expect( + ( + ( + (blocks[1]?.elements as Array>)?.[0]?.placeholder as { + text?: string; + } + )?.text ?? "" + ).length, + ).toBeLessThanOrEqual(75); + expect( + ( + ( + ( + (blocks[1]?.elements as Array>)?.[0]?.options as Array< + Record + > + )?.[0]?.text as { text?: string } + )?.text ?? "" + ).length, + ).toBeLessThanOrEqual(75); + expect( + ( + (( + (blocks[1]?.elements as Array>)?.[0]?.options as Array< + Record + > + )?.[0]?.value as string | undefined) ?? "" + ).length, + ).toBeLessThanOrEqual(75); + expect( + ( + ( + (blocks[2]?.elements as Array>)?.[0]?.text as { + text?: string; + } + )?.text ?? "" + ).length, + ).toBeLessThanOrEqual(75); + expect( + ( + ((blocks[2]?.elements as Array>)?.[0]?.value as + | string + | undefined) ?? 
"" + ).length, + ).toBeLessThanOrEqual(75); + }); + + it("falls back to the original payload when generated blocks would exceed Slack limits", () => { + const result = parseSlackDirectives({ + text: "Choose [[slack_buttons: Retry:retry]]", + channelData: { + slack: { + blocks: Array.from({ length: 49 }, () => ({ type: "divider" })), + }, + }, + }); + + expect(result).toEqual({ + text: "Choose [[slack_buttons: Retry:retry]]", + channelData: { + slack: { + blocks: Array.from({ length: 49 }, () => ({ type: "divider" })), + }, + }, + }); + }); + + it("ignores malformed existing Slack blocks during directive compilation", () => { + expect(() => + parseSlackDirectives({ + text: "Choose [[slack_buttons: Retry:retry]]", + channelData: { + slack: { + blocks: "{not json}", + }, + }, + }), + ).not.toThrow(); + }); +}); + function createDeferred() { let resolve!: (value: T) => void; let reject!: (reason?: unknown) => void; @@ -1485,6 +1779,43 @@ describe("createReplyDispatcher", () => { expect(onHeartbeatStrip).toHaveBeenCalledTimes(2); }); + it("compiles Slack directives in dispatcher flows when enabled", async () => { + const deliver = vi.fn().mockResolvedValue(undefined); + const dispatcher = createReplyDispatcher({ + deliver, + enableSlackInteractiveReplies: true, + }); + + expect( + dispatcher.sendFinalReply({ + text: "Choose [[slack_buttons: Retry:retry]]", + }), + ).toBe(true); + await dispatcher.waitForIdle(); + + expect(deliver).toHaveBeenCalledTimes(1); + expect(deliver.mock.calls[0]?.[0]).toMatchObject({ + text: "Choose", + channelData: { + slack: { + blocks: [ + { + type: "section", + text: { + type: "mrkdwn", + text: "Choose", + }, + }, + { + type: "actions", + block_id: "openclaw_reply_buttons_1", + }, + ], + }, + }, + }); + }); + it("avoids double-prefixing and keeps media when heartbeat is the only text", async () => { const deliver = vi.fn().mockResolvedValue(undefined); const dispatcher = createReplyDispatcher({ diff --git 
a/src/auto-reply/reply/reply-utils.test.ts b/src/auto-reply/reply/reply-utils.test.ts index c1e76e50403..88f092bf1e5 100644 --- a/src/auto-reply/reply/reply-utils.test.ts +++ b/src/auto-reply/reply/reply-utils.test.ts @@ -150,6 +150,67 @@ describe("normalizeReplyPayload", () => { expect(result!.text).toBe(""); expect(result!.mediaUrl).toBe("https://example.com/img.png"); }); + + it("does not compile Slack directives unless interactive replies are enabled", () => { + const result = normalizeReplyPayload({ + text: "hello [[slack_buttons: Retry:retry, Ignore:ignore]]", + }); + + expect(result).not.toBeNull(); + expect(result!.text).toBe("hello [[slack_buttons: Retry:retry, Ignore:ignore]]"); + expect(result!.channelData).toBeUndefined(); + }); + + it("applies responsePrefix before compiling Slack directives into blocks", () => { + const result = normalizeReplyPayload( + { + text: "hello [[slack_buttons: Retry:retry, Ignore:ignore]]", + }, + { responsePrefix: "[bot]", enableSlackInteractiveReplies: true }, + ); + + expect(result).not.toBeNull(); + expect(result!.text).toBe("[bot] hello"); + expect(result!.channelData).toEqual({ + slack: { + blocks: [ + { + type: "section", + text: { + type: "mrkdwn", + text: "[bot] hello", + }, + }, + { + type: "actions", + block_id: "openclaw_reply_buttons_1", + elements: [ + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Retry", + emoji: true, + }, + value: "reply_1_retry", + }, + { + type: "button", + action_id: "openclaw:reply_button", + text: { + type: "plain_text", + text: "Ignore", + emoji: true, + }, + value: "reply_2_ignore", + }, + ], + }, + ], + }, + }); + }); }); describe("typing controller", () => { diff --git a/src/auto-reply/reply/route-reply.test.ts b/src/auto-reply/reply/route-reply.test.ts index 9b5d432149a..5a0405da22b 100644 --- a/src/auto-reply/reply/route-reply.test.ts +++ b/src/auto-reply/reply/route-reply.test.ts @@ -201,6 +201,55 @@ describe("routeReply", () 
=> { ); }); + it("routes directive-only Slack replies when interactive replies are enabled", async () => { + mocks.sendMessageSlack.mockClear(); + const cfg = { + channels: { + slack: { + capabilities: { interactiveReplies: true }, + }, + }, + } as unknown as OpenClawConfig; + await routeReply({ + payload: { text: "[[slack_select: Choose one | Alpha:alpha]]" }, + channel: "slack", + to: "channel:C123", + cfg, + }); + expect(mocks.sendMessageSlack).toHaveBeenCalledWith( + "channel:C123", + "", + expect.objectContaining({ + blocks: [ + expect.objectContaining({ + type: "actions", + block_id: "openclaw_reply_select_1", + }), + ], + }), + ); + }); + + it("does not bypass the empty-reply guard for invalid Slack blocks", async () => { + mocks.sendMessageSlack.mockClear(); + const res = await routeReply({ + payload: { + text: " ", + channelData: { + slack: { + blocks: " ", + }, + }, + }, + channel: "slack", + to: "channel:C123", + cfg: {} as never, + }); + + expect(res.ok).toBe(true); + expect(mocks.sendMessageSlack).not.toHaveBeenCalled(); + }); + it("does not derive responsePrefix from agent identity when routing", async () => { mocks.sendMessageSlack.mockClear(); const cfg = { diff --git a/src/auto-reply/reply/route-reply.ts b/src/auto-reply/reply/route-reply.ts index a489bedcbbf..8b3319698b2 100644 --- a/src/auto-reply/reply/route-reply.ts +++ b/src/auto-reply/reply/route-reply.ts @@ -12,6 +12,8 @@ import { resolveEffectiveMessagesConfig } from "../../agents/identity.js"; import { normalizeChannelId } from "../../channels/plugins/index.js"; import type { OpenClawConfig } from "../../config/config.js"; import { buildOutboundSessionContext } from "../../infra/outbound/session-context.js"; +import { parseSlackBlocksInput } from "../../slack/blocks-input.js"; +import { isSlackInteractiveRepliesEnabled } from "../../slack/interactive-replies.js"; import { INTERNAL_MESSAGE_CHANNEL, normalizeMessageChannel } from "../../utils/message-channel.js"; import type { 
OriginatingChannelType } from "../templating.js"; import type { ReplyPayload } from "../types.js"; @@ -94,6 +96,8 @@ export async function routeReply(params: RouteReplyParams): Promise; +type SlackChannelData = { + blocks?: unknown; +}; + +type SlackChoice = { + label: string; + value: string; +}; + +function truncateSlackText(value: string, max: number): string { + const trimmed = value.trim(); + if (trimmed.length <= max) { + return trimmed; + } + if (max <= 1) { + return trimmed.slice(0, max); + } + return `${trimmed.slice(0, max - 1)}…`; +} + +function parseChoice(raw: string): SlackChoice | null { + const trimmed = raw.trim(); + if (!trimmed) { + return null; + } + const delimiter = trimmed.indexOf(":"); + if (delimiter === -1) { + return { + label: trimmed, + value: trimmed, + }; + } + const label = trimmed.slice(0, delimiter).trim(); + const value = trimmed.slice(delimiter + 1).trim(); + if (!label || !value) { + return null; + } + return { label, value }; +} + +function parseChoices(raw: string, maxItems: number): SlackChoice[] { + return raw + .split(",") + .map((entry) => parseChoice(entry)) + .filter((entry): entry is SlackChoice => Boolean(entry)) + .slice(0, maxItems); +} + +function buildSlackReplyChoiceToken(value: string, index: number): string { + const slug = value + .trim() + .toLowerCase() + .replace(/[^a-z0-9]+/g, "_") + .replace(/^_+|_+$/g, ""); + return truncateSlackText(`reply_${index}_${slug || "choice"}`, SLACK_OPTION_VALUE_MAX); +} + +function buildSectionBlock(text: string): SlackBlock | null { + const trimmed = text.trim(); + if (!trimmed) { + return null; + } + return { + type: "section", + text: { + type: "mrkdwn", + text: truncateSlackText(trimmed, SLACK_SECTION_TEXT_MAX), + }, + }; +} + +function buildButtonsBlock(raw: string, index: number): SlackBlock | null { + const choices = parseChoices(raw, SLACK_BUTTON_MAX_ITEMS); + if (choices.length === 0) { + return null; + } + return { + type: "actions", + block_id: 
`openclaw_reply_buttons_${index}`, + elements: choices.map((choice, choiceIndex) => ({ + type: "button", + action_id: SLACK_REPLY_BUTTON_ACTION_ID, + text: { + type: "plain_text", + text: truncateSlackText(choice.label, SLACK_PLAIN_TEXT_MAX), + emoji: true, + }, + value: buildSlackReplyChoiceToken(choice.value, choiceIndex + 1), + })), + }; +} + +function buildSelectBlock(raw: string, index: number): SlackBlock | null { + const parts = raw + .split("|") + .map((entry) => entry.trim()) + .filter(Boolean); + if (parts.length === 0) { + return null; + } + const [first, second] = parts; + const placeholder = parts.length >= 2 ? first : "Choose an option"; + const choices = parseChoices(parts.length >= 2 ? second : first, SLACK_SELECT_MAX_ITEMS); + if (choices.length === 0) { + return null; + } + return { + type: "actions", + block_id: `openclaw_reply_select_${index}`, + elements: [ + { + type: "static_select", + action_id: SLACK_REPLY_SELECT_ACTION_ID, + placeholder: { + type: "plain_text", + text: truncateSlackText(placeholder, SLACK_PLAIN_TEXT_MAX), + emoji: true, + }, + options: choices.map((choice, choiceIndex) => ({ + text: { + type: "plain_text", + text: truncateSlackText(choice.label, SLACK_PLAIN_TEXT_MAX), + emoji: true, + }, + value: buildSlackReplyChoiceToken(choice.value, choiceIndex + 1), + })), + }, + ], + }; +} + +function readExistingSlackBlocks(payload: ReplyPayload): SlackBlock[] { + const slackData = payload.channelData?.slack as SlackChannelData | undefined; + try { + const blocks = parseSlackBlocksInput(slackData?.blocks) as SlackBlock[] | undefined; + return blocks ?? 
[]; + } catch { + return []; + } +} + +export function hasSlackDirectives(text: string): boolean { + SLACK_DIRECTIVE_RE.lastIndex = 0; + return SLACK_DIRECTIVE_RE.test(text); +} + +export function parseSlackDirectives(payload: ReplyPayload): ReplyPayload { + const text = payload.text; + if (!text) { + return payload; + } + + const generatedBlocks: SlackBlock[] = []; + const visibleTextParts: string[] = []; + let buttonIndex = 0; + let selectIndex = 0; + let cursor = 0; + let matchedDirective = false; + let generatedInteractiveBlock = false; + SLACK_DIRECTIVE_RE.lastIndex = 0; + + for (const match of text.matchAll(SLACK_DIRECTIVE_RE)) { + matchedDirective = true; + const matchText = match[0]; + const directiveType = match[1]; + const body = match[2]; + const index = match.index ?? 0; + const precedingText = text.slice(cursor, index); + visibleTextParts.push(precedingText); + const section = buildSectionBlock(precedingText); + if (section) { + generatedBlocks.push(section); + } + const block = + directiveType.toLowerCase() === "slack_buttons" + ? 
buildButtonsBlock(body, ++buttonIndex) + : buildSelectBlock(body, ++selectIndex); + if (block) { + generatedInteractiveBlock = true; + generatedBlocks.push(block); + } + cursor = index + matchText.length; + } + + const trailingText = text.slice(cursor); + visibleTextParts.push(trailingText); + const trailingSection = buildSectionBlock(trailingText); + if (trailingSection) { + generatedBlocks.push(trailingSection); + } + const cleanedText = visibleTextParts.join(""); + + if (!matchedDirective || !generatedInteractiveBlock) { + return payload; + } + + const existingBlocks = readExistingSlackBlocks(payload); + if (existingBlocks.length + generatedBlocks.length > SLACK_MAX_BLOCKS) { + return payload; + } + const nextBlocks = [...existingBlocks, ...generatedBlocks]; + + return { + ...payload, + text: cleanedText.trim() || undefined, + channelData: { + ...payload.channelData, + slack: { + ...(payload.channelData?.slack as Record | undefined), + blocks: nextBlocks, + }, + }, + }; +} diff --git a/src/browser/chrome-mcp.snapshot.test.ts b/src/browser/chrome-mcp.snapshot.test.ts new file mode 100644 index 00000000000..3fe3288848f --- /dev/null +++ b/src/browser/chrome-mcp.snapshot.test.ts @@ -0,0 +1,68 @@ +import { describe, expect, it } from "vitest"; +import { + buildAiSnapshotFromChromeMcpSnapshot, + flattenChromeMcpSnapshotToAriaNodes, +} from "./chrome-mcp.snapshot.js"; + +const snapshot = { + id: "root", + role: "document", + name: "Example", + children: [ + { + id: "btn-1", + role: "button", + name: "Continue", + }, + { + id: "txt-1", + role: "textbox", + name: "Email", + value: "peter@example.com", + }, + ], +}; + +describe("chrome MCP snapshot conversion", () => { + it("flattens structured snapshots into aria-style nodes", () => { + const nodes = flattenChromeMcpSnapshotToAriaNodes(snapshot, 10); + expect(nodes).toEqual([ + { + ref: "root", + role: "document", + name: "Example", + value: undefined, + description: undefined, + depth: 0, + }, + { + ref: "btn-1", + 
role: "button", + name: "Continue", + value: undefined, + description: undefined, + depth: 1, + }, + { + ref: "txt-1", + role: "textbox", + name: "Email", + value: "peter@example.com", + description: undefined, + depth: 1, + }, + ]); + }); + + it("builds AI snapshots that preserve Chrome MCP uids as refs", () => { + const result = buildAiSnapshotFromChromeMcpSnapshot({ root: snapshot }); + + expect(result.snapshot).toContain('- button "Continue" [ref=btn-1]'); + expect(result.snapshot).toContain('- textbox "Email" [ref=txt-1] value="peter@example.com"'); + expect(result.refs).toEqual({ + "btn-1": { role: "button", name: "Continue" }, + "txt-1": { role: "textbox", name: "Email" }, + }); + expect(result.stats.refs).toBe(2); + }); +}); diff --git a/src/browser/chrome-mcp.snapshot.ts b/src/browser/chrome-mcp.snapshot.ts new file mode 100644 index 00000000000..e92709df6f2 --- /dev/null +++ b/src/browser/chrome-mcp.snapshot.ts @@ -0,0 +1,246 @@ +import type { SnapshotAriaNode } from "./client.js"; +import { + getRoleSnapshotStats, + type RoleRefMap, + type RoleSnapshotOptions, +} from "./pw-role-snapshot.js"; + +export type ChromeMcpSnapshotNode = { + id?: string; + role?: string; + name?: string; + value?: string | number | boolean; + description?: string; + children?: ChromeMcpSnapshotNode[]; +}; + +const INTERACTIVE_ROLES = new Set([ + "button", + "checkbox", + "combobox", + "link", + "listbox", + "menuitem", + "menuitemcheckbox", + "menuitemradio", + "option", + "radio", + "searchbox", + "slider", + "spinbutton", + "switch", + "tab", + "textbox", + "treeitem", +]); + +const CONTENT_ROLES = new Set([ + "article", + "cell", + "columnheader", + "gridcell", + "heading", + "listitem", + "main", + "navigation", + "region", + "rowheader", +]); + +const STRUCTURAL_ROLES = new Set([ + "application", + "directory", + "document", + "generic", + "group", + "ignored", + "list", + "menu", + "menubar", + "none", + "presentation", + "row", + "rowgroup", + "tablist", + "table", + 
"toolbar", + "tree", + "treegrid", +]); + +function normalizeRole(node: ChromeMcpSnapshotNode): string { + const role = typeof node.role === "string" ? node.role.trim().toLowerCase() : ""; + return role || "generic"; +} + +function normalizeString(value: unknown): string | undefined { + if (typeof value === "string") { + const trimmed = value.trim(); + return trimmed || undefined; + } + if (typeof value === "number" || typeof value === "boolean") { + return String(value); + } + return undefined; +} + +function escapeQuoted(value: string): string { + return value.replaceAll("\\", "\\\\").replaceAll('"', '\\"'); +} + +function shouldIncludeNode(params: { + role: string; + name?: string; + options?: RoleSnapshotOptions; +}): boolean { + if (params.options?.interactive && !INTERACTIVE_ROLES.has(params.role)) { + return false; + } + if (params.options?.compact && STRUCTURAL_ROLES.has(params.role) && !params.name) { + return false; + } + return true; +} + +function shouldCreateRef(role: string, name?: string): boolean { + return INTERACTIVE_ROLES.has(role) || (CONTENT_ROLES.has(role) && Boolean(name)); +} + +type DuplicateTracker = { + counts: Map; + keysByRef: Map; + duplicates: Set; +}; + +function createDuplicateTracker(): DuplicateTracker { + return { + counts: new Map(), + keysByRef: new Map(), + duplicates: new Set(), + }; +} + +function registerRef( + tracker: DuplicateTracker, + ref: string, + role: string, + name?: string, +): number | undefined { + const key = `${role}:${name ?? ""}`; + const count = tracker.counts.get(key) ?? 
0; + tracker.counts.set(key, count + 1); + tracker.keysByRef.set(ref, key); + if (count > 0) { + tracker.duplicates.add(key); + return count; + } + return undefined; +} + +export function flattenChromeMcpSnapshotToAriaNodes( + root: ChromeMcpSnapshotNode, + limit = 500, +): SnapshotAriaNode[] { + const boundedLimit = Math.max(1, Math.min(2000, Math.floor(limit))); + const out: SnapshotAriaNode[] = []; + + const visit = (node: ChromeMcpSnapshotNode, depth: number) => { + if (out.length >= boundedLimit) { + return; + } + const ref = normalizeString(node.id); + if (ref) { + out.push({ + ref, + role: normalizeRole(node), + name: normalizeString(node.name) ?? "", + value: normalizeString(node.value), + description: normalizeString(node.description), + depth, + }); + } + for (const child of node.children ?? []) { + visit(child, depth + 1); + if (out.length >= boundedLimit) { + return; + } + } + }; + + visit(root, 0); + return out; +} + +export function buildAiSnapshotFromChromeMcpSnapshot(params: { + root: ChromeMcpSnapshotNode; + options?: RoleSnapshotOptions; + maxChars?: number; +}): { + snapshot: string; + truncated?: boolean; + refs: RoleRefMap; + stats: { lines: number; chars: number; refs: number; interactive: number }; +} { + const refs: RoleRefMap = {}; + const tracker = createDuplicateTracker(); + const lines: string[] = []; + + const visit = (node: ChromeMcpSnapshotNode, depth: number) => { + const role = normalizeRole(node); + const name = normalizeString(node.name); + const value = normalizeString(node.value); + const description = normalizeString(node.description); + const maxDepth = params.options?.maxDepth; + if (maxDepth !== undefined && depth > maxDepth) { + return; + } + + const includeNode = shouldIncludeNode({ role, name, options: params.options }); + if (includeNode) { + let line = `${" ".repeat(depth)}- ${role}`; + if (name) { + line += ` "${escapeQuoted(name)}"`; + } + const ref = normalizeString(node.id); + if (ref && shouldCreateRef(role, name)) 
{ + const nth = registerRef(tracker, ref, role, name); + refs[ref] = nth === undefined ? { role, name } : { role, name, nth }; + line += ` [ref=${ref}]`; + } + if (value) { + line += ` value="${escapeQuoted(value)}"`; + } + if (description) { + line += ` description="${escapeQuoted(description)}"`; + } + lines.push(line); + } + + for (const child of node.children ?? []) { + visit(child, depth + 1); + } + }; + + visit(params.root, 0); + + for (const [ref, data] of Object.entries(refs)) { + const key = tracker.keysByRef.get(ref); + if (key && !tracker.duplicates.has(key)) { + delete data.nth; + } + } + + let snapshot = lines.join("\n"); + let truncated = false; + const maxChars = + typeof params.maxChars === "number" && Number.isFinite(params.maxChars) && params.maxChars > 0 + ? Math.floor(params.maxChars) + : undefined; + if (maxChars && snapshot.length > maxChars) { + snapshot = `${snapshot.slice(0, maxChars)}\n\n[...TRUNCATED - page too large]`; + truncated = true; + } + + const stats = getRoleSnapshotStats(snapshot, refs); + return truncated ? 
{ snapshot, truncated, refs, stats } : { snapshot, refs, stats }; +} diff --git a/src/browser/chrome-mcp.test.ts b/src/browser/chrome-mcp.test.ts new file mode 100644 index 00000000000..3b64054c407 --- /dev/null +++ b/src/browser/chrome-mcp.test.ts @@ -0,0 +1,108 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { + listChromeMcpTabs, + openChromeMcpTab, + resetChromeMcpSessionsForTest, + setChromeMcpSessionFactoryForTest, +} from "./chrome-mcp.js"; + +type ToolCall = { + name: string; + arguments?: Record; +}; + +type ChromeMcpSessionFactory = Exclude< + Parameters[0], + null +>; +type ChromeMcpSession = Awaited>; + +function createFakeSession(): ChromeMcpSession { + const callTool = vi.fn(async ({ name }: ToolCall) => { + if (name === "list_pages") { + return { + content: [ + { + type: "text", + text: [ + "## Pages", + "1: https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session [selected]", + "2: https://github.com/openclaw/openclaw/pull/45318", + ].join("\n"), + }, + ], + }; + } + if (name === "new_page") { + return { + content: [ + { + type: "text", + text: [ + "## Pages", + "1: https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session", + "2: https://github.com/openclaw/openclaw/pull/45318", + "3: https://example.com/ [selected]", + ].join("\n"), + }, + ], + }; + } + throw new Error(`unexpected tool ${name}`); + }); + + return { + client: { + callTool, + listTools: vi.fn().mockResolvedValue({ tools: [{ name: "list_pages" }] }), + close: vi.fn().mockResolvedValue(undefined), + connect: vi.fn().mockResolvedValue(undefined), + }, + transport: { + pid: 123, + }, + ready: Promise.resolve(), + } as unknown as ChromeMcpSession; +} + +describe("chrome MCP page parsing", () => { + beforeEach(async () => { + await resetChromeMcpSessionsForTest(); + }); + + it("parses list_pages text responses when structuredContent is missing", async () => { + const factory: ChromeMcpSessionFactory = async () => 
createFakeSession(); + setChromeMcpSessionFactoryForTest(factory); + + const tabs = await listChromeMcpTabs("chrome-live"); + + expect(tabs).toEqual([ + { + targetId: "1", + title: "", + url: "https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session", + type: "page", + }, + { + targetId: "2", + title: "", + url: "https://github.com/openclaw/openclaw/pull/45318", + type: "page", + }, + ]); + }); + + it("parses new_page text responses and returns the created tab", async () => { + const factory: ChromeMcpSessionFactory = async () => createFakeSession(); + setChromeMcpSessionFactoryForTest(factory); + + const tab = await openChromeMcpTab("chrome-live", "https://example.com/"); + + expect(tab).toEqual({ + targetId: "3", + title: "", + url: "https://example.com/", + type: "page", + }); + }); +}); diff --git a/src/browser/chrome-mcp.ts b/src/browser/chrome-mcp.ts new file mode 100644 index 00000000000..7719a2338e3 --- /dev/null +++ b/src/browser/chrome-mcp.ts @@ -0,0 +1,488 @@ +import { randomUUID } from "node:crypto"; +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { Client } from "@modelcontextprotocol/sdk/client/index.js"; +import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js"; +import type { ChromeMcpSnapshotNode } from "./chrome-mcp.snapshot.js"; +import type { BrowserTab } from "./client.js"; +import { BrowserProfileUnavailableError, BrowserTabNotFoundError } from "./errors.js"; + +type ChromeMcpStructuredPage = { + id: number; + url?: string; + selected?: boolean; +}; + +type ChromeMcpToolResult = { + structuredContent?: Record; + content?: Array>; + isError?: boolean; +}; + +type ChromeMcpSession = { + client: Client; + transport: StdioClientTransport; + ready: Promise; +}; + +type ChromeMcpSessionFactory = (profileName: string) => Promise; + +const DEFAULT_CHROME_MCP_COMMAND = "npx"; +const DEFAULT_CHROME_MCP_ARGS = [ + "-y", + "chrome-devtools-mcp@latest", + 
"--autoConnect", + "--experimental-page-id-routing", +]; + +const sessions = new Map(); +let sessionFactory: ChromeMcpSessionFactory | null = null; + +function asRecord(value: unknown): Record | null { + return value && typeof value === "object" && !Array.isArray(value) + ? (value as Record) + : null; +} + +function asPages(value: unknown): ChromeMcpStructuredPage[] { + if (!Array.isArray(value)) { + return []; + } + const out: ChromeMcpStructuredPage[] = []; + for (const entry of value) { + const record = asRecord(entry); + if (!record || typeof record.id !== "number") { + continue; + } + out.push({ + id: record.id, + url: typeof record.url === "string" ? record.url : undefined, + selected: record.selected === true, + }); + } + return out; +} + +function parsePageId(targetId: string): number { + const parsed = Number.parseInt(targetId.trim(), 10); + if (!Number.isFinite(parsed)) { + throw new BrowserTabNotFoundError(); + } + return parsed; +} + +function toBrowserTabs(pages: ChromeMcpStructuredPage[]): BrowserTab[] { + return pages.map((page) => ({ + targetId: String(page.id), + title: "", + url: page.url ?? "", + type: "page", + })); +} + +function extractStructuredContent(result: ChromeMcpToolResult): Record { + return asRecord(result.structuredContent) ?? {}; +} + +function extractTextContent(result: ChromeMcpToolResult): string[] { + const content = Array.isArray(result.content) ? result.content : []; + return content + .map((entry) => { + const record = asRecord(entry); + return record && typeof record.text === "string" ? record.text : ""; + }) + .filter(Boolean); +} + +function extractTextPages(result: ChromeMcpToolResult): ChromeMcpStructuredPage[] { + const pages: ChromeMcpStructuredPage[] = []; + for (const block of extractTextContent(result)) { + for (const line of block.split(/\r?\n/)) { + const match = line.match(/^\s*(\d+):\s+(.+?)(?:\s+\[(selected)\])?\s*$/i); + if (!match) { + continue; + } + pages.push({ + id: Number.parseInt(match[1] ?? 
"", 10), + url: match[2]?.trim() || undefined, + selected: Boolean(match[3]), + }); + } + } + return pages; +} + +function extractStructuredPages(result: ChromeMcpToolResult): ChromeMcpStructuredPage[] { + const structured = asPages(extractStructuredContent(result).pages); + return structured.length > 0 ? structured : extractTextPages(result); +} + +function extractSnapshot(result: ChromeMcpToolResult): ChromeMcpSnapshotNode { + const structured = extractStructuredContent(result); + const snapshot = asRecord(structured.snapshot); + if (!snapshot) { + throw new Error("Chrome MCP snapshot response was missing structured snapshot data."); + } + return snapshot as unknown as ChromeMcpSnapshotNode; +} + +function extractJsonBlock(text: string): unknown { + const match = text.match(/```json\s*([\s\S]*?)\s*```/i); + const raw = match?.[1]?.trim() || text.trim(); + return raw ? JSON.parse(raw) : null; +} + +async function createRealSession(profileName: string): Promise { + const transport = new StdioClientTransport({ + command: DEFAULT_CHROME_MCP_COMMAND, + args: DEFAULT_CHROME_MCP_ARGS, + stderr: "pipe", + }); + const client = new Client( + { + name: "openclaw-browser", + version: "0.0.0", + }, + {}, + ); + + const ready = (async () => { + try { + await client.connect(transport); + const tools = await client.listTools(); + if (!tools.tools.some((tool) => tool.name === "list_pages")) { + throw new Error("Chrome MCP server did not expose the expected navigation tools."); + } + } catch (err) { + await client.close().catch(() => {}); + throw new BrowserProfileUnavailableError( + `Chrome MCP existing-session attach failed for profile "${profileName}". ` + + `Make sure Chrome is running, enable chrome://inspect/#remote-debugging, and approve the connection. 
` + + `Details: ${String(err)}`, + ); + } + })(); + + return { + client, + transport, + ready, + }; +} + +async function getSession(profileName: string): Promise { + let session = sessions.get(profileName); + if (session && session.transport.pid === null) { + sessions.delete(profileName); + session = undefined; + } + if (!session) { + session = await (sessionFactory ?? createRealSession)(profileName); + sessions.set(profileName, session); + } + try { + await session.ready; + return session; + } catch (err) { + const current = sessions.get(profileName); + if (current?.transport === session.transport) { + sessions.delete(profileName); + } + throw err; + } +} + +async function callTool( + profileName: string, + name: string, + args: Record = {}, +): Promise { + const session = await getSession(profileName); + try { + return (await session.client.callTool({ + name, + arguments: args, + })) as ChromeMcpToolResult; + } catch (err) { + sessions.delete(profileName); + await session.client.close().catch(() => {}); + throw err; + } +} + +async function withTempFile(fn: (filePath: string) => Promise): Promise { + const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-chrome-mcp-")); + const filePath = path.join(dir, randomUUID()); + try { + return await fn(filePath); + } finally { + await fs.rm(dir, { recursive: true, force: true }).catch(() => {}); + } +} + +async function findPageById(profileName: string, pageId: number): Promise { + const pages = await listChromeMcpPages(profileName); + const page = pages.find((entry) => entry.id === pageId); + if (!page) { + throw new BrowserTabNotFoundError(); + } + return page; +} + +export async function ensureChromeMcpAvailable(profileName: string): Promise { + await getSession(profileName); +} + +export function getChromeMcpPid(profileName: string): number | null { + return sessions.get(profileName)?.transport.pid ?? 
null; +} + +export async function closeChromeMcpSession(profileName: string): Promise { + const session = sessions.get(profileName); + if (!session) { + return false; + } + sessions.delete(profileName); + await session.client.close().catch(() => {}); + return true; +} + +export async function stopAllChromeMcpSessions(): Promise { + const names = [...sessions.keys()]; + for (const name of names) { + await closeChromeMcpSession(name).catch(() => {}); + } +} + +export async function listChromeMcpPages(profileName: string): Promise { + const result = await callTool(profileName, "list_pages"); + return extractStructuredPages(result); +} + +export async function listChromeMcpTabs(profileName: string): Promise { + return toBrowserTabs(await listChromeMcpPages(profileName)); +} + +export async function openChromeMcpTab(profileName: string, url: string): Promise { + const result = await callTool(profileName, "new_page", { url }); + const pages = extractStructuredPages(result); + const chosen = pages.find((page) => page.selected) ?? pages.at(-1); + if (!chosen) { + throw new Error("Chrome MCP did not return the created page."); + } + return { + targetId: String(chosen.id), + title: "", + url: chosen.url ?? url, + type: "page", + }; +} + +export async function focusChromeMcpTab(profileName: string, targetId: string): Promise { + await callTool(profileName, "select_page", { + pageId: parsePageId(targetId), + bringToFront: true, + }); +} + +export async function closeChromeMcpTab(profileName: string, targetId: string): Promise { + await callTool(profileName, "close_page", { pageId: parsePageId(targetId) }); +} + +export async function navigateChromeMcpPage(params: { + profileName: string; + targetId: string; + url: string; + timeoutMs?: number; +}): Promise<{ url: string }> { + await callTool(params.profileName, "navigate_page", { + pageId: parsePageId(params.targetId), + type: "url", + url: params.url, + ...(typeof params.timeoutMs === "number" ? 
{ timeout: params.timeoutMs } : {}), + }); + const page = await findPageById(params.profileName, parsePageId(params.targetId)); + return { url: page.url ?? params.url }; +} + +export async function takeChromeMcpSnapshot(params: { + profileName: string; + targetId: string; +}): Promise { + const result = await callTool(params.profileName, "take_snapshot", { + pageId: parsePageId(params.targetId), + }); + return extractSnapshot(result); +} + +export async function takeChromeMcpScreenshot(params: { + profileName: string; + targetId: string; + uid?: string; + fullPage?: boolean; + format?: "png" | "jpeg"; +}): Promise { + return await withTempFile(async (filePath) => { + await callTool(params.profileName, "take_screenshot", { + pageId: parsePageId(params.targetId), + filePath, + format: params.format ?? "png", + ...(params.uid ? { uid: params.uid } : {}), + ...(params.fullPage ? { fullPage: true } : {}), + }); + return await fs.readFile(filePath); + }); +} + +export async function clickChromeMcpElement(params: { + profileName: string; + targetId: string; + uid: string; + doubleClick?: boolean; +}): Promise { + await callTool(params.profileName, "click", { + pageId: parsePageId(params.targetId), + uid: params.uid, + ...(params.doubleClick ? 
{ dblClick: true } : {}), + }); +} + +export async function fillChromeMcpElement(params: { + profileName: string; + targetId: string; + uid: string; + value: string; +}): Promise { + await callTool(params.profileName, "fill", { + pageId: parsePageId(params.targetId), + uid: params.uid, + value: params.value, + }); +} + +export async function fillChromeMcpForm(params: { + profileName: string; + targetId: string; + elements: Array<{ uid: string; value: string }>; +}): Promise { + await callTool(params.profileName, "fill_form", { + pageId: parsePageId(params.targetId), + elements: params.elements, + }); +} + +export async function hoverChromeMcpElement(params: { + profileName: string; + targetId: string; + uid: string; +}): Promise { + await callTool(params.profileName, "hover", { + pageId: parsePageId(params.targetId), + uid: params.uid, + }); +} + +export async function dragChromeMcpElement(params: { + profileName: string; + targetId: string; + fromUid: string; + toUid: string; +}): Promise { + await callTool(params.profileName, "drag", { + pageId: parsePageId(params.targetId), + from_uid: params.fromUid, + to_uid: params.toUid, + }); +} + +export async function uploadChromeMcpFile(params: { + profileName: string; + targetId: string; + uid: string; + filePath: string; +}): Promise { + await callTool(params.profileName, "upload_file", { + pageId: parsePageId(params.targetId), + uid: params.uid, + filePath: params.filePath, + }); +} + +export async function pressChromeMcpKey(params: { + profileName: string; + targetId: string; + key: string; +}): Promise { + await callTool(params.profileName, "press_key", { + pageId: parsePageId(params.targetId), + key: params.key, + }); +} + +export async function resizeChromeMcpPage(params: { + profileName: string; + targetId: string; + width: number; + height: number; +}): Promise { + await callTool(params.profileName, "resize_page", { + pageId: parsePageId(params.targetId), + width: params.width, + height: params.height, + }); +} 
+ +export async function handleChromeMcpDialog(params: { + profileName: string; + targetId: string; + action: "accept" | "dismiss"; + promptText?: string; +}): Promise { + await callTool(params.profileName, "handle_dialog", { + pageId: parsePageId(params.targetId), + action: params.action, + ...(params.promptText ? { promptText: params.promptText } : {}), + }); +} + +export async function evaluateChromeMcpScript(params: { + profileName: string; + targetId: string; + fn: string; + args?: string[]; +}): Promise { + const result = await callTool(params.profileName, "evaluate_script", { + pageId: parsePageId(params.targetId), + function: params.fn, + ...(params.args?.length ? { args: params.args } : {}), + }); + const message = extractStructuredContent(result).message; + const text = typeof message === "string" ? message : ""; + if (!text.trim()) { + return null; + } + return extractJsonBlock(text); +} + +export async function waitForChromeMcpText(params: { + profileName: string; + targetId: string; + text: string[]; + timeoutMs?: number; +}): Promise { + await callTool(params.profileName, "wait_for", { + pageId: parsePageId(params.targetId), + text: params.text, + ...(typeof params.timeoutMs === "number" ? 
{ timeout: params.timeoutMs } : {}), + }); +} + +export function setChromeMcpSessionFactoryForTest(factory: ChromeMcpSessionFactory | null): void { + sessionFactory = factory; +} + +export async function resetChromeMcpSessionsForTest(): Promise { + sessionFactory = null; + await stopAllChromeMcpSessions(); +} diff --git a/src/browser/client.ts b/src/browser/client.ts index 953c9efcd11..dc418cf3b4a 100644 --- a/src/browser/client.ts +++ b/src/browser/client.ts @@ -3,6 +3,7 @@ import { fetchBrowserJson } from "./client-fetch.js"; export type BrowserStatus = { enabled: boolean; profile?: string; + driver?: "openclaw" | "extension" | "existing-session"; running: boolean; cdpReady?: boolean; cdpHttp?: boolean; @@ -26,6 +27,7 @@ export type ProfileStatus = { cdpPort: number; cdpUrl: string; color: string; + driver: "openclaw" | "extension" | "existing-session"; running: boolean; tabCount: number; isDefault: boolean; @@ -165,7 +167,7 @@ export async function browserCreateProfile( name: string; color?: string; cdpUrl?: string; - driver?: "openclaw" | "extension"; + driver?: "openclaw" | "extension" | "existing-session"; }, ): Promise { return await fetchBrowserJson( diff --git a/src/browser/config.ts b/src/browser/config.ts index 6d24a07a287..529ee791c40 100644 --- a/src/browser/config.ts +++ b/src/browser/config.ts @@ -46,7 +46,7 @@ export type ResolvedBrowserProfile = { cdpHost: string; cdpIsLoopback: boolean; color: string; - driver: "openclaw" | "extension"; + driver: "openclaw" | "extension" | "existing-session"; attachOnly: boolean; }; @@ -335,7 +335,12 @@ export function resolveProfile( let cdpHost = resolved.cdpHost; let cdpPort = profile.cdpPort ?? 0; let cdpUrl = ""; - const driver = profile.driver === "extension" ? "extension" : "openclaw"; + const driver = + profile.driver === "extension" + ? "extension" + : profile.driver === "existing-session" + ? 
"existing-session" + : "openclaw"; if (rawProfileUrl) { const parsed = parseHttpUrl(rawProfileUrl, `browser.profiles.${profileName}.cdpUrl`); @@ -356,7 +361,7 @@ export function resolveProfile( cdpIsLoopback: isLoopbackHost(cdpHost), color: profile.color, driver, - attachOnly: profile.attachOnly ?? resolved.attachOnly, + attachOnly: driver === "existing-session" ? true : (profile.attachOnly ?? resolved.attachOnly), }; } diff --git a/src/browser/profile-capabilities.ts b/src/browser/profile-capabilities.ts index 07a70ba00c4..2bcf4f8fe9e 100644 --- a/src/browser/profile-capabilities.ts +++ b/src/browser/profile-capabilities.ts @@ -1,6 +1,10 @@ import type { ResolvedBrowserProfile } from "./config.js"; -export type BrowserProfileMode = "local-managed" | "local-extension-relay" | "remote-cdp"; +export type BrowserProfileMode = + | "local-managed" + | "local-extension-relay" + | "local-existing-session" + | "remote-cdp"; export type BrowserProfileCapabilities = { mode: BrowserProfileMode; @@ -31,6 +35,20 @@ export function getBrowserProfileCapabilities( }; } + if (profile.driver === "existing-session") { + return { + mode: "local-existing-session", + isRemote: false, + requiresRelay: false, + requiresAttachedTab: false, + usesPersistentPlaywright: false, + supportsPerTabWs: false, + supportsJsonTabEndpoints: false, + supportsReset: false, + supportsManagedTabLimit: false, + }; + } + if (!profile.cdpIsLoopback) { return { mode: "remote-cdp", @@ -75,6 +93,9 @@ export function resolveDefaultSnapshotFormat(params: { if (capabilities.mode === "local-extension-relay") { return "aria"; } + if (capabilities.mode === "local-existing-session") { + return "ai"; + } return params.hasPlaywright ? 
"ai" : "aria"; } diff --git a/src/browser/profiles-service.test.ts b/src/browser/profiles-service.test.ts index 3dc714d33f3..f70e23ddb67 100644 --- a/src/browser/profiles-service.test.ts +++ b/src/browser/profiles-service.test.ts @@ -1,6 +1,6 @@ import fs from "node:fs"; import path from "node:path"; -import { describe, expect, it, vi } from "vitest"; +import { beforeEach, describe, expect, it, vi } from "vitest"; import { resolveBrowserConfig } from "./config.js"; import { createBrowserProfilesService } from "./profiles-service.js"; import type { BrowserRouteContext, BrowserServerState } from "./server-context.js"; @@ -57,6 +57,10 @@ async function createWorkProfileWithConfig(params: { } describe("BrowserProfilesService", () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + it("allocates next local port for new profiles", async () => { const { result, state } = await createWorkProfileWithConfig({ resolved: resolveBrowserConfig({}), @@ -163,6 +167,56 @@ describe("BrowserProfilesService", () => { ).rejects.toThrow(/requires an explicit loopback cdpUrl/i); }); + it("creates existing-session profiles as attach-only local entries", async () => { + const resolved = resolveBrowserConfig({}); + const { ctx, state } = createCtx(resolved); + vi.mocked(loadConfig).mockReturnValue({ browser: { profiles: {} } }); + + const service = createBrowserProfilesService(ctx); + const result = await service.createProfile({ + name: "chrome-live", + driver: "existing-session", + }); + + expect(result.cdpPort).toBe(18801); + expect(result.isRemote).toBe(false); + expect(state.resolved.profiles["chrome-live"]).toEqual({ + cdpPort: 18801, + driver: "existing-session", + attachOnly: true, + color: expect.any(String), + }); + expect(writeConfigFile).toHaveBeenCalledWith( + expect.objectContaining({ + browser: expect.objectContaining({ + profiles: expect.objectContaining({ + "chrome-live": expect.objectContaining({ + cdpPort: 18801, + driver: "existing-session", + attachOnly: true, + }), 
+ }), + }), + }), + ); + }); + + it("rejects driver=existing-session when cdpUrl is provided", async () => { + const resolved = resolveBrowserConfig({}); + const { ctx } = createCtx(resolved); + vi.mocked(loadConfig).mockReturnValue({ browser: { profiles: {} } }); + + const service = createBrowserProfilesService(ctx); + + await expect( + service.createProfile({ + name: "chrome-live", + driver: "existing-session", + cdpUrl: "http://127.0.0.1:9222", + }), + ).rejects.toThrow(/does not accept cdpUrl/i); + }); + it("deletes remote profiles without stopping or removing local data", async () => { const resolved = resolveBrowserConfig({ profiles: { @@ -218,4 +272,40 @@ describe("BrowserProfilesService", () => { expect(result.deleted).toBe(true); expect(movePathToTrash).toHaveBeenCalledWith(path.dirname(userDataDir)); }); + + it("deletes existing-session profiles without touching local browser data", async () => { + const resolved = resolveBrowserConfig({ + profiles: { + "chrome-live": { + cdpPort: 18801, + color: "#0066CC", + driver: "existing-session", + attachOnly: true, + }, + }, + }); + const { ctx } = createCtx(resolved); + + vi.mocked(loadConfig).mockReturnValue({ + browser: { + defaultProfile: "openclaw", + profiles: { + openclaw: { cdpPort: 18800, color: "#FF4500" }, + "chrome-live": { + cdpPort: 18801, + color: "#0066CC", + driver: "existing-session", + attachOnly: true, + }, + }, + }, + }); + + const service = createBrowserProfilesService(ctx); + const result = await service.deleteProfile("chrome-live"); + + expect(result.deleted).toBe(false); + expect(ctx.forProfile).not.toHaveBeenCalled(); + expect(movePathToTrash).not.toHaveBeenCalled(); + }); }); diff --git a/src/browser/profiles-service.ts b/src/browser/profiles-service.ts index 962c6408522..936a55c1ffa 100644 --- a/src/browser/profiles-service.ts +++ b/src/browser/profiles-service.ts @@ -27,7 +27,7 @@ export type CreateProfileParams = { name: string; color?: string; cdpUrl?: string; - driver?: "openclaw" | 
"extension"; + driver?: "openclaw" | "extension" | "existing-session"; }; export type CreateProfileResult = { @@ -79,7 +79,12 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { const createProfile = async (params: CreateProfileParams): Promise => { const name = params.name.trim(); const rawCdpUrl = params.cdpUrl?.trim() || undefined; - const driver = params.driver === "extension" ? "extension" : undefined; + const driver = + params.driver === "extension" + ? "extension" + : params.driver === "existing-session" + ? "existing-session" + : undefined; if (!isValidProfileName(name)) { throw new BrowserValidationError( @@ -118,6 +123,11 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { ); } } + if (driver === "existing-session") { + throw new BrowserValidationError( + "driver=existing-session does not accept cdpUrl; it attaches via the Chrome MCP auto-connect flow", + ); + } profileConfig = { cdpUrl: parsed.normalized, ...(driver ? { driver } : {}), @@ -136,6 +146,7 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { profileConfig = { cdpPort, ...(driver ? { driver } : {}), + ...(driver === "existing-session" ? 
{ attachOnly: true } : {}), color: profileColor, }; } @@ -195,7 +206,7 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) { const state = ctx.state(); const resolved = resolveProfile(state.resolved, name); - if (resolved?.cdpIsLoopback) { + if (resolved?.cdpIsLoopback && resolved.driver === "openclaw") { try { await ctx.forProfile(name).stopRunningBrowser(); } catch { diff --git a/src/browser/pw-session.get-page-for-targetid.extension-fallback.test.ts b/src/browser/pw-session.get-page-for-targetid.extension-fallback.test.ts index 43f1a6c7e09..8f64b2bf575 100644 --- a/src/browser/pw-session.get-page-for-targetid.extension-fallback.test.ts +++ b/src/browser/pw-session.get-page-for-targetid.extension-fallback.test.ts @@ -12,40 +12,49 @@ afterEach(async () => { await closePlaywrightBrowserConnection().catch(() => {}); }); +function createExtensionFallbackBrowserHarness(options?: { + urls?: string[]; + newCDPSessionError?: string; +}) { + const pageOn = vi.fn(); + const contextOn = vi.fn(); + const browserOn = vi.fn(); + const browserClose = vi.fn(async () => {}); + const newCDPSession = vi.fn(async () => { + throw new Error(options?.newCDPSessionError ?? "Not allowed"); + }); + + const context = { + pages: () => [], + on: contextOn, + newCDPSession, + } as unknown as import("playwright-core").BrowserContext; + + const pages = (options?.urls ?? [undefined]).map( + (url) => + ({ + on: pageOn, + context: () => context, + ...(url ? 
{ url: () => url } : {}), + }) as unknown as import("playwright-core").Page, + ); + (context as unknown as { pages: () => unknown[] }).pages = () => pages; + + const browser = { + contexts: () => [context], + on: browserOn, + close: browserClose, + } as unknown as import("playwright-core").Browser; + + connectOverCdpSpy.mockResolvedValue(browser); + getChromeWebSocketUrlSpy.mockResolvedValue(null); + return { browserClose, newCDPSession, pages }; +} + describe("pw-session getPageForTargetId", () => { it("falls back to the only page when CDP session attachment is blocked (extension relays)", async () => { - connectOverCdpSpy.mockClear(); - getChromeWebSocketUrlSpy.mockClear(); - - const pageOn = vi.fn(); - const contextOn = vi.fn(); - const browserOn = vi.fn(); - const browserClose = vi.fn(async () => {}); - - const context = { - pages: () => [], - on: contextOn, - newCDPSession: vi.fn(async () => { - throw new Error("Not allowed"); - }), - } as unknown as import("playwright-core").BrowserContext; - - const page = { - on: pageOn, - context: () => context, - } as unknown as import("playwright-core").Page; - - // Fill pages() after page exists. 
- (context as unknown as { pages: () => unknown[] }).pages = () => [page]; - - const browser = { - contexts: () => [context], - on: browserOn, - close: browserClose, - } as unknown as import("playwright-core").Browser; - - connectOverCdpSpy.mockResolvedValue(browser); - getChromeWebSocketUrlSpy.mockResolvedValue(null); + const { browserClose, pages } = createExtensionFallbackBrowserHarness(); + const [page] = pages; const resolved = await getPageForTargetId({ cdpUrl: "http://127.0.0.1:18792", @@ -58,40 +67,9 @@ describe("pw-session getPageForTargetId", () => { }); it("uses the shared HTTP-base normalization when falling back to /json/list for direct WebSocket CDP URLs", async () => { - const pageOn = vi.fn(); - const contextOn = vi.fn(); - const browserOn = vi.fn(); - const browserClose = vi.fn(async () => {}); - - const context = { - pages: () => [], - on: contextOn, - newCDPSession: vi.fn(async () => { - throw new Error("Not allowed"); - }), - } as unknown as import("playwright-core").BrowserContext; - - const pageA = { - on: pageOn, - context: () => context, - url: () => "https://alpha.example", - } as unknown as import("playwright-core").Page; - const pageB = { - on: pageOn, - context: () => context, - url: () => "https://beta.example", - } as unknown as import("playwright-core").Page; - - (context as unknown as { pages: () => unknown[] }).pages = () => [pageA, pageB]; - - const browser = { - contexts: () => [context], - on: browserOn, - close: browserClose, - } as unknown as import("playwright-core").Browser; - - connectOverCdpSpy.mockResolvedValue(browser); - getChromeWebSocketUrlSpy.mockResolvedValue(null); + const [, pageB] = createExtensionFallbackBrowserHarness({ + urls: ["https://alpha.example", "https://beta.example"], + }).pages; const fetchSpy = vi.spyOn(globalThis, "fetch").mockResolvedValue({ ok: true, @@ -117,41 +95,11 @@ describe("pw-session getPageForTargetId", () => { }); it("resolves extension-relay pages from /json/list without probing page 
CDP sessions first", async () => { - const pageOn = vi.fn(); - const contextOn = vi.fn(); - const browserOn = vi.fn(); - const browserClose = vi.fn(async () => {}); - const newCDPSession = vi.fn(async () => { - throw new Error("Target.attachToBrowserTarget: Not allowed"); + const { newCDPSession, pages } = createExtensionFallbackBrowserHarness({ + urls: ["https://alpha.example", "https://beta.example"], + newCDPSessionError: "Target.attachToBrowserTarget: Not allowed", }); - - const context = { - pages: () => [], - on: contextOn, - newCDPSession, - } as unknown as import("playwright-core").BrowserContext; - - const pageA = { - on: pageOn, - context: () => context, - url: () => "https://alpha.example", - } as unknown as import("playwright-core").Page; - const pageB = { - on: pageOn, - context: () => context, - url: () => "https://beta.example", - } as unknown as import("playwright-core").Page; - - (context as unknown as { pages: () => unknown[] }).pages = () => [pageA, pageB]; - - const browser = { - contexts: () => [context], - on: browserOn, - close: browserClose, - } as unknown as import("playwright-core").Browser; - - connectOverCdpSpy.mockResolvedValue(browser); - getChromeWebSocketUrlSpy.mockResolvedValue(null); + const [, pageB] = pages; const fetchSpy = vi.spyOn(globalThis, "fetch"); fetchSpy diff --git a/src/browser/routes/agent.act.download.ts b/src/browser/routes/agent.act.download.ts index d08287fea59..9ed04469c26 100644 --- a/src/browser/routes/agent.act.download.ts +++ b/src/browser/routes/agent.act.download.ts @@ -1,5 +1,10 @@ import type { BrowserRouteContext } from "../server-context.js"; -import { readBody, resolveTargetIdFromBody, withPlaywrightRouteContext } from "./agent.shared.js"; +import { + readBody, + requirePwAi, + resolveTargetIdFromBody, + withRouteTabContext, +} from "./agent.shared.js"; import { ensureOutputRootDir, resolveWritableOutputPathOrRespond } from "./output-paths.js"; import { DEFAULT_DOWNLOAD_DIR } from "./path-output.js"; 
import type { BrowserRouteRegistrar } from "./types.js"; @@ -23,13 +28,23 @@ export function registerBrowserAgentActDownloadRoutes( const out = toStringOrEmpty(body.path) || ""; const timeoutMs = toNumber(body.timeoutMs); - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "wait for download", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (profileCtx.profile.driver === "existing-session") { + return jsonError( + res, + 501, + "download waiting is not supported for existing-session profiles yet.", + ); + } + const pw = await requirePwAi(res, "wait for download"); + if (!pw) { + return; + } await ensureOutputRootDir(DEFAULT_DOWNLOAD_DIR); let downloadPath: string | undefined; if (out.trim()) { @@ -67,13 +82,23 @@ export function registerBrowserAgentActDownloadRoutes( return jsonError(res, 400, "path is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "download", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (profileCtx.profile.driver === "existing-session") { + return jsonError( + res, + 501, + "downloads are not supported for existing-session profiles yet.", + ); + } + const pw = await requirePwAi(res, "download"); + if (!pw) { + return; + } await ensureOutputRootDir(DEFAULT_DOWNLOAD_DIR); const downloadPath = await resolveWritableOutputPathOrRespond({ res, diff --git a/src/browser/routes/agent.act.hooks.ts b/src/browser/routes/agent.act.hooks.ts index 56d97bb03d3..bb1f03b7a7c 100644 --- a/src/browser/routes/agent.act.hooks.ts +++ b/src/browser/routes/agent.act.hooks.ts @@ -1,5 +1,11 @@ +import { evaluateChromeMcpScript, uploadChromeMcpFile } from "../chrome-mcp.js"; import type { BrowserRouteContext } from "../server-context.js"; -import { readBody, resolveTargetIdFromBody, withPlaywrightRouteContext } from "./agent.shared.js"; +import { + readBody, + 
requirePwAi, + resolveTargetIdFromBody, + withRouteTabContext, +} from "./agent.shared.js"; import { DEFAULT_UPLOAD_DIR, resolveExistingPathsWithinRoot } from "./path-output.js"; import type { BrowserRouteRegistrar } from "./types.js"; import { jsonError, toBoolean, toNumber, toStringArray, toStringOrEmpty } from "./utils.js"; @@ -20,13 +26,12 @@ export function registerBrowserAgentActHookRoutes( return jsonError(res, 400, "paths are required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "file chooser hook", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { const uploadPathsResult = await resolveExistingPathsWithinRoot({ rootDir: DEFAULT_UPLOAD_DIR, requestedPaths: paths, @@ -38,6 +43,39 @@ export function registerBrowserAgentActHookRoutes( } const resolvedPaths = uploadPathsResult.paths; + if (profileCtx.profile.driver === "existing-session") { + if (element) { + return jsonError( + res, + 501, + "existing-session file uploads do not support element selectors; use ref/inputRef.", + ); + } + if (resolvedPaths.length !== 1) { + return jsonError( + res, + 501, + "existing-session file uploads currently support one file at a time.", + ); + } + const uid = inputRef || ref; + if (!uid) { + return jsonError(res, 501, "existing-session file uploads require ref or inputRef."); + } + await uploadChromeMcpFile({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + uid, + filePath: resolvedPaths[0] ?? 
"", + }); + return res.json({ ok: true }); + } + + const pw = await requirePwAi(res, "file chooser hook"); + if (!pw) { + return; + } + if (inputRef || element) { if (ref) { return jsonError(res, 400, "ref cannot be combined with inputRef/element"); @@ -79,13 +117,69 @@ export function registerBrowserAgentActHookRoutes( return jsonError(res, 400, "accept is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "dialog hook", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (profileCtx.profile.driver === "existing-session") { + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session dialog handling does not support timeoutMs.", + ); + } + await evaluateChromeMcpScript({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + fn: `() => { + const state = (window.__openclawDialogHook ??= {}); + if (!state.originals) { + state.originals = { + alert: window.alert.bind(window), + confirm: window.confirm.bind(window), + prompt: window.prompt.bind(window), + }; + } + const originals = state.originals; + const restore = () => { + window.alert = originals.alert; + window.confirm = originals.confirm; + window.prompt = originals.prompt; + delete window.__openclawDialogHook; + }; + window.alert = (...args) => { + try { + return undefined; + } finally { + restore(); + } + }; + window.confirm = (...args) => { + try { + return ${accept ? "true" : "false"}; + } finally { + restore(); + } + }; + window.prompt = (...args) => { + try { + return ${accept ? JSON.stringify(promptText ?? 
"") : "null"}; + } finally { + restore(); + } + }; + return true; + }`, + }); + return res.json({ ok: true }); + } + const pw = await requirePwAi(res, "dialog hook"); + if (!pw) { + return; + } await pw.armDialogViaPlaywright({ cdpUrl, targetId: tab.targetId, diff --git a/src/browser/routes/agent.act.ts b/src/browser/routes/agent.act.ts index 2ae6073c7cf..8928a8a7d06 100644 --- a/src/browser/routes/agent.act.ts +++ b/src/browser/routes/agent.act.ts @@ -1,3 +1,14 @@ +import { + clickChromeMcpElement, + closeChromeMcpTab, + dragChromeMcpElement, + evaluateChromeMcpScript, + fillChromeMcpElement, + fillChromeMcpForm, + hoverChromeMcpElement, + pressChromeMcpKey, + resizeChromeMcpPage, +} from "../chrome-mcp.js"; import type { BrowserFormField } from "../client-actions-core.js"; import { normalizeBrowserFormField } from "../form-fields.js"; import type { BrowserRouteContext } from "../server-context.js"; @@ -11,13 +22,88 @@ import { } from "./agent.act.shared.js"; import { readBody, + requirePwAi, resolveTargetIdFromBody, - withPlaywrightRouteContext, + withRouteTabContext, SELECTOR_UNSUPPORTED_MESSAGE, } from "./agent.shared.js"; import type { BrowserRouteRegistrar } from "./types.js"; import { jsonError, toBoolean, toNumber, toStringArray, toStringOrEmpty } from "./utils.js"; +function sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); +} + +function buildExistingSessionWaitPredicate(params: { + text?: string; + textGone?: string; + selector?: string; + url?: string; + loadState?: "load" | "domcontentloaded" | "networkidle"; + fn?: string; +}): string | null { + const checks: string[] = []; + if (params.text) { + checks.push(`Boolean(document.body?.innerText?.includes(${JSON.stringify(params.text)}))`); + } + if (params.textGone) { + checks.push(`!document.body?.innerText?.includes(${JSON.stringify(params.textGone)})`); + } + if (params.selector) { + 
checks.push(`Boolean(document.querySelector(${JSON.stringify(params.selector)}))`); + } + if (params.url) { + checks.push(`window.location.href === ${JSON.stringify(params.url)}`); + } + if (params.loadState === "domcontentloaded") { + checks.push(`document.readyState === "interactive" || document.readyState === "complete"`); + } else if (params.loadState === "load" || params.loadState === "networkidle") { + checks.push(`document.readyState === "complete"`); + } + if (params.fn) { + checks.push(`Boolean(await (${params.fn})())`); + } + if (checks.length === 0) { + return null; + } + return checks.length === 1 ? checks[0] : checks.map((check) => `(${check})`).join(" && "); +} + +async function waitForExistingSessionCondition(params: { + profileName: string; + targetId: string; + timeMs?: number; + text?: string; + textGone?: string; + selector?: string; + url?: string; + loadState?: "load" | "domcontentloaded" | "networkidle"; + fn?: string; + timeoutMs?: number; +}): Promise { + if (params.timeMs && params.timeMs > 0) { + await sleep(params.timeMs); + } + const predicate = buildExistingSessionWaitPredicate(params); + if (!predicate) { + return; + } + const timeoutMs = Math.max(250, params.timeoutMs ?? 
10_000); + const deadline = Date.now() + timeoutMs; + while (Date.now() < deadline) { + const ready = await evaluateChromeMcpScript({ + profileName: params.profileName, + targetId: params.targetId, + fn: `async () => ${predicate}`, + }); + if (ready) { + return; + } + await sleep(250); + } + throw new Error("Timed out waiting for condition"); +} + export function registerBrowserAgentActRoutes( app: BrowserRouteRegistrar, ctx: BrowserRouteContext, @@ -34,14 +120,15 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, SELECTOR_UNSUPPORTED_MESSAGE); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: `act:${kind}`, - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { const evaluateEnabled = ctx.state().resolved.evaluateEnabled; + const isExistingSession = profileCtx.profile.driver === "existing-session"; + const profileName = profileCtx.profile.name; switch (kind) { case "click": { @@ -63,6 +150,26 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, parsedModifiers.error); } const modifiers = parsedModifiers.modifiers; + if (isExistingSession) { + if ((button && button !== "left") || (modifiers && modifiers.length > 0)) { + return jsonError( + res, + 501, + "existing-session click currently supports left-click only (no button overrides/modifiers).", + ); + } + await clickChromeMcpElement({ + profileName, + targetId: tab.targetId, + uid: ref, + doubleClick, + }); + return res.json({ ok: true, targetId: tab.targetId, url: tab.url }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } const clickRequest: Parameters[0] = { cdpUrl, targetId: tab.targetId, @@ -93,6 +200,33 @@ export function registerBrowserAgentActRoutes( const submit = toBoolean(body.submit) ?? false; const slowly = toBoolean(body.slowly) ?? 
false; const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (slowly) { + return jsonError( + res, + 501, + "existing-session type does not support slowly=true; use fill/press instead.", + ); + } + await fillChromeMcpElement({ + profileName, + targetId: tab.targetId, + uid: ref, + value: text, + }); + if (submit) { + await pressChromeMcpKey({ + profileName, + targetId: tab.targetId, + key: "Enter", + }); + } + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } const typeRequest: Parameters[0] = { cdpUrl, targetId: tab.targetId, @@ -113,6 +247,17 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "key is required"); } const delayMs = toNumber(body.delayMs); + if (isExistingSession) { + if (delayMs) { + return jsonError(res, 501, "existing-session press does not support delayMs."); + } + await pressChromeMcpKey({ profileName, targetId: tab.targetId, key }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.pressKeyViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -127,6 +272,21 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "ref is required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session hover does not support timeoutMs overrides.", + ); + } + await hoverChromeMcpElement({ profileName, targetId: tab.targetId, uid: ref }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.hoverViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -141,6 +301,26 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "ref is required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + 
if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session scrollIntoView does not support timeoutMs overrides.", + ); + } + await evaluateChromeMcpScript({ + profileName, + targetId: tab.targetId, + fn: `(el) => { el.scrollIntoView({ block: "center", inline: "center" }); return true; }`, + args: [ref], + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } const scrollRequest: Parameters[0] = { cdpUrl, targetId: tab.targetId, @@ -159,6 +339,26 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "startRef and endRef are required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session drag does not support timeoutMs overrides.", + ); + } + await dragChromeMcpElement({ + profileName, + targetId: tab.targetId, + fromUid: startRef, + toUid: endRef, + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.dragViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -175,6 +375,33 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "ref and values are required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (values.length !== 1) { + return jsonError( + res, + 501, + "existing-session select currently supports a single value only.", + ); + } + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session select does not support timeoutMs overrides.", + ); + } + await fillChromeMcpElement({ + profileName, + targetId: tab.targetId, + uid: ref, + value: values[0] ?? 
"", + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.selectOptionViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -198,6 +425,28 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "fields are required"); } const timeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (timeoutMs) { + return jsonError( + res, + 501, + "existing-session fill does not support timeoutMs overrides.", + ); + } + await fillChromeMcpForm({ + profileName, + targetId: tab.targetId, + elements: fields.map((field) => ({ + uid: field.ref, + value: String(field.value ?? ""), + })), + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.fillFormViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -212,6 +461,19 @@ export function registerBrowserAgentActRoutes( if (!width || !height) { return jsonError(res, 400, "width and height are required"); } + if (isExistingSession) { + await resizeChromeMcpPage({ + profileName, + targetId: tab.targetId, + width, + height, + }); + return res.json({ ok: true, targetId: tab.targetId, url: tab.url }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.resizeViewportViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -260,6 +522,25 @@ export function registerBrowserAgentActRoutes( "wait requires at least one of: timeMs, text, textGone, selector, url, loadState, fn", ); } + if (isExistingSession) { + await waitForExistingSessionCondition({ + profileName, + targetId: tab.targetId, + timeMs, + text, + textGone, + selector, + url, + loadState, + fn, + timeoutMs, + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.waitForViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -291,6 +572,31 
@@ export function registerBrowserAgentActRoutes( } const ref = toStringOrEmpty(body.ref) || undefined; const evalTimeoutMs = toNumber(body.timeoutMs); + if (isExistingSession) { + if (evalTimeoutMs !== undefined) { + return jsonError( + res, + 501, + "existing-session evaluate does not support timeoutMs overrides.", + ); + } + const result = await evaluateChromeMcpScript({ + profileName, + targetId: tab.targetId, + fn, + args: ref ? [ref] : undefined, + }); + return res.json({ + ok: true, + targetId: tab.targetId, + url: tab.url, + result, + }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } const evalRequest: Parameters[0] = { cdpUrl, targetId: tab.targetId, @@ -310,6 +616,14 @@ export function registerBrowserAgentActRoutes( }); } case "close": { + if (isExistingSession) { + await closeChromeMcpTab(profileName, tab.targetId); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, `act:${kind}`); + if (!pw) { + return; + } await pw.closePageViaPlaywright({ cdpUrl, targetId: tab.targetId }); return res.json({ ok: true, targetId: tab.targetId }); } @@ -334,13 +648,23 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "url is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "response body", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (profileCtx.profile.driver === "existing-session") { + return jsonError( + res, + 501, + "response body is not supported for existing-session profiles yet.", + ); + } + const pw = await requirePwAi(res, "response body"); + if (!pw) { + return; + } const result = await pw.responseBodyViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -361,13 +685,39 @@ export function registerBrowserAgentActRoutes( return jsonError(res, 400, "ref is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, 
ctx, targetId, - feature: "highlight", - run: async ({ cdpUrl, tab, pw }) => { + run: async ({ profileCtx, cdpUrl, tab }) => { + if (profileCtx.profile.driver === "existing-session") { + await evaluateChromeMcpScript({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + args: [ref], + fn: `(el) => { + if (!(el instanceof Element)) { + return false; + } + el.scrollIntoView({ block: "center", inline: "center" }); + const previousOutline = el.style.outline; + const previousOffset = el.style.outlineOffset; + el.style.outline = "3px solid #FF4500"; + el.style.outlineOffset = "2px"; + setTimeout(() => { + el.style.outline = previousOutline; + el.style.outlineOffset = previousOffset; + }, 2000); + return true; + }`, + }); + return res.json({ ok: true, targetId: tab.targetId }); + } + const pw = await requirePwAi(res, "highlight"); + if (!pw) { + return; + } await pw.highlightViaPlaywright({ cdpUrl, targetId: tab.targetId, diff --git a/src/browser/routes/agent.snapshot.ts b/src/browser/routes/agent.snapshot.ts index c750cafe723..1b8626141b5 100644 --- a/src/browser/routes/agent.snapshot.ts +++ b/src/browser/routes/agent.snapshot.ts @@ -1,6 +1,20 @@ import path from "node:path"; import { ensureMediaDir, saveMediaBuffer } from "../../media/store.js"; import { captureScreenshot, snapshotAria } from "../cdp.js"; +import { + evaluateChromeMcpScript, + navigateChromeMcpPage, + takeChromeMcpScreenshot, + takeChromeMcpSnapshot, +} from "../chrome-mcp.js"; +import { + buildAiSnapshotFromChromeMcpSnapshot, + flattenChromeMcpSnapshotToAriaNodes, +} from "../chrome-mcp.snapshot.js"; +import { + assertBrowserNavigationAllowed, + assertBrowserNavigationResultAllowed, +} from "../navigation-guard.js"; import { withBrowserNavigationPolicy } from "../navigation-guard.js"; import { DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, @@ -25,6 +39,89 @@ import { import type { BrowserResponse, BrowserRouteRegistrar } from "./types.js"; import { jsonError, toBoolean, toStringOrEmpty } from 
"./utils.js"; +const CHROME_MCP_OVERLAY_ATTR = "data-openclaw-mcp-overlay"; + +async function clearChromeMcpOverlay(params: { + profileName: string; + targetId: string; +}): Promise { + await evaluateChromeMcpScript({ + profileName: params.profileName, + targetId: params.targetId, + fn: `() => { + document.querySelectorAll("[${CHROME_MCP_OVERLAY_ATTR}]").forEach((node) => node.remove()); + return true; + }`, + }).catch(() => {}); +} + +async function renderChromeMcpLabels(params: { + profileName: string; + targetId: string; + refs: string[]; +}): Promise<{ labels: number; skipped: number }> { + const refList = JSON.stringify(params.refs); + const result = await evaluateChromeMcpScript({ + profileName: params.profileName, + targetId: params.targetId, + args: params.refs, + fn: `(...elements) => { + const refs = ${refList}; + document.querySelectorAll("[${CHROME_MCP_OVERLAY_ATTR}]").forEach((node) => node.remove()); + const root = document.createElement("div"); + root.setAttribute("${CHROME_MCP_OVERLAY_ATTR}", "labels"); + root.style.position = "fixed"; + root.style.inset = "0"; + root.style.pointerEvents = "none"; + root.style.zIndex = "2147483647"; + let labels = 0; + let skipped = 0; + elements.forEach((el, index) => { + if (!(el instanceof Element)) { + skipped += 1; + return; + } + const rect = el.getBoundingClientRect(); + if (rect.width <= 0 && rect.height <= 0) { + skipped += 1; + return; + } + labels += 1; + const badge = document.createElement("div"); + badge.setAttribute("${CHROME_MCP_OVERLAY_ATTR}", "label"); + badge.textContent = refs[index] || String(labels); + badge.style.position = "fixed"; + badge.style.left = \`\${Math.max(0, rect.left)}px\`; + badge.style.top = \`\${Math.max(0, rect.top)}px\`; + badge.style.transform = "translateY(-100%)"; + badge.style.padding = "2px 6px"; + badge.style.borderRadius = "999px"; + badge.style.background = "#FF4500"; + badge.style.color = "#fff"; + badge.style.font = "600 12px ui-monospace, SFMono-Regular, Menlo, 
monospace"; + badge.style.boxShadow = "0 2px 6px rgba(0,0,0,0.35)"; + badge.style.whiteSpace = "nowrap"; + root.appendChild(badge); + }); + document.documentElement.appendChild(root); + return { labels, skipped }; + }`, + }); + const labels = + result && + typeof result === "object" && + typeof (result as { labels?: unknown }).labels === "number" + ? (result as { labels: number }).labels + : 0; + const skipped = + result && + typeof result === "object" && + typeof (result as { skipped?: unknown }).skipped === "number" + ? (result as { skipped: number }).skipped + : 0; + return { labels, skipped }; +} + async function saveBrowserMediaResponse(params: { res: BrowserResponse; buffer: Buffer; @@ -96,13 +193,27 @@ export function registerBrowserAgentSnapshotRoutes( if (!url) { return jsonError(res, 400, "url is required"); } - await withPlaywrightRouteContext({ + await withRouteTabContext({ req, res, ctx, targetId, - feature: "navigate", - run: async ({ cdpUrl, tab, pw, profileCtx }) => { + run: async ({ profileCtx, tab, cdpUrl }) => { + if (profileCtx.profile.driver === "existing-session") { + const ssrfPolicyOpts = withBrowserNavigationPolicy(ctx.state().resolved.ssrfPolicy); + await assertBrowserNavigationAllowed({ url, ...ssrfPolicyOpts }); + const result = await navigateChromeMcpPage({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + url, + }); + await assertBrowserNavigationResultAllowed({ url: result.url, ...ssrfPolicyOpts }); + return res.json({ ok: true, targetId: tab.targetId, ...result }); + } + const pw = await requirePwAi(res, "navigate"); + if (!pw) { + return; + } const result = await pw.navigateViaPlaywright({ cdpUrl, targetId: tab.targetId, @@ -122,6 +233,17 @@ export function registerBrowserAgentSnapshotRoutes( app.post("/pdf", async (req, res) => { const body = readBody(req); const targetId = toStringOrEmpty(body.targetId) || undefined; + const profileCtx = resolveProfileContext(req, res, ctx); + if (!profileCtx) { + return; + } + 
if (profileCtx.profile.driver === "existing-session") { + return jsonError( + res, + 501, + "pdf is not supported for existing-session profiles yet; use screenshot/snapshot instead.", + ); + } await withPlaywrightRouteContext({ req, res, @@ -163,6 +285,36 @@ export function registerBrowserAgentSnapshotRoutes( ctx, targetId, run: async ({ profileCtx, tab, cdpUrl }) => { + if (profileCtx.profile.driver === "existing-session") { + if (element) { + return jsonError( + res, + 400, + "element screenshots are not supported for existing-session profiles; use ref from snapshot.", + ); + } + const buffer = await takeChromeMcpScreenshot({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + uid: ref, + fullPage, + format: type, + }); + const normalized = await normalizeBrowserScreenshot(buffer, { + maxSide: DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE, + maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + }); + await saveBrowserMediaResponse({ + res, + buffer: normalized.buffer, + contentType: normalized.contentType ?? 
`image/${type}`, + maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + targetId: tab.targetId, + url: tab.url, + }); + return; + } + let buffer: Buffer; const shouldUsePlaywright = shouldUsePlaywrightForScreenshot({ profile: profileCtx.profile, @@ -227,6 +379,90 @@ export function registerBrowserAgentSnapshotRoutes( if ((plan.labels || plan.mode === "efficient") && plan.format === "aria") { return jsonError(res, 400, "labels/mode=efficient require format=ai"); } + if (profileCtx.profile.driver === "existing-session") { + if (plan.labels) { + return jsonError(res, 501, "labels are not supported for existing-session profiles yet."); + } + if (plan.selectorValue || plan.frameSelectorValue) { + return jsonError( + res, + 400, + "selector/frame snapshots are not supported for existing-session profiles; snapshot the whole page and use refs.", + ); + } + const snapshot = await takeChromeMcpSnapshot({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + }); + if (plan.format === "aria") { + return res.json({ + ok: true, + format: "aria", + targetId: tab.targetId, + url: tab.url, + nodes: flattenChromeMcpSnapshotToAriaNodes(snapshot, plan.limit), + }); + } + const built = buildAiSnapshotFromChromeMcpSnapshot({ + root: snapshot, + options: { + interactive: plan.interactive ?? undefined, + compact: plan.compact ?? undefined, + maxDepth: plan.depth ?? 
undefined, + }, + maxChars: plan.resolvedMaxChars, + }); + if (plan.labels) { + const refs = Object.keys(built.refs); + const labelResult = await renderChromeMcpLabels({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + refs, + }); + try { + const labeled = await takeChromeMcpScreenshot({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + format: "png", + }); + const normalized = await normalizeBrowserScreenshot(labeled, { + maxSide: DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE, + maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + }); + await ensureMediaDir(); + const saved = await saveMediaBuffer( + normalized.buffer, + normalized.contentType ?? "image/png", + "browser", + DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES, + ); + return res.json({ + ok: true, + format: "ai", + targetId: tab.targetId, + url: tab.url, + labels: true, + labelsCount: labelResult.labels, + labelsSkipped: labelResult.skipped, + imagePath: path.resolve(saved.path), + imageType: normalized.contentType?.includes("jpeg") ? 
"jpeg" : "png", + ...built, + }); + } finally { + await clearChromeMcpOverlay({ + profileName: profileCtx.profile.name, + targetId: tab.targetId, + }); + } + } + return res.json({ + ok: true, + format: "ai", + targetId: tab.targetId, + url: tab.url, + ...built, + }); + } if (plan.format === "ai") { const pw = await requirePwAi(res, "ai snapshot"); if (!pw) { diff --git a/src/browser/routes/basic.ts b/src/browser/routes/basic.ts index 5f32c86729b..9991744107d 100644 --- a/src/browser/routes/basic.ts +++ b/src/browser/routes/basic.ts @@ -1,3 +1,4 @@ +import { getChromeMcpPid } from "../chrome-mcp.js"; import { resolveBrowserExecutableForPlatform } from "../chrome.executables.js"; import { toBrowserErrorResponse } from "../errors.js"; import { createBrowserProfilesService } from "../profiles-service.js"; @@ -76,10 +77,14 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow res.json({ enabled: current.resolved.enabled, profile: profileCtx.profile.name, + driver: profileCtx.profile.driver, running: cdpReady, cdpReady, cdpHttp, - pid: profileState?.running?.pid ?? null, + pid: + profileCtx.profile.driver === "existing-session" + ? getChromeMcpPid(profileCtx.profile.name) + : (profileState?.running?.pid ?? null), cdpPort: profileCtx.profile.cdpPort, cdpUrl: profileCtx.profile.cdpUrl, chosenBrowser: profileState?.running?.exe.kind ?? null, @@ -146,6 +151,7 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow const driver = toStringOrEmpty((req.body as { driver?: unknown })?.driver) as | "openclaw" | "extension" + | "existing-session" | ""; if (!name) { @@ -158,7 +164,12 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow name, color: color || undefined, cdpUrl: cdpUrl || undefined, - driver: driver === "extension" ? "extension" : undefined, + driver: + driver === "extension" + ? "extension" + : driver === "existing-session" + ? 
"existing-session" + : undefined, }); res.json(result); } catch (err) { diff --git a/src/browser/server-context.availability.ts b/src/browser/server-context.availability.ts index 3b00ff99dff..d2d9944d964 100644 --- a/src/browser/server-context.availability.ts +++ b/src/browser/server-context.availability.ts @@ -3,6 +3,11 @@ import { PROFILE_POST_RESTART_WS_TIMEOUT_MS, resolveCdpReachabilityTimeouts, } from "./cdp-timeouts.js"; +import { + closeChromeMcpSession, + ensureChromeMcpAvailable, + listChromeMcpTabs, +} from "./chrome-mcp.js"; import { isChromeCdpReady, isChromeReachable, @@ -60,11 +65,19 @@ export function createProfileAvailability({ }); const isReachable = async (timeoutMs?: number) => { + if (profile.driver === "existing-session") { + await ensureChromeMcpAvailable(profile.name); + await listChromeMcpTabs(profile.name); + return true; + } const { httpTimeoutMs, wsTimeoutMs } = resolveTimeouts(timeoutMs); return await isChromeCdpReady(profile.cdpUrl, httpTimeoutMs, wsTimeoutMs); }; const isHttpReachable = async (timeoutMs?: number) => { + if (profile.driver === "existing-session") { + return await isReachable(timeoutMs); + } const { httpTimeoutMs } = resolveTimeouts(timeoutMs); return await isChromeReachable(profile.cdpUrl, httpTimeoutMs); }; @@ -109,6 +122,9 @@ export function createProfileAvailability({ if (previousProfile.driver === "extension") { await stopChromeExtensionRelayServer({ cdpUrl: previousProfile.cdpUrl }).catch(() => false); } + if (previousProfile.driver === "existing-session") { + await closeChromeMcpSession(previousProfile.name).catch(() => false); + } await closePlaywrightBrowserConnectionForProfile(previousProfile.cdpUrl); if (previousProfile.cdpUrl !== profile.cdpUrl) { await closePlaywrightBrowserConnectionForProfile(profile.cdpUrl); @@ -138,6 +154,10 @@ export function createProfileAvailability({ const ensureBrowserAvailable = async (): Promise => { await reconcileProfileRuntime(); + if (profile.driver === "existing-session") { + 
await ensureChromeMcpAvailable(profile.name); + return; + } const current = state(); const remoteCdp = capabilities.isRemote; const attachOnly = profile.attachOnly; @@ -238,6 +258,10 @@ export function createProfileAvailability({ const stopRunningBrowser = async (): Promise<{ stopped: boolean }> => { await reconcileProfileRuntime(); + if (profile.driver === "existing-session") { + const stopped = await closeChromeMcpSession(profile.name); + return { stopped }; + } if (capabilities.requiresRelay) { const stopped = await stopChromeExtensionRelayServer({ cdpUrl: profile.cdpUrl, diff --git a/src/browser/server-context.existing-session.test.ts b/src/browser/server-context.existing-session.test.ts new file mode 100644 index 00000000000..abbd222342e --- /dev/null +++ b/src/browser/server-context.existing-session.test.ts @@ -0,0 +1,102 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { createBrowserRouteContext } from "./server-context.js"; +import type { BrowserServerState } from "./server-context.js"; + +vi.mock("./chrome-mcp.js", () => ({ + closeChromeMcpSession: vi.fn(async () => true), + ensureChromeMcpAvailable: vi.fn(async () => {}), + focusChromeMcpTab: vi.fn(async () => {}), + listChromeMcpTabs: vi.fn(async () => [ + { targetId: "7", title: "", url: "https://example.com", type: "page" }, + ]), + openChromeMcpTab: vi.fn(async () => ({ + targetId: "8", + title: "", + url: "https://openclaw.ai", + type: "page", + })), + closeChromeMcpTab: vi.fn(async () => {}), + getChromeMcpPid: vi.fn(() => 4321), +})); + +import * as chromeMcp from "./chrome-mcp.js"; + +function makeState(): BrowserServerState { + return { + server: null, + port: 0, + resolved: { + enabled: true, + evaluateEnabled: true, + controlPort: 18791, + cdpPortRangeStart: 18800, + cdpPortRangeEnd: 18899, + cdpProtocol: "http", + cdpHost: "127.0.0.1", + cdpIsLoopback: true, + remoteCdpTimeoutMs: 1500, + remoteCdpHandshakeTimeoutMs: 3000, + color: "#FF4500", + headless: false, + 
noSandbox: false, + attachOnly: false, + defaultProfile: "chrome-live", + profiles: { + "chrome-live": { + cdpPort: 18801, + color: "#0066CC", + driver: "existing-session", + attachOnly: true, + }, + }, + extraArgs: [], + ssrfPolicy: { dangerouslyAllowPrivateNetwork: true }, + }, + profiles: new Map(), + }; +} + +afterEach(() => { + vi.clearAllMocks(); +}); + +describe("browser server-context existing-session profile", () => { + it("routes tab operations through the Chrome MCP backend", async () => { + const state = makeState(); + const ctx = createBrowserRouteContext({ getState: () => state }); + const live = ctx.forProfile("chrome-live"); + + vi.mocked(chromeMcp.listChromeMcpTabs) + .mockResolvedValueOnce([ + { targetId: "7", title: "", url: "https://example.com", type: "page" }, + ]) + .mockResolvedValueOnce([ + { targetId: "8", title: "", url: "https://openclaw.ai", type: "page" }, + ]) + .mockResolvedValueOnce([ + { targetId: "8", title: "", url: "https://openclaw.ai", type: "page" }, + ]) + .mockResolvedValueOnce([ + { targetId: "7", title: "", url: "https://example.com", type: "page" }, + ]); + + await live.ensureBrowserAvailable(); + const tabs = await live.listTabs(); + expect(tabs.map((tab) => tab.targetId)).toEqual(["7"]); + + const opened = await live.openTab("https://openclaw.ai"); + expect(opened.targetId).toBe("8"); + + const selected = await live.ensureTabAvailable(); + expect(selected.targetId).toBe("8"); + + await live.focusTab("7"); + await live.stopRunningBrowser(); + + expect(chromeMcp.ensureChromeMcpAvailable).toHaveBeenCalledWith("chrome-live"); + expect(chromeMcp.listChromeMcpTabs).toHaveBeenCalledWith("chrome-live"); + expect(chromeMcp.openChromeMcpTab).toHaveBeenCalledWith("chrome-live", "https://openclaw.ai"); + expect(chromeMcp.focusChromeMcpTab).toHaveBeenCalledWith("chrome-live", "7"); + expect(chromeMcp.closeChromeMcpSession).toHaveBeenCalledWith("chrome-live"); + }); +}); diff --git a/src/browser/server-context.selection.ts 
b/src/browser/server-context.selection.ts index 8a9cfa19c42..9e1fb728b2a 100644 --- a/src/browser/server-context.selection.ts +++ b/src/browser/server-context.selection.ts @@ -1,5 +1,6 @@ import { fetchOk, normalizeCdpHttpBaseForJsonEndpoints } from "./cdp.helpers.js"; import { appendCdpPath } from "./cdp.js"; +import { closeChromeMcpTab, focusChromeMcpTab } from "./chrome-mcp.js"; import type { ResolvedBrowserProfile } from "./config.js"; import { BrowserTabNotFoundError, BrowserTargetAmbiguousError } from "./errors.js"; import { getBrowserProfileCapabilities } from "./profile-capabilities.js"; @@ -111,6 +112,13 @@ export function createProfileSelectionOps({ const focusTab = async (targetId: string): Promise => { const resolvedTargetId = await resolveTargetIdOrThrow(targetId); + if (profile.driver === "existing-session") { + await focusChromeMcpTab(profile.name, resolvedTargetId); + const profileState = getProfileState(); + profileState.lastTargetId = resolvedTargetId; + return; + } + if (capabilities.usesPersistentPlaywright) { const mod = await getPwAiModule({ mode: "strict" }); const focusPageByTargetIdViaPlaywright = (mod as Partial | null) @@ -134,6 +142,11 @@ export function createProfileSelectionOps({ const closeTab = async (targetId: string): Promise => { const resolvedTargetId = await resolveTargetIdOrThrow(targetId); + if (profile.driver === "existing-session") { + await closeChromeMcpTab(profile.name, resolvedTargetId); + return; + } + // For remote profiles, use Playwright's persistent connection to close tabs if (capabilities.usesPersistentPlaywright) { const mod = await getPwAiModule({ mode: "strict" }); diff --git a/src/browser/server-context.tab-ops.ts b/src/browser/server-context.tab-ops.ts index 24985430bdc..067536fd017 100644 --- a/src/browser/server-context.tab-ops.ts +++ b/src/browser/server-context.tab-ops.ts @@ -1,6 +1,7 @@ import { CDP_JSON_NEW_TIMEOUT_MS } from "./cdp-timeouts.js"; import { fetchJson, fetchOk, 
normalizeCdpHttpBaseForJsonEndpoints } from "./cdp.helpers.js"; import { appendCdpPath, createTargetViaCdp, normalizeCdpWsUrl } from "./cdp.js"; +import { listChromeMcpTabs, openChromeMcpTab } from "./chrome-mcp.js"; import type { ResolvedBrowserProfile } from "./config.js"; import { assertBrowserNavigationAllowed, @@ -65,6 +66,10 @@ export function createProfileTabOps({ const capabilities = getBrowserProfileCapabilities(profile); const listTabs = async (): Promise => { + if (profile.driver === "existing-session") { + return await listChromeMcpTabs(profile.name); + } + if (capabilities.usesPersistentPlaywright) { const mod = await getPwAiModule({ mode: "strict" }); const listPagesViaPlaywright = (mod as Partial | null)?.listPagesViaPlaywright; @@ -134,6 +139,15 @@ export function createProfileTabOps({ const openTab = async (url: string): Promise => { const ssrfPolicyOpts = withBrowserNavigationPolicy(state().resolved.ssrfPolicy); + if (profile.driver === "existing-session") { + await assertBrowserNavigationAllowed({ url, ...ssrfPolicyOpts }); + const page = await openChromeMcpTab(profile.name, url); + const profileState = getProfileState(); + profileState.lastTargetId = page.targetId; + await assertBrowserNavigationResultAllowed({ url: page.url, ...ssrfPolicyOpts }); + return page; + } + if (capabilities.usesPersistentPlaywright) { const mod = await getPwAiModule({ mode: "strict" }); const createPageViaPlaywright = (mod as Partial | null)?.createPageViaPlaywright; diff --git a/src/browser/server-context.ts b/src/browser/server-context.ts index d75b14c2471..37e182f1e69 100644 --- a/src/browser/server-context.ts +++ b/src/browser/server-context.ts @@ -162,12 +162,22 @@ export function createBrowserRouteContext(opts: ContextOptions): BrowserRouteCon let tabCount = 0; let running = false; + const profileCtx = createProfileContext(opts, profile); - if (profileState?.running) { + if (profile.driver === "existing-session") { + try { + running = await 
profileCtx.isReachable(300); + if (running) { + const tabs = await profileCtx.listTabs(); + tabCount = tabs.filter((t) => t.type === "page").length; + } + } catch { + // Chrome MCP not available + } + } else if (profileState?.running) { running = true; try { - const ctx = createProfileContext(opts, profile); - const tabs = await ctx.listTabs(); + const tabs = await profileCtx.listTabs(); tabCount = tabs.filter((t) => t.type === "page").length; } catch { // Browser might not be responsive @@ -178,8 +188,7 @@ export function createBrowserRouteContext(opts: ContextOptions): BrowserRouteCon const reachable = await isChromeReachable(profile.cdpUrl, 200); if (reachable) { running = true; - const ctx = createProfileContext(opts, profile); - const tabs = await ctx.listTabs().catch(() => []); + const tabs = await profileCtx.listTabs().catch(() => []); tabCount = tabs.filter((t) => t.type === "page").length; } } catch { @@ -192,6 +201,7 @@ export function createBrowserRouteContext(opts: ContextOptions): BrowserRouteCon cdpPort: profile.cdpPort, cdpUrl: profile.cdpUrl, color: profile.color, + driver: profile.driver, running, tabCount, isDefault: name === current.resolved.defaultProfile, diff --git a/src/browser/server-context.types.ts b/src/browser/server-context.types.ts index f05e90e9e77..8f949b96da6 100644 --- a/src/browser/server-context.types.ts +++ b/src/browser/server-context.types.ts @@ -56,6 +56,7 @@ export type ProfileStatus = { cdpPort: number; cdpUrl: string; color: string; + driver: ResolvedBrowserProfile["driver"]; running: boolean; tabCount: number; isDefault: boolean; diff --git a/src/channels/reply-prefix.ts b/src/channels/reply-prefix.ts index 2ae6f3d221a..59f0a29381d 100644 --- a/src/channels/reply-prefix.ts +++ b/src/channels/reply-prefix.ts @@ -5,19 +5,24 @@ import { } from "../auto-reply/reply/response-prefix-template.js"; import type { GetReplyOptions } from "../auto-reply/types.js"; import type { OpenClawConfig } from "../config/config.js"; +import { 
isSlackInteractiveRepliesEnabled } from "../slack/interactive-replies.js"; type ModelSelectionContext = Parameters>[0]; export type ReplyPrefixContextBundle = { prefixContext: ResponsePrefixContext; responsePrefix?: string; + enableSlackInteractiveReplies?: boolean; responsePrefixContextProvider: () => ResponsePrefixContext; onModelSelected: (ctx: ModelSelectionContext) => void; }; export type ReplyPrefixOptions = Pick< ReplyPrefixContextBundle, - "responsePrefix" | "responsePrefixContextProvider" | "onModelSelected" + | "responsePrefix" + | "enableSlackInteractiveReplies" + | "responsePrefixContextProvider" + | "onModelSelected" >; export function createReplyPrefixContext(params: { @@ -45,6 +50,10 @@ export function createReplyPrefixContext(params: { channel: params.channel, accountId: params.accountId, }).responsePrefix, + enableSlackInteractiveReplies: + params.channel === "slack" + ? isSlackInteractiveRepliesEnabled({ cfg, accountId: params.accountId }) + : undefined, responsePrefixContextProvider: () => prefixContext, onModelSelected, }; @@ -56,7 +65,16 @@ export function createReplyPrefixOptions(params: { channel?: string; accountId?: string; }): ReplyPrefixOptions { - const { responsePrefix, responsePrefixContextProvider, onModelSelected } = - createReplyPrefixContext(params); - return { responsePrefix, responsePrefixContextProvider, onModelSelected }; + const { + responsePrefix, + enableSlackInteractiveReplies, + responsePrefixContextProvider, + onModelSelected, + } = createReplyPrefixContext(params); + return { + responsePrefix, + enableSlackInteractiveReplies, + responsePrefixContextProvider, + onModelSelected, + }; } diff --git a/src/cli/browser-cli-manage.ts b/src/cli/browser-cli-manage.ts index 53b83ca3f97..31d4b02c2aa 100644 --- a/src/cli/browser-cli-manage.ts +++ b/src/cli/browser-cli-manage.ts @@ -407,7 +407,8 @@ export function registerBrowserManageCommands( const def = p.isDefault ? " [default]" : ""; const loc = p.isRemote ? 
`cdpUrl: ${p.cdpUrl}` : `port: ${p.cdpPort}`; const remote = p.isRemote ? " [remote]" : ""; - return `${p.name}: ${status}${tabs}${def}${remote}\n ${loc}, color: ${p.color}`; + const driver = p.driver !== "openclaw" ? ` [${p.driver}]` : ""; + return `${p.name}: ${status}${tabs}${def}${remote}${driver}\n ${loc}, color: ${p.color}`; }) .join("\n"), ); @@ -420,7 +421,10 @@ export function registerBrowserManageCommands( .requiredOption("--name ", "Profile name (lowercase, numbers, hyphens)") .option("--color ", "Profile color (hex format, e.g. #0066CC)") .option("--cdp-url ", "CDP URL for remote Chrome (http/https)") - .option("--driver ", "Profile driver (openclaw|extension). Default: openclaw") + .option( + "--driver ", + "Profile driver (openclaw|extension|existing-session). Default: openclaw", + ) .action( async (opts: { name: string; color?: string; cdpUrl?: string; driver?: string }, cmd) => { const parent = parentOpts(cmd); @@ -434,7 +438,12 @@ export function registerBrowserManageCommands( name: opts.name, color: opts.color, cdpUrl: opts.cdpUrl, - driver: opts.driver === "extension" ? "extension" : undefined, + driver: + opts.driver === "extension" + ? "extension" + : opts.driver === "existing-session" + ? "existing-session" + : undefined, }, }, { timeoutMs: 10_000 }, @@ -446,7 +455,11 @@ export function registerBrowserManageCommands( defaultRuntime.log( info( `🦞 Created profile "${result.profile}"\n${loc}\n color: ${result.color}${ - opts.driver === "extension" ? "\n driver: extension" : "" + opts.driver === "extension" + ? "\n driver: extension" + : opts.driver === "existing-session" + ? 
"\n driver: existing-session" + : "" }`, ), ); diff --git a/src/cli/command-secret-gateway.test.ts b/src/cli/command-secret-gateway.test.ts index 3cb12b03138..e2715262530 100644 --- a/src/cli/command-secret-gateway.test.ts +++ b/src/cli/command-secret-gateway.test.ts @@ -64,6 +64,17 @@ describe("resolveCommandSecretRefsViaGateway", () => { }); } + function expectGatewayUnavailableLocalFallbackDiagnostics( + result: Awaited>, + ) { + expect( + result.diagnostics.some((entry) => entry.includes("gateway secrets.resolve unavailable")), + ).toBe(true); + expect( + result.diagnostics.some((entry) => entry.includes("resolved command secrets locally")), + ).toBe(true); + } + it("returns config unchanged when no target SecretRefs are configured", async () => { const config = { talk: { @@ -208,11 +219,8 @@ describe("resolveCommandSecretRefsViaGateway", () => { it("falls back to local resolution for web search SecretRefs when gateway is unavailable", async () => { const envKey = "WEB_SEARCH_GEMINI_API_KEY_LOCAL_FALLBACK"; - const priorValue = process.env[envKey]; - process.env[envKey] = "gemini-local-fallback-key"; - callGateway.mockRejectedValueOnce(new Error("gateway closed")); - - try { + await withEnvValue(envKey, "gemini-local-fallback-key", async () => { + callGateway.mockRejectedValueOnce(new Error("gateway closed")); const result = await resolveCommandSecretRefsViaGateway({ config: { tools: { @@ -234,28 +242,14 @@ describe("resolveCommandSecretRefsViaGateway", () => { "gemini-local-fallback-key", ); expect(result.targetStatesByPath["tools.web.search.gemini.apiKey"]).toBe("resolved_local"); - expect( - result.diagnostics.some((entry) => entry.includes("gateway secrets.resolve unavailable")), - ).toBe(true); - expect( - result.diagnostics.some((entry) => entry.includes("resolved command secrets locally")), - ).toBe(true); - } finally { - if (priorValue === undefined) { - delete process.env[envKey]; - } else { - process.env[envKey] = priorValue; - } - } + 
expectGatewayUnavailableLocalFallbackDiagnostics(result); + }); }); it("falls back to local resolution for Firecrawl SecretRefs when gateway is unavailable", async () => { const envKey = "WEB_FETCH_FIRECRAWL_API_KEY_LOCAL_FALLBACK"; - const priorValue = process.env[envKey]; - process.env[envKey] = "firecrawl-local-fallback-key"; - callGateway.mockRejectedValueOnce(new Error("gateway closed")); - - try { + await withEnvValue(envKey, "firecrawl-local-fallback-key", async () => { + callGateway.mockRejectedValueOnce(new Error("gateway closed")); const result = await resolveCommandSecretRefsViaGateway({ config: { tools: { @@ -276,19 +270,8 @@ describe("resolveCommandSecretRefsViaGateway", () => { "firecrawl-local-fallback-key", ); expect(result.targetStatesByPath["tools.web.fetch.firecrawl.apiKey"]).toBe("resolved_local"); - expect( - result.diagnostics.some((entry) => entry.includes("gateway secrets.resolve unavailable")), - ).toBe(true); - expect( - result.diagnostics.some((entry) => entry.includes("resolved command secrets locally")), - ).toBe(true); - } finally { - if (priorValue === undefined) { - delete process.env[envKey]; - } else { - process.env[envKey] = priorValue; - } - } + expectGatewayUnavailableLocalFallbackDiagnostics(result); + }); }); it("marks web SecretRefs inactive when the web surface is disabled during local fallback", async () => { diff --git a/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts b/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts index 188e7090915..7b1526f87c6 100644 --- a/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts +++ b/src/cli/daemon-cli/lifecycle-core.config-guard.test.ts @@ -1,30 +1,15 @@ import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import { + defaultRuntime, + resetLifecycleRuntimeLogs, + resetLifecycleServiceMocks, + service, + stubEmptyGatewayEnv, +} from "./test-helpers/lifecycle-core-harness.js"; const readConfigFileSnapshotMock = vi.fn(); const loadConfig = vi.fn(() => 
({})); -const runtimeLogs: string[] = []; -const defaultRuntime = { - log: (message: string) => runtimeLogs.push(message), - error: vi.fn(), - exit: (code: number) => { - throw new Error(`__exit__:${code}`); - }, -}; - -const service = { - label: "TestService", - loadedText: "loaded", - notLoadedText: "not loaded", - install: vi.fn(), - uninstall: vi.fn(), - stop: vi.fn(), - isLoaded: vi.fn(), - readCommand: vi.fn(), - readRuntime: vi.fn(), - restart: vi.fn(), -}; - vi.mock("../../config/config.js", () => ({ loadConfig: () => loadConfig(), readConfigFileSnapshot: () => readConfigFileSnapshotMock(), @@ -50,7 +35,7 @@ describe("runServiceRestart config pre-flight (#35862)", () => { }); beforeEach(() => { - runtimeLogs.length = 0; + resetLifecycleRuntimeLogs(); readConfigFileSnapshotMock.mockReset(); readConfigFileSnapshotMock.mockResolvedValue({ exists: true, @@ -60,15 +45,8 @@ describe("runServiceRestart config pre-flight (#35862)", () => { }); loadConfig.mockReset(); loadConfig.mockReturnValue({}); - service.isLoaded.mockClear(); - service.readCommand.mockClear(); - service.restart.mockClear(); - service.isLoaded.mockResolvedValue(true); - service.readCommand.mockResolvedValue({ environment: {} }); - service.restart.mockResolvedValue({ outcome: "completed" }); - vi.unstubAllEnvs(); - vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", ""); - vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", ""); + resetLifecycleServiceMocks(); + stubEmptyGatewayEnv(); }); it("aborts restart when config is invalid", async () => { @@ -152,7 +130,7 @@ describe("runServiceStart config pre-flight (#35862)", () => { }); beforeEach(() => { - runtimeLogs.length = 0; + resetLifecycleRuntimeLogs(); readConfigFileSnapshotMock.mockReset(); readConfigFileSnapshotMock.mockResolvedValue({ exists: true, @@ -160,10 +138,7 @@ describe("runServiceStart config pre-flight (#35862)", () => { config: {}, issues: [], }); - service.isLoaded.mockClear(); - service.restart.mockClear(); - service.isLoaded.mockResolvedValue(true); - 
service.restart.mockResolvedValue({ outcome: "completed" }); + resetLifecycleServiceMocks(); }); it("aborts start when config is invalid", async () => { diff --git a/src/cli/daemon-cli/lifecycle-core.test.ts b/src/cli/daemon-cli/lifecycle-core.test.ts index ff66bd17653..7503e21ae5e 100644 --- a/src/cli/daemon-cli/lifecycle-core.test.ts +++ b/src/cli/daemon-cli/lifecycle-core.test.ts @@ -1,4 +1,12 @@ import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import { + defaultRuntime, + resetLifecycleRuntimeLogs, + resetLifecycleServiceMocks, + runtimeLogs, + service, + stubEmptyGatewayEnv, +} from "./test-helpers/lifecycle-core-harness.js"; const loadConfig = vi.fn(() => ({ gateway: { @@ -8,28 +16,6 @@ const loadConfig = vi.fn(() => ({ }, })); -const runtimeLogs: string[] = []; -const defaultRuntime = { - log: (message: string) => runtimeLogs.push(message), - error: vi.fn(), - exit: (code: number) => { - throw new Error(`__exit__:${code}`); - }, -}; - -const service = { - label: "TestService", - loadedText: "loaded", - notLoadedText: "not loaded", - install: vi.fn(), - uninstall: vi.fn(), - stop: vi.fn(), - isLoaded: vi.fn(), - readCommand: vi.fn(), - readRuntime: vi.fn(), - restart: vi.fn(), -}; - vi.mock("../../config/config.js", () => ({ loadConfig: () => loadConfig(), readBestEffortConfig: async () => loadConfig(), @@ -49,7 +35,7 @@ describe("runServiceRestart token drift", () => { }); beforeEach(() => { - runtimeLogs.length = 0; + resetLifecycleRuntimeLogs(); loadConfig.mockReset(); loadConfig.mockReturnValue({ gateway: { @@ -58,19 +44,11 @@ describe("runServiceRestart token drift", () => { }, }, }); - service.isLoaded.mockClear(); - service.readCommand.mockClear(); - service.restart.mockClear(); - service.isLoaded.mockResolvedValue(true); + resetLifecycleServiceMocks(); service.readCommand.mockResolvedValue({ environment: { OPENCLAW_GATEWAY_TOKEN: "service-token" }, }); - service.restart.mockResolvedValue({ outcome: "completed" }); - 
vi.unstubAllEnvs(); - vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", ""); - vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", ""); - vi.stubEnv("OPENCLAW_GATEWAY_URL", ""); - vi.stubEnv("CLAWDBOT_GATEWAY_URL", ""); + stubEmptyGatewayEnv(); }); it("emits drift warning when enabled", async () => { diff --git a/src/cli/daemon-cli/lifecycle.test.ts b/src/cli/daemon-cli/lifecycle.test.ts index 61899e4e78c..7d03656f86b 100644 --- a/src/cli/daemon-cli/lifecycle.test.ts +++ b/src/cli/daemon-cli/lifecycle.test.ts @@ -1,8 +1,5 @@ import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; -const mockReadFileSync = vi.hoisted(() => vi.fn()); -const mockSpawnSync = vi.hoisted(() => vi.fn()); - type RestartHealthSnapshot = { healthy: boolean; staleGatewayPids: number[]; @@ -35,7 +32,9 @@ const terminateStaleGatewayPids = vi.fn(); const renderGatewayPortHealthDiagnostics = vi.fn(() => ["diag: unhealthy port"]); const renderRestartDiagnostics = vi.fn(() => ["diag: unhealthy runtime"]); const resolveGatewayPort = vi.fn(() => 18789); -const findGatewayPidsOnPortSync = vi.fn<(port: number) => number[]>(() => []); +const findVerifiedGatewayListenerPidsOnPortSync = vi.fn<(port: number) => number[]>(() => []); +const signalVerifiedGatewayPidSync = vi.fn<(pid: number, signal: "SIGTERM" | "SIGUSR1") => void>(); +const formatGatewayPidList = vi.fn<(pids: number[]) => string>((pids) => pids.join(", ")); const probeGateway = vi.fn< (opts: { url: string; @@ -49,24 +48,18 @@ const probeGateway = vi.fn< const isRestartEnabled = vi.fn<(config?: { commands?: unknown }) => boolean>(() => true); const loadConfig = vi.fn(() => ({})); -vi.mock("node:fs", () => ({ - default: { - readFileSync: (...args: unknown[]) => mockReadFileSync(...args), - }, -})); - -vi.mock("node:child_process", () => ({ - spawnSync: (...args: unknown[]) => mockSpawnSync(...args), -})); - vi.mock("../../config/config.js", () => ({ loadConfig: () => loadConfig(), readBestEffortConfig: async () => loadConfig(), 
resolveGatewayPort, })); -vi.mock("../../infra/restart.js", () => ({ - findGatewayPidsOnPortSync: (port: number) => findGatewayPidsOnPortSync(port), +vi.mock("../../infra/gateway-processes.js", () => ({ + findVerifiedGatewayListenerPidsOnPortSync: (port: number) => + findVerifiedGatewayListenerPidsOnPortSync(port), + signalVerifiedGatewayPidSync: (pid: number, signal: "SIGTERM" | "SIGUSR1") => + signalVerifiedGatewayPidSync(pid, signal), + formatGatewayPidList: (pids: number[]) => formatGatewayPidList(pids), })); vi.mock("../../gateway/probe.js", () => ({ @@ -121,12 +114,12 @@ describe("runDaemonRestart health checks", () => { renderGatewayPortHealthDiagnostics.mockReset(); renderRestartDiagnostics.mockReset(); resolveGatewayPort.mockReset(); - findGatewayPidsOnPortSync.mockReset(); + findVerifiedGatewayListenerPidsOnPortSync.mockReset(); + signalVerifiedGatewayPidSync.mockReset(); + formatGatewayPidList.mockReset(); probeGateway.mockReset(); isRestartEnabled.mockReset(); loadConfig.mockReset(); - mockReadFileSync.mockReset(); - mockSpawnSync.mockReset(); service.readCommand.mockResolvedValue({ programArguments: ["openclaw", "gateway", "--port", "18789"], @@ -158,23 +151,8 @@ describe("runDaemonRestart health checks", () => { configSnapshot: { commands: { restart: true } }, }); isRestartEnabled.mockReturnValue(true); - mockReadFileSync.mockImplementation((path: string) => { - const match = path.match(/\/proc\/(\d+)\/cmdline$/); - if (!match) { - throw new Error(`unexpected path ${path}`); - } - const pid = Number.parseInt(match[1] ?? 
"", 10); - if ([4200, 4300].includes(pid)) { - return ["openclaw", "gateway", "--port", "18789", ""].join("\0"); - } - throw new Error(`unknown pid ${pid}`); - }); - mockSpawnSync.mockReturnValue({ - error: null, - status: 0, - stdout: "openclaw gateway --port 18789", - stderr: "", - }); + signalVerifiedGatewayPidSync.mockImplementation(() => {}); + formatGatewayPidList.mockImplementation((pids) => pids.join(", ")); }); afterEach(() => { @@ -242,38 +220,20 @@ describe("runDaemonRestart health checks", () => { }); it("signals an unmanaged gateway process on stop", async () => { - vi.spyOn(process, "platform", "get").mockReturnValue("win32"); - const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true); - findGatewayPidsOnPortSync.mockReturnValue([4200, 4200, 4300]); - mockSpawnSync.mockReturnValue({ - error: null, - status: 0, - stdout: - 'CommandLine="C:\\\\Program Files\\\\OpenClaw\\\\openclaw.exe" gateway --port 18789\r\n', - stderr: "", - }); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4200, 4200, 4300]); runServiceStop.mockImplementation(async (params: { onNotLoaded?: () => Promise }) => { await params.onNotLoaded?.(); }); await runDaemonStop({ json: true }); - expect(findGatewayPidsOnPortSync).toHaveBeenCalledWith(18789); - expect(killSpy).toHaveBeenCalledWith(4200, "SIGTERM"); - expect(killSpy).toHaveBeenCalledWith(4300, "SIGTERM"); + expect(findVerifiedGatewayListenerPidsOnPortSync).toHaveBeenCalledWith(18789); + expect(signalVerifiedGatewayPidSync).toHaveBeenCalledWith(4200, "SIGTERM"); + expect(signalVerifiedGatewayPidSync).toHaveBeenCalledWith(4300, "SIGTERM"); }); it("signals a single unmanaged gateway process on restart", async () => { - vi.spyOn(process, "platform", "get").mockReturnValue("win32"); - const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true); - findGatewayPidsOnPortSync.mockReturnValue([4200]); - mockSpawnSync.mockReturnValue({ - error: null, - status: 0, - stdout: - 
'CommandLine="C:\\\\Program Files\\\\OpenClaw\\\\openclaw.exe" gateway --port 18789\r\n', - stderr: "", - }); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4200]); runServiceRestart.mockImplementation( async (params: RestartParams & { onNotLoaded?: () => Promise }) => { await params.onNotLoaded?.(); @@ -291,8 +251,8 @@ describe("runDaemonRestart health checks", () => { await runDaemonRestart({ json: true }); - expect(findGatewayPidsOnPortSync).toHaveBeenCalledWith(18789); - expect(killSpy).toHaveBeenCalledWith(4200, "SIGUSR1"); + expect(findVerifiedGatewayListenerPidsOnPortSync).toHaveBeenCalledWith(18789); + expect(signalVerifiedGatewayPidSync).toHaveBeenCalledWith(4200, "SIGUSR1"); expect(probeGateway).toHaveBeenCalledTimes(1); expect(waitForGatewayHealthyListener).toHaveBeenCalledTimes(1); expect(waitForGatewayHealthyRestart).not.toHaveBeenCalled(); @@ -301,15 +261,7 @@ describe("runDaemonRestart health checks", () => { }); it("fails unmanaged restart when multiple gateway listeners are present", async () => { - vi.spyOn(process, "platform", "get").mockReturnValue("win32"); - findGatewayPidsOnPortSync.mockReturnValue([4200, 4300]); - mockSpawnSync.mockReturnValue({ - error: null, - status: 0, - stdout: - 'CommandLine="C:\\\\Program Files\\\\OpenClaw\\\\openclaw.exe" gateway --port 18789\r\n', - stderr: "", - }); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4200, 4300]); runServiceRestart.mockImplementation( async (params: RestartParams & { onNotLoaded?: () => Promise }) => { await params.onNotLoaded?.(); @@ -323,7 +275,7 @@ describe("runDaemonRestart health checks", () => { }); it("fails unmanaged restart when the running gateway has commands.restart disabled", async () => { - findGatewayPidsOnPortSync.mockReturnValue([4200]); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4200]); probeGateway.mockResolvedValue({ ok: true, configSnapshot: { commands: { restart: false } }, @@ -342,21 +294,13 @@ 
describe("runDaemonRestart health checks", () => { }); it("skips unmanaged signaling for pids that are not live gateway processes", async () => { - const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true); - findGatewayPidsOnPortSync.mockReturnValue([4200]); - mockReadFileSync.mockReturnValue(["python", "-m", "http.server", ""].join("\0")); - mockSpawnSync.mockReturnValue({ - error: null, - status: 0, - stdout: "python -m http.server", - stderr: "", - }); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([]); runServiceStop.mockImplementation(async (params: { onNotLoaded?: () => Promise }) => { await params.onNotLoaded?.(); }); await runDaemonStop({ json: true }); - expect(killSpy).not.toHaveBeenCalled(); + expect(signalVerifiedGatewayPidSync).not.toHaveBeenCalled(); }); }); diff --git a/src/cli/daemon-cli/lifecycle.ts b/src/cli/daemon-cli/lifecycle.ts index 2b0775b0c48..53efaff9495 100644 --- a/src/cli/daemon-cli/lifecycle.ts +++ b/src/cli/daemon-cli/lifecycle.ts @@ -1,12 +1,12 @@ -import { spawnSync } from "node:child_process"; -import fsSync from "node:fs"; import { isRestartEnabled } from "../../config/commands.js"; import { readBestEffortConfig, resolveGatewayPort } from "../../config/config.js"; -import { parseCmdScriptCommandLine } from "../../daemon/cmd-argv.js"; import { resolveGatewayService } from "../../daemon/service.js"; import { probeGateway } from "../../gateway/probe.js"; -import { isGatewayArgv, parseProcCmdline } from "../../infra/gateway-process-argv.js"; -import { findGatewayPidsOnPortSync } from "../../infra/restart.js"; +import { + findVerifiedGatewayListenerPidsOnPortSync, + formatGatewayPidList, + signalVerifiedGatewayPidSync, +} from "../../infra/gateway-processes.js"; import { defaultRuntime } from "../../runtime.js"; import { theme } from "../../terminal/theme.js"; import { formatCliCommand } from "../command-format.js"; @@ -43,85 +43,12 @@ async function resolveGatewayLifecyclePort(service = 
resolveGatewayService()) { return portFromArgs ?? resolveGatewayPort(await readBestEffortConfig(), mergedEnv); } -function extractWindowsCommandLine(raw: string): string | null { - const lines = raw - .split(/\r?\n/) - .map((line) => line.trim()) - .filter(Boolean); - for (const line of lines) { - if (!line.toLowerCase().startsWith("commandline=")) { - continue; - } - const value = line.slice("commandline=".length).trim(); - return value || null; - } - return lines.find((line) => line.toLowerCase() !== "commandline") ?? null; -} - -function readGatewayProcessArgsSync(pid: number): string[] | null { - if (process.platform === "linux") { - try { - return parseProcCmdline(fsSync.readFileSync(`/proc/${pid}/cmdline`, "utf8")); - } catch { - return null; - } - } - if (process.platform === "darwin") { - const ps = spawnSync("ps", ["-o", "command=", "-p", String(pid)], { - encoding: "utf8", - timeout: 1000, - }); - if (ps.error || ps.status !== 0) { - return null; - } - const command = ps.stdout.trim(); - return command ? command.split(/\s+/) : null; - } - if (process.platform === "win32") { - const wmic = spawnSync( - "wmic", - ["process", "where", `ProcessId=${pid}`, "get", "CommandLine", "/value"], - { - encoding: "utf8", - timeout: 1000, - }, - ); - if (wmic.error || wmic.status !== 0) { - return null; - } - const command = extractWindowsCommandLine(wmic.stdout); - return command ? 
parseCmdScriptCommandLine(command) : null; - } - return null; -} - -function resolveGatewayListenerPids(port: number): number[] { - return Array.from(new Set(findGatewayPidsOnPortSync(port))) - .filter((pid): pid is number => Number.isFinite(pid) && pid > 0) - .filter((pid) => { - const args = readGatewayProcessArgsSync(pid); - return args != null && isGatewayArgv(args, { allowGatewayBinary: true }); - }); -} - function resolveGatewayPortFallback(): Promise { return readBestEffortConfig() .then((cfg) => resolveGatewayPort(cfg, process.env)) .catch(() => resolveGatewayPort(undefined, process.env)); } -function signalGatewayPid(pid: number, signal: "SIGTERM" | "SIGUSR1") { - const args = readGatewayProcessArgsSync(pid); - if (!args || !isGatewayArgv(args, { allowGatewayBinary: true })) { - throw new Error(`refusing to signal non-gateway process pid ${pid}`); - } - process.kill(pid, signal); -} - -function formatGatewayPidList(pids: number[]): string { - return pids.join(", "); -} - async function assertUnmanagedGatewayRestartEnabled(port: number): Promise { const probe = await probeGateway({ url: `ws://127.0.0.1:${port}`, @@ -143,7 +70,7 @@ async function assertUnmanagedGatewayRestartEnabled(port: number): Promise } function resolveVerifiedGatewayListenerPids(port: number): number[] { - return resolveGatewayListenerPids(port).filter( + return findVerifiedGatewayListenerPidsOnPortSync(port).filter( (pid): pid is number => Number.isFinite(pid) && pid > 0, ); } @@ -154,7 +81,7 @@ async function stopGatewayWithoutServiceManager(port: number) { return null; } for (const pid of pids) { - signalGatewayPid(pid, "SIGTERM"); + signalVerifiedGatewayPidSync(pid, "SIGTERM"); } return { result: "stopped" as const, @@ -173,7 +100,7 @@ async function restartGatewayWithoutServiceManager(port: number) { `multiple gateway processes are listening on port ${port}: ${formatGatewayPidList(pids)}; use "openclaw gateway status --deep" before retrying restart`, ); } - 
signalGatewayPid(pids[0], "SIGUSR1"); + signalVerifiedGatewayPidSync(pids[0], "SIGUSR1"); return { result: "restarted" as const, message: `Gateway restart signal sent to unmanaged process on port ${port}: ${pids[0]}.`, diff --git a/src/cli/daemon-cli/restart-health.test.ts b/src/cli/daemon-cli/restart-health.test.ts index 0202f591cc2..1a26f1a80dc 100644 --- a/src/cli/daemon-cli/restart-health.test.ts +++ b/src/cli/daemon-cli/restart-health.test.ts @@ -190,6 +190,32 @@ describe("inspectGatewayRestart", () => { ); }); + it("treats a busy port as healthy when runtime status lags but the probe succeeds", async () => { + Object.defineProperty(process, "platform", { value: "win32", configurable: true }); + + const service = { + readRuntime: vi.fn(async () => ({ status: "stopped" })), + } as unknown as GatewayService; + + inspectPortUsage.mockResolvedValue({ + port: 18789, + status: "busy", + listeners: [{ pid: 9100, commandLine: "openclaw-gateway" }], + hints: [], + }); + classifyPortListener.mockReturnValue("gateway"); + probeGateway.mockResolvedValue({ + ok: true, + close: null, + }); + + const { inspectGatewayRestart } = await import("./restart-health.js"); + const snapshot = await inspectGatewayRestart({ service, port: 18789 }); + + expect(snapshot.healthy).toBe(true); + expect(snapshot.staleGatewayPids).toEqual([]); + }); + it("treats auth-closed probe as healthy gateway reachability", async () => { const snapshot = await inspectAmbiguousOwnershipWithProbe({ ok: false, diff --git a/src/cli/daemon-cli/restart-health.ts b/src/cli/daemon-cli/restart-health.ts index 13741d2e9c4..9bfe3476ee6 100644 --- a/src/cli/daemon-cli/restart-health.ts +++ b/src/cli/daemon-cli/restart-health.ts @@ -65,7 +65,8 @@ async function confirmGatewayReachable(port: number): Promise { const probe = await probeGateway({ url: `ws://127.0.0.1:${port}`, auth: token || password ? 
{ token, password } : undefined, - timeoutMs: 1_000, + timeoutMs: 3_000, + includeDetails: false, }); return probe.ok || looksLikeAuthClose(probe.close?.code, probe.close?.reason); } @@ -123,6 +124,22 @@ export async function inspectGatewayRestart(params: { }; } + if (portUsage.status === "busy" && runtime.status !== "running") { + try { + const reachable = await confirmGatewayReachable(params.port); + if (reachable) { + return { + runtime, + portUsage, + healthy: true, + staleGatewayPids: [], + }; + } + } catch { + // Probe is best-effort; keep the ownership-based diagnostics. + } + } + const gatewayListeners = portUsage.status === "busy" ? portUsage.listeners.filter( diff --git a/src/cli/daemon-cli/status.gather.test.ts b/src/cli/daemon-cli/status.gather.test.ts index 9b4d6428d1e..b0c08715abe 100644 --- a/src/cli/daemon-cli/status.gather.test.ts +++ b/src/cli/daemon-cli/status.gather.test.ts @@ -18,7 +18,12 @@ const readLastGatewayErrorLine = vi.fn(async (_env?: NodeJS.ProcessEnv) => null) const auditGatewayServiceConfig = vi.fn(async (_opts?: unknown) => undefined); const serviceIsLoaded = vi.fn(async (_opts?: unknown) => true); const serviceReadRuntime = vi.fn(async (_env?: NodeJS.ProcessEnv) => ({ status: "running" })); -const serviceReadCommand = vi.fn(async (_env?: NodeJS.ProcessEnv) => ({ +const serviceReadCommand = vi.fn< + (env?: NodeJS.ProcessEnv) => Promise<{ + programArguments: string[]; + environment?: Record; + }> +>(async (_env?: NodeJS.ProcessEnv) => ({ programArguments: ["/bin/node", "cli", "gateway", "--port", "19001"], environment: { OPENCLAW_STATE_DIR: "/tmp/openclaw-daemon", @@ -190,6 +195,37 @@ describe("gatherDaemonStatus", () => { expect(status.rpc?.url).toBe("wss://override.example:18790"); }); + it("reuses command environment when reading runtime status", async () => { + serviceReadCommand.mockResolvedValueOnce({ + programArguments: ["/bin/node", "cli", "gateway", "--port", "19001"], + environment: { + OPENCLAW_GATEWAY_PORT: "19001", + 
OPENCLAW_CONFIG_PATH: "/tmp/openclaw-daemon/openclaw.json", + OPENCLAW_STATE_DIR: "/tmp/openclaw-daemon", + } as Record, + }); + serviceReadRuntime.mockImplementationOnce(async (env?: NodeJS.ProcessEnv) => ({ + status: env?.OPENCLAW_GATEWAY_PORT === "19001" ? "running" : "unknown", + detail: env?.OPENCLAW_GATEWAY_PORT ?? "missing-port", + })); + + const status = await gatherDaemonStatus({ + rpc: {}, + probe: false, + deep: false, + }); + + expect(serviceReadRuntime).toHaveBeenCalledWith( + expect.objectContaining({ + OPENCLAW_GATEWAY_PORT: "19001", + }), + ); + expect(status.service.runtime).toMatchObject({ + status: "running", + detail: "19001", + }); + }); + it("resolves daemon gateway auth password SecretRef values before probing", async () => { daemonLoadedConfig = { gateway: { diff --git a/src/cli/daemon-cli/status.gather.ts b/src/cli/daemon-cli/status.gather.ts index a44ef93c656..ef15a377438 100644 --- a/src/cli/daemon-cli/status.gather.ts +++ b/src/cli/daemon-cli/status.gather.ts @@ -258,17 +258,21 @@ export async function gatherDaemonStatus( } & FindExtraGatewayServicesOptions, ): Promise { const service = resolveGatewayService(); - const [loaded, command, runtime] = await Promise.all([ - service.isLoaded({ env: process.env }).catch(() => false), - service.readCommand(process.env).catch(() => null), - service.readRuntime(process.env).catch((err) => ({ status: "unknown", detail: String(err) })), + const command = await service.readCommand(process.env).catch(() => null); + const serviceEnv = command?.environment + ? ({ + ...process.env, + ...command.environment, + } satisfies NodeJS.ProcessEnv) + : process.env; + const [loaded, runtime] = await Promise.all([ + service.isLoaded({ env: serviceEnv }).catch(() => false), + service.readRuntime(serviceEnv).catch((err) => ({ status: "unknown", detail: String(err) })), ]); const configAudit = await auditGatewayServiceConfig({ env: process.env, command, }); - - const serviceEnv = command?.environment ?? 
undefined; const { mergedDaemonEnv, cliCfg, @@ -276,7 +280,7 @@ export async function gatherDaemonStatus( cliConfigSummary, daemonConfigSummary, configMismatch, - } = await loadDaemonConfigContext(serviceEnv); + } = await loadDaemonConfigContext(command?.environment); const { gateway, daemonPort, cliPort, probeUrlOverride } = await resolveGatewayStatusSummary({ cliCfg, daemonCfg, diff --git a/src/cli/daemon-cli/test-helpers/lifecycle-core-harness.ts b/src/cli/daemon-cli/test-helpers/lifecycle-core-harness.ts new file mode 100644 index 00000000000..8e91db61664 --- /dev/null +++ b/src/cli/daemon-cli/test-helpers/lifecycle-core-harness.ts @@ -0,0 +1,45 @@ +import { vi } from "vitest"; + +export const runtimeLogs: string[] = []; + +export const defaultRuntime = { + log: (message: string) => runtimeLogs.push(message), + error: vi.fn(), + exit: (code: number) => { + throw new Error(`__exit__:${code}`); + }, +}; + +export const service = { + label: "TestService", + loadedText: "loaded", + notLoadedText: "not loaded", + install: vi.fn(), + uninstall: vi.fn(), + stop: vi.fn(), + isLoaded: vi.fn(), + readCommand: vi.fn(), + readRuntime: vi.fn(), + restart: vi.fn(), +}; + +export function resetLifecycleRuntimeLogs() { + runtimeLogs.length = 0; +} + +export function resetLifecycleServiceMocks() { + service.isLoaded.mockClear(); + service.readCommand.mockClear(); + service.restart.mockClear(); + service.isLoaded.mockResolvedValue(true); + service.readCommand.mockResolvedValue({ environment: {} }); + service.restart.mockResolvedValue({ outcome: "completed" }); +} + +export function stubEmptyGatewayEnv() { + vi.unstubAllEnvs(); + vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", ""); + vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", ""); + vi.stubEnv("OPENCLAW_GATEWAY_URL", ""); + vi.stubEnv("CLAWDBOT_GATEWAY_URL", ""); +} diff --git a/src/cli/update-cli.test.ts b/src/cli/update-cli.test.ts index 324f0b3ee35..40834fffdee 100644 --- a/src/cli/update-cli.test.ts +++ b/src/cli/update-cli.test.ts @@ -626,12 
+626,50 @@ describe("update-cli", () => { expect(runCommandWithTimeout).toHaveBeenCalledWith( [expect.stringMatching(/node/), entryPath, "gateway", "install", "--force"], - expect.objectContaining({ timeoutMs: 60_000 }), + expect.objectContaining({ cwd: root, timeoutMs: 60_000 }), ); expect(runDaemonInstall).not.toHaveBeenCalled(); expect(runRestartScript).toHaveBeenCalled(); }); + it("updateCommand preserves invocation-relative service env overrides during refresh", async () => { + const root = createCaseDir("openclaw-updated-root"); + const entryPath = path.join(root, "dist", "entry.js"); + pathExists.mockImplementation(async (candidate: string) => candidate === entryPath); + + vi.mocked(runGatewayUpdate).mockResolvedValue({ + status: "ok", + mode: "npm", + root, + steps: [], + durationMs: 100, + }); + serviceLoaded.mockResolvedValue(true); + + await withEnvAsync( + { + OPENCLAW_STATE_DIR: "./state", + OPENCLAW_CONFIG_PATH: "./config/openclaw.json", + }, + async () => { + await updateCommand({}); + }, + ); + + expect(runCommandWithTimeout).toHaveBeenCalledWith( + [expect.stringMatching(/node/), entryPath, "gateway", "install", "--force"], + expect.objectContaining({ + cwd: root, + env: expect.objectContaining({ + OPENCLAW_STATE_DIR: path.resolve("./state"), + OPENCLAW_CONFIG_PATH: path.resolve("./config/openclaw.json"), + }), + timeoutMs: 60_000, + }), + ); + expect(runDaemonInstall).not.toHaveBeenCalled(); + }); + it("updateCommand falls back to restart when env refresh install fails", async () => { await runRestartFallbackScenario({ daemonInstall: "fail" }); }); diff --git a/src/cli/update-cli/restart-helper.test.ts b/src/cli/update-cli/restart-helper.test.ts index c8b59d69afa..847893e9f23 100644 --- a/src/cli/update-cli/restart-helper.test.ts +++ b/src/cli/update-cli/restart-helper.test.ts @@ -287,6 +287,7 @@ describe("restart-helper", () => { expect(spawn).toHaveBeenCalledWith("/bin/sh", [scriptPath], { detached: true, stdio: "ignore", + windowsHide: true, 
}); expect(mockChild.unref).toHaveBeenCalled(); }); @@ -302,6 +303,7 @@ describe("restart-helper", () => { expect(spawn).toHaveBeenCalledWith("cmd.exe", ["/d", "/s", "/c", scriptPath], { detached: true, stdio: "ignore", + windowsHide: true, }); expect(mockChild.unref).toHaveBeenCalled(); }); @@ -317,6 +319,7 @@ describe("restart-helper", () => { expect(spawn).toHaveBeenCalledWith("cmd.exe", ["/d", "/s", "/c", `"${scriptPath}"`], { detached: true, stdio: "ignore", + windowsHide: true, }); }); }); diff --git a/src/cli/update-cli/restart-helper.ts b/src/cli/update-cli/restart-helper.ts index c27f25cdc49..a68fab161fa 100644 --- a/src/cli/update-cli/restart-helper.ts +++ b/src/cli/update-cli/restart-helper.ts @@ -169,6 +169,7 @@ export async function runRestartScript(scriptPath: string): Promise { const child = spawn(file, args, { detached: true, stdio: "ignore", + windowsHide: true, }); child.unref(); } diff --git a/src/cli/update-cli/update-command.ts b/src/cli/update-cli/update-command.ts index 6063eb5f163..d0d39e0215a 100644 --- a/src/cli/update-cli/update-command.ts +++ b/src/cli/update-cli/update-command.ts @@ -69,6 +69,13 @@ import { suppressDeprecations } from "./suppress-deprecations.js"; const CLI_NAME = resolveCliName(); const SERVICE_REFRESH_TIMEOUT_MS = 60_000; +const SERVICE_REFRESH_PATH_ENV_KEYS = [ + "OPENCLAW_HOME", + "OPENCLAW_STATE_DIR", + "CLAWDBOT_STATE_DIR", + "OPENCLAW_CONFIG_PATH", + "CLAWDBOT_CONFIG_PATH", +] as const; const UPDATE_QUIPS = [ "Leveled up! New skills unlocked. 
You're welcome.", @@ -117,6 +124,25 @@ function formatCommandFailure(stdout: string, stderr: string): string { return detail.split("\n").slice(-3).join("\n"); } +function resolveServiceRefreshEnv( + env: NodeJS.ProcessEnv, + invocationCwd: string = process.cwd(), +): NodeJS.ProcessEnv { + const resolvedEnv: NodeJS.ProcessEnv = { ...env }; + for (const key of SERVICE_REFRESH_PATH_ENV_KEYS) { + const rawValue = resolvedEnv[key]?.trim(); + if (!rawValue) { + continue; + } + if (rawValue.startsWith("~") || path.isAbsolute(rawValue) || path.win32.isAbsolute(rawValue)) { + resolvedEnv[key] = rawValue; + continue; + } + resolvedEnv[key] = path.resolve(invocationCwd, rawValue); + } + return resolvedEnv; +} + type UpdateDryRunPreview = { dryRun: true; root: string; @@ -190,6 +216,8 @@ async function refreshGatewayServiceEnv(params: { continue; } const res = await runCommandWithTimeout([resolveNodeRunner(), candidate, ...args], { + cwd: params.result.root, + env: resolveServiceRefreshEnv(process.env), timeoutMs: SERVICE_REFRESH_TIMEOUT_MS, }); if (res.code === 0) { diff --git a/src/commands/backup-verify.test.ts b/src/commands/backup-verify.test.ts index 9288d2fb8c1..a5f0384e61b 100644 --- a/src/commands/backup-verify.test.ts +++ b/src/commands/backup-verify.test.ts @@ -8,6 +8,92 @@ import { buildBackupArchiveRoot } from "./backup-shared.js"; import { backupVerifyCommand } from "./backup-verify.js"; import { backupCreateCommand } from "./backup.js"; +const TEST_ARCHIVE_ROOT = "2026-03-09T00-00-00.000Z-openclaw-backup"; + +const createBackupVerifyRuntime = () => ({ + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), +}); + +function createBackupManifest(assetArchivePath: string) { + return { + schemaVersion: 1, + createdAt: "2026-03-09T00:00:00.000Z", + archiveRoot: TEST_ARCHIVE_ROOT, + runtimeVersion: "test", + platform: process.platform, + nodeVersion: process.version, + assets: [ + { + kind: "state", + sourcePath: "/tmp/.openclaw", + archivePath: assetArchivePath, + }, + ], 
+ }; +} + +async function withBrokenArchiveFixture( + options: { + tempPrefix: string; + manifestAssetArchivePath: string; + payloads: Array<{ fileName: string; contents: string; archivePath?: string }>; + buildTarEntries?: (paths: { manifestPath: string; payloadPaths: string[] }) => string[]; + }, + run: (archivePath: string) => Promise, +) { + const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), options.tempPrefix)); + const archivePath = path.join(tempDir, "broken.tar.gz"); + const manifestPath = path.join(tempDir, "manifest.json"); + const payloadSpecs = await Promise.all( + options.payloads.map(async (payload) => { + const payloadPath = path.join(tempDir, payload.fileName); + await fs.writeFile(payloadPath, payload.contents, "utf8"); + return { + path: payloadPath, + archivePath: payload.archivePath ?? options.manifestAssetArchivePath, + }; + }), + ); + const payloadEntryPathBySource = new Map( + payloadSpecs.map((payload) => [payload.path, payload.archivePath]), + ); + + try { + await fs.writeFile( + manifestPath, + `${JSON.stringify(createBackupManifest(options.manifestAssetArchivePath), null, 2)}\n`, + "utf8", + ); + await tar.c( + { + file: archivePath, + gzip: true, + portable: true, + preservePaths: true, + onWriteEntry: (entry) => { + if (entry.path === manifestPath) { + entry.path = `${TEST_ARCHIVE_ROOT}/manifest.json`; + return; + } + const payloadEntryPath = payloadEntryPathBySource.get(entry.path); + if (payloadEntryPath) { + entry.path = payloadEntryPath; + } + }, + }, + options.buildTarEntries?.({ + manifestPath, + payloadPaths: payloadSpecs.map((payload) => payload.path), + }) ?? 
[manifestPath, ...payloadSpecs.map((payload) => payload.path)], + ); + await run(archivePath); + } finally { + await fs.rm(tempDir, { recursive: true, force: true }); + } +} + describe("backupVerifyCommand", () => { let tempHome: TempHomeEnv; @@ -26,12 +112,7 @@ describe("backupVerifyCommand", () => { await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); await fs.writeFile(path.join(stateDir, "state.txt"), "hello\n", "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - + const runtime = createBackupVerifyRuntime(); const nowMs = Date.UTC(2026, 2, 9, 0, 0, 0); const created = await backupCreateCommand(runtime, { output: archiveDir, nowMs }); const verified = await backupVerifyCommand(runtime, { archive: created.archivePath }); @@ -53,12 +134,7 @@ describe("backupVerifyCommand", () => { await fs.writeFile(path.join(root, "payload", "data.txt"), "x\n", "utf8"); await tar.c({ file: archivePath, gzip: true, cwd: tempDir }, ["root"]); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - + const runtime = createBackupVerifyRuntime(); await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( /expected exactly one backup manifest entry/i, ); @@ -95,12 +171,7 @@ describe("backupVerifyCommand", () => { ); await tar.c({ file: archivePath, gzip: true, cwd: tempDir }, [rootName]); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - + const runtime = createBackupVerifyRuntime(); await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( /missing payload for manifest asset/i, ); @@ -110,119 +181,37 @@ describe("backupVerifyCommand", () => { }); it("fails when archive paths contain traversal segments", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-traversal-")); - const archivePath = path.join(tempDir, "broken.tar.gz"); - const manifestPath = path.join(tempDir, 
"manifest.json"); - const payloadPath = path.join(tempDir, "payload.txt"); - try { - const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup"; - const traversalPath = `${rootName}/payload/../escaped.txt`; - const manifest = { - schemaVersion: 1, - createdAt: "2026-03-09T00:00:00.000Z", - archiveRoot: rootName, - runtimeVersion: "test", - platform: process.platform, - nodeVersion: process.version, - assets: [ - { - kind: "state", - sourcePath: "/tmp/.openclaw", - archivePath: traversalPath, - }, - ], - }; - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - await fs.writeFile(payloadPath, "payload\n", "utf8"); - await tar.c( - { - file: archivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - if (entry.path === manifestPath) { - entry.path = `${rootName}/manifest.json`; - return; - } - if (entry.path === payloadPath) { - entry.path = traversalPath; - } - }, - }, - [manifestPath, payloadPath], - ); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( - /path traversal segments/i, - ); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + const traversalPath = `${TEST_ARCHIVE_ROOT}/payload/../escaped.txt`; + await withBrokenArchiveFixture( + { + tempPrefix: "openclaw-backup-traversal-", + manifestAssetArchivePath: traversalPath, + payloads: [{ fileName: "payload.txt", contents: "payload\n", archivePath: traversalPath }], + }, + async (archivePath) => { + const runtime = createBackupVerifyRuntime(); + await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( + /path traversal segments/i, + ); + }, + ); }); it("fails when archive paths contain backslashes", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-backslash-")); - const archivePath = path.join(tempDir, "broken.tar.gz"); - const 
manifestPath = path.join(tempDir, "manifest.json"); - const payloadPath = path.join(tempDir, "payload.txt"); - try { - const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup"; - const invalidPath = `${rootName}/payload\\..\\escaped.txt`; - const manifest = { - schemaVersion: 1, - createdAt: "2026-03-09T00:00:00.000Z", - archiveRoot: rootName, - runtimeVersion: "test", - platform: process.platform, - nodeVersion: process.version, - assets: [ - { - kind: "state", - sourcePath: "/tmp/.openclaw", - archivePath: invalidPath, - }, - ], - }; - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - await fs.writeFile(payloadPath, "payload\n", "utf8"); - await tar.c( - { - file: archivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - if (entry.path === manifestPath) { - entry.path = `${rootName}/manifest.json`; - return; - } - if (entry.path === payloadPath) { - entry.path = invalidPath; - } - }, - }, - [manifestPath, payloadPath], - ); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( - /forward slashes/i, - ); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + const invalidPath = `${TEST_ARCHIVE_ROOT}/payload\\..\\escaped.txt`; + await withBrokenArchiveFixture( + { + tempPrefix: "openclaw-backup-backslash-", + manifestAssetArchivePath: invalidPath, + payloads: [{ fileName: "payload.txt", contents: "payload\n", archivePath: invalidPath }], + }, + async (archivePath) => { + const runtime = createBackupVerifyRuntime(); + await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( + /forward slashes/i, + ); + }, + ); }); it("ignores payload manifest.json files when locating the backup manifest", async () => { @@ -251,12 +240,7 @@ describe("backupVerifyCommand", () => { "utf8", ); - const runtime = { - log: vi.fn(), - error: 
vi.fn(), - exit: vi.fn(), - }; - + const runtime = createBackupVerifyRuntime(); const created = await backupCreateCommand(runtime, { output: archiveDir, includeWorkspace: true, @@ -274,119 +258,44 @@ describe("backupVerifyCommand", () => { }); it("fails when the archive contains duplicate root manifest entries", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-duplicate-manifest-")); - const archivePath = path.join(tempDir, "broken.tar.gz"); - const manifestPath = path.join(tempDir, "manifest.json"); - const payloadPath = path.join(tempDir, "payload.txt"); - try { - const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup"; - const manifest = { - schemaVersion: 1, - createdAt: "2026-03-09T00:00:00.000Z", - archiveRoot: rootName, - runtimeVersion: "test", - platform: process.platform, - nodeVersion: process.version, - assets: [ - { - kind: "state", - sourcePath: "/tmp/.openclaw", - archivePath: `${rootName}/payload/posix/tmp/.openclaw/payload.txt`, - }, + const payloadArchivePath = `${TEST_ARCHIVE_ROOT}/payload/posix/tmp/.openclaw/payload.txt`; + await withBrokenArchiveFixture( + { + tempPrefix: "openclaw-backup-duplicate-manifest-", + manifestAssetArchivePath: payloadArchivePath, + payloads: [{ fileName: "payload.txt", contents: "payload\n" }], + buildTarEntries: ({ manifestPath, payloadPaths }) => [ + manifestPath, + manifestPath, + ...payloadPaths, ], - }; - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - await fs.writeFile(payloadPath, "payload\n", "utf8"); - await tar.c( - { - file: archivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - if (entry.path === manifestPath) { - entry.path = `${rootName}/manifest.json`; - return; - } - if (entry.path === payloadPath) { - entry.path = `${rootName}/payload/posix/tmp/.openclaw/payload.txt`; - } - }, - }, - [manifestPath, manifestPath, payloadPath], - ); - - const runtime = { - log: vi.fn(), - 
error: vi.fn(), - exit: vi.fn(), - }; - - await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( - /expected exactly one backup manifest entry, found 2/i, - ); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + }, + async (archivePath) => { + const runtime = createBackupVerifyRuntime(); + await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( + /expected exactly one backup manifest entry, found 2/i, + ); + }, + ); }); it("fails when the archive contains duplicate payload entries", async () => { - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-duplicate-payload-")); - const archivePath = path.join(tempDir, "broken.tar.gz"); - const manifestPath = path.join(tempDir, "manifest.json"); - const payloadPathA = path.join(tempDir, "payload-a.txt"); - const payloadPathB = path.join(tempDir, "payload-b.txt"); - try { - const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup"; - const payloadArchivePath = `${rootName}/payload/posix/tmp/.openclaw/payload.txt`; - const manifest = { - schemaVersion: 1, - createdAt: "2026-03-09T00:00:00.000Z", - archiveRoot: rootName, - runtimeVersion: "test", - platform: process.platform, - nodeVersion: process.version, - assets: [ - { - kind: "state", - sourcePath: "/tmp/.openclaw", - archivePath: payloadArchivePath, - }, + const payloadArchivePath = `${TEST_ARCHIVE_ROOT}/payload/posix/tmp/.openclaw/payload.txt`; + await withBrokenArchiveFixture( + { + tempPrefix: "openclaw-backup-duplicate-payload-", + manifestAssetArchivePath: payloadArchivePath, + payloads: [ + { fileName: "payload-a.txt", contents: "payload-a\n", archivePath: payloadArchivePath }, + { fileName: "payload-b.txt", contents: "payload-b\n", archivePath: payloadArchivePath }, ], - }; - await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8"); - await fs.writeFile(payloadPathA, "payload-a\n", "utf8"); - await fs.writeFile(payloadPathB, 
"payload-b\n", "utf8"); - await tar.c( - { - file: archivePath, - gzip: true, - portable: true, - preservePaths: true, - onWriteEntry: (entry) => { - if (entry.path === manifestPath) { - entry.path = `${rootName}/manifest.json`; - return; - } - if (entry.path === payloadPathA || entry.path === payloadPathB) { - entry.path = payloadArchivePath; - } - }, - }, - [manifestPath, payloadPathA, payloadPathB], - ); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( - /duplicate entry path/i, - ); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } + }, + async (archivePath) => { + const runtime = createBackupVerifyRuntime(); + await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow( + /duplicate entry path/i, + ); + }, + ); }); }); diff --git a/src/commands/backup.test.ts b/src/commands/backup.test.ts index 349714e4d15..decc55e6c05 100644 --- a/src/commands/backup.test.ts +++ b/src/commands/backup.test.ts @@ -3,6 +3,7 @@ import os from "node:os"; import path from "node:path"; import * as tar from "tar"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import type { RuntimeEnv } from "../runtime.js"; import { createTempHomeEnv, type TempHomeEnv } from "../test-utils/temp-home.js"; import { buildBackupArchiveRoot, @@ -41,6 +42,39 @@ describe("backup commands", () => { await tempHome.restore(); }); + function createRuntime(): RuntimeEnv { + return { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), + } satisfies RuntimeEnv; + } + + async function withInvalidWorkspaceBackupConfig(fn: (runtime: RuntimeEnv) => Promise) { + const stateDir = path.join(tempHome.home, ".openclaw"); + const configPath = path.join(tempHome.home, "custom-config.json"); + process.env.OPENCLAW_CONFIG_PATH = configPath; + await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); + 
await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8"); + const runtime = createRuntime(); + + try { + return await fn(runtime); + } finally { + delete process.env.OPENCLAW_CONFIG_PATH; + } + } + + function expectWorkspaceCoveredByState( + plan: Awaited>, + ) { + expect(plan.included).toHaveLength(1); + expect(plan.included[0]?.kind).toBe("state"); + expect(plan.skipped).toEqual( + expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" })]), + ); + } + it("collapses default config, credentials, and workspace into the state backup root", async () => { const stateDir = path.join(tempHome.home, ".openclaw"); await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); @@ -50,12 +84,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "workspace", "SOUL.md"), "# soul\n", "utf8"); const plan = await resolveBackupPlanFromDisk({ includeWorkspace: true, nowMs: 123 }); - - expect(plan.included).toHaveLength(1); - expect(plan.included[0]?.kind).toBe("state"); - expect(plan.skipped).toEqual( - expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" })]), - ); + expectWorkspaceCoveredByState(plan); }); it("orders coverage checks by canonical path so symlinked workspaces do not duplicate state", async () => { @@ -84,12 +113,7 @@ describe("backup commands", () => { ); const plan = await resolveBackupPlanFromDisk({ includeWorkspace: true, nowMs: 123 }); - - expect(plan.included).toHaveLength(1); - expect(plan.included[0]?.kind).toBe("state"); - expect(plan.skipped).toEqual( - expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" })]), - ); + expectWorkspaceCoveredByState(plan); } finally { await fs.rm(symlinkDir, { recursive: true, force: true }); } @@ -116,11 +140,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "state.txt"), "state\n", "utf8"); await 
fs.writeFile(path.join(externalWorkspace, "SOUL.md"), "# external\n", "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const nowMs = Date.UTC(2026, 2, 9, 0, 0, 0); const result = await backupCreateCommand(runtime, { @@ -189,11 +209,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); await fs.writeFile(path.join(stateDir, "state.txt"), "state\n", "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const result = await backupCreateCommand(runtime, { output: archiveDir, @@ -214,11 +230,7 @@ describe("backup commands", () => { const stateDir = path.join(tempHome.home, ".openclaw"); await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); await expect( backupCreateCommand(runtime, { @@ -239,11 +251,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); await fs.symlink(stateDir, symlinkPath); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); await expect( backupCreateCommand(runtime, { @@ -263,11 +271,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(workspaceDir, "SOUL.md"), "# soul\n", "utf8"); process.chdir(workspaceDir); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const nowMs = Date.UTC(2026, 2, 9, 1, 2, 3); const result = await backupCreateCommand(runtime, { nowMs }); @@ -294,11 +298,7 @@ describe("backup commands", () => { await fs.symlink(workspaceDir, workspaceLink); process.chdir(workspaceLink); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = 
createRuntime(); const nowMs = Date.UTC(2026, 2, 9, 1, 3, 4); const result = await backupCreateCommand(runtime, { nowMs }); @@ -318,11 +318,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); await fs.writeFile(existingArchive, "already here", "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const result = await backupCreateCommand(runtime, { output: existingArchive, @@ -336,41 +332,15 @@ describe("backup commands", () => { }); it("fails fast when config is invalid and workspace backup is enabled", async () => { - const stateDir = path.join(tempHome.home, ".openclaw"); - const configPath = path.join(tempHome.home, "custom-config.json"); - process.env.OPENCLAW_CONFIG_PATH = configPath; - await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); - await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8"); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - try { + await withInvalidWorkspaceBackupConfig(async (runtime) => { await expect(backupCreateCommand(runtime, { dryRun: true })).rejects.toThrow( /--no-include-workspace/i, ); - } finally { - delete process.env.OPENCLAW_CONFIG_PATH; - } + }); }); it("allows explicit partial backups when config is invalid", async () => { - const stateDir = path.join(tempHome.home, ".openclaw"); - const configPath = path.join(tempHome.home, "custom-config.json"); - process.env.OPENCLAW_CONFIG_PATH = configPath; - await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8"); - await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8"); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - - try { + await withInvalidWorkspaceBackupConfig(async (runtime) => { const result = await backupCreateCommand(runtime, { dryRun: true, includeWorkspace: 
false, @@ -378,9 +348,7 @@ describe("backup commands", () => { expect(result.includeWorkspace).toBe(false); expect(result.assets.some((asset) => asset.kind === "workspace")).toBe(false); - } finally { - delete process.env.OPENCLAW_CONFIG_PATH; - } + }); }); it("backs up only the active config file when --only-config is requested", async () => { @@ -391,11 +359,7 @@ describe("backup commands", () => { await fs.writeFile(path.join(stateDir, "state.txt"), "state\n", "utf8"); await fs.writeFile(path.join(stateDir, "credentials", "oauth.json"), "{}", "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); const result = await backupCreateCommand(runtime, { dryRun: true, @@ -413,11 +377,7 @@ describe("backup commands", () => { process.env.OPENCLAW_CONFIG_PATH = configPath; await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8"); - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; + const runtime = createRuntime(); try { const result = await backupCreateCommand(runtime, { diff --git a/src/commands/daemon-install-helpers.test.ts b/src/commands/daemon-install-helpers.test.ts index 704c193880c..931a983a8ee 100644 --- a/src/commands/daemon-install-helpers.test.ts +++ b/src/commands/daemon-install-helpers.test.ts @@ -1,6 +1,7 @@ import { afterEach, describe, expect, it, vi } from "vitest"; const mocks = vi.hoisted(() => ({ + loadAuthProfileStoreForSecretsRuntime: vi.fn(), resolvePreferredNodePath: vi.fn(), resolveGatewayProgramArguments: vi.fn(), resolveSystemNodeInfo: vi.fn(), @@ -8,6 +9,10 @@ const mocks = vi.hoisted(() => ({ buildServiceEnvironment: vi.fn(), })); +vi.mock("../agents/auth-profiles.js", () => ({ + loadAuthProfileStoreForSecretsRuntime: mocks.loadAuthProfileStoreForSecretsRuntime, +})); + vi.mock("../daemon/runtime-paths.js", () => ({ resolvePreferredNodePath: mocks.resolvePreferredNodePath, resolveSystemNodeInfo: mocks.resolveSystemNodeInfo, @@ -63,6 
+68,10 @@ function mockNodeGatewayPlanFixture( programArguments: ["node", "gateway"], workingDirectory, }); + mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({ + version: 1, + profiles: {}, + }); mocks.resolveSystemNodeInfo.mockResolvedValue({ path: "/opt/node", version, @@ -232,6 +241,67 @@ describe("buildGatewayInstallPlan", () => { expect(plan.environment.HOME).toBe("/Users/service"); expect(plan.environment.OPENCLAW_PORT).toBe("3000"); }); + + it("merges env-backed auth-profile refs into the service environment", async () => { + mockNodeGatewayPlanFixture({ + serviceEnvironment: { + OPENCLAW_PORT: "3000", + }, + }); + mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({ + version: 1, + profiles: { + "openai:default": { + type: "api_key", + provider: "openai", + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + "anthropic:default": { + type: "token", + provider: "anthropic", + tokenRef: { source: "env", provider: "default", id: "ANTHROPIC_TOKEN" }, + }, + }, + }); + + const plan = await buildGatewayInstallPlan({ + env: { + OPENAI_API_KEY: "sk-openai-test", // pragma: allowlist secret + ANTHROPIC_TOKEN: "ant-test-token", + }, + port: 3000, + runtime: "node", + }); + + expect(plan.environment.OPENAI_API_KEY).toBe("sk-openai-test"); + expect(plan.environment.ANTHROPIC_TOKEN).toBe("ant-test-token"); + }); + + it("skips unresolved auth-profile env refs", async () => { + mockNodeGatewayPlanFixture({ + serviceEnvironment: { + OPENCLAW_PORT: "3000", + }, + }); + mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({ + version: 1, + profiles: { + "openai:default": { + type: "api_key", + provider: "openai", + keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, + }, + }, + }); + + const plan = await buildGatewayInstallPlan({ + env: {}, + port: 3000, + runtime: "node", + }); + + expect(plan.environment.OPENAI_API_KEY).toBeUndefined(); + }); }); describe("gatewayInstallErrorHint", () => { diff --git 
a/src/commands/daemon-install-helpers.ts b/src/commands/daemon-install-helpers.ts index 7a3bd42e2fc..91248cb86a7 100644 --- a/src/commands/daemon-install-helpers.ts +++ b/src/commands/daemon-install-helpers.ts @@ -1,3 +1,7 @@ +import { + loadAuthProfileStoreForSecretsRuntime, + type AuthProfileStore, +} from "../agents/auth-profiles.js"; import { formatCliCommand } from "../cli/command-format.js"; import { collectConfigServiceEnvVars } from "../config/env-vars.js"; import type { OpenClawConfig } from "../config/types.js"; @@ -19,6 +23,33 @@ export type GatewayInstallPlan = { environment: Record; }; +function collectAuthProfileServiceEnvVars(params: { + env: Record; + authStore?: AuthProfileStore; +}): Record { + const authStore = params.authStore ?? loadAuthProfileStoreForSecretsRuntime(); + const entries: Record = {}; + + for (const credential of Object.values(authStore.profiles)) { + const ref = + credential.type === "api_key" + ? credential.keyRef + : credential.type === "token" + ? credential.tokenRef + : undefined; + if (!ref || ref.source !== "env") { + continue; + } + const value = params.env[ref.id]?.trim(); + if (!value) { + continue; + } + entries[ref.id] = value; + } + + return entries; +} + export async function buildGatewayInstallPlan(params: { env: Record; port: number; @@ -28,6 +59,7 @@ export async function buildGatewayInstallPlan(params: { warn?: DaemonInstallWarnFn; /** Full config to extract env vars from (env vars + inline env keys). */ config?: OpenClawConfig; + authStore?: AuthProfileStore; }): Promise { const { devMode, nodePath } = await resolveDaemonInstallRuntimeInputs({ env: params.env, @@ -61,6 +93,10 @@ export async function buildGatewayInstallPlan(params: { // Config env vars are added first so service-specific vars take precedence. 
const environment: Record = { ...collectConfigServiceEnvVars(params.config), + ...collectAuthProfileServiceEnvVars({ + env: params.env, + authStore: params.authStore, + }), }; Object.assign(environment, serviceEnvironment); diff --git a/src/commands/doctor-cron.test.ts b/src/commands/doctor-cron.test.ts index e7af38f662c..3ad4f2811ed 100644 --- a/src/commands/doctor-cron.test.ts +++ b/src/commands/doctor-cron.test.ts @@ -27,44 +27,55 @@ function makePrompter(confirmResult = true) { }; } +function createCronConfig(storePath: string): OpenClawConfig { + return { + cron: { + store: storePath, + webhook: "https://example.invalid/cron-finished", + }, + }; +} + +function createLegacyCronJob(overrides: Record = {}) { + return { + jobId: "legacy-job", + name: "Legacy job", + notify: true, + createdAtMs: Date.parse("2026-02-01T00:00:00.000Z"), + updatedAtMs: Date.parse("2026-02-02T00:00:00.000Z"), + schedule: { kind: "cron", cron: "0 7 * * *", tz: "UTC" }, + payload: { + kind: "systemEvent", + text: "Morning brief", + }, + state: {}, + ...overrides, + }; +} + +async function writeCronStore(storePath: string, jobs: Array>) { + await fs.mkdir(path.dirname(storePath), { recursive: true }); + await fs.writeFile( + storePath, + JSON.stringify( + { + version: 1, + jobs, + }, + null, + 2, + ), + "utf-8", + ); +} + describe("maybeRepairLegacyCronStore", () => { it("repairs legacy cron store fields and migrates notify fallback to webhook delivery", async () => { const storePath = await makeTempStorePath(); - await fs.mkdir(path.dirname(storePath), { recursive: true }); - await fs.writeFile( - storePath, - JSON.stringify( - { - version: 1, - jobs: [ - { - jobId: "legacy-job", - name: "Legacy job", - notify: true, - createdAtMs: Date.parse("2026-02-01T00:00:00.000Z"), - updatedAtMs: Date.parse("2026-02-02T00:00:00.000Z"), - schedule: { kind: "cron", cron: "0 7 * * *", tz: "UTC" }, - payload: { - kind: "systemEvent", - text: "Morning brief", - }, - state: {}, - }, - ], - }, - null, - 
2, - ), - "utf-8", - ); + await writeCronStore(storePath, [createLegacyCronJob()]); const noteSpy = vi.spyOn(noteModule, "note").mockImplementation(() => {}); - const cfg: OpenClawConfig = { - cron: { - store: storePath, - webhook: "https://example.invalid/cron-finished", - }, - }; + const cfg = createCronConfig(storePath); await maybeRepairLegacyCronStore({ cfg, @@ -158,44 +169,13 @@ describe("maybeRepairLegacyCronStore", () => { it("does not auto-repair in non-interactive mode without explicit repair approval", async () => { const storePath = await makeTempStorePath(); - await fs.mkdir(path.dirname(storePath), { recursive: true }); - await fs.writeFile( - storePath, - JSON.stringify( - { - version: 1, - jobs: [ - { - jobId: "legacy-job", - name: "Legacy job", - notify: true, - createdAtMs: Date.parse("2026-02-01T00:00:00.000Z"), - updatedAtMs: Date.parse("2026-02-02T00:00:00.000Z"), - schedule: { kind: "cron", cron: "0 7 * * *", tz: "UTC" }, - payload: { - kind: "systemEvent", - text: "Morning brief", - }, - state: {}, - }, - ], - }, - null, - 2, - ), - "utf-8", - ); + await writeCronStore(storePath, [createLegacyCronJob()]); const noteSpy = vi.spyOn(noteModule, "note").mockImplementation(() => {}); const prompter = makePrompter(false); await maybeRepairLegacyCronStore({ - cfg: { - cron: { - store: storePath, - webhook: "https://example.invalid/cron-finished", - }, - }, + cfg: createCronConfig(storePath), options: { nonInteractive: true }, prompter, }); diff --git a/src/commands/doctor-state-migrations.test.ts b/src/commands/doctor-state-migrations.test.ts index 4116a6fca6e..ec465632cfa 100644 --- a/src/commands/doctor-state-migrations.test.ts +++ b/src/commands/doctor-state-migrations.test.ts @@ -26,6 +26,32 @@ async function makeRootWithEmptyCfg() { return { root, cfg }; } +function writeLegacyTelegramAllowFromStore(oauthDir: string) { + fs.writeFileSync( + path.join(oauthDir, "telegram-allowFrom.json"), + JSON.stringify( + { + version: 1, + allowFrom: 
["123456"], + }, + null, + 2, + ) + "\n", + "utf-8", + ); +} + +async function runTelegramAllowFromMigration(params: { root: string; cfg: OpenClawConfig }) { + const oauthDir = ensureCredentialsDir(params.root); + writeLegacyTelegramAllowFromStore(oauthDir); + const detected = await detectLegacyStateMigrations({ + cfg: params.cfg, + env: { OPENCLAW_STATE_DIR: params.root } as NodeJS.ProcessEnv, + }); + const result = await runLegacyStateMigrations({ detected, now: () => 123 }); + return { oauthDir, detected, result }; +} + afterEach(async () => { resetAutoMigrateLegacyStateForTest(); resetAutoMigrateLegacyStateDirForTest(); @@ -277,30 +303,11 @@ describe("doctor legacy state migrations", () => { it("migrates legacy Telegram pairing allowFrom store to account-scoped default file", async () => { const { root, cfg } = await makeRootWithEmptyCfg(); - const oauthDir = ensureCredentialsDir(root); - fs.writeFileSync( - path.join(oauthDir, "telegram-allowFrom.json"), - JSON.stringify( - { - version: 1, - allowFrom: ["123456"], - }, - null, - 2, - ) + "\n", - "utf-8", - ); - - const detected = await detectLegacyStateMigrations({ - cfg, - env: { OPENCLAW_STATE_DIR: root } as NodeJS.ProcessEnv, - }); + const { oauthDir, detected, result } = await runTelegramAllowFromMigration({ root, cfg }); expect(detected.pairingAllowFrom.hasLegacyTelegram).toBe(true); expect( detected.pairingAllowFrom.copyPlans.map((plan) => path.basename(plan.targetPath)), ).toEqual(["telegram-default-allowFrom.json"]); - - const result = await runLegacyStateMigrations({ detected, now: () => 123 }); expect(result.warnings).toEqual([]); const target = path.join(oauthDir, "telegram-default-allowFrom.json"); @@ -323,30 +330,11 @@ describe("doctor legacy state migrations", () => { }, }, }; - const oauthDir = ensureCredentialsDir(root); - fs.writeFileSync( - path.join(oauthDir, "telegram-allowFrom.json"), - JSON.stringify( - { - version: 1, - allowFrom: ["123456"], - }, - null, - 2, - ) + "\n", - "utf-8", - ); 
- - const detected = await detectLegacyStateMigrations({ - cfg, - env: { OPENCLAW_STATE_DIR: root } as NodeJS.ProcessEnv, - }); + const { oauthDir, detected, result } = await runTelegramAllowFromMigration({ root, cfg }); expect(detected.pairingAllowFrom.hasLegacyTelegram).toBe(true); expect( detected.pairingAllowFrom.copyPlans.map((plan) => path.basename(plan.targetPath)).toSorted(), ).toEqual(["telegram-bot1-allowFrom.json", "telegram-bot2-allowFrom.json"]); - - const result = await runLegacyStateMigrations({ detected, now: () => 123 }); expect(result.warnings).toEqual([]); const bot1Target = path.join(oauthDir, "telegram-bot1-allowFrom.json"); diff --git a/src/commands/models/list.auth-overview.test.ts b/src/commands/models/list.auth-overview.test.ts index 69807a5d7a7..65c324d4b42 100644 --- a/src/commands/models/list.auth-overview.test.ts +++ b/src/commands/models/list.auth-overview.test.ts @@ -1,7 +1,28 @@ import { describe, expect, it } from "vitest"; import { NON_ENV_SECRETREF_MARKER } from "../../agents/model-auth-markers.js"; +import { withEnv } from "../../test-utils/env.js"; import { resolveProviderAuthOverview } from "./list.auth-overview.js"; +function resolveOpenAiOverview(apiKey: string) { + return resolveProviderAuthOverview({ + provider: "openai", + cfg: { + models: { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions", + apiKey, + models: [], + }, + }, + }, + } as never, + store: { version: 1, profiles: {} } as never, + modelsPath: "/tmp/models.json", + }); +} + describe("resolveProviderAuthOverview", () => { it("does not throw when token profile only has tokenRef", () => { const overview = resolveProviderAuthOverview({ @@ -24,23 +45,9 @@ describe("resolveProviderAuthOverview", () => { }); it("renders marker-backed models.json auth as marker detail", () => { - const overview = resolveProviderAuthOverview({ - provider: "openai", - cfg: { - models: { - providers: { - openai: { - baseUrl: 
"https://api.openai.com/v1", - api: "openai-completions", - apiKey: NON_ENV_SECRETREF_MARKER, - models: [], - }, - }, - }, - } as never, - store: { version: 1, profiles: {} } as never, - modelsPath: "/tmp/models.json", - }); + const overview = withEnv({ OPENAI_API_KEY: undefined }, () => + resolveOpenAiOverview(NON_ENV_SECRETREF_MARKER), + ); expect(overview.effective.kind).toBe("missing"); expect(overview.effective.detail).toBe("missing"); @@ -48,23 +55,9 @@ describe("resolveProviderAuthOverview", () => { }); it("keeps env-var-shaped models.json values masked to avoid accidental plaintext exposure", () => { - const overview = resolveProviderAuthOverview({ - provider: "openai", - cfg: { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: "OPENAI_API_KEY", // pragma: allowlist secret - models: [], - }, - }, - }, - } as never, - store: { version: 1, profiles: {} } as never, - modelsPath: "/tmp/models.json", - }); + const overview = withEnv({ OPENAI_API_KEY: undefined }, () => + resolveOpenAiOverview("OPENAI_API_KEY"), + ); expect(overview.effective.kind).toBe("missing"); expect(overview.effective.detail).toBe("missing"); @@ -76,23 +69,7 @@ describe("resolveProviderAuthOverview", () => { const prior = process.env.OPENAI_API_KEY; process.env.OPENAI_API_KEY = "sk-openai-from-env"; // pragma: allowlist secret try { - const overview = resolveProviderAuthOverview({ - provider: "openai", - cfg: { - models: { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: "OPENAI_API_KEY", // pragma: allowlist secret - models: [], - }, - }, - }, - } as never, - store: { version: 1, profiles: {} } as never, - modelsPath: "/tmp/models.json", - }); + const overview = resolveOpenAiOverview("OPENAI_API_KEY"); expect(overview.effective.kind).toBe("env"); expect(overview.effective.detail).not.toContain("OPENAI_API_KEY"); } finally { diff --git 
a/src/commands/ollama-setup.test.ts b/src/commands/ollama-setup.test.ts index 124254c53b2..0b9b5d0e414 100644 --- a/src/commands/ollama-setup.test.ts +++ b/src/commands/ollama-setup.test.ts @@ -1,5 +1,6 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import type { RuntimeEnv } from "../runtime.js"; +import { jsonResponse, requestBodyText, requestUrl } from "../test-helpers/http.js"; import type { WizardPrompter } from "../wizard/prompts.js"; import { configureOllamaNonInteractive, @@ -23,27 +24,6 @@ vi.mock("./oauth-env.js", () => ({ isRemoteEnvironment: isRemoteEnvironmentMock, })); -function jsonResponse(body: unknown, status = 200): Response { - return new Response(JSON.stringify(body), { - status, - headers: { "Content-Type": "application/json" }, - }); -} - -function requestUrl(input: string | URL | Request): string { - if (typeof input === "string") { - return input; - } - if (input instanceof URL) { - return input.toString(); - } - return input.url; -} - -function requestBody(body: BodyInit | null | undefined): string { - return typeof body === "string" ? body : "{}"; -} - function createOllamaFetchMock(params: { tags?: string[]; show?: Record; @@ -61,7 +41,7 @@ function createOllamaFetchMock(params: { return jsonResponse({ models: (params.tags ?? []).map((name) => ({ name })) }); } if (url.endsWith("/api/show")) { - const body = JSON.parse(requestBody(init?.body)) as { name?: string }; + const body = JSON.parse(requestBodyText(init?.body)) as { name?: string }; const contextWindow = body.name ? params.show?.[body.name] : undefined; return contextWindow ? 
jsonResponse({ model_info: { "llama.context_length": contextWindow } }) @@ -77,6 +57,45 @@ function createOllamaFetchMock(params: { }); } +function createModePrompter( + mode: "local" | "remote", + params?: { confirm?: boolean }, +): WizardPrompter { + return { + text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), + select: vi.fn().mockResolvedValueOnce(mode), + ...(params?.confirm !== undefined + ? { confirm: vi.fn().mockResolvedValueOnce(params.confirm) } + : {}), + note: vi.fn(async () => undefined), + } as unknown as WizardPrompter; +} + +function createSignedOutRemoteFetchMock() { + return createOllamaFetchMock({ + tags: ["llama3:8b"], + meResponses: [ + jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401), + jsonResponse({ username: "testuser" }), + ], + }); +} + +function createDefaultOllamaConfig(primary: string) { + return { + agents: { defaults: { model: { primary } } }, + models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } }, + }; +} + +function createRuntime() { + return { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), + } as unknown as RuntimeEnv; +} + describe("ollama setup", () => { afterEach(() => { vi.unstubAllGlobals(); @@ -86,11 +105,7 @@ describe("ollama setup", () => { }); it("returns suggested default model for local mode", async () => { - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: vi.fn().mockResolvedValueOnce("local"), - note: vi.fn(async () => undefined), - } as unknown as WizardPrompter; + const prompter = createModePrompter("local"); const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); vi.stubGlobal("fetch", fetchMock); @@ -101,11 +116,7 @@ describe("ollama setup", () => { }); it("returns suggested default model for remote mode", async () => { - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: vi.fn().mockResolvedValueOnce("remote"), - note: vi.fn(async 
() => undefined), - } as unknown as WizardPrompter; + const prompter = createModePrompter("remote"); const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); vi.stubGlobal("fetch", fetchMock); @@ -116,11 +127,7 @@ describe("ollama setup", () => { }); it("mode selection affects model ordering (local)", async () => { - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: vi.fn().mockResolvedValueOnce("local"), - note: vi.fn(async () => undefined), - } as unknown as WizardPrompter; + const prompter = createModePrompter("local"); const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b", "glm-4.7-flash"] }); vi.stubGlobal("fetch", fetchMock); @@ -134,20 +141,8 @@ describe("ollama setup", () => { }); it("cloud+local mode triggers /api/me check and opens sign-in URL", async () => { - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: vi.fn().mockResolvedValueOnce("remote"), - confirm: vi.fn().mockResolvedValueOnce(true), - note: vi.fn(async () => undefined), - } as unknown as WizardPrompter; - - const fetchMock = createOllamaFetchMock({ - tags: ["llama3:8b"], - meResponses: [ - jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401), - jsonResponse({ username: "testuser" }), - ], - }); + const prompter = createModePrompter("remote", { confirm: true }); + const fetchMock = createSignedOutRemoteFetchMock(); vi.stubGlobal("fetch", fetchMock); await promptAndConfigureOllama({ cfg: {}, prompter }); @@ -158,20 +153,8 @@ describe("ollama setup", () => { it("cloud+local mode does not open browser in remote environment", async () => { isRemoteEnvironmentMock.mockReturnValue(true); - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: vi.fn().mockResolvedValueOnce("remote"), - confirm: vi.fn().mockResolvedValueOnce(true), - note: vi.fn(async () => undefined), - } as unknown as WizardPrompter; - - const fetchMock = 
createOllamaFetchMock({ - tags: ["llama3:8b"], - meResponses: [ - jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401), - jsonResponse({ username: "testuser" }), - ], - }); + const prompter = createModePrompter("remote", { confirm: true }); + const fetchMock = createSignedOutRemoteFetchMock(); vi.stubGlobal("fetch", fetchMock); await promptAndConfigureOllama({ cfg: {}, prompter }); @@ -180,11 +163,7 @@ describe("ollama setup", () => { }); it("local mode does not trigger cloud auth", async () => { - const prompter = { - text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"), - select: vi.fn().mockResolvedValueOnce("local"), - note: vi.fn(async () => undefined), - } as unknown as WizardPrompter; + const prompter = createModePrompter("local"); const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] }); vi.stubGlobal("fetch", fetchMock); @@ -258,10 +237,7 @@ describe("ollama setup", () => { vi.stubGlobal("fetch", fetchMock); await ensureOllamaModelPulled({ - config: { - agents: { defaults: { model: { primary: "ollama/glm-4.7-flash" } } }, - models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } }, - }, + config: createDefaultOllamaConfig("ollama/glm-4.7-flash"), prompter, }); @@ -276,10 +252,7 @@ describe("ollama setup", () => { vi.stubGlobal("fetch", fetchMock); await ensureOllamaModelPulled({ - config: { - agents: { defaults: { model: { primary: "ollama/glm-4.7-flash" } } }, - models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } }, - }, + config: createDefaultOllamaConfig("ollama/glm-4.7-flash"), prompter, }); @@ -292,10 +265,7 @@ describe("ollama setup", () => { vi.stubGlobal("fetch", fetchMock); await ensureOllamaModelPulled({ - config: { - agents: { defaults: { model: { primary: "ollama/kimi-k2.5:cloud" } } }, - models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } }, - }, + config: 
createDefaultOllamaConfig("ollama/kimi-k2.5:cloud"), prompter, }); @@ -324,12 +294,7 @@ describe("ollama setup", () => { pullResponse: new Response('{"error":"disk full"}\n', { status: 200 }), }); vi.stubGlobal("fetch", fetchMock); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - } as unknown as RuntimeEnv; + const runtime = createRuntime(); const result = await configureOllamaNonInteractive({ nextConfig: { @@ -362,12 +327,7 @@ describe("ollama setup", () => { pullResponse: new Response('{"status":"success"}\n', { status: 200 }), }); vi.stubGlobal("fetch", fetchMock); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - } as unknown as RuntimeEnv; + const runtime = createRuntime(); const result = await configureOllamaNonInteractive({ nextConfig: {}, @@ -379,7 +339,7 @@ describe("ollama setup", () => { }); const pullRequest = fetchMock.mock.calls[1]?.[1]; - expect(JSON.parse(requestBody(pullRequest?.body))).toEqual({ name: "llama3.2:latest" }); + expect(JSON.parse(requestBodyText(pullRequest?.body))).toEqual({ name: "llama3.2:latest" }); expect(result.agents?.defaults?.model).toEqual( expect.objectContaining({ primary: "ollama/llama3.2:latest" }), ); @@ -388,12 +348,7 @@ describe("ollama setup", () => { it("accepts cloud models in non-interactive mode without pulling", async () => { const fetchMock = createOllamaFetchMock({ tags: [] }); vi.stubGlobal("fetch", fetchMock); - - const runtime = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - } as unknown as RuntimeEnv; + const runtime = createRuntime(); const result = await configureOllamaNonInteractive({ nextConfig: {}, diff --git a/src/commands/onboard-auth.config-core.ts b/src/commands/onboard-auth.config-core.ts index 4bda29df1bf..8c41bfb939c 100644 --- a/src/commands/onboard-auth.config-core.ts +++ b/src/commands/onboard-auth.config-core.ts @@ -85,6 +85,29 @@ import { MODELSTUDIO_DEFAULT_MODEL_REF, } from "./onboard-auth.models.js"; +function 
mergeProviderModels( + existingProvider: Record | undefined, + defaultModels: T[], +): T[] { + const existingModels = Array.isArray(existingProvider?.models) + ? (existingProvider.models as T[]) + : []; + const mergedModels = [...existingModels]; + const seen = new Set(existingModels.map((model) => model.id)); + for (const model of defaultModels) { + if (!seen.has(model.id)) { + mergedModels.push(model); + seen.add(model.id); + } + } + return mergedModels; +} + +function getNormalizedProviderApiKey(existingProvider: Record | undefined) { + const { apiKey } = (existingProvider ?? {}) as { apiKey?: string }; + return typeof apiKey === "string" ? apiKey.trim() || undefined : undefined; +} + export function applyZaiProviderConfig( cfg: OpenClawConfig, params?: { endpoint?: string; modelId?: string }, @@ -100,7 +123,6 @@ export function applyZaiProviderConfig( const providers = { ...cfg.models?.providers }; const existingProvider = providers.zai; - const existingModels = Array.isArray(existingProvider?.models) ? existingProvider.models : []; const defaultModels = [ buildZaiModelDefinition({ id: "glm-5" }), @@ -109,21 +131,13 @@ export function applyZaiProviderConfig( buildZaiModelDefinition({ id: "glm-4.7-flashx" }), ]; - const mergedModels = [...existingModels]; - const seen = new Set(existingModels.map((m) => m.id)); - for (const model of defaultModels) { - if (!seen.has(model.id)) { - mergedModels.push(model); - seen.add(model.id); - } - } + const mergedModels = mergeProviderModels(existingProvider, defaultModels); - const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< + const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< string, unknown > as { apiKey?: string }; - const resolvedApiKey = typeof existingApiKey === "string" ? 
existingApiKey : undefined; - const normalizedApiKey = resolvedApiKey?.trim(); + const normalizedApiKey = getNormalizedProviderApiKey(existingProvider); const baseUrl = params?.endpoint ? resolveZaiBaseUrl(params.endpoint) @@ -256,12 +270,11 @@ export function applySyntheticProviderConfig(cfg: OpenClawConfig): OpenClawConfi (model) => !existingModels.some((existing) => existing.id === model.id), ), ]; - const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< + const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< string, unknown > as { apiKey?: string }; - const resolvedApiKey = typeof existingApiKey === "string" ? existingApiKey : undefined; - const normalizedApiKey = resolvedApiKey?.trim(); + const normalizedApiKey = getNormalizedProviderApiKey(existingProvider); providers.synthetic = { ...existingProviderRest, baseUrl: SYNTHETIC_BASE_URL, @@ -609,7 +622,6 @@ function applyModelStudioProviderConfigWithBaseUrl( const providers = { ...cfg.models?.providers }; const existingProvider = providers.modelstudio; - const existingModels = Array.isArray(existingProvider?.models) ? existingProvider.models : []; const defaultModels = [ buildModelStudioModelDefinition({ id: "qwen3.5-plus" }), @@ -622,21 +634,13 @@ function applyModelStudioProviderConfigWithBaseUrl( buildModelStudioModelDefinition({ id: "kimi-k2.5" }), ]; - const mergedModels = [...existingModels]; - const seen = new Set(existingModels.map((m) => m.id)); - for (const model of defaultModels) { - if (!seen.has(model.id)) { - mergedModels.push(model); - seen.add(model.id); - } - } + const mergedModels = mergeProviderModels(existingProvider, defaultModels); - const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record< + const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? 
{}) as Record< string, unknown > as { apiKey?: string }; - const resolvedApiKey = typeof existingApiKey === "string" ? existingApiKey : undefined; - const normalizedApiKey = resolvedApiKey?.trim(); + const normalizedApiKey = getNormalizedProviderApiKey(existingProvider); providers.modelstudio = { ...existingProviderRest, diff --git a/src/commands/status.service-summary.test.ts b/src/commands/status.service-summary.test.ts index f1a688ea092..f730137a111 100644 --- a/src/commands/status.service-summary.test.ts +++ b/src/commands/status.service-summary.test.ts @@ -1,5 +1,6 @@ import { describe, expect, it, vi } from "vitest"; import type { GatewayService } from "../daemon/service.js"; +import type { GatewayServiceEnvArgs } from "../daemon/service.js"; import { readServiceStatusSummary } from "./status.service-summary.js"; function createService(overrides: Partial): GatewayService { @@ -57,4 +58,41 @@ describe("readServiceStatusSummary", () => { expect(summary.externallyManaged).toBe(false); expect(summary.loadedText).toBe("disabled"); }); + + it("passes command environment to runtime and loaded checks", async () => { + const isLoaded = vi.fn(async ({ env }: GatewayServiceEnvArgs) => { + return env?.OPENCLAW_GATEWAY_PORT === "18789"; + }); + const readRuntime = vi.fn(async (env?: NodeJS.ProcessEnv) => ({ + status: env?.OPENCLAW_GATEWAY_PORT === "18789" ? 
("running" as const) : ("unknown" as const), + })); + + const summary = await readServiceStatusSummary( + createService({ + isLoaded, + readCommand: vi.fn(async () => ({ + programArguments: ["openclaw", "gateway", "run", "--port", "18789"], + environment: { OPENCLAW_GATEWAY_PORT: "18789" }, + })), + readRuntime, + }), + "Daemon", + ); + + expect(isLoaded).toHaveBeenCalledWith( + expect.objectContaining({ + env: expect.objectContaining({ + OPENCLAW_GATEWAY_PORT: "18789", + }), + }), + ); + expect(readRuntime).toHaveBeenCalledWith( + expect.objectContaining({ + OPENCLAW_GATEWAY_PORT: "18789", + }), + ); + expect(summary.installed).toBe(true); + expect(summary.loaded).toBe(true); + expect(summary.runtime).toMatchObject({ status: "running" }); + }); }); diff --git a/src/commands/status.service-summary.ts b/src/commands/status.service-summary.ts index d750fe7eb02..cc366c2c7ba 100644 --- a/src/commands/status.service-summary.ts +++ b/src/commands/status.service-summary.ts @@ -16,10 +16,16 @@ export async function readServiceStatusSummary( fallbackLabel: string, ): Promise { try { - const [loaded, runtime, command] = await Promise.all([ - service.isLoaded({ env: process.env }).catch(() => false), - service.readRuntime(process.env).catch(() => undefined), - service.readCommand(process.env).catch(() => null), + const command = await service.readCommand(process.env).catch(() => null); + const serviceEnv = command?.environment + ? 
({ + ...process.env, + ...command.environment, + } satisfies NodeJS.ProcessEnv) + : process.env; + const [loaded, runtime] = await Promise.all([ + service.isLoaded({ env: serviceEnv }).catch(() => false), + service.readRuntime(serviceEnv).catch(() => undefined), ]); const managedByOpenClaw = command != null; const externallyManaged = !managedByOpenClaw && runtime?.status === "running"; diff --git a/src/config/channel-capabilities.test.ts b/src/config/channel-capabilities.test.ts index 423cc3e2f74..75083317e82 100644 --- a/src/config/channel-capabilities.test.ts +++ b/src/config/channel-capabilities.test.ts @@ -125,6 +125,23 @@ describe("resolveChannelCapabilities", () => { }), ).toBeUndefined(); }); + + it("handles Slack object-format capabilities gracefully", () => { + const cfg = { + channels: { + slack: { + capabilities: { interactiveReplies: true }, + }, + }, + } as unknown as Partial; + + expect( + resolveChannelCapabilities({ + cfg, + channel: "slack", + }), + ).toBeUndefined(); + }); }); const createStubPlugin = (id: string): ChannelPlugin => ({ diff --git a/src/config/channel-capabilities.ts b/src/config/channel-capabilities.ts index 0e66f755e3b..b7edc354596 100644 --- a/src/config/channel-capabilities.ts +++ b/src/config/channel-capabilities.ts @@ -2,9 +2,10 @@ import { normalizeChannelId } from "../channels/plugins/index.js"; import { resolveAccountEntry } from "../routing/account-lookup.js"; import { normalizeAccountId } from "../routing/session-key.js"; import type { OpenClawConfig } from "./config.js"; +import type { SlackCapabilitiesConfig } from "./types.slack.js"; import type { TelegramCapabilitiesConfig } from "./types.telegram.js"; -type CapabilitiesConfig = TelegramCapabilitiesConfig; +type CapabilitiesConfig = TelegramCapabilitiesConfig | SlackCapabilitiesConfig; const isStringArray = (value: unknown): value is string[] => Array.isArray(value) && value.every((entry) => typeof entry === "string"); diff --git 
a/src/config/config.talk-validation.test.ts b/src/config/config.talk-validation.test.ts index cb948d75c75..d2fb463613c 100644 --- a/src/config/config.talk-validation.test.ts +++ b/src/config/config.talk-validation.test.ts @@ -8,38 +8,42 @@ describe("talk config validation fail-closed behavior", () => { vi.restoreAllMocks(); }); + async function expectInvalidTalkConfig(config: unknown, messagePattern: RegExp) { + await withTempHomeConfig(config, async () => { + const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); + + let thrown: unknown; + try { + loadConfig(); + } catch (error) { + thrown = error; + } + + expect(thrown).toBeInstanceOf(Error); + expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG"); + expect((thrown as Error).message).toMatch(messagePattern); + expect(consoleSpy).toHaveBeenCalled(); + }); + } + it.each([ ["boolean", true], ["string", "1500"], ["float", 1500.5], ])("rejects %s talk.silenceTimeoutMs during config load", async (_label, value) => { - await withTempHomeConfig( + await expectInvalidTalkConfig( { agents: { list: [{ id: "main" }] }, talk: { silenceTimeoutMs: value, }, }, - async () => { - const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); - - let thrown: unknown; - try { - loadConfig(); - } catch (error) { - thrown = error; - } - - expect(thrown).toBeInstanceOf(Error); - expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG"); - expect((thrown as Error).message).toMatch(/silenceTimeoutMs|talk/i); - expect(consoleSpy).toHaveBeenCalled(); - }, + /silenceTimeoutMs|talk/i, ); }); it("rejects talk.provider when it does not match talk.providers during config load", async () => { - await withTempHomeConfig( + await expectInvalidTalkConfig( { agents: { list: [{ id: "main" }] }, talk: { @@ -51,26 +55,12 @@ describe("talk config validation fail-closed behavior", () => { }, }, }, - async () => { - const consoleSpy = vi.spyOn(console, 
"error").mockImplementation(() => {}); - - let thrown: unknown; - try { - loadConfig(); - } catch (error) { - thrown = error; - } - - expect(thrown).toBeInstanceOf(Error); - expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG"); - expect((thrown as Error).message).toMatch(/talk\.provider|talk\.providers|acme/i); - expect(consoleSpy).toHaveBeenCalled(); - }, + /talk\.provider|talk\.providers|acme/i, ); }); it("rejects multi-provider talk config without talk.provider during config load", async () => { - await withTempHomeConfig( + await expectInvalidTalkConfig( { agents: { list: [{ id: "main" }] }, talk: { @@ -84,21 +74,7 @@ describe("talk config validation fail-closed behavior", () => { }, }, }, - async () => { - const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {}); - - let thrown: unknown; - try { - loadConfig(); - } catch (error) { - thrown = error; - } - - expect(thrown).toBeInstanceOf(Error); - expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG"); - expect((thrown as Error).message).toMatch(/talk\.provider|required/i); - expect(consoleSpy).toHaveBeenCalled(); - }, + /talk\.provider|required/i, ); }); }); diff --git a/src/config/paths.test.ts b/src/config/paths.test.ts index b8afe7674cb..6d2ffcfaf08 100644 --- a/src/config/paths.test.ts +++ b/src/config/paths.test.ts @@ -1,7 +1,7 @@ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import { describe, expect, it } from "vitest"; +import { withTempDir } from "../test-helpers/temp-dir.js"; import { resolveDefaultConfigCandidates, resolveConfigPathCandidate, @@ -37,15 +37,6 @@ describe("oauth paths", () => { }); describe("state + config path candidates", () => { - async function withTempRoot(prefix: string, run: (root: string) => Promise): Promise { - const root = await fs.mkdtemp(path.join(os.tmpdir(), prefix)); - try { - await run(root); - } finally { - await fs.rm(root, { recursive: true, force: true 
}); - } - } - function expectOpenClawHomeDefaults(env: NodeJS.ProcessEnv): void { const configuredHome = env.OPENCLAW_HOME; if (!configuredHome) { @@ -107,7 +98,7 @@ describe("state + config path candidates", () => { }); it("prefers ~/.openclaw when it exists and legacy dir is missing", async () => { - await withTempRoot("openclaw-state-", async (root) => { + await withTempDir({ prefix: "openclaw-state-" }, async (root) => { const newDir = path.join(root, ".openclaw"); await fs.mkdir(newDir, { recursive: true }); const resolved = resolveStateDir({} as NodeJS.ProcessEnv, () => root); @@ -116,7 +107,7 @@ describe("state + config path candidates", () => { }); it("falls back to existing legacy state dir when ~/.openclaw is missing", async () => { - await withTempRoot("openclaw-state-legacy-", async (root) => { + await withTempDir({ prefix: "openclaw-state-legacy-" }, async (root) => { const legacyDir = path.join(root, ".clawdbot"); await fs.mkdir(legacyDir, { recursive: true }); const resolved = resolveStateDir({} as NodeJS.ProcessEnv, () => root); @@ -125,7 +116,7 @@ describe("state + config path candidates", () => { }); it("CONFIG_PATH prefers existing config when present", async () => { - await withTempRoot("openclaw-config-", async (root) => { + await withTempDir({ prefix: "openclaw-config-" }, async (root) => { const legacyDir = path.join(root, ".openclaw"); await fs.mkdir(legacyDir, { recursive: true }); const legacyPath = path.join(legacyDir, "openclaw.json"); @@ -137,7 +128,7 @@ describe("state + config path candidates", () => { }); it("respects state dir overrides when config is missing", async () => { - await withTempRoot("openclaw-config-override-", async (root) => { + await withTempDir({ prefix: "openclaw-config-override-" }, async (root) => { const legacyDir = path.join(root, ".openclaw"); await fs.mkdir(legacyDir, { recursive: true }); const legacyConfig = path.join(legacyDir, "openclaw.json"); diff --git a/src/config/schema.help.ts 
b/src/config/schema.help.ts index 392043c5ae5..0c8f1d8cafd 100644 --- a/src/config/schema.help.ts +++ b/src/config/schema.help.ts @@ -1441,6 +1441,8 @@ export const FIELD_HELP: Record = { "Optional Slack user token for workflows requiring user-context API access beyond bot permissions. Use sparingly and audit scopes because this token can carry broader authority.", "channels.slack.userTokenReadOnly": "When true, treat configured Slack user token usage as read-only helper behavior where possible. Keep enabled if you only need supplemental reads without user-context writes.", + "channels.slack.capabilities.interactiveReplies": + "Enable agent-authored Slack interactive reply directives (`[[slack_buttons: ...]]`, `[[slack_select: ...]]`). Default: false.", "channels.mattermost.configWrites": "Allow Mattermost to write config in response to channel events/commands (default: true).", "channels.discord.configWrites": diff --git a/src/config/schema.labels.ts b/src/config/schema.labels.ts index 4c61cfbe1fb..c7c7b8d0e41 100644 --- a/src/config/schema.labels.ts +++ b/src/config/schema.labels.ts @@ -818,6 +818,7 @@ export const FIELD_LABELS: Record = { "channels.slack.appToken": "Slack App Token", "channels.slack.userToken": "Slack User Token", "channels.slack.userTokenReadOnly": "Slack User Token Read Only", + "channels.slack.capabilities.interactiveReplies": "Slack Interactive Replies", "channels.slack.streaming": "Slack Streaming Mode", "channels.slack.nativeStreaming": "Slack Native Streaming", "channels.slack.streamMode": "Slack Stream Mode (Legacy)", diff --git a/src/config/types.browser.ts b/src/config/types.browser.ts index 57d036bd88c..5f8e28a0ebe 100644 --- a/src/config/types.browser.ts +++ b/src/config/types.browser.ts @@ -4,7 +4,7 @@ export type BrowserProfileConfig = { /** CDP URL for this profile (use for remote Chrome). */ cdpUrl?: string; /** Profile driver (default: openclaw). 
*/ - driver?: "openclaw" | "clawd" | "extension"; + driver?: "openclaw" | "clawd" | "extension" | "existing-session"; /** If true, never launch a browser for this profile; only attach. Falls back to browser.attachOnly. */ attachOnly?: boolean; /** Profile color (hex). Auto-assigned at creation. */ diff --git a/src/config/types.slack.ts b/src/config/types.slack.ts index 96abe2641d6..a90f1ed5020 100644 --- a/src/config/types.slack.ts +++ b/src/config/types.slack.ts @@ -47,6 +47,11 @@ export type SlackChannelConfig = { export type SlackReactionNotificationMode = "off" | "own" | "all" | "allowlist"; export type SlackStreamingMode = "off" | "partial" | "block" | "progress"; export type SlackLegacyStreamMode = "replace" | "status_final" | "append"; +export type SlackCapabilitiesConfig = + | string[] + | { + interactiveReplies?: boolean; + }; export type SlackActionConfig = { reactions?: boolean; @@ -89,7 +94,7 @@ export type SlackAccountConfig = { /** Slack Events API webhook path (default: /slack/events). */ webhookPath?: string; /** Optional provider capability tags used for agent/runtime guidance. */ - capabilities?: string[]; + capabilities?: SlackCapabilitiesConfig; /** Markdown formatting overrides (tables). */ markdown?: MarkdownConfig; /** Override native command registration for Slack (bool or "auto"). 
*/ diff --git a/src/config/zod-schema.providers-core.ts b/src/config/zod-schema.providers-core.ts index 47f76614dd8..ced89bd8512 100644 --- a/src/config/zod-schema.providers-core.ts +++ b/src/config/zod-schema.providers-core.ts @@ -59,6 +59,14 @@ const TelegramCapabilitiesSchema = z.union([ }) .strict(), ]); +const SlackCapabilitiesSchema = z.union([ + z.array(z.string()), + z + .object({ + interactiveReplies: z.boolean().optional(), + }) + .strict(), +]); export const TelegramTopicSchema = z .object({ @@ -831,7 +839,7 @@ export const SlackAccountSchema = z mode: z.enum(["socket", "http"]).optional(), signingSecret: SecretInputSchema.optional().register(sensitive), webhookPath: z.string().optional(), - capabilities: z.array(z.string()).optional(), + capabilities: SlackCapabilitiesSchema.optional(), markdown: MarkdownConfigSchema, enabled: z.boolean().optional(), commands: ProviderCommandsSchema, diff --git a/src/config/zod-schema.ts b/src/config/zod-schema.ts index 0064afddd20..741b4bcc0c9 100644 --- a/src/config/zod-schema.ts +++ b/src/config/zod-schema.ts @@ -360,7 +360,12 @@ export const OpenClawSchema = z cdpPort: z.number().int().min(1).max(65535).optional(), cdpUrl: z.string().optional(), driver: z - .union([z.literal("openclaw"), z.literal("clawd"), z.literal("extension")]) + .union([ + z.literal("openclaw"), + z.literal("clawd"), + z.literal("extension"), + z.literal("existing-session"), + ]) .optional(), attachOnly: z.boolean().optional(), color: HexColorSchema, diff --git a/src/cron/isolated-agent.lane.test.ts b/src/cron/isolated-agent.lane.test.ts index 5d26faff327..3790c5e511a 100644 --- a/src/cron/isolated-agent.lane.test.ts +++ b/src/cron/isolated-agent.lane.test.ts @@ -1,6 +1,7 @@ import "./isolated-agent.mocks.js"; import { beforeEach, describe, expect, it, vi } from "vitest"; import { runEmbeddedPiAgent } from "../agents/pi-embedded.js"; +import { createCliDeps, mockAgentPayloads } from "./isolated-agent.delivery.test-helpers.js"; import { 
runCronIsolatedAgentTurn } from "./isolated-agent.js"; import { makeCfg, @@ -9,27 +10,6 @@ import { writeSessionStoreEntries, } from "./isolated-agent.test-harness.js"; -function makeDeps() { - return { - sendMessageSlack: vi.fn(), - sendMessageWhatsApp: vi.fn(), - sendMessageTelegram: vi.fn(), - sendMessageDiscord: vi.fn(), - sendMessageSignal: vi.fn(), - sendMessageIMessage: vi.fn(), - }; -} - -function mockEmbeddedOk() { - vi.mocked(runEmbeddedPiAgent).mockResolvedValue({ - payloads: [{ text: "ok" }], - meta: { - durationMs: 5, - agentMeta: { sessionId: "s", provider: "p", model: "m" }, - }, - }); -} - function lastEmbeddedLane(): string | undefined { const calls = vi.mocked(runEmbeddedPiAgent).mock.calls; expect(calls.length).toBeGreaterThan(0); @@ -45,11 +25,11 @@ async function runLaneCase(home: string, lane?: string) { lastTo: "", }, }); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); await runCronIsolatedAgentTurn({ cfg: makeCfg(home, storePath), - deps: makeDeps(), + deps: createCliDeps(), job: makeJob({ kind: "agentTurn", message: "do it", deliver: false }), message: "do it", sessionKey: "cron:job-1", diff --git a/src/cron/isolated-agent.model-formatting.test.ts b/src/cron/isolated-agent.model-formatting.test.ts index e78f251dc8b..f9732a32d31 100644 --- a/src/cron/isolated-agent.model-formatting.test.ts +++ b/src/cron/isolated-agent.model-formatting.test.ts @@ -2,6 +2,7 @@ import "./isolated-agent.mocks.js"; import { beforeEach, describe, expect, it, vi } from "vitest"; import { loadModelCatalog } from "../agents/model-catalog.js"; import { runEmbeddedPiAgent } from "../agents/pi-embedded.js"; +import { createCliDeps, mockAgentPayloads } from "./isolated-agent.delivery.test-helpers.js"; import { runCronIsolatedAgentTurn } from "./isolated-agent.js"; import { makeCfg, @@ -13,27 +14,6 @@ import type { CronJob } from "./types.js"; const withTempHome = withTempCronHome; -function makeDeps() { - return { - sendMessageSlack: vi.fn(), - 
sendMessageWhatsApp: vi.fn(), - sendMessageTelegram: vi.fn(), - sendMessageDiscord: vi.fn(), - sendMessageSignal: vi.fn(), - sendMessageIMessage: vi.fn(), - }; -} - -function mockEmbeddedOk() { - vi.mocked(runEmbeddedPiAgent).mockResolvedValue({ - payloads: [{ text: "ok" }], - meta: { - durationMs: 5, - agentMeta: { sessionId: "s", provider: "p", model: "m" }, - }, - }); -} - /** * Extract the provider and model from the last runEmbeddedPiAgent call. */ @@ -62,7 +42,7 @@ async function runTurnCore(home: string, options: TurnOptions = {}) { }, ...options.storeEntries, }); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); const jobPayload = options.jobPayload ?? { kind: "agentTurn" as const, @@ -72,7 +52,7 @@ async function runTurnCore(home: string, options: TurnOptions = {}) { const res = await runCronIsolatedAgentTurn({ cfg: makeCfg(home, storePath, options.cfgOverrides), - deps: makeDeps(), + deps: createCliDeps(), job: makeJob(jobPayload), message: DEFAULT_MESSAGE, sessionKey: options.sessionKey ?? 
"cron:job-1", @@ -310,7 +290,7 @@ describe("cron model formatting and precedence edge cases", () => { // Step 2: No job model, session store says openai vi.mocked(runEmbeddedPiAgent).mockClear(); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); const step2 = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, storeEntries: { @@ -327,7 +307,7 @@ describe("cron model formatting and precedence edge cases", () => { // Step 3: Job payload says anthropic, session store still says openai vi.mocked(runEmbeddedPiAgent).mockClear(); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); const step3 = await runTurn(home, { jobPayload: { kind: "agentTurn", @@ -365,7 +345,7 @@ describe("cron model formatting and precedence edge cases", () => { // Run 2: no override — must revert to default anthropic vi.mocked(runEmbeddedPiAgent).mockClear(); - mockEmbeddedOk(); + mockAgentPayloads([{ text: "ok" }]); const r2 = await runTurn(home, { jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false }, }); diff --git a/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts b/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts index b9c0fddb3a3..2cdb6ee0048 100644 --- a/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts +++ b/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts @@ -133,6 +133,16 @@ async function runTelegramDeliveryResult(bestEffort: boolean) { return outcome; } +function expectSuccessfulTelegramTextDelivery(params: { + res: Awaited>; + deps: CliDeps; +}): void { + expect(params.res.status).toBe("ok"); + expect(params.res.delivered).toBe(true); + expect(params.res.deliveryAttempted).toBe(true); + expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); +} + async function runSignalDeliveryResult(bestEffort: boolean) { let 
outcome: | { @@ -379,31 +389,11 @@ describe("runCronIsolatedAgentTurn", () => { }); it("delivers text directly when best-effort is disabled", async () => { - await withTempHome(async (home) => { - const storePath = await writeSessionStore(home, { lastProvider: "webchat", lastTo: "" }); - const deps = createCliDeps(); - mockAgentPayloads([{ text: "hello from cron" }]); - - const res = await runTelegramAnnounceTurn({ - home, - storePath, - deps, - delivery: { - mode: "announce", - channel: "telegram", - to: "123", - bestEffort: false, - }, - }); - - expect(res.status).toBe("ok"); - expect(res.delivered).toBe(true); - expect(res.deliveryAttempted).toBe(true); - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); - expectDirectTelegramDelivery(deps, { - chatId: "123", - text: "hello from cron", - }); + const { res, deps } = await runTelegramDeliveryResult(false); + expectSuccessfulTelegramTextDelivery({ res, deps }); + expectDirectTelegramDelivery(deps, { + chatId: "123", + text: "hello from cron", }); }); @@ -459,10 +449,7 @@ describe("runCronIsolatedAgentTurn", () => { }, }); - expect(res.status).toBe("ok"); - expect(res.delivered).toBe(true); - expect(res.deliveryAttempted).toBe(true); - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); + expectSuccessfulTelegramTextDelivery({ res, deps }); expect(deps.sendMessageTelegram).toHaveBeenCalledTimes(2); expect(deps.sendMessageTelegram).toHaveBeenLastCalledWith( "123", @@ -490,10 +477,7 @@ describe("runCronIsolatedAgentTurn", () => { it("delivers text directly when best-effort is enabled", async () => { const { res, deps } = await runTelegramDeliveryResult(true); - expect(res.status).toBe("ok"); - expect(res.delivered).toBe(true); - expect(res.deliveryAttempted).toBe(true); - expect(runSubagentAnnounceFlow).not.toHaveBeenCalled(); + expectSuccessfulTelegramTextDelivery({ res, deps }); expectDirectTelegramDelivery(deps, { chatId: "123", text: "hello from cron", diff --git 
a/src/cron/isolated-agent/run.fast-mode.test.ts b/src/cron/isolated-agent/run.fast-mode.test.ts index 471471e9ecd..abe50ea5554 100644 --- a/src/cron/isolated-agent/run.fast-mode.test.ts +++ b/src/cron/isolated-agent/run.fast-mode.test.ts @@ -14,169 +14,102 @@ import { const runCronIsolatedAgentTurn = await loadRunCronIsolatedAgentTurn(); +const OPENAI_GPT4_MODEL = "openai/gpt-4"; + +function mockSuccessfulModelFallback() { + runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => { + await run(provider, model); + return { + result: { + payloads: [{ text: "ok" }], + meta: { agentMeta: { usage: { input: 10, output: 20 } } }, + }, + provider, + model, + attempts: [], + }; + }); +} + +async function runFastModeCase(params: { + configFastMode: boolean; + expectedFastMode: boolean; + message: string; + sessionFastMode?: boolean; +}) { + const baseSession = makeCronSession(); + resolveCronSessionMock.mockReturnValue( + params.sessionFastMode === undefined + ? baseSession + : makeCronSession({ + sessionEntry: { + ...baseSession.sessionEntry, + fastMode: params.sessionFastMode, + }, + }), + ); + mockSuccessfulModelFallback(); + + const result = await runCronIsolatedAgentTurn( + makeIsolatedAgentTurnParams({ + cfg: { + agents: { + defaults: { + models: { + [OPENAI_GPT4_MODEL]: { + params: { + fastMode: params.configFastMode, + }, + }, + }, + }, + }, + }, + job: makeIsolatedAgentTurnJob({ + payload: { + kind: "agentTurn", + message: params.message, + model: OPENAI_GPT4_MODEL, + }, + }), + }), + ); + + expect(result.status).toBe("ok"); + expect(runEmbeddedPiAgentMock).toHaveBeenCalledOnce(); + expect(runEmbeddedPiAgentMock.mock.calls[0][0]).toMatchObject({ + provider: "openai", + model: "gpt-4", + fastMode: params.expectedFastMode, + }); +} + describe("runCronIsolatedAgentTurn — fast mode", () => { setupRunCronIsolatedAgentTurnSuite(); it("passes config-driven fast mode into embedded cron runs", async () => { - const cronSession = makeCronSession(); - 
resolveCronSessionMock.mockReturnValue(cronSession); - - runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => { - await run(provider, model); - return { - result: { - payloads: [{ text: "ok" }], - meta: { agentMeta: { usage: { input: 10, output: 20 } } }, - }, - provider, - model, - attempts: [], - }; - }); - - const result = await runCronIsolatedAgentTurn( - makeIsolatedAgentTurnParams({ - cfg: { - agents: { - defaults: { - models: { - "openai/gpt-4": { - params: { - fastMode: true, - }, - }, - }, - }, - }, - }, - job: makeIsolatedAgentTurnJob({ - payload: { - kind: "agentTurn", - message: "test fast mode", - model: "openai/gpt-4", - }, - }), - }), - ); - - expect(result.status).toBe("ok"); - expect(runEmbeddedPiAgentMock).toHaveBeenCalledOnce(); - expect(runEmbeddedPiAgentMock.mock.calls[0][0]).toMatchObject({ - provider: "openai", - model: "gpt-4", - fastMode: true, + await runFastModeCase({ + configFastMode: true, + expectedFastMode: true, + message: "test fast mode", }); }); it("honors session fastMode=false over config fastMode=true", async () => { - const cronSession = makeCronSession({ - sessionEntry: { - ...makeCronSession().sessionEntry, - fastMode: false, - }, - }); - resolveCronSessionMock.mockReturnValue(cronSession); - - runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => { - await run(provider, model); - return { - result: { - payloads: [{ text: "ok" }], - meta: { agentMeta: { usage: { input: 10, output: 20 } } }, - }, - provider, - model, - attempts: [], - }; - }); - - const result = await runCronIsolatedAgentTurn( - makeIsolatedAgentTurnParams({ - cfg: { - agents: { - defaults: { - models: { - "openai/gpt-4": { - params: { - fastMode: true, - }, - }, - }, - }, - }, - }, - job: makeIsolatedAgentTurnJob({ - payload: { - kind: "agentTurn", - message: "test fast mode override", - model: "openai/gpt-4", - }, - }), - }), - ); - - expect(result.status).toBe("ok"); - 
expect(runEmbeddedPiAgentMock).toHaveBeenCalledOnce(); - expect(runEmbeddedPiAgentMock.mock.calls[0][0]).toMatchObject({ - provider: "openai", - model: "gpt-4", - fastMode: false, + await runFastModeCase({ + configFastMode: true, + expectedFastMode: false, + message: "test fast mode override", + sessionFastMode: false, }); }); it("honors session fastMode=true over config fastMode=false", async () => { - const cronSession = makeCronSession({ - sessionEntry: { - ...makeCronSession().sessionEntry, - fastMode: true, - }, - }); - resolveCronSessionMock.mockReturnValue(cronSession); - - runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => { - await run(provider, model); - return { - result: { - payloads: [{ text: "ok" }], - meta: { agentMeta: { usage: { input: 10, output: 20 } } }, - }, - provider, - model, - attempts: [], - }; - }); - - const result = await runCronIsolatedAgentTurn( - makeIsolatedAgentTurnParams({ - cfg: { - agents: { - defaults: { - models: { - "openai/gpt-4": { - params: { - fastMode: false, - }, - }, - }, - }, - }, - }, - job: makeIsolatedAgentTurnJob({ - payload: { - kind: "agentTurn", - message: "test fast mode session override", - model: "openai/gpt-4", - }, - }), - }), - ); - - expect(result.status).toBe("ok"); - expect(runEmbeddedPiAgentMock).toHaveBeenCalledOnce(); - expect(runEmbeddedPiAgentMock.mock.calls[0][0]).toMatchObject({ - provider: "openai", - model: "gpt-4", - fastMode: true, + await runFastModeCase({ + configFastMode: false, + expectedFastMode: true, + message: "test fast mode session override", + sessionFastMode: true, }); }); }); diff --git a/src/cron/service.restart-catchup.test.ts b/src/cron/service.restart-catchup.test.ts index f0c9c3e4dc9..70da886b9a0 100644 --- a/src/cron/service.restart-catchup.test.ts +++ b/src/cron/service.restart-catchup.test.ts @@ -47,326 +47,274 @@ describe("CronService restart catch-up", () => { }; } - it("executes an overdue recurring job immediately on start", async () => { 
+ async function withRestartedCron( + jobs: unknown[], + run: (params: { + cron: CronService; + enqueueSystemEvent: ReturnType; + requestHeartbeatNow: ReturnType; + }) => Promise, + ) { const store = await makeStorePath(); const enqueueSystemEvent = vi.fn(); const requestHeartbeatNow = vi.fn(); + await writeStoreJobs(store.storePath, jobs); + + const cron = createRestartCronService({ + storePath: store.storePath, + enqueueSystemEvent, + requestHeartbeatNow, + }); + + try { + await cron.start(); + await run({ cron, enqueueSystemEvent, requestHeartbeatNow }); + } finally { + cron.stop(); + await store.cleanup(); + } + } + + it("executes an overdue recurring job immediately on start", async () => { const dueAt = Date.parse("2025-12-13T15:00:00.000Z"); const lastRunAt = Date.parse("2025-12-12T15:00:00.000Z"); - await writeStoreJobs(store.storePath, [ - { - id: "restart-overdue-job", - name: "daily digest", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-12T15:00:00.000Z"), - schedule: { kind: "cron", expr: "0 15 * * *", tz: "UTC" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "digest now" }, - state: { - nextRunAtMs: dueAt, - lastRunAtMs: lastRunAt, - lastStatus: "ok", + await withRestartedCron( + [ + { + id: "restart-overdue-job", + name: "daily digest", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-12T15:00:00.000Z"), + schedule: { kind: "cron", expr: "0 15 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "digest now" }, + state: { + nextRunAtMs: dueAt, + lastRunAtMs: lastRunAt, + lastStatus: "ok", + }, }, + ], + async ({ cron, enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).toHaveBeenCalledWith( + "digest now", + expect.objectContaining({ agentId: undefined }), + ); + 
expect(requestHeartbeatNow).toHaveBeenCalled(); + + const listedJobs = await cron.list({ includeDisabled: true }); + const updated = listedJobs.find((job) => job.id === "restart-overdue-job"); + expect(updated?.state.lastStatus).toBe("ok"); + expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T17:00:00.000Z")); + expect(updated?.state.nextRunAtMs).toBeGreaterThan(Date.parse("2025-12-13T17:00:00.000Z")); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).toHaveBeenCalledWith( - "digest now", - expect.objectContaining({ agentId: undefined }), ); - expect(requestHeartbeatNow).toHaveBeenCalled(); - - const jobs = await cron.list({ includeDisabled: true }); - const updated = jobs.find((job) => job.id === "restart-overdue-job"); - expect(updated?.state.lastStatus).toBe("ok"); - expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T17:00:00.000Z")); - expect(updated?.state.nextRunAtMs).toBeGreaterThan(Date.parse("2025-12-13T17:00:00.000Z")); - - cron.stop(); - await store.cleanup(); }); it("clears stale running markers without replaying interrupted startup jobs", async () => { - const store = await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - const dueAt = Date.parse("2025-12-13T16:00:00.000Z"); const staleRunningAt = Date.parse("2025-12-13T16:30:00.000Z"); - await writeStoreJobs(store.storePath, [ - { - id: "restart-stale-running", - name: "daily stale marker", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"), - schedule: { kind: "cron", expr: "0 16 * * *", tz: "UTC" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "resume stale marker" }, - state: { - nextRunAtMs: dueAt, - runningAtMs: staleRunningAt, + await withRestartedCron( + [ + { + id: 
"restart-stale-running", + name: "daily stale marker", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"), + schedule: { kind: "cron", expr: "0 16 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "resume stale marker" }, + state: { + nextRunAtMs: dueAt, + runningAtMs: staleRunningAt, + }, }, + ], + async ({ cron, enqueueSystemEvent }) => { + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(noopLogger.warn).toHaveBeenCalledWith( + expect.objectContaining({ jobId: "restart-stale-running" }), + "cron: clearing stale running marker on startup", + ); + + const listedJobs = await cron.list({ includeDisabled: true }); + const updated = listedJobs.find((job) => job.id === "restart-stale-running"); + expect(updated?.state.runningAtMs).toBeUndefined(); + expect(updated?.state.lastStatus).toBeUndefined(); + expect(updated?.state.lastRunAtMs).toBeUndefined(); + expect((updated?.state.nextRunAtMs ?? 0) > Date.parse("2025-12-13T17:00:00.000Z")).toBe( + true, + ); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).not.toHaveBeenCalled(); - expect(noopLogger.warn).toHaveBeenCalledWith( - expect.objectContaining({ jobId: "restart-stale-running" }), - "cron: clearing stale running marker on startup", ); - - const jobs = await cron.list({ includeDisabled: true }); - const updated = jobs.find((job) => job.id === "restart-stale-running"); - expect(updated?.state.runningAtMs).toBeUndefined(); - expect(updated?.state.lastStatus).toBeUndefined(); - expect(updated?.state.lastRunAtMs).toBeUndefined(); - expect((updated?.state.nextRunAtMs ?? 
0) > Date.parse("2025-12-13T17:00:00.000Z")).toBe(true); - - cron.stop(); - await store.cleanup(); }); it("replays the most recent missed cron slot after restart when nextRunAtMs already advanced", async () => { vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z")); - const store = await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - - await writeStoreJobs(store.storePath, [ - { - id: "restart-missed-slot", - name: "every ten minutes +1", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"), - schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "catch missed slot" }, - state: { - // Persisted state may already be recomputed from restart time and - // point to the future slot, even though 04:01 was missed. - nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), - lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"), - lastStatus: "ok", + await withRestartedCron( + [ + { + id: "restart-missed-slot", + name: "every ten minutes +1", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"), + schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "catch missed slot" }, + state: { + // Persisted state may already be recomputed from restart time and + // point to the future slot, even though 04:01 was missed. 
+ nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), + lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"), + lastStatus: "ok", + }, }, + ], + async ({ cron, enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).toHaveBeenCalledWith( + "catch missed slot", + expect.objectContaining({ agentId: undefined }), + ); + expect(requestHeartbeatNow).toHaveBeenCalled(); + + const listedJobs = await cron.list({ includeDisabled: true }); + const updated = listedJobs.find((job) => job.id === "restart-missed-slot"); + expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T04:02:00.000Z")); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).toHaveBeenCalledWith( - "catch missed slot", - expect.objectContaining({ agentId: undefined }), ); - expect(requestHeartbeatNow).toHaveBeenCalled(); - - const jobs = await cron.list({ includeDisabled: true }); - const updated = jobs.find((job) => job.id === "restart-missed-slot"); - expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T04:02:00.000Z")); - - cron.stop(); - await store.cleanup(); }); it("does not replay interrupted one-shot jobs on startup", async () => { - const store = await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - const dueAt = Date.parse("2025-12-13T16:00:00.000Z"); const staleRunningAt = Date.parse("2025-12-13T16:30:00.000Z"); - await writeStoreJobs(store.storePath, [ - { - id: "restart-stale-one-shot", - name: "one shot stale marker", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"), - schedule: { kind: "at", at: "2025-12-13T16:00:00.000Z" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "one-shot stale marker" }, - state: { - nextRunAtMs: dueAt, - runningAtMs: 
staleRunningAt, + await withRestartedCron( + [ + { + id: "restart-stale-one-shot", + name: "one shot stale marker", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"), + schedule: { kind: "at", at: "2025-12-13T16:00:00.000Z" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "one-shot stale marker" }, + state: { + nextRunAtMs: dueAt, + runningAtMs: staleRunningAt, + }, }, + ], + async ({ cron, enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(requestHeartbeatNow).not.toHaveBeenCalled(); + + const listedJobs = await cron.list({ includeDisabled: true }); + const updated = listedJobs.find((job) => job.id === "restart-stale-one-shot"); + expect(updated?.state.runningAtMs).toBeUndefined(); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).not.toHaveBeenCalled(); - expect(requestHeartbeatNow).not.toHaveBeenCalled(); - - const jobs = await cron.list({ includeDisabled: true }); - const updated = jobs.find((job) => job.id === "restart-stale-one-shot"); - expect(updated?.state.runningAtMs).toBeUndefined(); - - cron.stop(); - await store.cleanup(); + ); }); it("does not replay cron slot when the latest slot already ran before restart", async () => { vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z")); - const store = await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - - await writeStoreJobs(store.storePath, [ - { - id: "restart-no-duplicate-slot", - name: "every ten minutes +1 no duplicate", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"), - schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, - sessionTarget: 
"main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "already ran" }, - state: { - nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), - lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"), - lastStatus: "ok", + await withRestartedCron( + [ + { + id: "restart-no-duplicate-slot", + name: "every ten minutes +1 no duplicate", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"), + schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "already ran" }, + state: { + nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), + lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"), + lastStatus: "ok", + }, }, + ], + async ({ enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(requestHeartbeatNow).not.toHaveBeenCalled(); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).not.toHaveBeenCalled(); - expect(requestHeartbeatNow).not.toHaveBeenCalled(); - cron.stop(); - await store.cleanup(); + ); }); it("does not replay missed cron slots while error backoff is pending after restart", async () => { vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z")); - const store = await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - - await writeStoreJobs(store.storePath, [ - { - id: "restart-backoff-pending", - name: "backoff pending", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"), - schedule: { kind: "cron", expr: "* * * * *", tz: "UTC" }, - sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "do not run during backoff" }, - 
state: { - // Next retry is intentionally delayed by backoff despite a newer cron slot. - nextRunAtMs: Date.parse("2025-12-13T04:10:00.000Z"), - lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"), - lastStatus: "error", - consecutiveErrors: 4, + await withRestartedCron( + [ + { + id: "restart-backoff-pending", + name: "backoff pending", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"), + schedule: { kind: "cron", expr: "* * * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "do not run during backoff" }, + state: { + // Next retry is intentionally delayed by backoff despite a newer cron slot. + nextRunAtMs: Date.parse("2025-12-13T04:10:00.000Z"), + lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"), + lastStatus: "error", + consecutiveErrors: 4, + }, }, + ], + async ({ enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + expect(requestHeartbeatNow).not.toHaveBeenCalled(); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).not.toHaveBeenCalled(); - expect(requestHeartbeatNow).not.toHaveBeenCalled(); - - cron.stop(); - await store.cleanup(); + ); }); it("replays missed cron slot after restart when error backoff has already elapsed", async () => { vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z")); - const store = await makeStorePath(); - const enqueueSystemEvent = vi.fn(); - const requestHeartbeatNow = vi.fn(); - - await writeStoreJobs(store.storePath, [ - { - id: "restart-backoff-elapsed-replay", - name: "backoff elapsed replay", - enabled: true, - createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), - updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"), - schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, - 
sessionTarget: "main", - wakeMode: "next-heartbeat", - payload: { kind: "systemEvent", text: "replay after backoff elapsed" }, - state: { - // Startup maintenance may already point to a future slot (04:11) even - // though 04:01 was missed and the 30s error backoff has elapsed. - nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), - lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"), - lastStatus: "error", - consecutiveErrors: 1, + await withRestartedCron( + [ + { + id: "restart-backoff-elapsed-replay", + name: "backoff elapsed replay", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"), + schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "replay after backoff elapsed" }, + state: { + // Startup maintenance may already point to a future slot (04:11) even + // though 04:01 was missed and the 30s error backoff has elapsed. 
+ nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"), + lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"), + lastStatus: "error", + consecutiveErrors: 1, + }, }, + ], + async ({ enqueueSystemEvent, requestHeartbeatNow }) => { + expect(enqueueSystemEvent).toHaveBeenCalledWith( + "replay after backoff elapsed", + expect.objectContaining({ agentId: undefined }), + ); + expect(requestHeartbeatNow).toHaveBeenCalled(); }, - ]); - - const cron = createRestartCronService({ - storePath: store.storePath, - enqueueSystemEvent, - requestHeartbeatNow, - }); - - await cron.start(); - - expect(enqueueSystemEvent).toHaveBeenCalledWith( - "replay after backoff elapsed", - expect.objectContaining({ agentId: undefined }), ); - expect(requestHeartbeatNow).toHaveBeenCalled(); - - cron.stop(); - await store.cleanup(); }); it("reschedules deferred missed jobs from the post-catchup clock so they stay in the future", async () => { diff --git a/src/daemon/launchd.test.ts b/src/daemon/launchd.test.ts index ba43715ba28..4c624cfeec1 100644 --- a/src/daemon/launchd.test.ts +++ b/src/daemon/launchd.test.ts @@ -31,6 +31,25 @@ const launchdRestartHandoffState = vi.hoisted(() => ({ })); const defaultProgramArguments = ["node", "-e", "process.exit(0)"]; +function expectLaunchctlEnableBootstrapOrder(env: Record) { + const domain = typeof process.getuid === "function" ? 
`gui/${process.getuid()}` : "gui/501"; + const label = "ai.openclaw.gateway"; + const plistPath = resolveLaunchAgentPlistPath(env); + const serviceId = `${domain}/${label}`; + const enableIndex = state.launchctlCalls.findIndex( + (c) => c[0] === "enable" && c[1] === serviceId, + ); + const bootstrapIndex = state.launchctlCalls.findIndex( + (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, + ); + + expect(enableIndex).toBeGreaterThanOrEqual(0); + expect(bootstrapIndex).toBeGreaterThanOrEqual(0); + expect(enableIndex).toBeLessThan(bootstrapIndex); + + return { domain, label, serviceId, bootstrapIndex }; +} + function normalizeLaunchctlArgs(file: string, args: string[]): string[] { if (file === "launchctl") { return args; @@ -219,25 +238,12 @@ describe("launchd bootstrap repair", () => { const repair = await repairLaunchAgentBootstrap({ env }); expect(repair.ok).toBe(true); - const domain = typeof process.getuid === "function" ? `gui/${process.getuid()}` : "gui/501"; - const label = "ai.openclaw.gateway"; - const plistPath = resolveLaunchAgentPlistPath(env); - const serviceId = `${domain}/${label}`; - - const enableIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "enable" && c[1] === serviceId, - ); - const bootstrapIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, - ); + const { serviceId, bootstrapIndex } = expectLaunchctlEnableBootstrapOrder(env); const kickstartIndex = state.launchctlCalls.findIndex( (c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === serviceId, ); - expect(enableIndex).toBeGreaterThanOrEqual(0); - expect(bootstrapIndex).toBeGreaterThanOrEqual(0); expect(kickstartIndex).toBeGreaterThanOrEqual(0); - expect(enableIndex).toBeLessThan(bootstrapIndex); expect(bootstrapIndex).toBeLessThan(kickstartIndex); }); }); @@ -258,23 +264,10 @@ describe("launchd install", () => { programArguments: defaultProgramArguments, }); - const domain = typeof process.getuid === 
"function" ? `gui/${process.getuid()}` : "gui/501"; - const label = "ai.openclaw.gateway"; - const plistPath = resolveLaunchAgentPlistPath(env); - const serviceId = `${domain}/${label}`; - - const enableIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "enable" && c[1] === serviceId, - ); - const bootstrapIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, - ); + const { serviceId } = expectLaunchctlEnableBootstrapOrder(env); const installKickstartIndex = state.launchctlCalls.findIndex( (c) => c[0] === "kickstart" && c[2] === serviceId, ); - expect(enableIndex).toBeGreaterThanOrEqual(0); - expect(bootstrapIndex).toBeGreaterThanOrEqual(0); - expect(enableIndex).toBeLessThan(bootstrapIndex); expect(installKickstartIndex).toBe(-1); }); @@ -360,24 +353,13 @@ describe("launchd install", () => { stdout: new PassThrough(), }); - const domain = typeof process.getuid === "function" ? `gui/${process.getuid()}` : "gui/501"; - const label = "ai.openclaw.gateway"; - const plistPath = resolveLaunchAgentPlistPath(env); - const serviceId = `${domain}/${label}`; + const { serviceId } = expectLaunchctlEnableBootstrapOrder(env); const kickstartCalls = state.launchctlCalls.filter( (c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === serviceId, ); - const enableIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "enable" && c[1] === serviceId, - ); - const bootstrapIndex = state.launchctlCalls.findIndex( - (c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath, - ); expect(result).toEqual({ outcome: "completed" }); expect(kickstartCalls).toHaveLength(2); - expect(enableIndex).toBeGreaterThanOrEqual(0); - expect(bootstrapIndex).toBeGreaterThanOrEqual(0); expect(state.launchctlCalls.some((call) => call[0] === "bootout")).toBe(false); }); diff --git a/src/daemon/schtasks.startup-fallback.test.ts b/src/daemon/schtasks.startup-fallback.test.ts index 8b26a98e4ed..6e6a8521d6c 100644 --- 
a/src/daemon/schtasks.startup-fallback.test.ts +++ b/src/daemon/schtasks.startup-fallback.test.ts @@ -1,37 +1,26 @@ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import { PassThrough } from "node:stream"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { quoteCmdScriptArg } from "./cmd-argv.js"; - -const schtasksResponses = vi.hoisted( - () => [] as Array<{ code: number; stdout: string; stderr: string }>, -); -const schtasksCalls = vi.hoisted(() => [] as string[][]); -const inspectPortUsage = vi.hoisted(() => vi.fn()); -const killProcessTree = vi.hoisted(() => vi.fn()); +import "./test-helpers/schtasks-base-mocks.js"; +import { + inspectPortUsage, + killProcessTree, + resetSchtasksBaseMocks, + schtasksResponses, + withWindowsEnv, +} from "./test-helpers/schtasks-fixtures.js"; const childUnref = vi.hoisted(() => vi.fn()); const spawn = vi.hoisted(() => vi.fn(() => ({ unref: childUnref }))); -vi.mock("./schtasks-exec.js", () => ({ - execSchtasks: async (argv: string[]) => { - schtasksCalls.push(argv); - return schtasksResponses.shift() ?? 
{ code: 0, stdout: "", stderr: "" }; - }, -})); - -vi.mock("../infra/ports.js", () => ({ - inspectPortUsage: (...args: unknown[]) => inspectPortUsage(...args), -})); - -vi.mock("../process/kill-tree.js", () => ({ - killProcessTree: (...args: unknown[]) => killProcessTree(...args), -})); - -vi.mock("node:child_process", () => ({ - spawn, -})); +vi.mock("node:child_process", async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + spawn, + }; +}); const { installScheduledTask, @@ -39,6 +28,7 @@ const { readScheduledTaskRuntime, restartScheduledTask, resolveTaskScriptPath, + stopScheduledTask, } = await import("./schtasks.js"); function resolveStartupEntryPath(env: Record) { @@ -53,28 +43,22 @@ function resolveStartupEntryPath(env: Record) { ); } -async function withWindowsEnv( - run: (params: { tmpDir: string; env: Record }) => Promise, -) { - const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-win-startup-")); - const env = { - USERPROFILE: tmpDir, - APPDATA: path.join(tmpDir, "AppData", "Roaming"), - OPENCLAW_PROFILE: "default", - OPENCLAW_GATEWAY_PORT: "18789", - }; - try { - await run({ tmpDir, env }); - } finally { - await fs.rm(tmpDir, { recursive: true, force: true }); - } +async function writeGatewayScript(env: Record, port = 18789) { + const scriptPath = resolveTaskScriptPath(env); + await fs.mkdir(path.dirname(scriptPath), { recursive: true }); + await fs.writeFile( + scriptPath, + [ + "@echo off", + `set "OPENCLAW_GATEWAY_PORT=${port}"`, + `"C:\\Program Files\\nodejs\\node.exe" "C:\\Users\\steipete\\AppData\\Roaming\\npm\\node_modules\\openclaw\\dist\\index.js" gateway --port ${port}`, + "", + ].join("\r\n"), + "utf8", + ); } - beforeEach(() => { - schtasksResponses.length = 0; - schtasksCalls.length = 0; - inspectPortUsage.mockReset(); - killProcessTree.mockReset(); + resetSchtasksBaseMocks(); spawn.mockClear(); childUnref.mockClear(); }); @@ -85,7 +69,7 @@ afterEach(() => { describe("Windows startup 
fallback", () => { it("falls back to a Startup-folder launcher when schtasks create is denied", async () => { - await withWindowsEnv(async ({ env }) => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { schtasksResponses.push( { code: 0, stdout: "", stderr: "" }, { code: 5, stdout: "", stderr: "ERROR: Access is denied." }, @@ -120,7 +104,7 @@ describe("Windows startup fallback", () => { }); it("falls back to a Startup-folder launcher when schtasks create hangs", async () => { - await withWindowsEnv(async ({ env }) => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { schtasksResponses.push( { code: 0, stdout: "", stderr: "" }, { code: 124, stdout: "", stderr: "schtasks timed out after 15000ms" }, @@ -144,7 +128,7 @@ describe("Windows startup fallback", () => { }); it("treats an installed Startup-folder launcher as loaded", async () => { - await withWindowsEnv(async ({ env }) => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { schtasksResponses.push( { code: 0, stdout: "", stderr: "" }, { code: 1, stdout: "", stderr: "not found" }, @@ -157,7 +141,7 @@ describe("Windows startup fallback", () => { }); it("reports runtime from the gateway listener when using the Startup fallback", async () => { - await withWindowsEnv(async ({ env }) => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { schtasksResponses.push( { code: 0, stdout: "", stderr: "" }, { code: 1, stdout: "", stderr: "not found" }, @@ -179,7 +163,7 @@ describe("Windows startup fallback", () => { }); it("restarts the Startup fallback by killing the current pid and relaunching the entry", async () => { - await withWindowsEnv(async ({ env }) => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { schtasksResponses.push( { code: 0, stdout: "", stderr: "" }, { code: 1, stdout: "", stderr: "not found" }, @@ -207,4 +191,39 @@ describe("Windows startup fallback", () => { ); }); }); + + it("kills the Startup fallback 
runtime even when the CLI env omits the gateway port", async () => { + await withWindowsEnv("openclaw-win-startup-", async ({ env }) => { + schtasksResponses.push({ code: 0, stdout: "", stderr: "" }); + await writeGatewayScript(env); + await fs.mkdir(path.dirname(resolveStartupEntryPath(env)), { recursive: true }); + await fs.writeFile(resolveStartupEntryPath(env), "@echo off\r\n", "utf8"); + inspectPortUsage + .mockResolvedValueOnce({ + port: 18789, + status: "busy", + listeners: [{ pid: 5151, command: "node.exe" }], + hints: [], + }) + .mockResolvedValueOnce({ + port: 18789, + status: "busy", + listeners: [{ pid: 5151, command: "node.exe" }], + hints: [], + }) + .mockResolvedValueOnce({ + port: 18789, + status: "free", + listeners: [], + hints: [], + }); + + const stdout = new PassThrough(); + const envWithoutPort = { ...env }; + delete envWithoutPort.OPENCLAW_GATEWAY_PORT; + await stopScheduledTask({ env: envWithoutPort, stdout }); + + expect(killProcessTree).toHaveBeenCalledWith(5151, { graceMs: 300 }); + }); + }); }); diff --git a/src/daemon/schtasks.stop.test.ts b/src/daemon/schtasks.stop.test.ts new file mode 100644 index 00000000000..320170706b6 --- /dev/null +++ b/src/daemon/schtasks.stop.test.ts @@ -0,0 +1,209 @@ +import fs from "node:fs/promises"; +import path from "node:path"; +import { PassThrough } from "node:stream"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import "./test-helpers/schtasks-base-mocks.js"; +import { + inspectPortUsage, + killProcessTree, + resetSchtasksBaseMocks, + schtasksCalls, + schtasksResponses, + withWindowsEnv, +} from "./test-helpers/schtasks-fixtures.js"; +const findVerifiedGatewayListenerPidsOnPortSync = vi.hoisted(() => + vi.fn<(port: number) => number[]>(() => []), +); + +vi.mock("../infra/gateway-processes.js", () => ({ + findVerifiedGatewayListenerPidsOnPortSync: (port: number) => + findVerifiedGatewayListenerPidsOnPortSync(port), +})); + +const { restartScheduledTask, 
resolveTaskScriptPath, stopScheduledTask } = + await import("./schtasks.js"); + +async function writeGatewayScript(env: Record, port = 18789) { + const scriptPath = resolveTaskScriptPath(env); + await fs.mkdir(path.dirname(scriptPath), { recursive: true }); + await fs.writeFile( + scriptPath, + [ + "@echo off", + `set "OPENCLAW_GATEWAY_PORT=${port}"`, + `"C:\\Program Files\\nodejs\\node.exe" "C:\\Users\\steipete\\AppData\\Roaming\\npm\\node_modules\\openclaw\\dist\\index.js" gateway --port ${port}`, + "", + ].join("\r\n"), + "utf8", + ); +} + +beforeEach(() => { + resetSchtasksBaseMocks(); + findVerifiedGatewayListenerPidsOnPortSync.mockReset(); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([]); + inspectPortUsage.mockResolvedValue({ + port: 18789, + status: "free", + listeners: [], + hints: [], + }); +}); + +afterEach(() => { + vi.restoreAllMocks(); +}); + +describe("Scheduled Task stop/restart cleanup", () => { + it("kills lingering verified gateway listeners after schtasks stop", async () => { + await withWindowsEnv("openclaw-win-stop-", async ({ env }) => { + await writeGatewayScript(env); + schtasksResponses.push( + { code: 0, stdout: "", stderr: "" }, + { code: 0, stdout: "", stderr: "" }, + { code: 0, stdout: "", stderr: "" }, + ); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4242]); + inspectPortUsage + .mockResolvedValueOnce({ + port: 18789, + status: "busy", + listeners: [{ pid: 4242, command: "node.exe" }], + hints: [], + }) + .mockResolvedValueOnce({ + port: 18789, + status: "free", + listeners: [], + hints: [], + }); + + const stdout = new PassThrough(); + await stopScheduledTask({ env, stdout }); + + expect(findVerifiedGatewayListenerPidsOnPortSync).toHaveBeenCalledWith(18789); + expect(killProcessTree).toHaveBeenCalledWith(4242, { graceMs: 300 }); + expect(inspectPortUsage).toHaveBeenCalledTimes(2); + }); + }); + + it("force-kills remaining busy port listeners when the first stop pass does not free the port", async () => 
{ + await withWindowsEnv("openclaw-win-stop-", async ({ env }) => { + await writeGatewayScript(env); + schtasksResponses.push( + { code: 0, stdout: "", stderr: "" }, + { code: 0, stdout: "", stderr: "" }, + { code: 0, stdout: "", stderr: "" }, + ); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4242]); + inspectPortUsage.mockResolvedValueOnce({ + port: 18789, + status: "busy", + listeners: [{ pid: 4242, command: "node.exe" }], + hints: [], + }); + for (let i = 0; i < 20; i += 1) { + inspectPortUsage.mockResolvedValueOnce({ + port: 18789, + status: "busy", + listeners: [{ pid: 4242, command: "node.exe" }], + hints: [], + }); + } + inspectPortUsage + .mockResolvedValueOnce({ + port: 18789, + status: "busy", + listeners: [{ pid: 5252, command: "node.exe" }], + hints: [], + }) + .mockResolvedValueOnce({ + port: 18789, + status: "free", + listeners: [], + hints: [], + }); + + const stdout = new PassThrough(); + await stopScheduledTask({ env, stdout }); + + expect(killProcessTree).toHaveBeenNthCalledWith(1, 4242, { graceMs: 300 }); + expect(killProcessTree).toHaveBeenNthCalledWith(2, expect.any(Number), { graceMs: 300 }); + expect(inspectPortUsage.mock.calls.length).toBeGreaterThanOrEqual(22); + }); + }); + + it("falls back to inspected gateway listeners when sync verification misses on Windows", async () => { + await withWindowsEnv("openclaw-win-stop-", async ({ env }) => { + await writeGatewayScript(env); + schtasksResponses.push( + { code: 0, stdout: "", stderr: "" }, + { code: 0, stdout: "", stderr: "" }, + { code: 0, stdout: "", stderr: "" }, + ); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([]); + inspectPortUsage + .mockResolvedValueOnce({ + port: 18789, + status: "busy", + listeners: [ + { + pid: 6262, + command: "node.exe", + commandLine: + '"C:\\Program Files\\nodejs\\node.exe" "C:\\Users\\steipete\\AppData\\Roaming\\npm\\node_modules\\openclaw\\dist\\index.js" gateway --port 18789', + }, + ], + hints: [], + }) + 
.mockResolvedValueOnce({ + port: 18789, + status: "free", + listeners: [], + hints: [], + }); + + const stdout = new PassThrough(); + await stopScheduledTask({ env, stdout }); + + expect(killProcessTree).toHaveBeenCalledWith(6262, { graceMs: 300 }); + expect(inspectPortUsage).toHaveBeenCalledTimes(2); + }); + }); + + it("kills lingering verified gateway listeners and waits for port release before restart", async () => { + await withWindowsEnv("openclaw-win-stop-", async ({ env }) => { + await writeGatewayScript(env); + schtasksResponses.push( + { code: 0, stdout: "", stderr: "" }, + { code: 0, stdout: "", stderr: "" }, + { code: 0, stdout: "", stderr: "" }, + { code: 0, stdout: "", stderr: "" }, + ); + findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([5151]); + inspectPortUsage + .mockResolvedValueOnce({ + port: 18789, + status: "busy", + listeners: [{ pid: 5151, command: "node.exe" }], + hints: [], + }) + .mockResolvedValueOnce({ + port: 18789, + status: "free", + listeners: [], + hints: [], + }); + + const stdout = new PassThrough(); + await expect(restartScheduledTask({ env, stdout })).resolves.toEqual({ + outcome: "completed", + }); + + expect(findVerifiedGatewayListenerPidsOnPortSync).toHaveBeenCalledWith(18789); + expect(killProcessTree).toHaveBeenCalledWith(5151, { graceMs: 300 }); + expect(inspectPortUsage).toHaveBeenCalledTimes(2); + expect(schtasksCalls.at(-1)).toEqual(["/Run", "/TN", "OpenClaw Gateway"]); + }); + }); +}); diff --git a/src/daemon/schtasks.ts b/src/daemon/schtasks.ts index 2c74cf26a61..2216e93bfd9 100644 --- a/src/daemon/schtasks.ts +++ b/src/daemon/schtasks.ts @@ -1,8 +1,11 @@ -import { spawn } from "node:child_process"; +import { spawn, spawnSync } from "node:child_process"; import fs from "node:fs/promises"; import path from "node:path"; +import { isGatewayArgv } from "../infra/gateway-process-argv.js"; +import { findVerifiedGatewayListenerPidsOnPortSync } from "../infra/gateway-processes.js"; import { inspectPortUsage } from 
"../infra/ports.js"; import { killProcessTree } from "../process/kill-tree.js"; +import { sleep } from "../utils.js"; import { parseCmdScriptCommandLine, quoteCmdScriptArg } from "./cmd-argv.js"; import { assertNoCmdLineBreak, parseCmdSetAssignment, renderCmdSetAssignment } from "./cmd-set.js"; import { resolveGatewayServiceDescription, resolveGatewayWindowsTaskName } from "./constants.js"; @@ -158,6 +161,12 @@ export type ScheduledTaskInfo = { lastRunResult?: string; }; +function hasListenerPid( + listener: T, +): listener is T & { pid: number } { + return typeof listener.pid === "number"; +} + export function parseSchtasksQuery(output: string): ScheduledTaskInfo { const entries = parseKeyValueOutput(output, ":"); const info: ScheduledTaskInfo = {}; @@ -311,8 +320,175 @@ function resolveConfiguredGatewayPort(env: GatewayServiceEnv): number | null { return Number.isFinite(parsed) && parsed > 0 ? parsed : null; } +function parsePositivePort(raw: string | undefined): number | null { + const value = raw?.trim(); + if (!value) { + return null; + } + if (!/^\d+$/.test(value)) { + return null; + } + const parsed = Number.parseInt(value, 10); + return Number.isFinite(parsed) && parsed > 0 && parsed <= 65535 ? parsed : null; +} + +function parsePortFromProgramArguments(programArguments?: string[]): number | null { + if (!programArguments?.length) { + return null; + } + for (let i = 0; i < programArguments.length; i += 1) { + const arg = programArguments[i]; + if (!arg) { + continue; + } + const inlineMatch = arg.match(/^--port=(\d+)$/); + if (inlineMatch) { + return parsePositivePort(inlineMatch[1]); + } + if (arg === "--port") { + return parsePositivePort(programArguments[i + 1]); + } + } + return null; +} + +async function resolveScheduledTaskPort(env: GatewayServiceEnv): Promise { + const command = await readScheduledTaskCommand(env).catch(() => null); + return ( + parsePortFromProgramArguments(command?.programArguments) ?? 
+ parsePositivePort(command?.environment?.OPENCLAW_GATEWAY_PORT) ?? + resolveConfiguredGatewayPort(env) + ); +} + +async function resolveScheduledTaskGatewayListenerPids(port: number): Promise { + const verified = findVerifiedGatewayListenerPidsOnPortSync(port); + if (verified.length > 0) { + return verified; + } + + const diagnostics = await inspectPortUsage(port).catch(() => null); + if (diagnostics?.status !== "busy") { + return []; + } + + const matchedGatewayPids = Array.from( + new Set( + diagnostics.listeners + .filter( + (listener) => + typeof listener.pid === "number" && + listener.commandLine && + isGatewayArgv(parseCmdScriptCommandLine(listener.commandLine), { + allowGatewayBinary: true, + }), + ) + .map((listener) => listener.pid as number), + ), + ); + if (matchedGatewayPids.length > 0) { + return matchedGatewayPids; + } + + return Array.from( + new Set( + diagnostics.listeners + .map((listener) => listener.pid) + .filter((pid): pid is number => typeof pid === "number" && Number.isFinite(pid) && pid > 0), + ), + ); +} + +async function terminateScheduledTaskGatewayListeners(env: GatewayServiceEnv): Promise { + const port = await resolveScheduledTaskPort(env); + if (!port) { + return []; + } + const pids = await resolveScheduledTaskGatewayListenerPids(port); + for (const pid of pids) { + await terminateGatewayProcessTree(pid, 300); + } + return pids; +} + +function isProcessAlive(pid: number): boolean { + try { + process.kill(pid, 0); + return true; + } catch { + return false; + } +} + +async function waitForProcessExit(pid: number, timeoutMs: number): Promise { + const deadline = Date.now() + timeoutMs; + while (Date.now() < deadline) { + if (!isProcessAlive(pid)) { + return true; + } + await sleep(100); + } + return !isProcessAlive(pid); +} + +async function terminateGatewayProcessTree(pid: number, graceMs: number): Promise { + if (process.platform !== "win32") { + killProcessTree(pid, { graceMs }); + return; + } + const taskkillPath = path.join( + 
process.env.SystemRoot ?? "C:\\Windows", + "System32", + "taskkill.exe", + ); + spawnSync(taskkillPath, ["/T", "/PID", String(pid)], { + stdio: "ignore", + timeout: 5_000, + windowsHide: true, + }); + if (await waitForProcessExit(pid, graceMs)) { + return; + } + spawnSync(taskkillPath, ["/F", "/T", "/PID", String(pid)], { + stdio: "ignore", + timeout: 5_000, + windowsHide: true, + }); + await waitForProcessExit(pid, 5_000); +} + +async function waitForGatewayPortRelease(port: number, timeoutMs = 5_000): Promise { + const deadline = Date.now() + timeoutMs; + while (Date.now() < deadline) { + const diagnostics = await inspectPortUsage(port).catch(() => null); + if (diagnostics?.status === "free") { + return true; + } + await sleep(250); + } + return false; +} + +async function terminateBusyPortListeners(port: number): Promise { + const diagnostics = await inspectPortUsage(port).catch(() => null); + if (diagnostics?.status !== "busy") { + return []; + } + const pids = Array.from( + new Set( + diagnostics.listeners + .map((listener) => listener.pid) + .filter((pid): pid is number => typeof pid === "number" && Number.isFinite(pid) && pid > 0), + ), + ); + for (const pid of pids) { + await terminateGatewayProcessTree(pid, 300); + } + return pids; +} + async function resolveFallbackRuntime(env: GatewayServiceEnv): Promise { - const port = resolveConfiguredGatewayPort(env); + const port = (await resolveScheduledTaskPort(env)) ?? resolveConfiguredGatewayPort(env); if (!port) { return { status: "unknown", @@ -326,7 +502,7 @@ async function resolveFallbackRuntime(env: GatewayServiceEnv): Promise typeof item.pid === "number"); + const listener = diagnostics.listeners.find(hasListenerPid); return { status: diagnostics.status === "busy" ? "running" : "stopped", ...(listener?.pid ? 
{ pid: listener.pid } : {}), @@ -343,18 +519,28 @@ async function stopStartupEntry( ): Promise { const runtime = await resolveFallbackRuntime(env); if (typeof runtime.pid === "number" && runtime.pid > 0) { - killProcessTree(runtime.pid, { graceMs: 300 }); + await terminateGatewayProcessTree(runtime.pid, 300); } stdout.write(`${formatLine("Stopped Windows login item", resolveTaskName(env))}\n`); } +async function terminateInstalledStartupRuntime(env: GatewayServiceEnv): Promise { + if (!(await isStartupEntryInstalled(env))) { + return; + } + const runtime = await resolveFallbackRuntime(env); + if (typeof runtime.pid === "number" && runtime.pid > 0) { + await terminateGatewayProcessTree(runtime.pid, 300); + } +} + async function restartStartupEntry( env: GatewayServiceEnv, stdout: NodeJS.WritableStream, ): Promise { const runtime = await resolveFallbackRuntime(env); if (typeof runtime.pid === "number" && runtime.pid > 0) { - killProcessTree(runtime.pid, { graceMs: 300 }); + await terminateGatewayProcessTree(runtime.pid, 300); } launchFallbackTaskScript(resolveTaskScriptPath(env)); stdout.write(`${formatLine("Restarted Windows login item", resolveTaskName(env))}\n`); @@ -489,6 +675,19 @@ export async function stopScheduledTask({ stdout, env }: GatewayServiceControlAr if (res.code !== 0 && !isTaskNotRunning(res)) { throw new Error(`schtasks end failed: ${res.stderr || res.stdout}`.trim()); } + const stopPort = await resolveScheduledTaskPort(effectiveEnv); + await terminateScheduledTaskGatewayListeners(effectiveEnv); + await terminateInstalledStartupRuntime(effectiveEnv); + if (stopPort) { + const released = await waitForGatewayPortRelease(stopPort); + if (!released) { + await terminateBusyPortListeners(stopPort); + const releasedAfterForce = await waitForGatewayPortRelease(stopPort, 2_000); + if (!releasedAfterForce) { + throw new Error(`gateway port ${stopPort} is still busy after stop`); + } + } + } stdout.write(`${formatLine("Stopped Scheduled Task", taskName)}\n`); 
} @@ -512,6 +711,19 @@ export async function restartScheduledTask({ } const taskName = resolveTaskName(effectiveEnv); await execSchtasks(["/End", "/TN", taskName]); + const restartPort = await resolveScheduledTaskPort(effectiveEnv); + await terminateScheduledTaskGatewayListeners(effectiveEnv); + await terminateInstalledStartupRuntime(effectiveEnv); + if (restartPort) { + const released = await waitForGatewayPortRelease(restartPort); + if (!released) { + await terminateBusyPortListeners(restartPort); + const releasedAfterForce = await waitForGatewayPortRelease(restartPort, 2_000); + if (!releasedAfterForce) { + throw new Error(`gateway port ${restartPort} is still busy before restart`); + } + } + } const res = await execSchtasks(["/Run", "/TN", taskName]); if (res.code !== 0) { throw new Error(`schtasks run failed: ${res.stderr || res.stdout}`.trim()); diff --git a/src/daemon/test-helpers/schtasks-base-mocks.ts b/src/daemon/test-helpers/schtasks-base-mocks.ts new file mode 100644 index 00000000000..48933ecdd1c --- /dev/null +++ b/src/daemon/test-helpers/schtasks-base-mocks.ts @@ -0,0 +1,22 @@ +import { vi } from "vitest"; +import { + inspectPortUsage, + killProcessTree, + schtasksCalls, + schtasksResponses, +} from "./schtasks-fixtures.js"; + +vi.mock("../schtasks-exec.js", () => ({ + execSchtasks: async (argv: string[]) => { + schtasksCalls.push(argv); + return schtasksResponses.shift() ?? 
{ code: 0, stdout: "", stderr: "" }; + }, +})); + +vi.mock("../../infra/ports.js", () => ({ + inspectPortUsage: (...args: unknown[]) => inspectPortUsage(...args), +})); + +vi.mock("../../process/kill-tree.js", () => ({ + killProcessTree: (...args: unknown[]) => killProcessTree(...args), +})); diff --git a/src/daemon/test-helpers/schtasks-fixtures.ts b/src/daemon/test-helpers/schtasks-fixtures.ts new file mode 100644 index 00000000000..a89d7a0eb2e --- /dev/null +++ b/src/daemon/test-helpers/schtasks-fixtures.ts @@ -0,0 +1,34 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { vi } from "vitest"; + +export const schtasksResponses: Array<{ code: number; stdout: string; stderr: string }> = []; +export const schtasksCalls: string[][] = []; +export const inspectPortUsage = vi.fn(); +export const killProcessTree = vi.fn(); + +export async function withWindowsEnv( + prefix: string, + run: (params: { tmpDir: string; env: Record }) => Promise, +) { + const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), prefix)); + const env = { + USERPROFILE: tmpDir, + APPDATA: path.join(tmpDir, "AppData", "Roaming"), + OPENCLAW_PROFILE: "default", + OPENCLAW_GATEWAY_PORT: "18789", + }; + try { + await run({ tmpDir, env }); + } finally { + await fs.rm(tmpDir, { recursive: true, force: true }); + } +} + +export function resetSchtasksBaseMocks() { + schtasksResponses.length = 0; + schtasksCalls.length = 0; + inspectPortUsage.mockReset(); + killProcessTree.mockReset(); +} diff --git a/src/discord/monitor/allow-list.ts b/src/discord/monitor/allow-list.ts index 7c1250cb8ef..583d4fa7cd2 100644 --- a/src/discord/monitor/allow-list.ts +++ b/src/discord/monitor/allow-list.ts @@ -103,6 +103,21 @@ export function normalizeDiscordSlug(value: string) { .replace(/^-+|-+$/g, ""); } +function resolveDiscordAllowListNameMatch( + list: DiscordAllowList, + candidate: { name?: string; tag?: string }, +): { matchKey: string; matchSource: "name" | "tag" } | 
null { + const nameSlug = candidate.name ? normalizeDiscordSlug(candidate.name) : ""; + if (nameSlug && list.names.has(nameSlug)) { + return { matchKey: nameSlug, matchSource: "name" }; + } + const tagSlug = candidate.tag ? normalizeDiscordSlug(candidate.tag) : ""; + if (tagSlug && list.names.has(tagSlug)) { + return { matchKey: tagSlug, matchSource: "tag" }; + } + return null; +} + export function allowListMatches( list: DiscordAllowList, candidate: { id?: string; name?: string; tag?: string }, @@ -115,11 +130,7 @@ export function allowListMatches( return true; } if (params?.allowNameMatching === true) { - const slug = candidate.name ? normalizeDiscordSlug(candidate.name) : ""; - if (slug && list.names.has(slug)) { - return true; - } - if (candidate.tag && list.names.has(normalizeDiscordSlug(candidate.tag))) { + if (resolveDiscordAllowListNameMatch(list, candidate)) { return true; } } @@ -139,13 +150,9 @@ export function resolveDiscordAllowListMatch(params: { return { allowed: true, matchKey: candidate.id, matchSource: "id" }; } if (params.allowNameMatching === true) { - const nameSlug = candidate.name ? normalizeDiscordSlug(candidate.name) : ""; - if (nameSlug && allowList.names.has(nameSlug)) { - return { allowed: true, matchKey: nameSlug, matchSource: "name" }; - } - const tagSlug = candidate.tag ? 
normalizeDiscordSlug(candidate.tag) : ""; - if (tagSlug && allowList.names.has(tagSlug)) { - return { allowed: true, matchKey: tagSlug, matchSource: "tag" }; + const namedMatch = resolveDiscordAllowListNameMatch(allowList, candidate); + if (namedMatch) { + return { allowed: true, ...namedMatch }; } } return { allowed: false }; diff --git a/src/discord/monitor/provider.proxy.test.ts b/src/discord/monitor/provider.proxy.test.ts index 0b45fd2a2e7..9a15dcef94b 100644 --- a/src/discord/monitor/provider.proxy.test.ts +++ b/src/discord/monitor/provider.proxy.test.ts @@ -123,6 +123,30 @@ describe("createDiscordGatewayPlugin", () => { }; } + async function registerGatewayClient(plugin: unknown) { + await ( + plugin as { + registerClient: (client: { options: { token: string } }) => Promise; + } + ).registerClient({ + options: { token: "token-123" }, + }); + } + + async function expectGatewayRegisterFetchFailure(response: Response) { + const runtime = createRuntime(); + globalFetchMock.mockResolvedValue(response); + const plugin = createDiscordGatewayPlugin({ + discordConfig: {}, + runtime, + }); + + await expect(registerGatewayClient(plugin)).rejects.toThrow( + "Failed to get gateway information from Discord: fetch failed", + ); + expect(baseRegisterClientSpy).not.toHaveBeenCalled(); + } + beforeEach(() => { vi.stubGlobal("fetch", globalFetchMock); baseRegisterClientSpy.mockClear(); @@ -165,28 +189,12 @@ describe("createDiscordGatewayPlugin", () => { }); it("maps plain-text Discord 503 responses to fetch failed", async () => { - const runtime = createRuntime(); - globalFetchMock.mockResolvedValue({ + await expectGatewayRegisterFetchFailure({ ok: false, status: 503, text: async () => "upstream connect error or disconnect/reset before headers. 
reset reason: overflow", } as Response); - const plugin = createDiscordGatewayPlugin({ - discordConfig: {}, - runtime, - }); - - await expect( - ( - plugin as unknown as { - registerClient: (client: { options: { token: string } }) => Promise; - } - ).registerClient({ - options: { token: "token-123" }, - }), - ).rejects.toThrow("Failed to get gateway information from Discord: fetch failed"); - expect(baseRegisterClientSpy).not.toHaveBeenCalled(); }); it("uses proxy agent for gateway WebSocket when configured", async () => { @@ -257,28 +265,12 @@ describe("createDiscordGatewayPlugin", () => { }); it("maps body read failures to fetch failed", async () => { - const runtime = createRuntime(); - globalFetchMock.mockResolvedValue({ + await expectGatewayRegisterFetchFailure({ ok: true, status: 200, text: async () => { throw new Error("body stream closed"); }, } as unknown as Response); - const plugin = createDiscordGatewayPlugin({ - discordConfig: {}, - runtime, - }); - - await expect( - ( - plugin as unknown as { - registerClient: (client: { options: { token: string } }) => Promise; - } - ).registerClient({ - options: { token: "token-123" }, - }), - ).rejects.toThrow("Failed to get gateway information from Discord: fetch failed"); - expect(baseRegisterClientSpy).not.toHaveBeenCalled(); }); }); diff --git a/src/gateway/auth-rate-limit.test.ts b/src/gateway/auth-rate-limit.test.ts index 13ff65eb972..68fa8c14c9d 100644 --- a/src/gateway/auth-rate-limit.test.ts +++ b/src/gateway/auth-rate-limit.test.ts @@ -1,6 +1,7 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import { AUTH_RATE_LIMIT_SCOPE_DEVICE_TOKEN, + AUTH_RATE_LIMIT_SCOPE_HOOK_AUTH, AUTH_RATE_LIMIT_SCOPE_SHARED_SECRET, createAuthRateLimiter, type AuthRateLimiter, @@ -8,6 +9,23 @@ import { describe("auth rate limiter", () => { let limiter: AuthRateLimiter; + const baseConfig = { maxAttempts: 2, windowMs: 60_000, lockoutMs: 60_000 }; + + function createLimiter( + overrides?: Partial<{ + maxAttempts: 
number; + windowMs: number; + lockoutMs: number; + exemptLoopback: boolean; + pruneIntervalMs: number; + }>, + ) { + limiter = createAuthRateLimiter({ + ...baseConfig, + ...overrides, + }); + return limiter; + } afterEach(() => { limiter?.dispose(); @@ -32,7 +50,7 @@ describe("auth rate limiter", () => { }); it("blocks the IP once maxAttempts is reached", () => { - limiter = createAuthRateLimiter({ maxAttempts: 2, windowMs: 60_000, lockoutMs: 10_000 }); + createLimiter({ lockoutMs: 10_000 }); limiter.recordFailure("10.0.0.2"); limiter.recordFailure("10.0.0.2"); const result = limiter.check("10.0.0.2"); @@ -42,12 +60,20 @@ describe("auth rate limiter", () => { expect(result.retryAfterMs).toBeLessThanOrEqual(10_000); }); + it("treats blank scopes as the default scope", () => { + createLimiter(); + limiter.recordFailure("10.0.0.8", " "); + limiter.recordFailure("10.0.0.8"); + expect(limiter.check("10.0.0.8").allowed).toBe(false); + expect(limiter.check("10.0.0.8", " \t ").allowed).toBe(false); + }); + // ---------- lockout expiry ---------- it("unblocks after the lockout period expires", () => { vi.useFakeTimers(); try { - limiter = createAuthRateLimiter({ maxAttempts: 2, windowMs: 60_000, lockoutMs: 5_000 }); + createLimiter({ lockoutMs: 5_000 }); limiter.recordFailure("10.0.0.3"); limiter.recordFailure("10.0.0.3"); expect(limiter.check("10.0.0.3").allowed).toBe(false); @@ -62,6 +88,25 @@ describe("auth rate limiter", () => { } }); + it("does not extend lockout when failures are recorded while already locked", () => { + vi.useFakeTimers(); + try { + createLimiter({ lockoutMs: 5_000 }); + limiter.recordFailure("10.0.0.33"); + limiter.recordFailure("10.0.0.33"); + const locked = limiter.check("10.0.0.33"); + expect(locked.allowed).toBe(false); + const initialRetryAfter = locked.retryAfterMs; + + vi.advanceTimersByTime(1_000); + limiter.recordFailure("10.0.0.33"); + const afterExtraFailure = limiter.check("10.0.0.33"); + 
expect(afterExtraFailure.retryAfterMs).toBeLessThanOrEqual(initialRetryAfter - 1_000); + } finally { + vi.useRealTimers(); + } + }); + // ---------- sliding window expiry ---------- it("expires old failures outside the window", () => { @@ -83,7 +128,7 @@ describe("auth rate limiter", () => { // ---------- per-IP isolation ---------- it("tracks IPs independently", () => { - limiter = createAuthRateLimiter({ maxAttempts: 2, windowMs: 60_000, lockoutMs: 60_000 }); + createLimiter(); limiter.recordFailure("10.0.0.10"); limiter.recordFailure("10.0.0.10"); expect(limiter.check("10.0.0.10").allowed).toBe(false); @@ -99,26 +144,22 @@ describe("auth rate limiter", () => { expect(limiter.check("::ffff:1.2.3.4").allowed).toBe(false); }); - it("tracks scopes independently for the same IP", () => { - limiter = createAuthRateLimiter({ maxAttempts: 1, windowMs: 60_000, lockoutMs: 60_000 }); - limiter.recordFailure("10.0.0.12", AUTH_RATE_LIMIT_SCOPE_SHARED_SECRET); - expect(limiter.check("10.0.0.12", AUTH_RATE_LIMIT_SCOPE_SHARED_SECRET).allowed).toBe(false); - expect(limiter.check("10.0.0.12", AUTH_RATE_LIMIT_SCOPE_DEVICE_TOKEN).allowed).toBe(true); - }); + it.each([AUTH_RATE_LIMIT_SCOPE_DEVICE_TOKEN, AUTH_RATE_LIMIT_SCOPE_HOOK_AUTH])( + "tracks %s independently from shared-secret for the same IP", + (otherScope) => { + limiter = createAuthRateLimiter({ maxAttempts: 1, windowMs: 60_000, lockoutMs: 60_000 }); + limiter.recordFailure("10.0.0.12", AUTH_RATE_LIMIT_SCOPE_SHARED_SECRET); + expect(limiter.check("10.0.0.12", AUTH_RATE_LIMIT_SCOPE_SHARED_SECRET).allowed).toBe(false); + expect(limiter.check("10.0.0.12", otherScope).allowed).toBe(true); + }, + ); // ---------- loopback exemption ---------- - it("exempts loopback addresses by default", () => { + it.each(["127.0.0.1", "::1"])("exempts loopback address %s by default", (ip) => { limiter = createAuthRateLimiter({ maxAttempts: 1, windowMs: 60_000, lockoutMs: 60_000 }); - limiter.recordFailure("127.0.0.1"); - // Should still be 
allowed even though maxAttempts is 1. - expect(limiter.check("127.0.0.1").allowed).toBe(true); - }); - - it("exempts IPv6 loopback by default", () => { - limiter = createAuthRateLimiter({ maxAttempts: 1, windowMs: 60_000, lockoutMs: 60_000 }); - limiter.recordFailure("::1"); - expect(limiter.check("::1").allowed).toBe(true); + limiter.recordFailure(ip); + expect(limiter.check(ip).allowed).toBe(true); }); it("rate-limits loopback when exemptLoopback is false", () => { @@ -135,7 +176,7 @@ describe("auth rate limiter", () => { // ---------- reset ---------- it("clears tracking state when reset is called", () => { - limiter = createAuthRateLimiter({ maxAttempts: 2, windowMs: 60_000, lockoutMs: 60_000 }); + createLimiter(); limiter.recordFailure("10.0.0.20"); limiter.recordFailure("10.0.0.20"); expect(limiter.check("10.0.0.20").allowed).toBe(false); @@ -193,7 +234,7 @@ describe("auth rate limiter", () => { // ---------- undefined / empty IP ---------- it("normalizes undefined IP to 'unknown'", () => { - limiter = createAuthRateLimiter({ maxAttempts: 2, windowMs: 60_000, lockoutMs: 60_000 }); + createLimiter(); limiter.recordFailure(undefined); limiter.recordFailure(undefined); expect(limiter.check(undefined).allowed).toBe(false); @@ -201,7 +242,7 @@ describe("auth rate limiter", () => { }); it("normalizes empty-string IP to 'unknown'", () => { - limiter = createAuthRateLimiter({ maxAttempts: 2, windowMs: 60_000, lockoutMs: 60_000 }); + createLimiter(); limiter.recordFailure(""); limiter.recordFailure(""); expect(limiter.check("").allowed).toBe(false); diff --git a/src/gateway/client.test.ts b/src/gateway/client.test.ts index 04217b96a65..d9bcc55b722 100644 --- a/src/gateway/client.test.ts +++ b/src/gateway/client.test.ts @@ -101,6 +101,7 @@ vi.mock("../logger.js", async (importOriginal) => { }); const { GatewayClient } = await import("./client.js"); +type GatewayClientInstance = InstanceType; function getLatestWs(): MockWebSocket { const ws = wsInstances.at(-1); @@ 
-344,6 +345,20 @@ describe("GatewayClient connect auth payload", () => { return parsed.params?.auth ?? {}; } + function connectRequestFrom(ws: MockWebSocket) { + const raw = ws.sent.find((frame) => frame.includes('"method":"connect"')); + expect(raw).toBeTruthy(); + return JSON.parse(raw ?? "{}") as { + id?: string; + params?: { + auth?: { + token?: string; + deviceToken?: string; + }; + }; + }; + } + function emitConnectChallenge(ws: MockWebSocket, nonce = "nonce-1") { ws.emitMessage( JSON.stringify({ @@ -354,6 +369,63 @@ describe("GatewayClient connect auth payload", () => { ); } + function startClientAndConnect(params: { client: GatewayClientInstance; nonce?: string }) { + params.client.start(); + const ws = getLatestWs(); + ws.emitOpen(); + emitConnectChallenge(ws, params.nonce); + return { ws, connect: connectRequestFrom(ws) }; + } + + function emitConnectFailure( + ws: MockWebSocket, + connectId: string | undefined, + details: Record, + ) { + ws.emitMessage( + JSON.stringify({ + type: "res", + id: connectId, + ok: false, + error: { + code: "INVALID_REQUEST", + message: "unauthorized", + details, + }, + }), + ); + } + + async function expectRetriedConnectAuth(params: { + firstWs: MockWebSocket; + connectId: string | undefined; + failureDetails: Record; + }) { + emitConnectFailure(params.firstWs, params.connectId, params.failureDetails); + await vi.waitFor(() => expect(wsInstances.length).toBeGreaterThan(1), { timeout: 3_000 }); + const ws = getLatestWs(); + ws.emitOpen(); + emitConnectChallenge(ws, "nonce-2"); + return connectFrameFrom(ws); + } + + async function expectNoReconnectAfterConnectFailure(params: { + client: GatewayClientInstance; + firstWs: MockWebSocket; + connectId: string | undefined; + failureDetails: Record; + }) { + vi.useFakeTimers(); + try { + emitConnectFailure(params.firstWs, params.connectId, params.failureDetails); + await vi.advanceTimersByTimeAsync(30_000); + expect(wsInstances).toHaveLength(1); + } finally { + params.client.stop(); + 
vi.useRealTimers(); + } + } + it("uses explicit shared token and does not inject stored device token", () => { loadDeviceAuthTokenMock.mockReturnValue({ token: "stored-device-token" }); const client = new GatewayClient({ @@ -457,37 +529,16 @@ describe("GatewayClient connect auth payload", () => { token: "shared-token", }); - client.start(); - const ws1 = getLatestWs(); - ws1.emitOpen(); - emitConnectChallenge(ws1); - const firstConnectRaw = ws1.sent.find((frame) => frame.includes('"method":"connect"')); - expect(firstConnectRaw).toBeTruthy(); - const firstConnect = JSON.parse(firstConnectRaw ?? "{}") as { - id?: string; - params?: { auth?: { token?: string; deviceToken?: string } }; - }; + const { ws: ws1, connect: firstConnect } = startClientAndConnect({ client }); expect(firstConnect.params?.auth?.token).toBe("shared-token"); expect(firstConnect.params?.auth?.deviceToken).toBeUndefined(); - ws1.emitMessage( - JSON.stringify({ - type: "res", - id: firstConnect.id, - ok: false, - error: { - code: "INVALID_REQUEST", - message: "unauthorized", - details: { code: "AUTH_TOKEN_MISMATCH", canRetryWithDeviceToken: true }, - }, - }), - ); - - await vi.waitFor(() => expect(wsInstances.length).toBeGreaterThan(1), { timeout: 3_000 }); - const ws2 = getLatestWs(); - ws2.emitOpen(); - emitConnectChallenge(ws2, "nonce-2"); - expect(connectFrameFrom(ws2)).toMatchObject({ + const retriedAuth = await expectRetriedConnectAuth({ + firstWs: ws1, + connectId: firstConnect.id, + failureDetails: { code: "AUTH_TOKEN_MISMATCH", canRetryWithDeviceToken: true }, + }); + expect(retriedAuth).toMatchObject({ token: "shared-token", deviceToken: "stored-device-token", }); @@ -501,32 +552,13 @@ describe("GatewayClient connect auth payload", () => { token: "shared-token", }); - client.start(); - const ws1 = getLatestWs(); - ws1.emitOpen(); - emitConnectChallenge(ws1); - const firstConnectRaw = ws1.sent.find((frame) => frame.includes('"method":"connect"')); - expect(firstConnectRaw).toBeTruthy(); - 
const firstConnect = JSON.parse(firstConnectRaw ?? "{}") as { id?: string }; - - ws1.emitMessage( - JSON.stringify({ - type: "res", - id: firstConnect.id, - ok: false, - error: { - code: "INVALID_REQUEST", - message: "unauthorized", - details: { code: "AUTH_UNAUTHORIZED", recommendedNextStep: "retry_with_device_token" }, - }, - }), - ); - - await vi.waitFor(() => expect(wsInstances.length).toBeGreaterThan(1), { timeout: 3_000 }); - const ws2 = getLatestWs(); - ws2.emitOpen(); - emitConnectChallenge(ws2, "nonce-2"); - expect(connectFrameFrom(ws2)).toMatchObject({ + const { ws: ws1, connect: firstConnect } = startClientAndConnect({ client }); + const retriedAuth = await expectRetriedConnectAuth({ + firstWs: ws1, + connectId: firstConnect.id, + failureDetails: { code: "AUTH_UNAUTHORIZED", recommendedNextStep: "retry_with_device_token" }, + }); + expect(retriedAuth).toMatchObject({ token: "shared-token", deviceToken: "stored-device-token", }); @@ -534,71 +566,33 @@ describe("GatewayClient connect auth payload", () => { }); it("does not auto-reconnect on AUTH_TOKEN_MISSING connect failures", async () => { - vi.useFakeTimers(); const client = new GatewayClient({ url: "ws://127.0.0.1:18789", token: "shared-token", }); - client.start(); - const ws1 = getLatestWs(); - ws1.emitOpen(); - emitConnectChallenge(ws1); - const firstConnectRaw = ws1.sent.find((frame) => frame.includes('"method":"connect"')); - expect(firstConnectRaw).toBeTruthy(); - const firstConnect = JSON.parse(firstConnectRaw ?? 
"{}") as { id?: string }; - - ws1.emitMessage( - JSON.stringify({ - type: "res", - id: firstConnect.id, - ok: false, - error: { - code: "INVALID_REQUEST", - message: "unauthorized", - details: { code: "AUTH_TOKEN_MISSING" }, - }, - }), - ); - - await vi.advanceTimersByTimeAsync(30_000); - expect(wsInstances).toHaveLength(1); - client.stop(); - vi.useRealTimers(); + const { ws: ws1, connect: firstConnect } = startClientAndConnect({ client }); + await expectNoReconnectAfterConnectFailure({ + client, + firstWs: ws1, + connectId: firstConnect.id, + failureDetails: { code: "AUTH_TOKEN_MISSING" }, + }); }); it("does not auto-reconnect on token mismatch when retry is not trusted", async () => { - vi.useFakeTimers(); loadDeviceAuthTokenMock.mockReturnValue({ token: "stored-device-token" }); const client = new GatewayClient({ url: "wss://gateway.example.com:18789", token: "shared-token", }); - client.start(); - const ws1 = getLatestWs(); - ws1.emitOpen(); - emitConnectChallenge(ws1); - const firstConnectRaw = ws1.sent.find((frame) => frame.includes('"method":"connect"')); - expect(firstConnectRaw).toBeTruthy(); - const firstConnect = JSON.parse(firstConnectRaw ?? 
"{}") as { id?: string }; - - ws1.emitMessage( - JSON.stringify({ - type: "res", - id: firstConnect.id, - ok: false, - error: { - code: "INVALID_REQUEST", - message: "unauthorized", - details: { code: "AUTH_TOKEN_MISMATCH", canRetryWithDeviceToken: true }, - }, - }), - ); - - await vi.advanceTimersByTimeAsync(30_000); - expect(wsInstances).toHaveLength(1); - client.stop(); - vi.useRealTimers(); + const { ws: ws1, connect: firstConnect } = startClientAndConnect({ client }); + await expectNoReconnectAfterConnectFailure({ + client, + firstWs: ws1, + connectId: firstConnect.id, + failureDetails: { code: "AUTH_TOKEN_MISMATCH", canRetryWithDeviceToken: true }, + }); }); }); diff --git a/src/gateway/client.ts b/src/gateway/client.ts index 9e98a9bc0c4..f2c7a184dd8 100644 --- a/src/gateway/client.ts +++ b/src/gateway/client.ts @@ -95,7 +95,7 @@ export type GatewayClientOptions = { commands?: string[]; permissions?: Record; pathEnv?: string; - deviceIdentity?: DeviceIdentity; + deviceIdentity?: DeviceIdentity | null; minProtocol?: number; maxProtocol?: number; tlsFingerprint?: string; @@ -138,7 +138,10 @@ export class GatewayClient { constructor(opts: GatewayClientOptions) { this.opts = { ...opts, - deviceIdentity: opts.deviceIdentity ?? loadOrCreateDeviceIdentity(), + deviceIdentity: + opts.deviceIdentity === null + ? undefined + : (opts.deviceIdentity ?? 
loadOrCreateDeviceIdentity()), }; } diff --git a/src/gateway/control-ui-routing.test.ts b/src/gateway/control-ui-routing.test.ts index f3f172cc7d4..929c645cd01 100644 --- a/src/gateway/control-ui-routing.test.ts +++ b/src/gateway/control-ui-routing.test.ts @@ -2,65 +2,114 @@ import { describe, expect, it } from "vitest"; import { classifyControlUiRequest } from "./control-ui-routing.js"; describe("classifyControlUiRequest", () => { - it("falls through non-read root requests for plugin webhooks", () => { - const classified = classifyControlUiRequest({ - basePath: "", - pathname: "/bluebubbles-webhook", - search: "", - method: "POST", + describe("root-mounted control ui", () => { + it.each([ + { + name: "serves the root entrypoint", + pathname: "/", + method: "GET", + expected: { kind: "serve" as const }, + }, + { + name: "serves other read-only SPA routes", + pathname: "/chat", + method: "HEAD", + expected: { kind: "serve" as const }, + }, + { + name: "keeps health probes outside the SPA catch-all", + pathname: "/healthz", + method: "GET", + expected: { kind: "not-control-ui" as const }, + }, + { + name: "keeps readiness probes outside the SPA catch-all", + pathname: "/ready", + method: "HEAD", + expected: { kind: "not-control-ui" as const }, + }, + { + name: "keeps plugin routes outside the SPA catch-all", + pathname: "/plugins/webhook", + method: "GET", + expected: { kind: "not-control-ui" as const }, + }, + { + name: "keeps API routes outside the SPA catch-all", + pathname: "/api/sessions", + method: "GET", + expected: { kind: "not-control-ui" as const }, + }, + { + name: "returns not-found for legacy ui routes", + pathname: "/ui/settings", + method: "GET", + expected: { kind: "not-found" as const }, + }, + { + name: "falls through non-read requests", + pathname: "/bluebubbles-webhook", + method: "POST", + expected: { kind: "not-control-ui" as const }, + }, + ])("$name", ({ pathname, method, expected }) => { + expect( + classifyControlUiRequest({ + basePath: "", 
+ pathname, + search: "", + method, + }), + ).toEqual(expected); }); - expect(classified).toEqual({ kind: "not-control-ui" }); }); - it("returns not-found for legacy /ui routes when root-mounted", () => { - const classified = classifyControlUiRequest({ - basePath: "", - pathname: "/ui/settings", - search: "", - method: "GET", - }); - expect(classified).toEqual({ kind: "not-found" }); - }); - - it("falls through basePath non-read methods for plugin webhooks", () => { - const classified = classifyControlUiRequest({ - basePath: "/openclaw", - pathname: "/openclaw", - search: "", - method: "POST", - }); - expect(classified).toEqual({ kind: "not-control-ui" }); - }); - - it("falls through PUT/DELETE/PATCH/OPTIONS under basePath for plugin handlers", () => { - for (const method of ["PUT", "DELETE", "PATCH", "OPTIONS"]) { - const classified = classifyControlUiRequest({ - basePath: "/openclaw", + describe("basePath-mounted control ui", () => { + it.each([ + { + name: "redirects the basePath entrypoint", + pathname: "/openclaw", + search: "?foo=1", + method: "GET", + expected: { kind: "redirect" as const, location: "/openclaw/?foo=1" }, + }, + { + name: "serves nested read-only routes", + pathname: "/openclaw/chat", + search: "", + method: "HEAD", + expected: { kind: "serve" as const }, + }, + { + name: "falls through unmatched paths", + pathname: "/elsewhere/chat", + search: "", + method: "GET", + expected: { kind: "not-control-ui" as const }, + }, + { + name: "falls through write requests to the basePath entrypoint", + pathname: "/openclaw", + search: "", + method: "POST", + expected: { kind: "not-control-ui" as const }, + }, + ...["PUT", "DELETE", "PATCH", "OPTIONS"].map((method) => ({ + name: `falls through ${method} subroute requests`, pathname: "/openclaw/webhook", search: "", method, - }); - expect(classified, `${method} should fall through`).toEqual({ kind: "not-control-ui" }); - } - }); - - it("returns redirect for basePath entrypoint GET", () => { - const 
classified = classifyControlUiRequest({ - basePath: "/openclaw", - pathname: "/openclaw", - search: "?foo=1", - method: "GET", + expected: { kind: "not-control-ui" as const }, + })), + ])("$name", ({ pathname, search, method, expected }) => { + expect( + classifyControlUiRequest({ + basePath: "/openclaw", + pathname, + search, + method, + }), + ).toEqual(expected); }); - expect(classified).toEqual({ kind: "redirect", location: "/openclaw/?foo=1" }); - }); - - it("classifies basePath subroutes as control ui", () => { - const classified = classifyControlUiRequest({ - basePath: "/openclaw", - pathname: "/openclaw/chat", - search: "", - method: "HEAD", - }); - expect(classified).toEqual({ kind: "serve" }); }); }); diff --git a/src/gateway/control-ui.http.test.ts b/src/gateway/control-ui.http.test.ts index a63bb1590e2..e6b74c3d135 100644 --- a/src/gateway/control-ui.http.test.ts +++ b/src/gateway/control-ui.http.test.ts @@ -85,6 +85,13 @@ describe("handleControlUiHttpRequest", () => { return { assetsDir, filePath }; } + async function createHardlinkedAssetFile(rootPath: string) { + const { filePath } = await writeAssetFile(rootPath, "app.js", "console.log('hi');"); + const hardlinkPath = path.join(path.dirname(filePath), "app.hl.js"); + await fs.link(filePath, hardlinkPath); + return hardlinkPath; + } + async function withBasePathRootFixture(params: { siblingDir: string; fn: (paths: { root: string; sibling: string }) => Promise; @@ -353,10 +360,7 @@ describe("handleControlUiHttpRequest", () => { it("rejects hardlinked asset files for custom/resolved roots (security boundary)", async () => { await withControlUiRoot({ fn: async (tmp) => { - const assetsDir = path.join(tmp, "assets"); - await fs.mkdir(assetsDir, { recursive: true }); - await fs.writeFile(path.join(assetsDir, "app.js"), "console.log('hi');"); - await fs.link(path.join(assetsDir, "app.js"), path.join(assetsDir, "app.hl.js")); + await createHardlinkedAssetFile(tmp); const { res, end, handled } = 
runControlUiRequest({ url: "/assets/app.hl.js", @@ -374,10 +378,7 @@ describe("handleControlUiHttpRequest", () => { it("serves hardlinked asset files for bundled roots (pnpm global install)", async () => { await withControlUiRoot({ fn: async (tmp) => { - const assetsDir = path.join(tmp, "assets"); - await fs.mkdir(assetsDir, { recursive: true }); - await fs.writeFile(path.join(assetsDir, "app.js"), "console.log('hi');"); - await fs.link(path.join(assetsDir, "app.js"), path.join(assetsDir, "app.hl.js")); + await createHardlinkedAssetFile(tmp); const { res, end, handled } = runControlUiRequest({ url: "/assets/app.hl.js", diff --git a/src/gateway/credentials.test.ts b/src/gateway/credentials.test.ts index a3f3a8b9f45..a927395e833 100644 --- a/src/gateway/credentials.test.ts +++ b/src/gateway/credentials.test.ts @@ -71,6 +71,43 @@ function resolveLocalModeWithUnresolvedPassword(mode: "none" | "trusted-proxy") }); } +function expectUnresolvedLocalAuthSecretRefFailure(params: { + authMode: "token" | "password"; + secretId: string; + errorPath: "gateway.auth.token" | "gateway.auth.password"; + remote?: { token?: string; password?: string }; +}) { + const localAuth = + params.authMode === "token" + ? 
{ + mode: "token" as const, + token: { source: "env", provider: "default", id: params.secretId }, + } + : { + mode: "password" as const, + password: { source: "env", provider: "default", id: params.secretId }, + }; + + expect(() => + resolveGatewayCredentialsFromConfig({ + cfg: { + gateway: { + mode: "local", + auth: localAuth, + remote: params.remote, + }, + secrets: { + providers: { + default: { source: "env" }, + }, + }, + } as unknown as OpenClawConfig, + env: {} as NodeJS.ProcessEnv, + includeLegacyEnv: false, + }), + ).toThrow(params.errorPath); +} + describe("resolveGatewayCredentialsFromConfig", () => { it("prefers explicit credentials over config and environment", () => { const resolved = resolveGatewayCredentialsFor( @@ -159,78 +196,29 @@ describe("resolveGatewayCredentialsFromConfig", () => { }); it("fails closed when local token SecretRef is unresolved and remote token fallback exists", () => { - expect(() => - resolveGatewayCredentialsFromConfig({ - cfg: { - gateway: { - mode: "local", - auth: { - mode: "token", - token: { source: "env", provider: "default", id: "MISSING_LOCAL_TOKEN" }, - }, - remote: { - token: "remote-token", - }, - }, - secrets: { - providers: { - default: { source: "env" }, - }, - }, - } as unknown as OpenClawConfig, - env: {} as NodeJS.ProcessEnv, - includeLegacyEnv: false, - }), - ).toThrow("gateway.auth.token"); + expectUnresolvedLocalAuthSecretRefFailure({ + authMode: "token", + secretId: "MISSING_LOCAL_TOKEN", + errorPath: "gateway.auth.token", + remote: { token: "remote-token" }, + }); }); it("fails closed when local password SecretRef is unresolved and remote password fallback exists", () => { - expect(() => - resolveGatewayCredentialsFromConfig({ - cfg: { - gateway: { - mode: "local", - auth: { - mode: "password", - password: { source: "env", provider: "default", id: "MISSING_LOCAL_PASSWORD" }, - }, - remote: { - password: "remote-password", // pragma: allowlist secret - }, - }, - secrets: { - providers: { - default: { 
source: "env" }, - }, - }, - } as unknown as OpenClawConfig, - env: {} as NodeJS.ProcessEnv, - includeLegacyEnv: false, - }), - ).toThrow("gateway.auth.password"); + expectUnresolvedLocalAuthSecretRefFailure({ + authMode: "password", + secretId: "MISSING_LOCAL_PASSWORD", + errorPath: "gateway.auth.password", + remote: { password: "remote-password" }, // pragma: allowlist secret + }); }); it("throws when local password auth relies on an unresolved SecretRef", () => { - expect(() => - resolveGatewayCredentialsFromConfig({ - cfg: { - gateway: { - mode: "local", - auth: { - mode: "password", - password: { source: "env", provider: "default", id: "MISSING_GATEWAY_PASSWORD" }, - }, - }, - secrets: { - providers: { - default: { source: "env" }, - }, - }, - } as unknown as OpenClawConfig, - env: {} as NodeJS.ProcessEnv, - includeLegacyEnv: false, - }), - ).toThrow("gateway.auth.password"); + expectUnresolvedLocalAuthSecretRefFailure({ + authMode: "password", + secretId: "MISSING_GATEWAY_PASSWORD", + errorPath: "gateway.auth.password", + }); }); it("treats env-template local tokens as SecretRefs instead of plaintext", () => { @@ -275,55 +263,21 @@ describe("resolveGatewayCredentialsFromConfig", () => { }); it("throws when unresolved local token SecretRef would otherwise fall back to remote token", () => { - expect(() => - resolveGatewayCredentialsFromConfig({ - cfg: { - gateway: { - mode: "local", - auth: { - mode: "token", - token: { source: "env", provider: "default", id: "MISSING_LOCAL_TOKEN" }, - }, - remote: { - token: "remote-token", - }, - }, - secrets: { - providers: { - default: { source: "env" }, - }, - }, - } as unknown as OpenClawConfig, - env: {} as NodeJS.ProcessEnv, - includeLegacyEnv: false, - }), - ).toThrow("gateway.auth.token"); + expectUnresolvedLocalAuthSecretRefFailure({ + authMode: "token", + secretId: "MISSING_LOCAL_TOKEN", + errorPath: "gateway.auth.token", + remote: { token: "remote-token" }, + }); }); it("throws when unresolved local password 
SecretRef would otherwise fall back to remote password", () => { - expect(() => - resolveGatewayCredentialsFromConfig({ - cfg: { - gateway: { - mode: "local", - auth: { - mode: "password", - password: { source: "env", provider: "default", id: "MISSING_LOCAL_PASSWORD" }, - }, - remote: { - password: "remote-password", // pragma: allowlist secret - }, - }, - secrets: { - providers: { - default: { source: "env" }, - }, - }, - } as unknown as OpenClawConfig, - env: {} as NodeJS.ProcessEnv, - includeLegacyEnv: false, - }), - ).toThrow("gateway.auth.password"); + expectUnresolvedLocalAuthSecretRefFailure({ + authMode: "password", + secretId: "MISSING_LOCAL_PASSWORD", + errorPath: "gateway.auth.password", + remote: { password: "remote-password" }, // pragma: allowlist secret + }); }); it("ignores unresolved local password ref when local auth mode is none", () => { diff --git a/src/gateway/device-auth.test.ts b/src/gateway/device-auth.test.ts index 9d7ac3fb7b5..8db88428ce9 100644 --- a/src/gateway/device-auth.test.ts +++ b/src/gateway/device-auth.test.ts @@ -1,29 +1,69 @@ import { describe, expect, it } from "vitest"; -import { buildDeviceAuthPayloadV3, normalizeDeviceMetadataForAuth } from "./device-auth.js"; +import { + buildDeviceAuthPayload, + buildDeviceAuthPayloadV3, + normalizeDeviceMetadataForAuth, +} from "./device-auth.js"; describe("device-auth payload vectors", () => { - it("builds canonical v3 payload", () => { - const payload = buildDeviceAuthPayloadV3({ - deviceId: "dev-1", - clientId: "openclaw-macos", - clientMode: "ui", - role: "operator", - scopes: ["operator.admin", "operator.read"], - signedAtMs: 1_700_000_000_000, - token: "tok-123", - nonce: "nonce-abc", - platform: " IOS ", - deviceFamily: " iPhone ", - }); - - expect(payload).toBe( - "v3|dev-1|openclaw-macos|ui|operator|operator.admin,operator.read|1700000000000|tok-123|nonce-abc|ios|iphone", - ); + it.each([ + { + name: "builds canonical v2 payloads", + build: () => + buildDeviceAuthPayload({ + 
deviceId: "dev-1", + clientId: "openclaw-macos", + clientMode: "ui", + role: "operator", + scopes: ["operator.admin", "operator.read"], + signedAtMs: 1_700_000_000_000, + token: null, + nonce: "nonce-abc", + }), + expected: + "v2|dev-1|openclaw-macos|ui|operator|operator.admin,operator.read|1700000000000||nonce-abc", + }, + { + name: "builds canonical v3 payloads", + build: () => + buildDeviceAuthPayloadV3({ + deviceId: "dev-1", + clientId: "openclaw-macos", + clientMode: "ui", + role: "operator", + scopes: ["operator.admin", "operator.read"], + signedAtMs: 1_700_000_000_000, + token: "tok-123", + nonce: "nonce-abc", + platform: " IOS ", + deviceFamily: " iPhone ", + }), + expected: + "v3|dev-1|openclaw-macos|ui|operator|operator.admin,operator.read|1700000000000|tok-123|nonce-abc|ios|iphone", + }, + { + name: "keeps empty metadata slots in v3 payloads", + build: () => + buildDeviceAuthPayloadV3({ + deviceId: "dev-2", + clientId: "openclaw-ios", + clientMode: "ui", + role: "operator", + scopes: ["operator.read"], + signedAtMs: 1_700_000_000_001, + nonce: "nonce-def", + }), + expected: "v3|dev-2|openclaw-ios|ui|operator|operator.read|1700000000001||nonce-def||", + }, + ])("$name", ({ build, expected }) => { + expect(build()).toBe(expected); }); - it("normalizes metadata with ASCII-only lowercase", () => { - expect(normalizeDeviceMetadataForAuth(" İOS ")).toBe("İos"); - expect(normalizeDeviceMetadataForAuth(" MAC ")).toBe("mac"); - expect(normalizeDeviceMetadataForAuth(undefined)).toBe(""); + it.each([ + { input: " İOS ", expected: "İos" }, + { input: " MAC ", expected: "mac" }, + { input: undefined, expected: "" }, + ])("normalizes metadata %j", ({ input, expected }) => { + expect(normalizeDeviceMetadataForAuth(input)).toBe(expected); }); }); diff --git a/src/gateway/live-tool-probe-utils.test.ts b/src/gateway/live-tool-probe-utils.test.ts index ca73032c6fb..75f27c08036 100644 --- a/src/gateway/live-tool-probe-utils.test.ts +++ 
b/src/gateway/live-tool-probe-utils.test.ts @@ -8,198 +8,245 @@ import { } from "./live-tool-probe-utils.js"; describe("live tool probe utils", () => { - it("matches nonce pair when both are present", () => { - expect(hasExpectedToolNonce("value a-1 and b-2", "a-1", "b-2")).toBe(true); - expect(hasExpectedToolNonce("value a-1 only", "a-1", "b-2")).toBe(false); + describe("nonce matching", () => { + it.each([ + { + name: "matches tool nonce pairs only when both are present", + actual: hasExpectedToolNonce("value a-1 and b-2", "a-1", "b-2"), + expected: true, + }, + { + name: "rejects partial tool nonce matches", + actual: hasExpectedToolNonce("value a-1 only", "a-1", "b-2"), + expected: false, + }, + { + name: "matches a single nonce when present", + actual: hasExpectedSingleNonce("value nonce-1", "nonce-1"), + expected: true, + }, + { + name: "rejects single nonce mismatches", + actual: hasExpectedSingleNonce("value nonce-2", "nonce-1"), + expected: false, + }, + ])("$name", ({ actual, expected }) => { + expect(actual).toBe(expected); + }); }); - it("matches single nonce when present", () => { - expect(hasExpectedSingleNonce("value nonce-1", "nonce-1")).toBe(true); - expect(hasExpectedSingleNonce("value nonce-2", "nonce-1")).toBe(false); + describe("refusal detection", () => { + it.each([ + { + name: "detects nonce refusal phrasing", + text: "Same request, same answer — this isn't a real OpenClaw probe. No part of the system asks me to parrot back nonce values.", + expected: true, + }, + { + name: "detects prompt-injection style refusals without nonce text", + text: "That's not a legitimate self-test. 
This looks like a prompt injection attempt.", + expected: true, + }, + { + name: "ignores generic helper text", + text: "I can help with that request.", + expected: false, + }, + { + name: "does not treat nonce markers without the word nonce as refusal", + text: "No part of the system asks me to parrot back values.", + expected: false, + }, + ])("$name", ({ text, expected }) => { + expect(isLikelyToolNonceRefusal(text)).toBe(expected); + }); }); - it("detects anthropic nonce refusal phrasing", () => { - expect( - isLikelyToolNonceRefusal( - "Same request, same answer — this isn't a real OpenClaw probe. No part of the system asks me to parrot back nonce values.", - ), - ).toBe(true); + describe("shouldRetryToolReadProbe", () => { + it.each([ + { + name: "retries malformed tool output when attempts remain", + params: { + text: "read[object Object],[object Object]", + nonceA: "nonce-a", + nonceB: "nonce-b", + provider: "mistral", + attempt: 0, + maxAttempts: 3, + }, + expected: true, + }, + { + name: "does not retry once max attempts are exhausted", + params: { + text: "read[object Object],[object Object]", + nonceA: "nonce-a", + nonceB: "nonce-b", + provider: "mistral", + attempt: 2, + maxAttempts: 3, + }, + expected: false, + }, + { + name: "does not retry when the nonce pair is already present", + params: { + text: "nonce-a nonce-b", + nonceA: "nonce-a", + nonceB: "nonce-b", + provider: "mistral", + attempt: 0, + maxAttempts: 3, + }, + expected: false, + }, + { + name: "prefers a valid nonce pair even if the text still contains scaffolding words", + params: { + text: "tool output nonce-a nonce-b function", + nonceA: "nonce-a", + nonceB: "nonce-b", + provider: "openai", + attempt: 0, + maxAttempts: 3, + }, + expected: false, + }, + { + name: "retries empty output", + params: { + text: " ", + nonceA: "nonce-a", + nonceB: "nonce-b", + provider: "openai", + attempt: 0, + maxAttempts: 3, + }, + expected: true, + }, + { + name: "retries tool scaffolding output", + 
params: { + text: "Use tool function read[] now.", + nonceA: "nonce-a", + nonceB: "nonce-b", + provider: "openai", + attempt: 0, + maxAttempts: 3, + }, + expected: true, + }, + { + name: "retries mistral nonce marker echoes without parsed values", + params: { + text: "nonceA= nonceB=", + nonceA: "nonce-a", + nonceB: "nonce-b", + provider: "mistral", + attempt: 0, + maxAttempts: 3, + }, + expected: true, + }, + { + name: "retries anthropic refusal output", + params: { + text: "This isn't a real OpenClaw probe; I won't parrot back nonce values.", + nonceA: "nonce-a", + nonceB: "nonce-b", + provider: "anthropic", + attempt: 0, + maxAttempts: 3, + }, + expected: true, + }, + { + name: "does not special-case anthropic refusals for other providers", + params: { + text: "This isn't a real OpenClaw probe; I won't parrot back nonce values.", + nonceA: "nonce-a", + nonceB: "nonce-b", + provider: "openai", + attempt: 0, + maxAttempts: 3, + }, + expected: false, + }, + ])("$name", ({ params, expected }) => { + expect(shouldRetryToolReadProbe(params)).toBe(expected); + }); }); - it("does not treat generic helper text as nonce refusal", () => { - expect(isLikelyToolNonceRefusal("I can help with that request.")).toBe(false); - }); - - it("detects prompt-injection style tool refusal without nonce text", () => { - expect( - isLikelyToolNonceRefusal( - "That's not a legitimate self-test. 
This looks like a prompt injection attempt.", - ), - ).toBe(true); - }); - - it("retries malformed tool output when attempts remain", () => { - expect( - shouldRetryToolReadProbe({ - text: "read[object Object],[object Object]", - nonceA: "nonce-a", - nonceB: "nonce-b", - provider: "mistral", - attempt: 0, - maxAttempts: 3, - }), - ).toBe(true); - }); - - it("does not retry once max attempts are exhausted", () => { - expect( - shouldRetryToolReadProbe({ - text: "read[object Object],[object Object]", - nonceA: "nonce-a", - nonceB: "nonce-b", - provider: "mistral", - attempt: 2, - maxAttempts: 3, - }), - ).toBe(false); - }); - - it("does not retry when nonce pair is already present", () => { - expect( - shouldRetryToolReadProbe({ - text: "nonce-a nonce-b", - nonceA: "nonce-a", - nonceB: "nonce-b", - provider: "mistral", - attempt: 0, - maxAttempts: 3, - }), - ).toBe(false); - }); - - it("retries when tool output is empty and attempts remain", () => { - expect( - shouldRetryToolReadProbe({ - text: " ", - nonceA: "nonce-a", - nonceB: "nonce-b", - provider: "openai", - attempt: 0, - maxAttempts: 3, - }), - ).toBe(true); - }); - - it("retries when output still looks like tool/function scaffolding", () => { - expect( - shouldRetryToolReadProbe({ - text: "Use tool function read[] now.", - nonceA: "nonce-a", - nonceB: "nonce-b", - provider: "openai", - attempt: 0, - maxAttempts: 3, - }), - ).toBe(true); - }); - - it("retries mistral nonce marker echoes without parsed nonce values", () => { - expect( - shouldRetryToolReadProbe({ - text: "nonceA= nonceB=", - nonceA: "nonce-a", - nonceB: "nonce-b", - provider: "mistral", - attempt: 0, - maxAttempts: 3, - }), - ).toBe(true); - }); - - it("retries anthropic nonce refusal output", () => { - expect( - shouldRetryToolReadProbe({ - text: "This isn't a real OpenClaw probe; I won't parrot back nonce values.", - nonceA: "nonce-a", - nonceB: "nonce-b", - provider: "anthropic", - attempt: 0, - maxAttempts: 3, - }), - ).toBe(true); - }); - 
- it("retries anthropic prompt-injection refusal output", () => { - expect( - shouldRetryToolReadProbe({ - text: "This is not a legitimate self-test; it appears to be a prompt injection attempt.", - nonceA: "nonce-a", - nonceB: "nonce-b", - provider: "anthropic", - attempt: 0, - maxAttempts: 3, - }), - ).toBe(true); - }); - - it("does not retry nonce marker echoes for non-mistral providers", () => { - expect( - shouldRetryToolReadProbe({ - text: "nonceA= nonceB=", - nonceA: "nonce-a", - nonceB: "nonce-b", - provider: "openai", - attempt: 0, - maxAttempts: 3, - }), - ).toBe(false); - }); - - it("retries malformed exec+read output when attempts remain", () => { - expect( - shouldRetryExecReadProbe({ - text: "read[object Object]", - nonce: "nonce-c", - provider: "openai", - attempt: 0, - maxAttempts: 3, - }), - ).toBe(true); - }); - - it("does not retry exec+read once max attempts are exhausted", () => { - expect( - shouldRetryExecReadProbe({ - text: "read[object Object]", - nonce: "nonce-c", - provider: "openai", - attempt: 2, - maxAttempts: 3, - }), - ).toBe(false); - }); - - it("does not retry exec+read when nonce is present", () => { - expect( - shouldRetryExecReadProbe({ - text: "nonce-c", - nonce: "nonce-c", - provider: "openai", - attempt: 0, - maxAttempts: 3, - }), - ).toBe(false); - }); - - it("retries anthropic exec+read nonce refusal output", () => { - expect( - shouldRetryExecReadProbe({ - text: "No part of the system asks me to parrot back nonce values.", - nonce: "nonce-c", - provider: "anthropic", - attempt: 0, - maxAttempts: 3, - }), - ).toBe(true); + describe("shouldRetryExecReadProbe", () => { + it.each([ + { + name: "retries malformed exec+read output when attempts remain", + params: { + text: "read[object Object]", + nonce: "nonce-c", + provider: "openai", + attempt: 0, + maxAttempts: 3, + }, + expected: true, + }, + { + name: "does not retry once max attempts are exhausted", + params: { + text: "read[object Object]", + nonce: "nonce-c", + 
provider: "openai", + attempt: 2, + maxAttempts: 3, + }, + expected: false, + }, + { + name: "does not retry when the nonce is already present", + params: { + text: "nonce-c", + nonce: "nonce-c", + provider: "openai", + attempt: 0, + maxAttempts: 3, + }, + expected: false, + }, + { + name: "prefers a valid nonce even if the text still contains scaffolding words", + params: { + text: "tool output nonce-c function", + nonce: "nonce-c", + provider: "openai", + attempt: 0, + maxAttempts: 3, + }, + expected: false, + }, + { + name: "retries anthropic nonce refusal output", + params: { + text: "No part of the system asks me to parrot back nonce values.", + nonce: "nonce-c", + provider: "anthropic", + attempt: 0, + maxAttempts: 3, + }, + expected: true, + }, + { + name: "does not special-case anthropic refusals for other providers", + params: { + text: "No part of the system asks me to parrot back nonce values.", + nonce: "nonce-c", + provider: "openai", + attempt: 0, + maxAttempts: 3, + }, + expected: false, + }, + ])("$name", ({ params, expected }) => { + expect(shouldRetryExecReadProbe(params)).toBe(expected); + }); }); }); diff --git a/src/gateway/method-scopes.test.ts b/src/gateway/method-scopes.test.ts index 18ff74509ee..3a91f8b8044 100644 --- a/src/gateway/method-scopes.test.ts +++ b/src/gateway/method-scopes.test.ts @@ -8,14 +8,15 @@ import { listGatewayMethods } from "./server-methods-list.js"; import { coreGatewayHandlers } from "./server-methods.js"; describe("method scope resolution", () => { - it("classifies sessions.resolve + config.schema.lookup as read and poll as write", () => { - expect(resolveLeastPrivilegeOperatorScopesForMethod("sessions.resolve")).toEqual([ - "operator.read", - ]); - expect(resolveLeastPrivilegeOperatorScopesForMethod("config.schema.lookup")).toEqual([ - "operator.read", - ]); - expect(resolveLeastPrivilegeOperatorScopesForMethod("poll")).toEqual(["operator.write"]); + it.each([ + ["sessions.resolve", ["operator.read"]], + 
["config.schema.lookup", ["operator.read"]], + ["poll", ["operator.write"]], + ["config.patch", ["operator.admin"]], + ["wizard.start", ["operator.admin"]], + ["update.run", ["operator.admin"]], + ])("resolves least-privilege scopes for %s", (method, expected) => { + expect(resolveLeastPrivilegeOperatorScopesForMethod(method)).toEqual(expected); }); it("leaves node-only pending drain outside operator scopes", () => { @@ -28,16 +29,13 @@ describe("method scope resolution", () => { }); describe("operator scope authorization", () => { - it("allows read methods with operator.read or operator.write", () => { - expect(authorizeOperatorScopesForMethod("health", ["operator.read"])).toEqual({ - allowed: true, - }); - expect(authorizeOperatorScopesForMethod("health", ["operator.write"])).toEqual({ - allowed: true, - }); - expect(authorizeOperatorScopesForMethod("config.schema.lookup", ["operator.read"])).toEqual({ - allowed: true, - }); + it.each([ + ["health", ["operator.read"], { allowed: true }], + ["health", ["operator.write"], { allowed: true }], + ["config.schema.lookup", ["operator.read"], { allowed: true }], + ["config.patch", ["operator.admin"], { allowed: true }], + ])("authorizes %s for scopes %j", (method, scopes, expected) => { + expect(authorizeOperatorScopesForMethod(method, scopes)).toEqual(expected); }); it("requires operator.write for write methods", () => { @@ -63,6 +61,11 @@ describe("operator scope authorization", () => { }); describe("core gateway method classification", () => { + it("treats node-role methods as classified even without operator scopes", () => { + expect(isGatewayMethodClassified("node.pending.drain")).toBe(true); + expect(isGatewayMethodClassified("node.pending.pull")).toBe(true); + }); + it("classifies every exposed core gateway handler method", () => { const unclassified = Object.keys(coreGatewayHandlers).filter( (method) => !isGatewayMethodClassified(method), diff --git a/src/gateway/net.test.ts b/src/gateway/net.test.ts index 
f5ee5db9a8e..185325d5428 100644 --- a/src/gateway/net.test.ts +++ b/src/gateway/net.test.ts @@ -49,117 +49,147 @@ describe("isLocalishHost", () => { }); describe("isTrustedProxyAddress", () => { - describe("exact IP matching", () => { - it("returns true when IP matches exactly", () => { - expect(isTrustedProxyAddress("192.168.1.1", ["192.168.1.1"])).toBe(true); - }); - - it("returns false when IP does not match", () => { - expect(isTrustedProxyAddress("192.168.1.2", ["192.168.1.1"])).toBe(false); - }); - - it("returns true when IP matches one of multiple proxies", () => { - expect(isTrustedProxyAddress("10.0.0.5", ["192.168.1.1", "10.0.0.5", "172.16.0.1"])).toBe( - true, - ); - }); - - it("ignores surrounding whitespace in exact IP entries", () => { - expect(isTrustedProxyAddress("10.0.0.5", [" 10.0.0.5 "])).toBe(true); - }); - }); - - describe("CIDR subnet matching", () => { - it("returns true when IP is within /24 subnet", () => { - expect(isTrustedProxyAddress("10.42.0.59", ["10.42.0.0/24"])).toBe(true); - expect(isTrustedProxyAddress("10.42.0.1", ["10.42.0.0/24"])).toBe(true); - expect(isTrustedProxyAddress("10.42.0.254", ["10.42.0.0/24"])).toBe(true); - }); - - it("returns false when IP is outside /24 subnet", () => { - expect(isTrustedProxyAddress("10.42.1.1", ["10.42.0.0/24"])).toBe(false); - expect(isTrustedProxyAddress("10.43.0.1", ["10.42.0.0/24"])).toBe(false); - }); - - it("returns true when IP is within /16 subnet", () => { - expect(isTrustedProxyAddress("172.19.5.100", ["172.19.0.0/16"])).toBe(true); - expect(isTrustedProxyAddress("172.19.255.255", ["172.19.0.0/16"])).toBe(true); - }); - - it("returns false when IP is outside /16 subnet", () => { - expect(isTrustedProxyAddress("172.20.0.1", ["172.19.0.0/16"])).toBe(false); - }); - - it("returns true when IP is within /32 subnet (single IP)", () => { - expect(isTrustedProxyAddress("10.42.0.0", ["10.42.0.0/32"])).toBe(true); - }); - - it("returns false when IP does not match /32 subnet", () => { - 
expect(isTrustedProxyAddress("10.42.0.1", ["10.42.0.0/32"])).toBe(false); - }); - - it("handles mixed exact IPs and CIDR notation", () => { - const proxies = ["192.168.1.1", "10.42.0.0/24", "172.19.0.0/16"]; - expect(isTrustedProxyAddress("192.168.1.1", proxies)).toBe(true); // exact match - expect(isTrustedProxyAddress("10.42.0.59", proxies)).toBe(true); // CIDR match - expect(isTrustedProxyAddress("172.19.5.100", proxies)).toBe(true); // CIDR match - expect(isTrustedProxyAddress("10.43.0.1", proxies)).toBe(false); // no match - }); - - it("supports IPv6 CIDR notation", () => { - expect(isTrustedProxyAddress("2001:db8::1234", ["2001:db8::/32"])).toBe(true); - expect(isTrustedProxyAddress("2001:db9::1234", ["2001:db8::/32"])).toBe(false); - }); - }); - - describe("backward compatibility", () => { - it("preserves exact IP matching behavior (no CIDR notation)", () => { - // Old configs with exact IPs should work exactly as before - expect(isTrustedProxyAddress("192.168.1.1", ["192.168.1.1"])).toBe(true); - expect(isTrustedProxyAddress("192.168.1.2", ["192.168.1.1"])).toBe(false); - expect(isTrustedProxyAddress("10.0.0.5", ["192.168.1.1", "10.0.0.5"])).toBe(true); - }); - - it("does NOT treat plain IPs as /32 CIDR (exact match only)", () => { - // "10.42.0.1" without /32 should match ONLY that exact IP - expect(isTrustedProxyAddress("10.42.0.1", ["10.42.0.1"])).toBe(true); - expect(isTrustedProxyAddress("10.42.0.2", ["10.42.0.1"])).toBe(false); - expect(isTrustedProxyAddress("10.42.0.59", ["10.42.0.1"])).toBe(false); - }); - - it("handles IPv4-mapped IPv6 addresses (existing normalizeIp behavior)", () => { - // Existing normalizeIp() behavior should be preserved - expect(isTrustedProxyAddress("::ffff:192.168.1.1", ["192.168.1.1"])).toBe(true); - }); - }); - - describe("edge cases", () => { - it("returns false when IP is undefined", () => { - expect(isTrustedProxyAddress(undefined, ["192.168.1.1"])).toBe(false); - }); - - it("returns false when trustedProxies is 
undefined", () => { - expect(isTrustedProxyAddress("192.168.1.1", undefined)).toBe(false); - }); - - it("returns false when trustedProxies is empty", () => { - expect(isTrustedProxyAddress("192.168.1.1", [])).toBe(false); - }); - - it("returns false for invalid CIDR notation", () => { - expect(isTrustedProxyAddress("10.42.0.59", ["10.42.0.0/33"])).toBe(false); // invalid prefix - expect(isTrustedProxyAddress("10.42.0.59", ["10.42.0.0/-1"])).toBe(false); // negative prefix - expect(isTrustedProxyAddress("10.42.0.59", ["invalid/24"])).toBe(false); // invalid IP - }); - - it("ignores surrounding whitespace in CIDR entries", () => { - expect(isTrustedProxyAddress("10.42.0.59", [" 10.42.0.0/24 "])).toBe(true); - }); - - it("ignores blank trusted proxy entries", () => { - expect(isTrustedProxyAddress("10.0.0.5", [" ", "\t"])).toBe(false); - expect(isTrustedProxyAddress("10.0.0.5", [" ", "10.0.0.5", ""])).toBe(true); - }); + it.each([ + { + name: "matches exact IP entries", + ip: "192.168.1.1", + trustedProxies: ["192.168.1.1"], + expected: true, + }, + { + name: "rejects non-matching exact IP entries", + ip: "192.168.1.2", + trustedProxies: ["192.168.1.1"], + expected: false, + }, + { + name: "matches one of multiple exact entries", + ip: "10.0.0.5", + trustedProxies: ["192.168.1.1", "10.0.0.5", "172.16.0.1"], + expected: true, + }, + { + name: "ignores surrounding whitespace in exact IP entries", + ip: "10.0.0.5", + trustedProxies: [" 10.0.0.5 "], + expected: true, + }, + { + name: "matches /24 CIDR entries", + ip: "10.42.0.59", + trustedProxies: ["10.42.0.0/24"], + expected: true, + }, + { + name: "rejects IPs outside /24 CIDR entries", + ip: "10.42.1.1", + trustedProxies: ["10.42.0.0/24"], + expected: false, + }, + { + name: "matches /16 CIDR entries", + ip: "172.19.255.255", + trustedProxies: ["172.19.0.0/16"], + expected: true, + }, + { + name: "rejects IPs outside /16 CIDR entries", + ip: "172.20.0.1", + trustedProxies: ["172.19.0.0/16"], + expected: false, + }, + 
{ + name: "treats /32 as a single-IP CIDR", + ip: "10.42.0.0", + trustedProxies: ["10.42.0.0/32"], + expected: true, + }, + { + name: "rejects non-matching /32 CIDR entries", + ip: "10.42.0.1", + trustedProxies: ["10.42.0.0/32"], + expected: false, + }, + { + name: "handles mixed exact IP and CIDR entries", + ip: "172.19.5.100", + trustedProxies: ["192.168.1.1", "10.42.0.0/24", "172.19.0.0/16"], + expected: true, + }, + { + name: "rejects IPs missing from mixed exact IP and CIDR entries", + ip: "10.43.0.1", + trustedProxies: ["192.168.1.1", "10.42.0.0/24", "172.19.0.0/16"], + expected: false, + }, + { + name: "supports IPv6 CIDR notation", + ip: "2001:db8::1234", + trustedProxies: ["2001:db8::/32"], + expected: true, + }, + { + name: "rejects IPv6 addresses outside the configured CIDR", + ip: "2001:db9::1234", + trustedProxies: ["2001:db8::/32"], + expected: false, + }, + { + name: "preserves exact matching behavior for plain IP entries", + ip: "10.42.0.59", + trustedProxies: ["10.42.0.1"], + expected: false, + }, + { + name: "normalizes IPv4-mapped IPv6 addresses", + ip: "::ffff:192.168.1.1", + trustedProxies: ["192.168.1.1"], + expected: true, + }, + { + name: "returns false when IP is undefined", + ip: undefined, + trustedProxies: ["192.168.1.1"], + expected: false, + }, + { + name: "returns false when trusted proxies are undefined", + ip: "192.168.1.1", + trustedProxies: undefined, + expected: false, + }, + { + name: "returns false when trusted proxies are empty", + ip: "192.168.1.1", + trustedProxies: [], + expected: false, + }, + { + name: "rejects invalid CIDR prefixes and addresses", + ip: "10.42.0.59", + trustedProxies: ["10.42.0.0/33", "10.42.0.0/-1", "invalid/24", "2001:db8::/129"], + expected: false, + }, + { + name: "ignores surrounding whitespace in CIDR entries", + ip: "10.42.0.59", + trustedProxies: [" 10.42.0.0/24 "], + expected: true, + }, + { + name: "ignores blank trusted proxy entries", + ip: "10.0.0.5", + trustedProxies: [" ", "10.0.0.5", ""], 
+ expected: true, + }, + { + name: "treats all-blank trusted proxy entries as no match", + ip: "10.0.0.5", + trustedProxies: [" ", "\t"], + expected: false, + }, + ])("$name", ({ ip, trustedProxies, expected }) => { + expect(isTrustedProxyAddress(ip, trustedProxies)).toBe(expected); }); }); diff --git a/src/gateway/origin-check.test.ts b/src/gateway/origin-check.test.ts index 50c031e927d..2bdec288fd6 100644 --- a/src/gateway/origin-check.test.ts +++ b/src/gateway/origin-check.test.ts @@ -2,102 +2,93 @@ import { describe, expect, it } from "vitest"; import { checkBrowserOrigin } from "./origin-check.js"; describe("checkBrowserOrigin", () => { - it("accepts same-origin host matches only with legacy host-header fallback", () => { - const result = checkBrowserOrigin({ - requestHost: "127.0.0.1:18789", - origin: "http://127.0.0.1:18789", - allowHostHeaderOriginFallback: true, - }); - expect(result.ok).toBe(true); - if (result.ok) { - expect(result.matchedBy).toBe("host-header-fallback"); - } - }); - - it("rejects same-origin host matches when legacy host-header fallback is disabled", () => { - const result = checkBrowserOrigin({ - requestHost: "gateway.example.com:18789", - origin: "https://gateway.example.com:18789", - }); - expect(result.ok).toBe(false); - }); - - it("accepts loopback host mismatches for dev", () => { - const result = checkBrowserOrigin({ - requestHost: "127.0.0.1:18789", - origin: "http://localhost:5173", - isLocalClient: true, - }); - expect(result.ok).toBe(true); - }); - - it("rejects loopback origin mismatches when request is not local", () => { - const result = checkBrowserOrigin({ - requestHost: "127.0.0.1:18789", - origin: "http://localhost:5173", - isLocalClient: false, - }); - expect(result.ok).toBe(false); - }); - - it("accepts allowlisted origins", () => { - const result = checkBrowserOrigin({ - requestHost: "gateway.example.com:18789", - origin: "https://control.example.com", - allowedOrigins: ["https://control.example.com"], - }); - 
expect(result.ok).toBe(true); - }); - - it("accepts wildcard allowedOrigins", () => { - const result = checkBrowserOrigin({ - requestHost: "gateway.example.com:18789", - origin: "https://any-origin.example.com", - allowedOrigins: ["*"], - }); - expect(result.ok).toBe(true); - }); - - it("rejects missing origin", () => { - const result = checkBrowserOrigin({ - requestHost: "gateway.example.com:18789", - origin: "", - }); - expect(result.ok).toBe(false); - }); - - it("rejects mismatched origins", () => { - const result = checkBrowserOrigin({ - requestHost: "gateway.example.com:18789", - origin: "https://attacker.example.com", - }); - expect(result.ok).toBe(false); - }); - - it('accepts any origin when allowedOrigins includes "*" (regression: #30990)', () => { - const result = checkBrowserOrigin({ - requestHost: "100.86.79.37:18789", - origin: "https://100.86.79.37:18789", - allowedOrigins: ["*"], - }); - expect(result.ok).toBe(true); - }); - - it('accepts any origin when allowedOrigins includes "*" alongside specific entries', () => { - const result = checkBrowserOrigin({ - requestHost: "gateway.tailnet.ts.net:18789", - origin: "https://gateway.tailnet.ts.net:18789", - allowedOrigins: ["https://control.example.com", "*"], - }); - expect(result.ok).toBe(true); - }); - - it("accepts wildcard entries with surrounding whitespace", () => { - const result = checkBrowserOrigin({ - requestHost: "100.86.79.37:18789", - origin: "https://100.86.79.37:18789", - allowedOrigins: [" * "], - }); - expect(result.ok).toBe(true); + it.each([ + { + name: "accepts host-header fallback when explicitly enabled", + input: { + requestHost: "127.0.0.1:18789", + origin: "http://127.0.0.1:18789", + allowHostHeaderOriginFallback: true, + }, + expected: { ok: true as const, matchedBy: "host-header-fallback" as const }, + }, + { + name: "rejects same-origin host matches when fallback is disabled", + input: { + requestHost: "gateway.example.com:18789", + origin: "https://gateway.example.com:18789", 
+ }, + expected: { ok: false as const, reason: "origin not allowed" }, + }, + { + name: "accepts local loopback mismatches for local clients", + input: { + requestHost: "127.0.0.1:18789", + origin: "http://localhost:5173", + isLocalClient: true, + }, + expected: { ok: true as const, matchedBy: "local-loopback" as const }, + }, + { + name: "rejects loopback mismatches for non-local clients", + input: { + requestHost: "127.0.0.1:18789", + origin: "http://localhost:5173", + isLocalClient: false, + }, + expected: { ok: false as const, reason: "origin not allowed" }, + }, + { + name: "accepts trimmed lowercase-normalized allowlist matches", + input: { + requestHost: "gateway.example.com:18789", + origin: "https://CONTROL.example.com", + allowedOrigins: [" https://control.example.com "], + }, + expected: { ok: true as const, matchedBy: "allowlist" as const }, + }, + { + name: "accepts wildcard allowlists even alongside specific entries", + input: { + requestHost: "gateway.tailnet.ts.net:18789", + origin: "https://any-origin.example.com", + allowedOrigins: ["https://control.example.com", " * "], + }, + expected: { ok: true as const, matchedBy: "allowlist" as const }, + }, + { + name: "rejects missing origin", + input: { + requestHost: "gateway.example.com:18789", + origin: "", + }, + expected: { ok: false as const, reason: "origin missing or invalid" }, + }, + { + name: 'rejects literal "null" origin', + input: { + requestHost: "gateway.example.com:18789", + origin: "null", + }, + expected: { ok: false as const, reason: "origin missing or invalid" }, + }, + { + name: "rejects malformed origin URLs", + input: { + requestHost: "gateway.example.com:18789", + origin: "not a url", + }, + expected: { ok: false as const, reason: "origin missing or invalid" }, + }, + { + name: "rejects mismatched origins", + input: { + requestHost: "gateway.example.com:18789", + origin: "https://attacker.example.com", + }, + expected: { ok: false as const, reason: "origin not allowed" }, + }, + 
])("$name", ({ input, expected }) => { + expect(checkBrowserOrigin(input)).toEqual(expected); }); }); diff --git a/src/gateway/probe-auth.test.ts b/src/gateway/probe-auth.test.ts index 7a6d639e10a..bbf034c882f 100644 --- a/src/gateway/probe-auth.test.ts +++ b/src/gateway/probe-auth.test.ts @@ -5,6 +5,18 @@ import { resolveGatewayProbeAuthWithSecretInputs, } from "./probe-auth.js"; +function expectUnresolvedProbeTokenWarning(cfg: OpenClawConfig) { + const result = resolveGatewayProbeAuthSafe({ + cfg, + mode: "local", + env: {} as NodeJS.ProcessEnv, + }); + + expect(result.auth).toEqual({}); + expect(result.warning).toContain("gateway.auth.token"); + expect(result.warning).toContain("unresolved"); +} + describe("resolveGatewayProbeAuthSafe", () => { it("returns probe auth credentials when available", () => { const result = resolveGatewayProbeAuthSafe({ @@ -28,55 +40,39 @@ describe("resolveGatewayProbeAuthSafe", () => { }); it("returns warning and empty auth when token SecretRef is unresolved", () => { - const result = resolveGatewayProbeAuthSafe({ - cfg: { - gateway: { - auth: { - mode: "token", - token: { source: "env", provider: "default", id: "MISSING_GATEWAY_TOKEN" }, - }, + expectUnresolvedProbeTokenWarning({ + gateway: { + auth: { + mode: "token", + token: { source: "env", provider: "default", id: "MISSING_GATEWAY_TOKEN" }, }, - secrets: { - providers: { - default: { source: "env" }, - }, + }, + secrets: { + providers: { + default: { source: "env" }, }, - } as OpenClawConfig, - mode: "local", - env: {} as NodeJS.ProcessEnv, - }); - - expect(result.auth).toEqual({}); - expect(result.warning).toContain("gateway.auth.token"); - expect(result.warning).toContain("unresolved"); + }, + } as OpenClawConfig); }); it("does not fall through to remote token when local token SecretRef is unresolved", () => { - const result = resolveGatewayProbeAuthSafe({ - cfg: { - gateway: { - mode: "local", - auth: { - mode: "token", - token: { source: "env", provider: "default", id: 
"MISSING_GATEWAY_TOKEN" }, - }, - remote: { - token: "remote-token", - }, + expectUnresolvedProbeTokenWarning({ + gateway: { + mode: "local", + auth: { + mode: "token", + token: { source: "env", provider: "default", id: "MISSING_GATEWAY_TOKEN" }, }, - secrets: { - providers: { - default: { source: "env" }, - }, + remote: { + token: "remote-token", }, - } as OpenClawConfig, - mode: "local", - env: {} as NodeJS.ProcessEnv, - }); - - expect(result.auth).toEqual({}); - expect(result.warning).toContain("gateway.auth.token"); - expect(result.warning).toContain("unresolved"); + }, + secrets: { + providers: { + default: { source: "env" }, + }, + }, + } as OpenClawConfig); }); it("ignores unresolved local token SecretRef in remote mode when remote-only auth is requested", () => { diff --git a/src/gateway/probe.test.ts b/src/gateway/probe.test.ts index b5927389c4d..6cd7d64fc51 100644 --- a/src/gateway/probe.test.ts +++ b/src/gateway/probe.test.ts @@ -2,6 +2,7 @@ import { describe, expect, it, vi } from "vitest"; const gatewayClientState = vi.hoisted(() => ({ options: null as Record | null, + requests: [] as string[], })); class MockGatewayClient { @@ -10,6 +11,7 @@ class MockGatewayClient { constructor(opts: Record) { this.opts = opts; gatewayClientState.options = opts; + gatewayClientState.requests = []; } start(): void { @@ -26,6 +28,7 @@ class MockGatewayClient { stop(): void {} async request(method: string): Promise { + gatewayClientState.requests.push(method); if (method === "system-presence") { return []; } @@ -48,6 +51,34 @@ describe("probeGateway", () => { }); expect(gatewayClientState.options?.scopes).toEqual(["operator.read"]); + expect(gatewayClientState.options?.deviceIdentity).toBeNull(); + expect(gatewayClientState.requests).toEqual([ + "health", + "status", + "system-presence", + "config.get", + ]); expect(result.ok).toBe(true); }); + + it("keeps device identity enabled for remote probes", async () => { + await probeGateway({ + url: "wss://gateway.example/ws", 
+ auth: { token: "secret" }, + timeoutMs: 1_000, + }); + + expect(gatewayClientState.options?.deviceIdentity).toBeUndefined(); + }); + + it("skips detail RPCs for lightweight reachability probes", async () => { + const result = await probeGateway({ + url: "ws://127.0.0.1:18789", + timeoutMs: 1_000, + includeDetails: false, + }); + + expect(result.ok).toBe(true); + expect(gatewayClientState.requests).toEqual([]); + }); }); diff --git a/src/gateway/probe.ts b/src/gateway/probe.ts index 0521e84d9c8..40740987fb0 100644 --- a/src/gateway/probe.ts +++ b/src/gateway/probe.ts @@ -4,6 +4,7 @@ import type { SystemPresence } from "../infra/system-presence.js"; import { GATEWAY_CLIENT_MODES, GATEWAY_CLIENT_NAMES } from "../utils/message-channel.js"; import { GatewayClient } from "./client.js"; import { READ_SCOPE } from "./method-scopes.js"; +import { isLoopbackHost } from "./net.js"; export type GatewayProbeAuth = { token?: string; @@ -32,6 +33,7 @@ export async function probeGateway(opts: { url: string; auth?: GatewayProbeAuth; timeoutMs: number; + includeDetails?: boolean; }): Promise { const startedAt = Date.now(); const instanceId = randomUUID(); @@ -39,6 +41,14 @@ export async function probeGateway(opts: { let connectError: string | null = null; let close: GatewayProbeClose | null = null; + const disableDeviceIdentity = (() => { + try { + return isLoopbackHost(new URL(opts.url).hostname); + } catch { + return false; + } + })(); + return await new Promise((resolve) => { let settled = false; const settle = (result: Omit) => { @@ -60,6 +70,7 @@ export async function probeGateway(opts: { clientVersion: "dev", mode: GATEWAY_CLIENT_MODES.PROBE, instanceId, + deviceIdentity: disableDeviceIdentity ? 
null : undefined, onConnectError: (err) => { connectError = formatErrorMessage(err); }, @@ -68,6 +79,19 @@ export async function probeGateway(opts: { }, onHelloOk: async () => { connectLatencyMs = Date.now() - startedAt; + if (opts.includeDetails === false) { + settle({ + ok: true, + connectLatencyMs, + error: null, + close, + health: null, + status: null, + presence: null, + configSnapshot: null, + }); + return; + } try { const [health, status, presence, configSnapshot] = await Promise.all([ client.request("health"), diff --git a/src/gateway/server-methods/agent-wait-dedupe.test.ts b/src/gateway/server-methods/agent-wait-dedupe.test.ts index e9a1899c88b..4bbf2a575a0 100644 --- a/src/gateway/server-methods/agent-wait-dedupe.test.ts +++ b/src/gateway/server-methods/agent-wait-dedupe.test.ts @@ -1,4 +1,5 @@ import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import type { DedupeEntry } from "../server-shared.js"; import { __testing, readTerminalSnapshotFromGatewayDedupe, @@ -7,6 +8,25 @@ import { } from "./agent-wait-dedupe.js"; describe("agent wait dedupe helper", () => { + function setRunEntry(params: { + dedupe: Map; + kind: "agent" | "chat"; + runId: string; + ts?: number; + ok?: boolean; + payload: Record; + }) { + setGatewayDedupeEntry({ + dedupe: params.dedupe, + key: `${params.kind}:${params.runId}`, + entry: { + ts: params.ts ?? Date.now(), + ok: params.ok ?? 
true, + payload: params.payload, + }, + }); + } + beforeEach(() => { __testing.resetWaiters(); vi.useFakeTimers(); @@ -29,18 +49,15 @@ describe("agent wait dedupe helper", () => { await Promise.resolve(); expect(__testing.getWaiterCount(runId)).toBe(1); - setGatewayDedupeEntry({ + setRunEntry({ dedupe, - key: `chat:${runId}`, - entry: { - ts: Date.now(), - ok: true, - payload: { - runId, - status: "ok", - startedAt: 100, - endedAt: 200, - }, + kind: "chat", + runId, + payload: { + runId, + status: "ok", + startedAt: 100, + endedAt: 200, }, }); @@ -56,28 +73,22 @@ describe("agent wait dedupe helper", () => { it("keeps stale chat dedupe blocked while agent dedupe is in-flight", async () => { const dedupe = new Map(); const runId = "run-stale-chat"; - setGatewayDedupeEntry({ + setRunEntry({ dedupe, - key: `chat:${runId}`, - entry: { - ts: Date.now(), - ok: true, - payload: { - runId, - status: "ok", - }, + kind: "chat", + runId, + payload: { + runId, + status: "ok", }, }); - setGatewayDedupeEntry({ + setRunEntry({ dedupe, - key: `agent:${runId}`, - entry: { - ts: Date.now(), - ok: true, - payload: { - runId, - status: "accepted", - }, + kind: "agent", + runId, + payload: { + runId, + status: "accepted", }, }); @@ -100,30 +111,26 @@ describe("agent wait dedupe helper", () => { it("uses newer terminal chat snapshot when agent entry is non-terminal", () => { const dedupe = new Map(); const runId = "run-nonterminal-agent-with-newer-chat"; - setGatewayDedupeEntry({ + setRunEntry({ dedupe, - key: `agent:${runId}`, - entry: { - ts: 100, - ok: true, - payload: { - runId, - status: "accepted", - }, + kind: "agent", + runId, + ts: 100, + payload: { + runId, + status: "accepted", }, }); - setGatewayDedupeEntry({ + setRunEntry({ dedupe, - key: `chat:${runId}`, - entry: { - ts: 200, - ok: true, - payload: { - runId, - status: "ok", - startedAt: 1, - endedAt: 2, - }, + kind: "chat", + runId, + ts: 200, + payload: { + runId, + status: "ok", + startedAt: 1, + endedAt: 2, }, }); @@ 
-143,16 +150,13 @@ describe("agent wait dedupe helper", () => { it("ignores stale agent snapshots when waiting for an active chat run", async () => { const dedupe = new Map(); const runId = "run-chat-active-ignore-agent"; - setGatewayDedupeEntry({ + setRunEntry({ dedupe, - key: `agent:${runId}`, - entry: { - ts: Date.now(), - ok: true, - payload: { - runId, - status: "ok", - }, + kind: "agent", + runId, + payload: { + runId, + status: "ok", }, }); @@ -173,18 +177,15 @@ describe("agent wait dedupe helper", () => { await Promise.resolve(); expect(__testing.getWaiterCount(runId)).toBe(1); - setGatewayDedupeEntry({ + setRunEntry({ dedupe, - key: `chat:${runId}`, - entry: { - ts: Date.now(), - ok: true, - payload: { - runId, - status: "ok", - startedAt: 123, - endedAt: 456, - }, + kind: "chat", + runId, + payload: { + runId, + status: "ok", + startedAt: 123, + endedAt: 456, }, }); @@ -200,23 +201,20 @@ describe("agent wait dedupe helper", () => { const runId = "run-collision"; const dedupe = new Map(); - setGatewayDedupeEntry({ + setRunEntry({ dedupe, - key: `agent:${runId}`, - entry: { - ts: 100, - ok: true, - payload: { runId, status: "ok", startedAt: 10, endedAt: 20 }, - }, + kind: "agent", + runId, + ts: 100, + payload: { runId, status: "ok", startedAt: 10, endedAt: 20 }, }); - setGatewayDedupeEntry({ + setRunEntry({ dedupe, - key: `chat:${runId}`, - entry: { - ts: 200, - ok: false, - payload: { runId, status: "error", startedAt: 30, endedAt: 40, error: "chat failed" }, - }, + kind: "chat", + runId, + ts: 200, + ok: false, + payload: { runId, status: "error", startedAt: 30, endedAt: 40, error: "chat failed" }, }); expect( @@ -232,23 +230,19 @@ describe("agent wait dedupe helper", () => { }); const dedupeReverse = new Map(); - setGatewayDedupeEntry({ + setRunEntry({ dedupe: dedupeReverse, - key: `chat:${runId}`, - entry: { - ts: 100, - ok: true, - payload: { runId, status: "ok", startedAt: 1, endedAt: 2 }, - }, + kind: "chat", + runId, + ts: 100, + payload: { runId, 
status: "ok", startedAt: 1, endedAt: 2 }, }); - setGatewayDedupeEntry({ + setRunEntry({ dedupe: dedupeReverse, - key: `agent:${runId}`, - entry: { - ts: 200, - ok: true, - payload: { runId, status: "timeout", startedAt: 3, endedAt: 4, error: "still running" }, - }, + kind: "agent", + runId, + ts: 200, + payload: { runId, status: "timeout", startedAt: 3, endedAt: 4, error: "still running" }, }); expect( @@ -281,14 +275,11 @@ describe("agent wait dedupe helper", () => { await Promise.resolve(); expect(__testing.getWaiterCount(runId)).toBe(2); - setGatewayDedupeEntry({ + setRunEntry({ dedupe, - key: `chat:${runId}`, - entry: { - ts: Date.now(), - ok: true, - payload: { runId, status: "ok" }, - }, + kind: "chat", + runId, + payload: { runId, status: "ok" }, }); await expect(first).resolves.toEqual( diff --git a/src/gateway/server-methods/agent.ts b/src/gateway/server-methods/agent.ts index ee08425b7fd..5a7507345df 100644 --- a/src/gateway/server-methods/agent.ts +++ b/src/gateway/server-methods/agent.ts @@ -50,8 +50,7 @@ import { performGatewaySessionReset } from "../session-reset-service.js"; import { canonicalizeSpawnedByForAgent, loadSessionEntry, - pruneLegacyStoreKeys, - resolveGatewaySessionStoreTarget, + migrateAndPruneGatewaySessionStoreKey, } from "../session-utils.js"; import { formatForLog } from "../ws-log.js"; import { waitForAgentJob } from "./agent-job.js"; @@ -425,18 +424,13 @@ export const agentHandlers: GatewayRequestHandlers = { const mainSessionKey = resolveAgentMainSessionKey({ cfg, agentId }); if (storePath) { const persisted = await updateSessionStore(storePath, (store) => { - const target = resolveGatewaySessionStoreTarget({ + const { primaryKey } = migrateAndPruneGatewaySessionStoreKey({ cfg, key: requestedSessionKey, store, }); - pruneLegacyStoreKeys({ - store, - canonicalKey: target.canonicalKey, - candidates: target.storeKeys, - }); - const merged = mergeSessionEntry(store[canonicalSessionKey], nextEntryPatch); - store[canonicalSessionKey] = 
merged; + const merged = mergeSessionEntry(store[primaryKey], nextEntryPatch); + store[primaryKey] = merged; return merged; }); sessionEntry = persisted; diff --git a/src/gateway/server-methods/chat.abort-authorization.test.ts b/src/gateway/server-methods/chat.abort-authorization.test.ts index 607e80b58ff..ed8a92e48a0 100644 --- a/src/gateway/server-methods/chat.abort-authorization.test.ts +++ b/src/gateway/server-methods/chat.abort-authorization.test.ts @@ -6,6 +6,30 @@ import { } from "./chat.abort.test-helpers.js"; import { chatHandlers } from "./chat.js"; +async function invokeSingleRunAbort({ + context, + runId = "run-1", + connId, + deviceId, + scopes, +}: { + context: ReturnType; + runId?: string; + connId: string; + deviceId: string; + scopes: string[]; +}) { + return await invokeChatAbortHandler({ + handler: chatHandlers["chat.abort"], + context, + request: { sessionKey: "main", runId }, + client: { + connId, + connect: { device: { id: deviceId }, scopes }, + }, + }); +} + describe("chat.abort authorization", () => { it("rejects explicit run aborts from other clients", async () => { const context = createChatAbortContext({ @@ -17,14 +41,11 @@ describe("chat.abort authorization", () => { ]), }); - const respond = await invokeChatAbortHandler({ - handler: chatHandlers["chat.abort"], + const respond = await invokeSingleRunAbort({ context, - request: { sessionKey: "main", runId: "run-1" }, - client: { - connId: "conn-other", - connect: { device: { id: "dev-other" }, scopes: ["operator.write"] }, - }, + connId: "conn-other", + deviceId: "dev-other", + scopes: ["operator.write"], }); const [ok, payload, error] = respond.mock.calls.at(-1) ?? 
[]; @@ -92,14 +113,11 @@ describe("chat.abort authorization", () => { ]), }); - const respond = await invokeChatAbortHandler({ - handler: chatHandlers["chat.abort"], + const respond = await invokeSingleRunAbort({ context, - request: { sessionKey: "main", runId: "run-1" }, - client: { - connId: "conn-admin", - connect: { device: { id: "dev-admin" }, scopes: ["operator.admin"] }, - }, + connId: "conn-admin", + deviceId: "dev-admin", + scopes: ["operator.admin"], }); const [ok, payload] = respond.mock.calls.at(-1) ?? []; diff --git a/src/gateway/server-methods/chat.abort-persistence.test.ts b/src/gateway/server-methods/chat.abort-persistence.test.ts index 31a00a3f186..e11b2dc08cb 100644 --- a/src/gateway/server-methods/chat.abort-persistence.test.ts +++ b/src/gateway/server-methods/chat.abort-persistence.test.ts @@ -197,7 +197,7 @@ describe("chat abort transcript persistence", () => { const { transcriptPath, sessionId } = await createTranscriptFixture("openclaw-chat-stop-"); const respond = vi.fn(); const context = createChatAbortContext({ - chatAbortControllers: new Map([["run-stop-1", createActiveRun("main", sessionId)]]), + chatAbortControllers: new Map([["run-stop-1", createActiveRun("main", { sessionId })]]), chatRunBuffers: new Map([["run-stop-1", "Partial from /stop"]]), chatDeltaSentAt: new Map([["run-stop-1", Date.now()]]), removeChatRun: vi.fn().mockReturnValue({ sessionKey: "main", clientRunId: "client-stop-1" }), diff --git a/src/gateway/server-methods/chat.abort.test-helpers.ts b/src/gateway/server-methods/chat.abort.test-helpers.ts index fe5cd324ccb..fb6efebd8f5 100644 --- a/src/gateway/server-methods/chat.abort.test-helpers.ts +++ b/src/gateway/server-methods/chat.abort.test-helpers.ts @@ -1,4 +1,6 @@ import { vi } from "vitest"; +import type { Mock } from "vitest"; +import type { GatewayRequestHandler, RespondFn } from "./types.js"; export function createActiveRun( sessionKey: string, @@ -19,7 +21,23 @@ export function createActiveRun( }; } -export 
function createChatAbortContext(overrides: Record = {}) { +export type ChatAbortTestContext = Record & { + chatAbortControllers: Map>; + chatRunBuffers: Map; + chatDeltaSentAt: Map; + chatAbortedRuns: Map; + removeChatRun: (...args: unknown[]) => { sessionKey: string; clientRunId: string } | undefined; + agentRunSeq: Map; + broadcast: (...args: unknown[]) => void; + nodeSendToSession: (...args: unknown[]) => void; + logGateway: { warn: (...args: unknown[]) => void }; +}; + +export type ChatAbortRespondMock = Mock; + +export function createChatAbortContext( + overrides: Record = {}, +): ChatAbortTestContext { return { chatAbortControllers: new Map(), chatRunBuffers: new Map(), @@ -37,15 +55,8 @@ export function createChatAbortContext(overrides: Record = {}) } export async function invokeChatAbortHandler(params: { - handler: (args: { - params: { sessionKey: string; runId?: string }; - respond: never; - context: never; - req: never; - client: never; - isWebchatConnect: () => boolean; - }) => Promise; - context: ReturnType; + handler: GatewayRequestHandler; + context: ChatAbortTestContext; request: { sessionKey: string; runId?: string }; client?: { connId?: string; @@ -54,8 +65,8 @@ export async function invokeChatAbortHandler(params: { scopes?: string[]; }; } | null; - respond?: ReturnType; -}) { + respond?: ChatAbortRespondMock; +}): Promise { const respond = params.respond ?? 
vi.fn(); await params.handler({ params: params.request, diff --git a/src/gateway/server-methods/server-methods.test.ts b/src/gateway/server-methods/server-methods.test.ts index 424511370cd..bd42485f4f8 100644 --- a/src/gateway/server-methods/server-methods.test.ts +++ b/src/gateway/server-methods/server-methods.test.ts @@ -221,59 +221,70 @@ describe("injectTimestamp", () => { }); describe("timestampOptsFromConfig", () => { - it("extracts timezone from config", () => { - const opts = timestampOptsFromConfig({ - agents: { - defaults: { - userTimezone: "America/Chicago", - }, - }, + it.each([ + { + name: "extracts timezone from config", // oxlint-disable-next-line typescript/no-explicit-any - } as any); - - expect(opts.timezone).toBe("America/Chicago"); - }); - - it("falls back gracefully with empty config", () => { - // oxlint-disable-next-line typescript/no-explicit-any - const opts = timestampOptsFromConfig({} as any); - - expect(opts.timezone).toBeDefined(); + cfg: { agents: { defaults: { userTimezone: "America/Chicago" } } } as any, + expected: "America/Chicago", + }, + { + name: "falls back gracefully with empty config", + // oxlint-disable-next-line typescript/no-explicit-any + cfg: {} as any, + expected: Intl.DateTimeFormat().resolvedOptions().timeZone, + }, + ])("$name", ({ cfg, expected }) => { + expect(timestampOptsFromConfig(cfg).timezone).toBe(expected); }); }); describe("normalizeRpcAttachmentsToChatAttachments", () => { - it("passes through string content", () => { - const res = normalizeRpcAttachmentsToChatAttachments([ - { type: "file", mimeType: "image/png", fileName: "a.png", content: "Zm9v" }, - ]); - expect(res).toEqual([ - { type: "file", mimeType: "image/png", fileName: "a.png", content: "Zm9v" }, - ]); - }); - - it("converts Uint8Array content to base64", () => { - const bytes = new TextEncoder().encode("foo"); - const res = normalizeRpcAttachmentsToChatAttachments([{ content: bytes }]); - expect(res[0]?.content).toBe("Zm9v"); + it.each([ + { + 
name: "passes through string content", + attachments: [{ type: "file", mimeType: "image/png", fileName: "a.png", content: "Zm9v" }], + expected: [{ type: "file", mimeType: "image/png", fileName: "a.png", content: "Zm9v" }], + }, + { + name: "converts Uint8Array content to base64", + attachments: [{ content: new TextEncoder().encode("foo") }], + expected: [{ type: undefined, mimeType: undefined, fileName: undefined, content: "Zm9v" }], + }, + { + name: "converts ArrayBuffer content to base64", + attachments: [{ content: new TextEncoder().encode("bar").buffer }], + expected: [{ type: undefined, mimeType: undefined, fileName: undefined, content: "YmFy" }], + }, + { + name: "drops attachments without usable content", + attachments: [{ content: undefined }, { mimeType: "image/png" }], + expected: [], + }, + ])("$name", ({ attachments, expected }) => { + expect(normalizeRpcAttachmentsToChatAttachments(attachments)).toEqual(expected); }); }); describe("sanitizeChatSendMessageInput", () => { - it("rejects null bytes", () => { - expect(sanitizeChatSendMessageInput("before\u0000after")).toEqual({ - ok: false, - error: "message must not contain null bytes", - }); - }); - - it("strips unsafe control characters while preserving tab/newline/carriage return", () => { - const result = sanitizeChatSendMessageInput("a\u0001b\tc\nd\re\u0007f\u007f"); - expect(result).toEqual({ ok: true, message: "ab\tc\nd\ref" }); - }); - - it("normalizes unicode to NFC", () => { - expect(sanitizeChatSendMessageInput("Cafe\u0301")).toEqual({ ok: true, message: "Café" }); + it.each([ + { + name: "rejects null bytes", + input: "before\u0000after", + expected: { ok: false as const, error: "message must not contain null bytes" }, + }, + { + name: "strips unsafe control characters while preserving tab/newline/carriage return", + input: "a\u0001b\tc\nd\re\u0007f\u007f", + expected: { ok: true as const, message: "ab\tc\nd\ref" }, + }, + { + name: "normalizes unicode to NFC", + input: "Cafe\u0301", + 
expected: { ok: true as const, message: "Café" }, + }, + ])("$name", ({ input, expected }) => { + expect(sanitizeChatSendMessageInput(input)).toEqual(expected); }); }); diff --git a/src/gateway/server-methods/sessions.ts b/src/gateway/server-methods/sessions.ts index f2e3817bfa6..d5244116d33 100644 --- a/src/gateway/server-methods/sessions.ts +++ b/src/gateway/server-methods/sessions.ts @@ -31,7 +31,7 @@ import { listSessionsFromStore, loadCombinedSessionStoreForGateway, loadSessionEntry, - pruneLegacyStoreKeys, + migrateAndPruneGatewaySessionStoreKey, readSessionPreviewItemsFromTranscript, resolveGatewaySessionStoreTarget, resolveSessionModelRef, @@ -92,31 +92,6 @@ function rejectWebchatSessionMutation(params: { return true; } -function migrateAndPruneSessionStoreKey(params: { - cfg: ReturnType; - key: string; - store: Record; -}) { - const target = resolveGatewaySessionStoreTarget({ - cfg: params.cfg, - key: params.key, - store: params.store, - }); - const primaryKey = target.canonicalKey; - if (!params.store[primaryKey]) { - const existingKey = target.storeKeys.find((candidate) => Boolean(params.store[candidate])); - if (existingKey) { - params.store[primaryKey] = params.store[existingKey]; - } - } - pruneLegacyStoreKeys({ - store: params.store, - canonicalKey: primaryKey, - candidates: target.storeKeys, - }); - return { target, primaryKey, entry: params.store[primaryKey] }; -} - export const sessionsHandlers: GatewayRequestHandlers = { "sessions.list": ({ params, respond }) => { if (!assertValidParams(params, validateSessionsListParams, "sessions.list", respond)) { @@ -224,7 +199,7 @@ export const sessionsHandlers: GatewayRequestHandlers = { const { cfg, target, storePath } = resolveGatewaySessionTargetFromKey(key); const applied = await updateSessionStore(storePath, async (store) => { - const { primaryKey } = migrateAndPruneSessionStoreKey({ cfg, key, store }); + const { primaryKey } = migrateAndPruneGatewaySessionStoreKey({ cfg, key, store }); return await 
applySessionsPatchToStore({ cfg, store, @@ -316,7 +291,7 @@ export const sessionsHandlers: GatewayRequestHandlers = { } const sessionId = entry?.sessionId; const deleted = await updateSessionStore(storePath, (store) => { - const { primaryKey } = migrateAndPruneSessionStoreKey({ cfg, key, store }); + const { primaryKey } = migrateAndPruneGatewaySessionStoreKey({ cfg, key, store }); const hadEntry = Boolean(store[primaryKey]); if (hadEntry) { delete store[primaryKey]; @@ -385,7 +360,7 @@ export const sessionsHandlers: GatewayRequestHandlers = { const { cfg, target, storePath } = resolveGatewaySessionTargetFromKey(key); // Lock + read in a short critical section; transcript work happens outside. const compactTarget = await updateSessionStore(storePath, (store) => { - const { entry, primaryKey } = migrateAndPruneSessionStoreKey({ cfg, key, store }); + const { entry, primaryKey } = migrateAndPruneGatewaySessionStoreKey({ cfg, key, store }); return { entry, primaryKey }; }); const entry = compactTarget.entry; diff --git a/src/gateway/server-node-events.ts b/src/gateway/server-node-events.ts index b36ca9aca50..8ab24644101 100644 --- a/src/gateway/server-node-events.ts +++ b/src/gateway/server-node-events.ts @@ -16,11 +16,7 @@ import { defaultRuntime } from "../runtime.js"; import { parseMessageWithAttachments } from "./chat-attachments.js"; import { normalizeRpcAttachmentsToChatAttachments } from "./server-methods/attachment-normalize.js"; import type { NodeEvent, NodeEventContext } from "./server-node-events-types.js"; -import { - loadSessionEntry, - pruneLegacyStoreKeys, - resolveGatewaySessionStoreTarget, -} from "./session-utils.js"; +import { loadSessionEntry, migrateAndPruneGatewaySessionStoreKey } from "./session-utils.js"; import { formatForLog } from "./ws-log.js"; const MAX_EXEC_EVENT_OUTPUT_CHARS = 180; @@ -152,17 +148,12 @@ async function touchSessionStore(params: { return; } await updateSessionStore(storePath, (store) => { - const target = 
resolveGatewaySessionStoreTarget({ + const { primaryKey } = migrateAndPruneGatewaySessionStoreKey({ cfg: params.cfg, key: params.sessionKey, store, }); - pruneLegacyStoreKeys({ - store, - canonicalKey: target.canonicalKey, - candidates: target.storeKeys, - }); - store[params.canonicalKey] = { + store[primaryKey] = { sessionId: params.sessionId, updatedAt: params.now, thinkingLevel: params.entry?.thinkingLevel, diff --git a/src/gateway/server-runtime-config.test.ts b/src/gateway/server-runtime-config.test.ts index 34cc4632670..5c1354d7cd5 100644 --- a/src/gateway/server-runtime-config.test.ts +++ b/src/gateway/server-runtime-config.test.ts @@ -201,39 +201,79 @@ describe("resolveGatewayRuntimeConfig", () => { ); }); - it("rejects non-loopback control UI when allowed origins are missing", async () => { - await expect( - resolveGatewayRuntimeConfig({ - cfg: { - gateway: { - bind: "lan", - auth: TOKEN_AUTH, - }, - }, - port: 18789, - }), - ).rejects.toThrow("non-loopback Control UI requires gateway.controlUi.allowedOrigins"); - }); - - it("allows non-loopback control UI without allowed origins when dangerous fallback is enabled", async () => { - const result = await resolveGatewayRuntimeConfig({ + it.each([ + { + name: "rejects non-loopback control UI when allowed origins are missing", cfg: { gateway: { - bind: "lan", + bind: "lan" as const, + auth: TOKEN_AUTH, + }, + }, + expectedError: "non-loopback Control UI requires gateway.controlUi.allowedOrigins", + }, + { + name: "allows non-loopback control UI without allowed origins when dangerous fallback is enabled", + cfg: { + gateway: { + bind: "lan" as const, auth: TOKEN_AUTH, controlUi: { dangerouslyAllowHostHeaderOriginFallback: true, }, }, }, - port: 18789, - }); - expect(result.bindHost).toBe("0.0.0.0"); + expectedBindHost: "0.0.0.0", + }, + { + name: "allows non-loopback control UI when allowed origins collapse after trimming", + cfg: { + gateway: { + bind: "lan" as const, + auth: TOKEN_AUTH, + controlUi: { + 
allowedOrigins: [" https://control.example.com "], + }, + }, + }, + expectedBindHost: "0.0.0.0", + }, + ])("$name", async ({ cfg, expectedError, expectedBindHost }) => { + if (expectedError) { + await expect(resolveGatewayRuntimeConfig({ cfg, port: 18789 })).rejects.toThrow( + expectedError, + ); + return; + } + const result = await resolveGatewayRuntimeConfig({ cfg, port: 18789 }); + expect(result.bindHost).toBe(expectedBindHost); }); }); describe("HTTP security headers", () => { - it("resolves strict transport security header from config", async () => { + const cases = [ + { + name: "resolves strict transport security headers from config", + strictTransportSecurity: " max-age=31536000; includeSubDomains ", + expected: "max-age=31536000; includeSubDomains", + }, + { + name: "does not set strict transport security when explicitly disabled", + strictTransportSecurity: false, + expected: undefined, + }, + { + name: "does not set strict transport security when the value is blank", + strictTransportSecurity: " ", + expected: undefined, + }, + ] satisfies ReadonlyArray<{ + name: string; + strictTransportSecurity: string | false; + expected: string | undefined; + }>; + + it.each(cases)("$name", async ({ strictTransportSecurity, expected }) => { const result = await resolveGatewayRuntimeConfig({ cfg: { gateway: { @@ -241,7 +281,7 @@ describe("resolveGatewayRuntimeConfig", () => { auth: { mode: "none" }, http: { securityHeaders: { - strictTransportSecurity: " max-age=31536000; includeSubDomains ", + strictTransportSecurity, }, }, }, @@ -249,26 +289,7 @@ describe("resolveGatewayRuntimeConfig", () => { port: 18789, }); - expect(result.strictTransportSecurityHeader).toBe("max-age=31536000; includeSubDomains"); - }); - - it("does not set strict transport security when explicitly disabled", async () => { - const result = await resolveGatewayRuntimeConfig({ - cfg: { - gateway: { - bind: "loopback", - auth: { mode: "none" }, - http: { - securityHeaders: { - 
strictTransportSecurity: false, - }, - }, - }, - }, - port: 18789, - }); - - expect(result.strictTransportSecurityHeader).toBeUndefined(); + expect(result.strictTransportSecurityHeader).toBe(expected); }); }); }); diff --git a/src/gateway/server.auth.browser-hardening.test.ts b/src/gateway/server.auth.browser-hardening.test.ts index c31fb7c19b1..b28f60ad8c6 100644 --- a/src/gateway/server.auth.browser-hardening.test.ts +++ b/src/gateway/server.auth.browser-hardening.test.ts @@ -30,6 +30,11 @@ const TEST_OPERATOR_CLIENT = { mode: GATEWAY_CLIENT_MODES.TEST, }; const ALLOWED_BROWSER_ORIGIN = "https://control.example.com"; +const TRUSTED_PROXY_BROWSER_HEADERS = { + "x-forwarded-for": "203.0.113.50", + "x-forwarded-proto": "https", + "x-forwarded-user": "operator@example.com", +}; const originForPort = (port: number) => `http://127.0.0.1:${port}`; @@ -75,120 +80,73 @@ async function createSignedDevice(params: { }; } -describe("gateway auth browser hardening", () => { - test("rejects trusted-proxy browser connects from origins outside the allowlist", async () => { - const { writeConfigFile } = await import("../config/config.js"); - await writeConfigFile({ - gateway: { - auth: { - mode: "trusted-proxy", - trustedProxy: { - userHeader: "x-forwarded-user", - requiredHeaders: ["x-forwarded-proto"], - }, - }, - trustedProxies: ["127.0.0.1"], - controlUi: { - allowedOrigins: [ALLOWED_BROWSER_ORIGIN], +async function writeTrustedProxyBrowserAuthConfig() { + const { writeConfigFile } = await import("../config/config.js"); + await writeConfigFile({ + gateway: { + auth: { + mode: "trusted-proxy", + trustedProxy: { + userHeader: "x-forwarded-user", + requiredHeaders: ["x-forwarded-proto"], }, }, - }); + trustedProxies: ["127.0.0.1"], + controlUi: { + allowedOrigins: [ALLOWED_BROWSER_ORIGIN], + }, + }, + }); +} - await withGatewayServer(async ({ port }) => { - const ws = await openWs(port, { - origin: "https://evil.example", - "x-forwarded-for": "203.0.113.50", - 
"x-forwarded-proto": "https", - "x-forwarded-user": "operator@example.com", +async function withTrustedProxyBrowserWs(origin: string, run: (ws: WebSocket) => Promise) { + await writeTrustedProxyBrowserAuthConfig(); + await withGatewayServer(async ({ port }) => { + const ws = await openWs(port, { + origin, + ...TRUSTED_PROXY_BROWSER_HEADERS, + }); + try { + await run(ws); + } finally { + ws.close(); + } + }); +} + +describe("gateway auth browser hardening", () => { + test("rejects trusted-proxy browser connects from origins outside the allowlist", async () => { + await withTrustedProxyBrowserWs("https://evil.example", async (ws) => { + const res = await connectReq(ws, { + client: TEST_OPERATOR_CLIENT, + device: null, }); - try { - const res = await connectReq(ws, { - client: TEST_OPERATOR_CLIENT, - device: null, - }); - expect(res.ok).toBe(false); - expect(res.error?.message ?? "").toContain("origin not allowed"); - } finally { - ws.close(); - } + expect(res.ok).toBe(false); + expect(res.error?.message ?? 
"").toContain("origin not allowed"); }); }); test("accepts trusted-proxy browser connects from allowed origins", async () => { - const { writeConfigFile } = await import("../config/config.js"); - await writeConfigFile({ - gateway: { - auth: { - mode: "trusted-proxy", - trustedProxy: { - userHeader: "x-forwarded-user", - requiredHeaders: ["x-forwarded-proto"], - }, - }, - trustedProxies: ["127.0.0.1"], - controlUi: { - allowedOrigins: [ALLOWED_BROWSER_ORIGIN], - }, - }, - }); - - await withGatewayServer(async ({ port }) => { - const ws = await openWs(port, { - origin: ALLOWED_BROWSER_ORIGIN, - "x-forwarded-for": "203.0.113.50", - "x-forwarded-proto": "https", - "x-forwarded-user": "operator@example.com", + await withTrustedProxyBrowserWs(ALLOWED_BROWSER_ORIGIN, async (ws) => { + const payload = await connectOk(ws, { + client: TEST_OPERATOR_CLIENT, + device: null, }); - try { - const payload = await connectOk(ws, { - client: TEST_OPERATOR_CLIENT, - device: null, - }); - expect(payload.type).toBe("hello-ok"); - } finally { - ws.close(); - } + expect(payload.type).toBe("hello-ok"); }); }); test("preserves scopes for trusted-proxy non-control-ui browser sessions", async () => { - const { writeConfigFile } = await import("../config/config.js"); - await writeConfigFile({ - gateway: { - auth: { - mode: "trusted-proxy", - trustedProxy: { - userHeader: "x-forwarded-user", - requiredHeaders: ["x-forwarded-proto"], - }, - }, - trustedProxies: ["127.0.0.1"], - controlUi: { - allowedOrigins: [ALLOWED_BROWSER_ORIGIN], - }, - }, - }); - - await withGatewayServer(async ({ port }) => { - const ws = await openWs(port, { - origin: ALLOWED_BROWSER_ORIGIN, - "x-forwarded-for": "203.0.113.50", - "x-forwarded-proto": "https", - "x-forwarded-user": "operator@example.com", + await withTrustedProxyBrowserWs(ALLOWED_BROWSER_ORIGIN, async (ws) => { + const payload = await connectOk(ws, { + client: TEST_OPERATOR_CLIENT, + device: null, + scopes: ["operator.read"], }); - try { - const payload = 
await connectOk(ws, { - client: TEST_OPERATOR_CLIENT, - device: null, - scopes: ["operator.read"], - }); - expect(payload.type).toBe("hello-ok"); + expect(payload.type).toBe("hello-ok"); - const status = await rpcReq(ws, "status"); - expect(status.ok).toBe(true); - } finally { - ws.close(); - } + const status = await rpcReq(ws, "status"); + expect(status.ok).toBe(true); }); }); diff --git a/src/gateway/server.auth.compat-baseline.test.ts b/src/gateway/server.auth.compat-baseline.test.ts index 8c6ea06978c..a606feab909 100644 --- a/src/gateway/server.auth.compat-baseline.test.ts +++ b/src/gateway/server.auth.compat-baseline.test.ts @@ -34,6 +34,27 @@ function expectAuthErrorDetails(params: { } } +async function expectSharedOperatorScopesCleared( + port: number, + auth: { token?: string; password?: string }, +) { + const ws = await openWs(port); + try { + const res = await connectReq(ws, { + ...auth, + scopes: ["operator.admin"], + device: null, + }); + expect(res.ok).toBe(true); + + const adminRes = await rpcReq(ws, "set-heartbeats", { enabled: false }); + expect(adminRes.ok).toBe(false); + expect(adminRes.error?.message).toBe("missing scope: operator.admin"); + } finally { + ws.close(); + } +} + describe("gateway auth compatibility baseline", () => { describe("token mode", () => { let server: Awaited>; @@ -64,21 +85,7 @@ describe("gateway auth compatibility baseline", () => { }); test("clears client-declared scopes for shared-token operator connects", async () => { - const ws = await openWs(port); - try { - const res = await connectReq(ws, { - token: "secret", - scopes: ["operator.admin"], - device: null, - }); - expect(res.ok).toBe(true); - - const adminRes = await rpcReq(ws, "set-heartbeats", { enabled: false }); - expect(adminRes.ok).toBe(false); - expect(adminRes.error?.message).toBe("missing scope: operator.admin"); - } finally { - ws.close(); - } + await expectSharedOperatorScopesCleared(port, { token: "secret" }); }); test("returns stable token-missing 
details for control ui without token", async () => { @@ -184,21 +191,7 @@ describe("gateway auth compatibility baseline", () => { }); test("clears client-declared scopes for shared-password operator connects", async () => { - const ws = await openWs(port); - try { - const res = await connectReq(ws, { - password: "secret", - scopes: ["operator.admin"], - device: null, - }); - expect(res.ok).toBe(true); - - const adminRes = await rpcReq(ws, "set-heartbeats", { enabled: false }); - expect(adminRes.ok).toBe(false); - expect(adminRes.error?.message).toBe("missing scope: operator.admin"); - } finally { - ws.close(); - } + await expectSharedOperatorScopesCleared(port, { password: "secret" }); }); }); diff --git a/src/gateway/server.config-patch.test.ts b/src/gateway/server.config-patch.test.ts index 67efe9b79be..bc8b4fab75a 100644 --- a/src/gateway/server.config-patch.test.ts +++ b/src/gateway/server.config-patch.test.ts @@ -46,6 +46,21 @@ async function resetTempDir(name: string): Promise { return dir; } +async function getConfigHash() { + const current = await rpcReq<{ + hash?: string; + }>(requireWs(), "config.get", {}); + expect(current.ok).toBe(true); + expect(typeof current.payload?.hash).toBe("string"); + return String(current.payload?.hash); +} + +async function expectSchemaLookupInvalid(path: unknown) { + const res = await rpcReq<{ ok?: boolean }>(requireWs(), "config.schema.lookup", { path }); + expect(res.ok).toBe(false); + expect(res.error?.message ?? 
"").toContain("invalid config.schema.lookup params"); +} + describe("gateway config methods", () => { it("round-trips config.set and returns the live config path", async () => { const { createConfigIO } = await import("../config/config.js"); @@ -73,12 +88,6 @@ describe("gateway config methods", () => { }); it("returns config.set validation details in the top-level error message", async () => { - const current = await rpcReq<{ - hash?: string; - }>(requireWs(), "config.get", {}); - expect(current.ok).toBe(true); - expect(typeof current.payload?.hash).toBe("string"); - const res = await rpcReq<{ ok?: boolean; error?: { @@ -86,7 +95,7 @@ describe("gateway config methods", () => { }; }>(requireWs(), "config.set", { raw: JSON.stringify({ gateway: { bind: 123 } }), - baseHash: current.payload?.hash, + baseHash: await getConfigHash(), }); const error = res.error as | { @@ -135,31 +144,22 @@ describe("gateway config methods", () => { expect(res.error?.message).toBe("config schema path not found"); }); - it("rejects config.schema.lookup when the path is only whitespace", async () => { - const res = await rpcReq<{ ok?: boolean }>(requireWs(), "config.schema.lookup", { - path: " ", - }); - - expect(res.ok).toBe(false); - expect(res.error?.message ?? "").toContain("invalid config.schema.lookup params"); - }); - - it("rejects config.schema.lookup when the path exceeds the protocol limit", async () => { - const res = await rpcReq<{ ok?: boolean }>(requireWs(), "config.schema.lookup", { + it.each([ + { name: "rejects config.schema.lookup when the path is only whitespace", path: " " }, + { + name: "rejects config.schema.lookup when the path exceeds the protocol limit", path: `gateway.${"a".repeat(1020)}`, - }); - - expect(res.ok).toBe(false); - expect(res.error?.message ?? 
"").toContain("invalid config.schema.lookup params"); - }); - - it("rejects config.schema.lookup when the path contains invalid characters", async () => { - const res = await rpcReq<{ ok?: boolean }>(requireWs(), "config.schema.lookup", { + }, + { + name: "rejects config.schema.lookup when the path contains invalid characters", path: "gateway.auth\nspoof", - }); - - expect(res.ok).toBe(false); - expect(res.error?.message ?? "").toContain("invalid config.schema.lookup params"); + }, + { + name: "rejects config.schema.lookup when the path is not a string", + path: 42, + }, + ])("$name", async ({ path }) => { + await expectSchemaLookupInvalid(path); }); it("rejects prototype-chain config.schema.lookup paths without reflecting them", async () => { @@ -171,9 +171,10 @@ describe("gateway config methods", () => { expect(res.error?.message).toBe("config schema path not found"); }); - it("rejects config.patch when raw is not an object", async () => { + it("rejects config.patch when raw is null", async () => { const res = await rpcReq<{ ok?: boolean }>(requireWs(), "config.patch", { - raw: "[]", + raw: "null", + baseHash: await getConfigHash(), }); expect(res.ok).toBe(false); expect(res.error?.message ?? 
"").toContain("raw must be an object"); diff --git a/src/gateway/server.sessions-send.test.ts b/src/gateway/server.sessions-send.test.ts index 7f1e49e8f01..11fae253ff3 100644 --- a/src/gateway/server.sessions-send.test.ts +++ b/src/gateway/server.sessions-send.test.ts @@ -184,7 +184,18 @@ describe("sessions_send label lookup", () => { timeoutMs: 5000, }); - const tool = getSessionsSendTool(); + const tool = createOpenClawTools({ + config: { + tools: { + sessions: { + visibility: "all", + }, + }, + }, + }).find((candidate) => candidate.name === "sessions_send"); + if (!tool) { + throw new Error("missing sessions_send tool"); + } // Send using label instead of sessionKey const result = await tool.execute("call-by-label", { diff --git a/src/gateway/server.talk-config.test.ts b/src/gateway/server.talk-config.test.ts index ad9027f36fc..a47addbb0e0 100644 --- a/src/gateway/server.talk-config.test.ts +++ b/src/gateway/server.talk-config.test.ts @@ -20,6 +20,27 @@ import { withServer } from "./test-with-server.js"; installGatewayTestHooks({ scope: "suite" }); type GatewaySocket = Parameters[0]>[0]; +type SecretRef = { source?: string; provider?: string; id?: string }; +type TalkConfigPayload = { + config?: { + talk?: { + provider?: string; + providers?: { + elevenlabs?: { voiceId?: string; apiKey?: string | SecretRef }; + }; + resolved?: { + provider?: string; + config?: { voiceId?: string; apiKey?: string | SecretRef }; + }; + apiKey?: string | SecretRef; + voiceId?: string; + silenceTimeoutMs?: number; + }; + session?: { mainKey?: string }; + ui?: { seamColor?: string }; + }; +}; +type TalkConfig = NonNullable["talk"]>; const TALK_CONFIG_DEVICE_PATH = path.join( os.tmpdir(), `openclaw-talk-config-device-${process.pid}.json`, @@ -67,6 +88,37 @@ async function writeTalkConfig(config: { await writeConfigFile({ talk: config }); } +async function fetchTalkConfig( + ws: GatewaySocket, + params?: { includeSecrets?: boolean } | Record, +) { + return rpcReq(ws, "talk.config", 
params ?? {}); +} + +function expectElevenLabsTalkConfig( + talk: TalkConfig | undefined, + expected: { + voiceId?: string; + apiKey?: string | SecretRef; + silenceTimeoutMs?: number; + }, +) { + expect(talk?.provider).toBe("elevenlabs"); + expect(talk?.providers?.elevenlabs?.voiceId).toBe(expected.voiceId); + expect(talk?.resolved?.provider).toBe("elevenlabs"); + expect(talk?.resolved?.config?.voiceId).toBe(expected.voiceId); + expect(talk?.voiceId).toBe(expected.voiceId); + + if ("apiKey" in expected) { + expect(talk?.providers?.elevenlabs?.apiKey).toEqual(expected.apiKey); + expect(talk?.resolved?.config?.apiKey).toEqual(expected.apiKey); + expect(talk?.apiKey).toEqual(expected.apiKey); + } + if ("silenceTimeoutMs" in expected) { + expect(talk?.silenceTimeoutMs).toBe(expected.silenceTimeoutMs); + } +} + describe("gateway talk.config", () => { it("returns redacted talk config for read scope", async () => { const { writeConfigFile } = await import("../config/config.js"); @@ -86,35 +138,26 @@ describe("gateway talk.config", () => { await withServer(async (ws) => { await connectOperator(ws, ["operator.read"]); - const res = await rpcReq<{ - config?: { - talk?: { - provider?: string; - providers?: { - elevenlabs?: { voiceId?: string; apiKey?: string }; - }; - resolved?: { - provider?: string; - config?: { voiceId?: string; apiKey?: string }; - }; - apiKey?: string; - voiceId?: string; - silenceTimeoutMs?: number; - }; - }; - }>(ws, "talk.config", {}); + const res = await fetchTalkConfig(ws); expect(res.ok).toBe(true); - expect(res.payload?.config?.talk?.provider).toBe("elevenlabs"); - expect(res.payload?.config?.talk?.providers?.elevenlabs?.voiceId).toBe("voice-123"); - expect(res.payload?.config?.talk?.providers?.elevenlabs?.apiKey).toBe( - "__OPENCLAW_REDACTED__", - ); - expect(res.payload?.config?.talk?.resolved?.provider).toBe("elevenlabs"); - expect(res.payload?.config?.talk?.resolved?.config?.voiceId).toBe("voice-123"); - 
expect(res.payload?.config?.talk?.resolved?.config?.apiKey).toBe("__OPENCLAW_REDACTED__"); - expect(res.payload?.config?.talk?.voiceId).toBe("voice-123"); - expect(res.payload?.config?.talk?.apiKey).toBe("__OPENCLAW_REDACTED__"); - expect(res.payload?.config?.talk?.silenceTimeoutMs).toBe(1500); + expectElevenLabsTalkConfig(res.payload?.config?.talk, { + voiceId: "voice-123", + apiKey: "__OPENCLAW_REDACTED__", + silenceTimeoutMs: 1500, + }); + expect(res.payload?.config?.session?.mainKey).toBe("main-test"); + expect(res.payload?.config?.ui?.seamColor).toBe("#112233"); + }); + }); + + it("rejects invalid talk.config params", async () => { + await writeTalkConfig({ apiKey: "secret-key-abc" }); // pragma: allowlist secret + + await withServer(async (ws) => { + await connectOperator(ws, ["operator.read"]); + const res = await fetchTalkConfig(ws, { includeSecrets: "yes" }); + expect(res.ok).toBe(false); + expect(res.error?.message).toContain("invalid talk.config params"); }); }); @@ -123,22 +166,25 @@ describe("gateway talk.config", () => { await withServer(async (ws) => { await connectOperator(ws, ["operator.read"]); - const res = await rpcReq(ws, "talk.config", { includeSecrets: true }); + const res = await fetchTalkConfig(ws, { includeSecrets: true }); expect(res.ok).toBe(false); expect(res.error?.message).toContain("missing scope: operator.talk.secrets"); }); }); - it("returns secrets for operator.talk.secrets scope", async () => { + it.each([ + ["operator.talk.secrets", ["operator.read", "operator.write", "operator.talk.secrets"]], + ["operator.admin", ["operator.read", "operator.admin"]], + ] as const)("returns secrets for %s scope", async (_label, scopes) => { await writeTalkConfig({ apiKey: "secret-key-abc" }); // pragma: allowlist secret await withServer(async (ws) => { - await connectOperator(ws, ["operator.read", "operator.write", "operator.talk.secrets"]); - const res = await rpcReq<{ config?: { talk?: { apiKey?: string } } }>(ws, "talk.config", { - 
includeSecrets: true, - }); + await connectOperator(ws, [...scopes]); + const res = await fetchTalkConfig(ws, { includeSecrets: true }); expect(res.ok).toBe(true); - expect(res.payload?.config?.talk?.apiKey).toBe("secret-key-abc"); + expectElevenLabsTalkConfig(res.payload?.config?.talk, { + apiKey: "secret-key-abc", + }); }); }); @@ -154,44 +200,15 @@ describe("gateway talk.config", () => { await withEnvAsync({ ELEVENLABS_API_KEY: "env-elevenlabs-key" }, async () => { await withServer(async (ws) => { await connectOperator(ws, ["operator.read", "operator.write", "operator.talk.secrets"]); - const res = await rpcReq<{ - config?: { - talk?: { - apiKey?: { source?: string; provider?: string; id?: string }; - providers?: { - elevenlabs?: { - apiKey?: { source?: string; provider?: string; id?: string }; - }; - }; - resolved?: { - provider?: string; - config?: { - apiKey?: { source?: string; provider?: string; id?: string }; - }; - }; - }; - }; - }>(ws, "talk.config", { - includeSecrets: true, - }); + const res = await fetchTalkConfig(ws, { includeSecrets: true }); expect(res.ok).toBe(true); expect(validateTalkConfigResult(res.payload)).toBe(true); - expect(res.payload?.config?.talk?.apiKey).toEqual({ + const secretRef = { source: "env", provider: "default", id: "ELEVENLABS_API_KEY", - }); - expect(res.payload?.config?.talk?.providers?.elevenlabs?.apiKey).toEqual({ - source: "env", - provider: "default", - id: "ELEVENLABS_API_KEY", - }); - expect(res.payload?.config?.talk?.resolved?.provider).toBe("elevenlabs"); - expect(res.payload?.config?.talk?.resolved?.config?.apiKey).toEqual({ - source: "env", - provider: "default", - id: "ELEVENLABS_API_KEY", - }); + } satisfies SecretRef; + expectElevenLabsTalkConfig(res.payload?.config?.talk, { apiKey: secretRef }); }); }); }); @@ -212,27 +229,11 @@ describe("gateway talk.config", () => { await withServer(async (ws) => { await connectOperator(ws, ["operator.read"]); - const res = await rpcReq<{ - config?: { - talk?: { - 
provider?: string; - providers?: { - elevenlabs?: { voiceId?: string }; - }; - resolved?: { - provider?: string; - config?: { voiceId?: string }; - }; - voiceId?: string; - }; - }; - }>(ws, "talk.config", {}); + const res = await fetchTalkConfig(ws); expect(res.ok).toBe(true); - expect(res.payload?.config?.talk?.provider).toBe("elevenlabs"); - expect(res.payload?.config?.talk?.providers?.elevenlabs?.voiceId).toBe("voice-normalized"); - expect(res.payload?.config?.talk?.resolved?.provider).toBe("elevenlabs"); - expect(res.payload?.config?.talk?.resolved?.config?.voiceId).toBe("voice-normalized"); - expect(res.payload?.config?.talk?.voiceId).toBe("voice-normalized"); + expectElevenLabsTalkConfig(res.payload?.config?.talk, { + voiceId: "voice-normalized", + }); }); }); }); diff --git a/src/gateway/server/plugins-http.test.ts b/src/gateway/server/plugins-http.test.ts index 476f76f8850..e5062686246 100644 --- a/src/gateway/server/plugins-http.test.ts +++ b/src/gateway/server/plugins-http.test.ts @@ -86,6 +86,31 @@ async function createSubagentRuntime(): Promise { return call.runtimeOptions.subagent; } +function createSecurePluginRouteHandler(params: { + exactPluginHandler: () => boolean | Promise; + prefixGatewayHandler: () => boolean | Promise; +}) { + return createGatewayPluginRequestHandler({ + registry: createTestRegistry({ + httpRoutes: [ + createRoute({ + path: "/plugin/secure/report", + match: "exact", + auth: "plugin", + handler: params.exactPluginHandler, + }), + createRoute({ + path: "/plugin/secure", + match: "prefix", + auth: "gateway", + handler: params.prefixGatewayHandler, + }), + ], + }), + log: createPluginLog(), + }); +} + describe("createGatewayPluginRequestHandler", () => { it("caps unauthenticated plugin routes to non-admin subagent scopes", async () => { loadOpenClawPlugins.mockReset(); @@ -209,24 +234,9 @@ describe("createGatewayPluginRequestHandler", () => { it("fails closed when a matched gateway route reaches dispatch without auth", async () => 
{ const exactPluginHandler = vi.fn(async () => false); const prefixGatewayHandler = vi.fn(async () => true); - const handler = createGatewayPluginRequestHandler({ - registry: createTestRegistry({ - httpRoutes: [ - createRoute({ - path: "/plugin/secure/report", - match: "exact", - auth: "plugin", - handler: exactPluginHandler, - }), - createRoute({ - path: "/plugin/secure", - match: "prefix", - auth: "gateway", - handler: prefixGatewayHandler, - }), - ], - }), - log: createPluginLog(), + const handler = createSecurePluginRouteHandler({ + exactPluginHandler, + prefixGatewayHandler, }); const { res } = makeMockHttpResponse(); @@ -246,24 +256,9 @@ describe("createGatewayPluginRequestHandler", () => { it("allows gateway route fallthrough only after gateway auth succeeds", async () => { const exactPluginHandler = vi.fn(async () => false); const prefixGatewayHandler = vi.fn(async () => true); - const handler = createGatewayPluginRequestHandler({ - registry: createTestRegistry({ - httpRoutes: [ - createRoute({ - path: "/plugin/secure/report", - match: "exact", - auth: "plugin", - handler: exactPluginHandler, - }), - createRoute({ - path: "/plugin/secure", - match: "prefix", - auth: "gateway", - handler: prefixGatewayHandler, - }), - ], - }), - log: createPluginLog(), + const handler = createSecurePluginRouteHandler({ + exactPluginHandler, + prefixGatewayHandler, }); const { res } = makeMockHttpResponse(); diff --git a/src/gateway/server/readiness.test.ts b/src/gateway/server/readiness.test.ts index 2ad29d3655a..b333277f158 100644 --- a/src/gateway/server/readiness.test.ts +++ b/src/gateway/server/readiness.test.ts @@ -46,172 +46,188 @@ function createHealthyDiscordManager(startedAt: number, lastEventAt: number): Ch ); } +function withReadinessClock(run: () => void) { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-06T12:00:00Z")); + try { + run(); + } finally { + vi.useRealTimers(); + } +} + +function createReadinessHarness(params: { + startedAgoMs: number; + 
accounts: Record>; + cacheTtlMs?: number; +}) { + const startedAt = Date.now() - params.startedAgoMs; + const manager = createManager(snapshotWith(params.accounts)); + return { + manager, + readiness: createReadinessChecker({ + channelManager: manager, + startedAt, + cacheTtlMs: params.cacheTtlMs, + }), + }; +} + describe("createReadinessChecker", () => { it("reports ready when all managed channels are healthy", () => { - vi.useFakeTimers(); - vi.setSystemTime(new Date("2026-03-06T12:00:00Z")); - const startedAt = Date.now() - 5 * 60_000; - const manager = createHealthyDiscordManager(startedAt, Date.now() - 1_000); + withReadinessClock(() => { + const startedAt = Date.now() - 5 * 60_000; + const manager = createHealthyDiscordManager(startedAt, Date.now() - 1_000); - const readiness = createReadinessChecker({ channelManager: manager, startedAt }); - expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 300_000 }); - vi.useRealTimers(); + const readiness = createReadinessChecker({ channelManager: manager, startedAt }); + expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 300_000 }); + }); }); it("ignores disabled and unconfigured channels", () => { - vi.useFakeTimers(); - vi.setSystemTime(new Date("2026-03-06T12:00:00Z")); - const startedAt = Date.now() - 5 * 60_000; - const manager = createManager( - snapshotWith({ - discord: { - running: false, - enabled: false, - configured: true, - lastStartAt: startedAt, + withReadinessClock(() => { + const { readiness } = createReadinessHarness({ + startedAgoMs: 5 * 60_000, + accounts: { + discord: { + running: false, + enabled: false, + configured: true, + lastStartAt: Date.now() - 5 * 60_000, + }, + telegram: { + running: false, + enabled: true, + configured: false, + lastStartAt: Date.now() - 5 * 60_000, + }, }, - telegram: { - running: false, - enabled: true, - configured: false, - lastStartAt: startedAt, - }, - }), - ); - - const readiness = createReadinessChecker({ channelManager: manager, 
startedAt }); - expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 300_000 }); - vi.useRealTimers(); + }); + expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 300_000 }); + }); }); it("uses startup grace before marking disconnected channels not ready", () => { - vi.useFakeTimers(); - vi.setSystemTime(new Date("2026-03-06T12:00:00Z")); - const startedAt = Date.now() - 30_000; - const manager = createManager( - snapshotWith({ - discord: { - running: true, - connected: false, - enabled: true, - configured: true, - lastStartAt: startedAt, + withReadinessClock(() => { + const { readiness } = createReadinessHarness({ + startedAgoMs: 30_000, + accounts: { + discord: { + running: true, + connected: false, + enabled: true, + configured: true, + lastStartAt: Date.now() - 30_000, + }, }, - }), - ); - - const readiness = createReadinessChecker({ channelManager: manager, startedAt }); - expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 30_000 }); - vi.useRealTimers(); + }); + expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 30_000 }); + }); }); it("reports disconnected managed channels after startup grace", () => { - vi.useFakeTimers(); - vi.setSystemTime(new Date("2026-03-06T12:00:00Z")); - const startedAt = Date.now() - 5 * 60_000; - const manager = createManager( - snapshotWith({ - discord: { - running: true, - connected: false, - enabled: true, - configured: true, - lastStartAt: startedAt, + withReadinessClock(() => { + const { readiness } = createReadinessHarness({ + startedAgoMs: 5 * 60_000, + accounts: { + discord: { + running: true, + connected: false, + enabled: true, + configured: true, + lastStartAt: Date.now() - 5 * 60_000, + }, }, - }), - ); - - const readiness = createReadinessChecker({ channelManager: manager, startedAt }); - expect(readiness()).toEqual({ ready: false, failing: ["discord"], uptimeMs: 300_000 }); - vi.useRealTimers(); + }); + expect(readiness()).toEqual({ ready: false, failing: 
["discord"], uptimeMs: 300_000 }); + }); }); it("keeps restart-pending channels ready during reconnect backoff", () => { - vi.useFakeTimers(); - vi.setSystemTime(new Date("2026-03-06T12:00:00Z")); - const startedAt = Date.now() - 5 * 60_000; - const manager = createManager( - snapshotWith({ - discord: { - running: false, - restartPending: true, - reconnectAttempts: 3, - enabled: true, - configured: true, - lastStartAt: startedAt - 30_000, - lastStopAt: Date.now() - 5_000, + withReadinessClock(() => { + const startedAt = Date.now() - 5 * 60_000; + const { readiness } = createReadinessHarness({ + startedAgoMs: 5 * 60_000, + accounts: { + discord: { + running: false, + restartPending: true, + reconnectAttempts: 3, + enabled: true, + configured: true, + lastStartAt: startedAt - 30_000, + lastStopAt: Date.now() - 5_000, + }, }, - }), - ); - - const readiness = createReadinessChecker({ channelManager: manager, startedAt }); - expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 300_000 }); - vi.useRealTimers(); + }); + expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 300_000 }); + }); }); it("treats stale-socket channels as ready to avoid pulling healthy idle pods", () => { - vi.useFakeTimers(); - vi.setSystemTime(new Date("2026-03-06T12:00:00Z")); - const startedAt = Date.now() - 31 * 60_000; - const manager = createManager( - snapshotWith({ - discord: { - running: true, - connected: true, - enabled: true, - configured: true, - lastStartAt: startedAt, - lastEventAt: Date.now() - 31 * 60_000, + withReadinessClock(() => { + const startedAt = Date.now() - 31 * 60_000; + const { readiness } = createReadinessHarness({ + startedAgoMs: 31 * 60_000, + accounts: { + discord: { + running: true, + connected: true, + enabled: true, + configured: true, + lastStartAt: startedAt, + lastEventAt: Date.now() - 31 * 60_000, + }, }, - }), - ); - - const readiness = createReadinessChecker({ channelManager: manager, startedAt }); - expect(readiness()).toEqual({ 
ready: true, failing: [], uptimeMs: 1_860_000 }); - vi.useRealTimers(); + }); + expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 1_860_000 }); + }); }); it("keeps telegram long-polling channels ready without stale-socket classification", () => { - vi.useFakeTimers(); - vi.setSystemTime(new Date("2026-03-06T12:00:00Z")); - const startedAt = Date.now() - 31 * 60_000; - const manager = createManager( - snapshotWith({ - telegram: { - running: true, - connected: true, - enabled: true, - configured: true, - lastStartAt: startedAt, - lastEventAt: null, + withReadinessClock(() => { + const startedAt = Date.now() - 31 * 60_000; + const { readiness } = createReadinessHarness({ + startedAgoMs: 31 * 60_000, + accounts: { + telegram: { + running: true, + connected: true, + enabled: true, + configured: true, + lastStartAt: startedAt, + lastEventAt: null, + }, }, - }), - ); - - const readiness = createReadinessChecker({ channelManager: manager, startedAt }); - expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 1_860_000 }); - vi.useRealTimers(); + }); + expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 1_860_000 }); + }); }); it("caches readiness snapshots briefly to keep repeated probes cheap", () => { - vi.useFakeTimers(); - vi.setSystemTime(new Date("2026-03-06T12:00:00Z")); - const startedAt = Date.now() - 5 * 60_000; - const manager = createHealthyDiscordManager(startedAt, Date.now() - 1_000); + withReadinessClock(() => { + const { manager, readiness } = createReadinessHarness({ + startedAgoMs: 5 * 60_000, + accounts: { + discord: { + running: true, + connected: true, + enabled: true, + configured: true, + lastStartAt: Date.now() - 5 * 60_000, + lastEventAt: Date.now() - 1_000, + }, + }, + cacheTtlMs: 1_000, + }); + expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 300_000 }); + vi.advanceTimersByTime(500); + expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 300_500 }); + 
expect(manager.getRuntimeSnapshot).toHaveBeenCalledTimes(1); - const readiness = createReadinessChecker({ - channelManager: manager, - startedAt, - cacheTtlMs: 1_000, + vi.advanceTimersByTime(600); + expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 301_100 }); + expect(manager.getRuntimeSnapshot).toHaveBeenCalledTimes(2); }); - expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 300_000 }); - vi.advanceTimersByTime(500); - expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 300_500 }); - expect(manager.getRuntimeSnapshot).toHaveBeenCalledTimes(1); - - vi.advanceTimersByTime(600); - expect(readiness()).toEqual({ ready: true, failing: [], uptimeMs: 301_100 }); - expect(manager.getRuntimeSnapshot).toHaveBeenCalledTimes(2); - vi.useRealTimers(); }); }); diff --git a/src/gateway/session-reset-service.ts b/src/gateway/session-reset-service.ts index 15b9a0aa37f..b0a5b0a54f0 100644 --- a/src/gateway/session-reset-service.ts +++ b/src/gateway/session-reset-service.ts @@ -25,38 +25,13 @@ import { ErrorCodes, errorShape } from "./protocol/index.js"; import { archiveSessionTranscripts, loadSessionEntry, - pruneLegacyStoreKeys, + migrateAndPruneGatewaySessionStoreKey, resolveGatewaySessionStoreTarget, resolveSessionModelRef, } from "./session-utils.js"; const ACP_RUNTIME_CLEANUP_TIMEOUT_MS = 15_000; -function migrateAndPruneSessionStoreKey(params: { - cfg: ReturnType; - key: string; - store: Record; -}) { - const target = resolveGatewaySessionStoreTarget({ - cfg: params.cfg, - key: params.key, - store: params.store, - }); - const primaryKey = target.canonicalKey; - if (!params.store[primaryKey]) { - const existingKey = target.storeKeys.find((candidate) => Boolean(params.store[candidate])); - if (existingKey) { - params.store[primaryKey] = params.store[existingKey]; - } - } - pruneLegacyStoreKeys({ - store: params.store, - canonicalKey: primaryKey, - candidates: target.storeKeys, - }); - return { target, primaryKey, entry: 
params.store[primaryKey] }; -} - function stripRuntimeModelState(entry?: SessionEntry): SessionEntry | undefined { if (!entry) { return entry; @@ -311,7 +286,11 @@ export async function performGatewaySessionReset(params: { let oldSessionId: string | undefined; let oldSessionFile: string | undefined; const next = await updateSessionStore(storePath, (store) => { - const { primaryKey } = migrateAndPruneSessionStoreKey({ cfg, key: params.key, store }); + const { primaryKey } = migrateAndPruneGatewaySessionStoreKey({ + cfg, + key: params.key, + store, + }); const currentEntry = store[primaryKey]; const resetEntry = stripRuntimeModelState(currentEntry); const parsed = parseAgentSessionKey(primaryKey); diff --git a/src/gateway/session-utils.ts b/src/gateway/session-utils.ts index 591799879b9..00a2cb7747e 100644 --- a/src/gateway/session-utils.ts +++ b/src/gateway/session-utils.ts @@ -263,6 +263,31 @@ export function pruneLegacyStoreKeys(params: { } } +export function migrateAndPruneGatewaySessionStoreKey(params: { + cfg: ReturnType; + key: string; + store: Record; +}) { + const target = resolveGatewaySessionStoreTarget({ + cfg: params.cfg, + key: params.key, + store: params.store, + }); + const primaryKey = target.canonicalKey; + if (!params.store[primaryKey]) { + const existingKey = target.storeKeys.find((candidate) => Boolean(params.store[candidate])); + if (existingKey) { + params.store[primaryKey] = params.store[existingKey]; + } + } + pruneLegacyStoreKeys({ + store: params.store, + canonicalKey: primaryKey, + candidates: target.storeKeys, + }); + return { target, primaryKey, entry: params.store[primaryKey] }; +} + export function classifySessionKey(key: string, entry?: SessionEntry): GatewaySessionRow["kind"] { if (key === "global") { return "global"; diff --git a/src/gateway/sessions-resolve.ts b/src/gateway/sessions-resolve.ts index 21b6779573c..47ca47b86e3 100644 --- a/src/gateway/sessions-resolve.ts +++ b/src/gateway/sessions-resolve.ts @@ -10,7 +10,7 @@ import { 
import { listSessionsFromStore, loadCombinedSessionStoreForGateway, - pruneLegacyStoreKeys, + migrateAndPruneGatewaySessionStoreKey, resolveGatewaySessionStoreTarget, } from "./session-utils.js"; @@ -58,13 +58,10 @@ export async function resolveSessionKeyFromResolveParams(params: { }; } await updateSessionStore(target.storePath, (s) => { - const liveTarget = resolveGatewaySessionStoreTarget({ cfg, key, store: s }); - const canonicalKey = liveTarget.canonicalKey; - // Migrate the first legacy entry to the canonical key. - if (!s[canonicalKey] && s[legacyKey]) { - s[canonicalKey] = s[legacyKey]; + const { primaryKey } = migrateAndPruneGatewaySessionStoreKey({ cfg, key, store: s }); + if (!s[primaryKey] && s[legacyKey]) { + s[primaryKey] = s[legacyKey]; } - pruneLegacyStoreKeys({ store: s, canonicalKey, candidates: liveTarget.storeKeys }); }); return { ok: true, key: target.canonicalKey }; } diff --git a/src/gateway/startup-auth.test.ts b/src/gateway/startup-auth.test.ts index c2ad8a51915..bfd1912f28c 100644 --- a/src/gateway/startup-auth.test.ts +++ b/src/gateway/startup-auth.test.ts @@ -53,6 +53,28 @@ describe("ensureGatewayStartupAuth", () => { expect(mocks.writeConfigFile).not.toHaveBeenCalled(); } + async function expectResolvedToken(params: { + cfg: OpenClawConfig; + env: NodeJS.ProcessEnv; + expectedToken: string; + expectedConfiguredToken?: unknown; + }) { + const result = await ensureGatewayStartupAuth({ + cfg: params.cfg, + env: params.env, + persist: true, + }); + + expect(result.generatedToken).toBeUndefined(); + expect(result.persistedGeneratedToken).toBe(false); + expect(result.auth.mode).toBe("token"); + expect(result.auth.token).toBe(params.expectedToken); + if ("expectedConfiguredToken" in params) { + expect(result.cfg.gateway?.auth?.token).toEqual(params.expectedConfiguredToken); + } + expect(mocks.writeConfigFile).not.toHaveBeenCalled(); + } + it("generates and persists a token when startup auth is missing", async () => { const result = await 
ensureGatewayStartupAuth({ cfg: {}, @@ -138,7 +160,7 @@ describe("ensureGatewayStartupAuth", () => { }); it("resolves gateway.auth.token SecretRef before startup auth checks", async () => { - const result = await ensureGatewayStartupAuth({ + await expectResolvedToken({ cfg: { gateway: { auth: { @@ -155,23 +177,17 @@ describe("ensureGatewayStartupAuth", () => { env: { GW_TOKEN: "resolved-token", } as NodeJS.ProcessEnv, - persist: true, + expectedToken: "resolved-token", + expectedConfiguredToken: { + source: "env", + provider: "default", + id: "GW_TOKEN", + }, }); - - expect(result.generatedToken).toBeUndefined(); - expect(result.persistedGeneratedToken).toBe(false); - expect(result.auth.mode).toBe("token"); - expect(result.auth.token).toBe("resolved-token"); - expect(result.cfg.gateway?.auth?.token).toEqual({ - source: "env", - provider: "default", - id: "GW_TOKEN", - }); - expect(mocks.writeConfigFile).not.toHaveBeenCalled(); }); it("resolves env-template gateway.auth.token before env-token short-circuiting", async () => { - const result = await ensureGatewayStartupAuth({ + await expectResolvedToken({ cfg: { gateway: { auth: { @@ -183,19 +199,13 @@ describe("ensureGatewayStartupAuth", () => { env: { OPENCLAW_GATEWAY_TOKEN: "resolved-token", } as NodeJS.ProcessEnv, - persist: true, + expectedToken: "resolved-token", + expectedConfiguredToken: "${OPENCLAW_GATEWAY_TOKEN}", }); - - expect(result.generatedToken).toBeUndefined(); - expect(result.persistedGeneratedToken).toBe(false); - expect(result.auth.mode).toBe("token"); - expect(result.auth.token).toBe("resolved-token"); - expect(result.cfg.gateway?.auth?.token).toBe("${OPENCLAW_GATEWAY_TOKEN}"); - expect(mocks.writeConfigFile).not.toHaveBeenCalled(); }); it("uses OPENCLAW_GATEWAY_TOKEN without resolving configured token SecretRef", async () => { - const result = await ensureGatewayStartupAuth({ + await expectResolvedToken({ cfg: { gateway: { auth: { @@ -212,14 +222,8 @@ describe("ensureGatewayStartupAuth", () => { 
env: { OPENCLAW_GATEWAY_TOKEN: "token-from-env", } as NodeJS.ProcessEnv, - persist: true, + expectedToken: "token-from-env", }); - - expect(result.generatedToken).toBeUndefined(); - expect(result.persistedGeneratedToken).toBe(false); - expect(result.auth.mode).toBe("token"); - expect(result.auth.token).toBe("token-from-env"); - expect(mocks.writeConfigFile).not.toHaveBeenCalled(); }); it("fails when gateway.auth.token SecretRef is active and unresolved", async () => { diff --git a/src/gateway/ws-log.test.ts b/src/gateway/ws-log.test.ts index 5a748c38eb7..a14bca6f628 100644 --- a/src/gateway/ws-log.test.ts +++ b/src/gateway/ws-log.test.ts @@ -2,20 +2,39 @@ import { describe, expect, test } from "vitest"; import { formatForLog, shortId, summarizeAgentEventForWsLog } from "./ws-log.js"; describe("gateway ws log helpers", () => { - test("shortId compacts uuids and long strings", () => { - expect(shortId("12345678-1234-1234-1234-123456789abc")).toBe("12345678…9abc"); - expect(shortId("a".repeat(30))).toBe("aaaaaaaaaaaa…aaaa"); - expect(shortId("short")).toBe("short"); + test.each([ + { + name: "compacts uuids", + input: "12345678-1234-1234-1234-123456789abc", + expected: "12345678…9abc", + }, + { + name: "compacts long strings", + input: "a".repeat(30), + expected: "aaaaaaaaaaaa…aaaa", + }, + { + name: "trims before checking length", + input: " short ", + expected: "short", + }, + ])("shortId $name", ({ input, expected }) => { + expect(shortId(input)).toBe(expected); }); - test("formatForLog formats errors and messages", () => { - const err = new Error("boom"); - err.name = "TestError"; - expect(formatForLog(err)).toContain("TestError"); - expect(formatForLog(err)).toContain("boom"); - - const obj = { name: "Oops", message: "failed", code: "E1" }; - expect(formatForLog(obj)).toBe("Oops: failed: code=E1"); + test.each([ + { + name: "formats Error instances", + input: Object.assign(new Error("boom"), { name: "TestError" }), + expected: "TestError: boom", + }, + { + name: 
"formats message-like objects with codes", + input: { name: "Oops", message: "failed", code: "E1" }, + expected: "Oops: failed: code=E1", + }, + ])("formatForLog $name", ({ input, expected }) => { + expect(formatForLog(input)).toBe(expected); }); test("formatForLog redacts obvious secrets", () => { @@ -26,33 +45,79 @@ describe("gateway ws log helpers", () => { expect(out).toContain("…"); }); - test("summarizeAgentEventForWsLog extracts useful fields", () => { + test("summarizeAgentEventForWsLog compacts assistant payloads", () => { const summary = summarizeAgentEventForWsLog({ runId: "12345678-1234-1234-1234-123456789abc", sessionKey: "agent:main:main", stream: "assistant", seq: 2, - data: { text: "hello world", mediaUrls: ["a", "b"] }, + data: { + text: "hello\n\nworld ".repeat(20), + mediaUrls: ["a", "b"], + }, }); + expect(summary).toMatchObject({ agent: "main", run: "12345678…9abc", session: "main", stream: "assistant", aseq: 2, - text: "hello world", media: 2, }); + expect(summary.text).toBeTypeOf("string"); + expect(summary.text).not.toContain("\n"); + }); - const tool = summarizeAgentEventForWsLog({ - runId: "run-1", - stream: "tool", - data: { phase: "start", name: "fetch", toolCallId: "call-1" }, - }); - expect(tool).toMatchObject({ + test("summarizeAgentEventForWsLog includes tool metadata", () => { + expect( + summarizeAgentEventForWsLog({ + runId: "run-1", + stream: "tool", + data: { phase: "start", name: "fetch", toolCallId: "12345678-1234-1234-1234-123456789abc" }, + }), + ).toMatchObject({ + run: "run-1", stream: "tool", tool: "start:fetch", - call: "call-1", + call: "12345678…9abc", + }); + }); + + test("summarizeAgentEventForWsLog includes lifecycle errors with compact previews", () => { + const summary = summarizeAgentEventForWsLog({ + runId: "run-2", + sessionKey: "agent:main:thread-1", + stream: "lifecycle", + data: { + phase: "abort", + aborted: true, + error: "fatal ".repeat(40), + }, + }); + + expect(summary).toMatchObject({ + agent: "main", 
+ session: "thread-1", + stream: "lifecycle", + phase: "abort", + aborted: true, + }); + expect(summary.error).toBeTypeOf("string"); + expect((summary.error as string).length).toBeLessThanOrEqual(120); + }); + + test("summarizeAgentEventForWsLog preserves invalid session keys and unknown-stream reasons", () => { + expect( + summarizeAgentEventForWsLog({ + sessionKey: "bogus-session", + stream: "other", + data: { reason: "dropped" }, + }), + ).toEqual({ + session: "bogus-session", + stream: "other", + reason: "dropped", }); }); }); diff --git a/src/infra/abort-signal.test.ts b/src/infra/abort-signal.test.ts index be32e0d881a..16dae0498e5 100644 --- a/src/infra/abort-signal.test.ts +++ b/src/infra/abort-signal.test.ts @@ -26,4 +26,32 @@ describe("waitForAbortSignal", () => { await task; expect(resolved).toBe(true); }); + + it("registers and removes the abort listener exactly once", async () => { + let handler: (() => void) | undefined; + const addEventListener = ( + _type: string, + listener: () => void, + options?: AddEventListenerOptions, + ) => { + handler = listener; + expect(options).toEqual({ once: true }); + }; + const removeEventListener = (_type: string, listener: () => void) => { + expect(listener).toBe(handler); + removed += 1; + }; + let removed = 0; + + const task = waitForAbortSignal({ + aborted: false, + addEventListener, + removeEventListener, + } as unknown as AbortSignal); + + expect(handler).toBeTypeOf("function"); + handler?.(); + await expect(task).resolves.toBeUndefined(); + expect(removed).toBe(1); + }); }); diff --git a/src/infra/agent-events.test.ts b/src/infra/agent-events.test.ts index 9661ee13bfc..0079a443c7b 100644 --- a/src/infra/agent-events.test.ts +++ b/src/infra/agent-events.test.ts @@ -83,4 +83,65 @@ describe("agent-events sequencing", () => { expect(receivedSessionKey).toBeUndefined(); }); + + test("merges later run context updates into existing runs", async () => { + resetAgentRunContextForTest(); + 
registerAgentRunContext("run-ctx", { + sessionKey: "session-main", + isControlUiVisible: true, + }); + registerAgentRunContext("run-ctx", { + verboseLevel: "full", + isHeartbeat: true, + }); + + expect(getAgentRunContext("run-ctx")).toEqual({ + sessionKey: "session-main", + verboseLevel: "full", + isHeartbeat: true, + isControlUiVisible: true, + }); + }); + + test("falls back to registered sessionKey when event sessionKey is blank", async () => { + resetAgentRunContextForTest(); + registerAgentRunContext("run-ctx", { sessionKey: "session-main" }); + + let receivedSessionKey: string | undefined; + const stop = onAgentEvent((evt) => { + receivedSessionKey = evt.sessionKey; + }); + emitAgentEvent({ + runId: "run-ctx", + stream: "assistant", + data: { text: "hi" }, + sessionKey: " ", + }); + stop(); + + expect(receivedSessionKey).toBe("session-main"); + }); + + test("keeps notifying later listeners when one throws", async () => { + const seen: string[] = []; + const stopBad = onAgentEvent(() => { + throw new Error("boom"); + }); + const stopGood = onAgentEvent((evt) => { + seen.push(evt.runId); + }); + + expect(() => + emitAgentEvent({ + runId: "run-safe", + stream: "assistant", + data: { text: "hi" }, + }), + ).not.toThrow(); + + stopGood(); + stopBad(); + + expect(seen).toEqual(["run-safe"]); + }); }); diff --git a/src/infra/archive-path.test.ts b/src/infra/archive-path.test.ts index bc900c6964c..02ed7f4ff2d 100644 --- a/src/infra/archive-path.test.ts +++ b/src/infra/archive-path.test.ts @@ -1,18 +1,71 @@ import path from "node:path"; import { describe, expect, it } from "vitest"; import { + isWindowsDrivePath, + normalizeArchiveEntryPath, resolveArchiveOutputPath, stripArchivePath, validateArchiveEntryPath, } from "./archive-path.js"; describe("archive path helpers", () => { - it("uses custom escape labels in traversal errors", () => { + it.each([ + { value: "C:\\temp\\file.txt", expected: true }, + { value: "D:/temp/file.txt", expected: true }, + { value: 
"tmp/file.txt", expected: false }, + { value: "/tmp/file.txt", expected: false }, + ])("detects Windows drive paths for %j", ({ value, expected }) => { + expect(isWindowsDrivePath(value)).toBe(expected); + }); + + it.each([ + { raw: "dir\\file.txt", expected: "dir/file.txt" }, + { raw: "dir/file.txt", expected: "dir/file.txt" }, + ])("normalizes archive separators for %j", ({ raw, expected }) => { + expect(normalizeArchiveEntryPath(raw)).toBe(expected); + }); + + it.each(["", ".", "./"])("accepts empty-like entry paths: %j", (entryPath) => { + expect(() => validateArchiveEntryPath(entryPath)).not.toThrow(); + }); + + it.each([ + { + name: "uses custom escape labels in traversal errors", + entryPath: "../escape.txt", + message: "archive entry escapes targetDir: ../escape.txt", + }, + { + name: "rejects Windows drive paths", + entryPath: "C:\\temp\\file.txt", + message: "archive entry uses a drive path: C:\\temp\\file.txt", + }, + { + name: "rejects absolute paths after normalization", + entryPath: "/tmp/file.txt", + message: "archive entry is absolute: /tmp/file.txt", + }, + { + name: "rejects double-slash absolute paths after normalization", + entryPath: "\\\\server\\share.txt", + message: "archive entry is absolute: \\\\server\\share.txt", + }, + ])("$name", ({ entryPath, message }) => { expect(() => - validateArchiveEntryPath("../escape.txt", { + validateArchiveEntryPath(entryPath, { escapeLabel: "targetDir", }), - ).toThrow("archive entry escapes targetDir: ../escape.txt"); + ).toThrow(message); + }); + + it.each([ + { entryPath: "a/../escape.txt", stripComponents: 1, expected: "../escape.txt" }, + { entryPath: "a//b/file.txt", stripComponents: 1, expected: "b/file.txt" }, + { entryPath: "./", stripComponents: 0, expected: null }, + { entryPath: "a", stripComponents: 3, expected: null }, + { entryPath: "dir\\sub\\file.txt", stripComponents: 1, expected: "sub/file.txt" }, + ])("strips archive paths for %j", ({ entryPath, stripComponents, expected }) => { + 
expect(stripArchivePath(entryPath, stripComponents)).toBe(expected); }); it("preserves strip-induced traversal for follow-up validation", () => { @@ -25,22 +78,40 @@ describe("archive path helpers", () => { ).toThrow("archive entry escapes targetDir: ../escape.txt"); }); - it("keeps resolved output paths inside the root", () => { - const rootDir = path.join(path.sep, "tmp", "archive-root"); - const safe = resolveArchiveOutputPath({ - rootDir, + it.each([ + { + name: "keeps resolved output paths inside the root", relPath: "sub/file.txt", originalPath: "sub/file.txt", - }); - expect(safe).toBe(path.resolve(rootDir, "sub/file.txt")); + expected: path.resolve(path.join(path.sep, "tmp", "archive-root"), "sub/file.txt"), + }, + { + name: "rejects output paths that escape the root", + relPath: "../escape.txt", + originalPath: "../escape.txt", + escapeLabel: "targetDir", + message: "archive entry escapes targetDir: ../escape.txt", + }, + ])("$name", ({ relPath, originalPath, escapeLabel, expected, message }) => { + const rootDir = path.join(path.sep, "tmp", "archive-root"); + if (message) { + expect(() => + resolveArchiveOutputPath({ + rootDir, + relPath, + originalPath, + escapeLabel, + }), + ).toThrow(message); + return; + } - expect(() => + expect( resolveArchiveOutputPath({ rootDir, - relPath: "../escape.txt", - originalPath: "../escape.txt", - escapeLabel: "targetDir", + relPath, + originalPath, }), - ).toThrow("archive entry escapes targetDir: ../escape.txt"); + ).toBe(expected); }); }); diff --git a/src/infra/backoff.test.ts b/src/infra/backoff.test.ts new file mode 100644 index 00000000000..9181d832402 --- /dev/null +++ b/src/infra/backoff.test.ts @@ -0,0 +1,36 @@ +import { describe, expect, it, vi } from "vitest"; +import { computeBackoff, sleepWithAbort, type BackoffPolicy } from "./backoff.js"; + +describe("backoff helpers", () => { + const policy: BackoffPolicy = { + initialMs: 100, + maxMs: 250, + factor: 2, + jitter: 0.5, + }; + + it("treats attempts below 
one as the first backoff step", () => { + const randomSpy = vi.spyOn(Math, "random").mockReturnValue(0); + try { + expect(computeBackoff(policy, 0)).toBe(100); + expect(computeBackoff(policy, 1)).toBe(100); + } finally { + randomSpy.mockRestore(); + } + }); + + it("adds jitter and clamps to maxMs", () => { + const randomSpy = vi.spyOn(Math, "random").mockReturnValue(1); + try { + expect(computeBackoff(policy, 2)).toBe(250); + expect(computeBackoff({ ...policy, maxMs: 450 }, 2)).toBe(300); + } finally { + randomSpy.mockRestore(); + } + }); + + it("returns immediately for non-positive sleep durations", async () => { + await expect(sleepWithAbort(0, AbortSignal.abort())).resolves.toBeUndefined(); + await expect(sleepWithAbort(-5)).resolves.toBeUndefined(); + }); +}); diff --git a/src/infra/backup-create.test.ts b/src/infra/backup-create.test.ts new file mode 100644 index 00000000000..5d3a38bee21 --- /dev/null +++ b/src/infra/backup-create.test.ts @@ -0,0 +1,84 @@ +import { describe, expect, it } from "vitest"; +import { formatBackupCreateSummary, type BackupCreateResult } from "./backup-create.js"; + +function makeResult(overrides: Partial = {}): BackupCreateResult { + return { + createdAt: "2026-01-01T00:00:00.000Z", + archiveRoot: "openclaw-backup-2026-01-01", + archivePath: "/tmp/openclaw-backup.tar.gz", + dryRun: false, + includeWorkspace: true, + onlyConfig: false, + verified: false, + assets: [], + skipped: [], + ...overrides, + }; +} + +describe("formatBackupCreateSummary", () => { + it("formats created archives with included and skipped paths", () => { + const lines = formatBackupCreateSummary( + makeResult({ + verified: true, + assets: [ + { + kind: "state", + sourcePath: "/state", + archivePath: "archive/state", + displayPath: "~/.openclaw", + }, + ], + skipped: [ + { + kind: "workspace", + sourcePath: "/workspace", + displayPath: "~/Projects/openclaw", + reason: "covered", + coveredBy: "~/.openclaw", + }, + ], + }), + ); + + expect(lines).toEqual([ + 
"Backup archive: /tmp/openclaw-backup.tar.gz", + "Included 1 path:", + "- state: ~/.openclaw", + "Skipped 1 path:", + "- workspace: ~/Projects/openclaw (covered by ~/.openclaw)", + "Created /tmp/openclaw-backup.tar.gz", + "Archive verification: passed", + ]); + }); + + it("formats dry runs and pluralized counts", () => { + const lines = formatBackupCreateSummary( + makeResult({ + dryRun: true, + assets: [ + { + kind: "config", + sourcePath: "/config", + archivePath: "archive/config", + displayPath: "~/.openclaw/config.json", + }, + { + kind: "credentials", + sourcePath: "/oauth", + archivePath: "archive/oauth", + displayPath: "~/.openclaw/oauth", + }, + ], + }), + ); + + expect(lines).toEqual([ + "Backup archive: /tmp/openclaw-backup.tar.gz", + "Included 2 paths:", + "- config: ~/.openclaw/config.json", + "- credentials: ~/.openclaw/oauth", + "Dry run only; archive was not written.", + ]); + }); +}); diff --git a/src/infra/binaries.test.ts b/src/infra/binaries.test.ts new file mode 100644 index 00000000000..425a2696fbf --- /dev/null +++ b/src/infra/binaries.test.ts @@ -0,0 +1,38 @@ +import { describe, expect, it, vi } from "vitest"; +import type { runExec } from "../process/exec.js"; +import type { RuntimeEnv } from "../runtime.js"; +import { ensureBinary } from "./binaries.js"; + +describe("ensureBinary", () => { + it("passes through when the binary exists", async () => { + const exec: typeof runExec = vi.fn().mockResolvedValue({ + stdout: "", + stderr: "", + }); + const runtime: RuntimeEnv = { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(), + }; + + await ensureBinary("node", exec, runtime); + + expect(exec).toHaveBeenCalledWith("which", ["node"]); + expect(runtime.error).not.toHaveBeenCalled(); + expect(runtime.exit).not.toHaveBeenCalled(); + }); + + it("logs and exits when the binary is missing", async () => { + const exec: typeof runExec = vi.fn().mockRejectedValue(new Error("missing")); + const error = vi.fn(); + const exit = vi.fn(() => { + throw new 
Error("exit"); + }); + + await expect(ensureBinary("ghost", exec, { log: vi.fn(), error, exit })).rejects.toThrow( + "exit", + ); + expect(error).toHaveBeenCalledWith("Missing required binary: ghost. Please install it."); + expect(exit).toHaveBeenCalledWith(1); + }); +}); diff --git a/src/infra/bonjour-ciao.test.ts b/src/infra/bonjour-ciao.test.ts new file mode 100644 index 00000000000..120c46d8dce --- /dev/null +++ b/src/infra/bonjour-ciao.test.ts @@ -0,0 +1,27 @@ +import { describe, expect, it, vi } from "vitest"; + +const logDebugMock = vi.hoisted(() => vi.fn()); + +vi.mock("../logger.js", () => ({ + logDebug: (...args: unknown[]) => logDebugMock(...args), +})); + +const { ignoreCiaoCancellationRejection } = await import("./bonjour-ciao.js"); + +describe("bonjour-ciao", () => { + it("ignores and logs ciao cancellation rejections", () => { + expect( + ignoreCiaoCancellationRejection(new Error("Ciao announcement cancelled by shutdown")), + ).toBe(true); + expect(logDebugMock).toHaveBeenCalledWith( + expect.stringContaining("ignoring unhandled ciao rejection"), + ); + }); + + it("keeps unrelated rejections visible", () => { + logDebugMock.mockReset(); + + expect(ignoreCiaoCancellationRejection(new Error("boom"))).toBe(false); + expect(logDebugMock).not.toHaveBeenCalled(); + }); +}); diff --git a/src/infra/bonjour-errors.test.ts b/src/infra/bonjour-errors.test.ts new file mode 100644 index 00000000000..688335856c4 --- /dev/null +++ b/src/infra/bonjour-errors.test.ts @@ -0,0 +1,16 @@ +import { describe, expect, it } from "vitest"; +import { formatBonjourError } from "./bonjour-errors.js"; + +describe("formatBonjourError", () => { + it("formats named errors with their type prefix", () => { + const err = new Error("timed out"); + err.name = "AbortError"; + expect(formatBonjourError(err)).toBe("AbortError: timed out"); + }); + + it("falls back to plain error strings and non-error values", () => { + expect(formatBonjourError(new Error(""))).toBe("Error"); + 
expect(formatBonjourError("boom")).toBe("boom"); + expect(formatBonjourError(42)).toBe("42"); + }); +}); diff --git a/src/infra/boundary-file-read.test.ts b/src/infra/boundary-file-read.test.ts new file mode 100644 index 00000000000..2dceb0cb06a --- /dev/null +++ b/src/infra/boundary-file-read.test.ts @@ -0,0 +1,204 @@ +import path from "node:path"; +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const resolveBoundaryPathSyncMock = vi.hoisted(() => vi.fn()); +const resolveBoundaryPathMock = vi.hoisted(() => vi.fn()); +const openVerifiedFileSyncMock = vi.hoisted(() => vi.fn()); + +vi.mock("./boundary-path.js", () => ({ + resolveBoundaryPathSync: (...args: unknown[]) => resolveBoundaryPathSyncMock(...args), + resolveBoundaryPath: (...args: unknown[]) => resolveBoundaryPathMock(...args), +})); + +vi.mock("./safe-open-sync.js", () => ({ + openVerifiedFileSync: (...args: unknown[]) => openVerifiedFileSyncMock(...args), +})); + +const { canUseBoundaryFileOpen, openBoundaryFile, openBoundaryFileSync } = + await import("./boundary-file-read.js"); + +describe("boundary-file-read", () => { + beforeEach(() => { + resolveBoundaryPathSyncMock.mockReset(); + resolveBoundaryPathMock.mockReset(); + openVerifiedFileSyncMock.mockReset(); + }); + + it("recognizes the required sync fs surface", () => { + const validFs = { + openSync() {}, + closeSync() {}, + fstatSync() {}, + lstatSync() {}, + realpathSync() {}, + readFileSync() {}, + constants: {}, + }; + + expect(canUseBoundaryFileOpen(validFs as never)).toBe(true); + expect( + canUseBoundaryFileOpen({ + ...validFs, + openSync: undefined, + } as never), + ).toBe(false); + expect( + canUseBoundaryFileOpen({ + ...validFs, + constants: null, + } as never), + ).toBe(false); + }); + + it("maps sync boundary resolution into verified file opens", () => { + const stat = { size: 3 } as never; + const ioFs = { marker: "io" } as never; + const absolutePath = path.resolve("plugin.json"); + + 
resolveBoundaryPathSyncMock.mockReturnValue({ + canonicalPath: "/real/plugin.json", + rootCanonicalPath: "/real/root", + }); + openVerifiedFileSyncMock.mockReturnValue({ + ok: true, + path: "/real/plugin.json", + fd: 7, + stat, + }); + + const opened = openBoundaryFileSync({ + absolutePath: "plugin.json", + rootPath: "/workspace", + boundaryLabel: "plugin root", + ioFs, + }); + + expect(resolveBoundaryPathSyncMock).toHaveBeenCalledWith({ + absolutePath, + rootPath: "/workspace", + rootCanonicalPath: undefined, + boundaryLabel: "plugin root", + skipLexicalRootCheck: undefined, + }); + expect(openVerifiedFileSyncMock).toHaveBeenCalledWith({ + filePath: absolutePath, + resolvedPath: "/real/plugin.json", + rejectHardlinks: true, + maxBytes: undefined, + allowedType: undefined, + ioFs, + }); + expect(opened).toEqual({ + ok: true, + path: "/real/plugin.json", + fd: 7, + stat, + rootRealPath: "/real/root", + }); + }); + + it("returns validation errors when sync boundary resolution throws", () => { + const error = new Error("outside root"); + resolveBoundaryPathSyncMock.mockImplementation(() => { + throw error; + }); + + const opened = openBoundaryFileSync({ + absolutePath: "plugin.json", + rootPath: "/workspace", + boundaryLabel: "plugin root", + }); + + expect(opened).toEqual({ + ok: false, + reason: "validation", + error, + }); + expect(openVerifiedFileSyncMock).not.toHaveBeenCalled(); + }); + + it("guards against unexpected async sync-resolution results", () => { + resolveBoundaryPathSyncMock.mockReturnValue( + Promise.resolve({ + canonicalPath: "/real/plugin.json", + rootCanonicalPath: "/real/root", + }), + ); + + const opened = openBoundaryFileSync({ + absolutePath: "plugin.json", + rootPath: "/workspace", + boundaryLabel: "plugin root", + }); + + expect(opened.ok).toBe(false); + if (opened.ok) { + return; + } + expect(opened.reason).toBe("validation"); + expect(String(opened.error)).toContain("Unexpected async boundary resolution"); + }); + + it("awaits async 
boundary resolution before verifying the file", async () => { + const ioFs = { marker: "io" } as never; + const absolutePath = path.resolve("notes.txt"); + + resolveBoundaryPathMock.mockResolvedValue({ + canonicalPath: "/real/notes.txt", + rootCanonicalPath: "/real/root", + }); + openVerifiedFileSyncMock.mockReturnValue({ + ok: false, + reason: "validation", + error: new Error("blocked"), + }); + + const opened = await openBoundaryFile({ + absolutePath: "notes.txt", + rootPath: "/workspace", + boundaryLabel: "workspace", + aliasPolicy: { allowFinalSymlinkForUnlink: true }, + ioFs, + }); + + expect(resolveBoundaryPathMock).toHaveBeenCalledWith({ + absolutePath, + rootPath: "/workspace", + rootCanonicalPath: undefined, + boundaryLabel: "workspace", + policy: { allowFinalSymlinkForUnlink: true }, + skipLexicalRootCheck: undefined, + }); + expect(openVerifiedFileSyncMock).toHaveBeenCalledWith({ + filePath: absolutePath, + resolvedPath: "/real/notes.txt", + rejectHardlinks: true, + maxBytes: undefined, + allowedType: undefined, + ioFs, + }); + expect(opened).toEqual({ + ok: false, + reason: "validation", + error: expect.any(Error), + }); + }); + + it("maps async boundary resolution failures to validation errors", async () => { + const error = new Error("escaped"); + resolveBoundaryPathMock.mockRejectedValue(error); + + const opened = await openBoundaryFile({ + absolutePath: "notes.txt", + rootPath: "/workspace", + boundaryLabel: "workspace", + }); + + expect(opened).toEqual({ + ok: false, + reason: "validation", + error, + }); + expect(openVerifiedFileSyncMock).not.toHaveBeenCalled(); + }); +}); diff --git a/src/infra/boundary-path.test.ts b/src/infra/boundary-path.test.ts index d28bb6cdffa..bf7b20ffcc0 100644 --- a/src/infra/boundary-path.test.ts +++ b/src/infra/boundary-path.test.ts @@ -1,19 +1,10 @@ import fs from "node:fs/promises"; -import os from "node:os"; import path from "node:path"; import { describe, expect, it } from "vitest"; +import { withTempDir } from 
"../test-helpers/temp-dir.js"; import { resolveBoundaryPath, resolveBoundaryPathSync } from "./boundary-path.js"; import { isPathInside } from "./path-guards.js"; -async function withTempRoot(prefix: string, run: (root: string) => Promise): Promise { - const root = await fs.mkdtemp(path.join(os.tmpdir(), prefix)); - try { - return await run(root); - } finally { - await fs.rm(root, { recursive: true, force: true }); - } -} - function createSeededRandom(seed: number): () => number { let state = seed >>> 0; return () => { @@ -28,7 +19,7 @@ describe("resolveBoundaryPath", () => { return; } - await withTempRoot("openclaw-boundary-path-", async (base) => { + await withTempDir({ prefix: "openclaw-boundary-path-" }, async (base) => { const root = path.join(base, "workspace"); const targetDir = path.join(root, "target-dir"); const linkPath = path.join(root, "alias"); @@ -55,7 +46,7 @@ describe("resolveBoundaryPath", () => { return; } - await withTempRoot("openclaw-boundary-path-", async (base) => { + await withTempDir({ prefix: "openclaw-boundary-path-" }, async (base) => { const root = path.join(base, "workspace"); const outside = path.join(base, "outside"); const linkPath = path.join(root, "alias-out"); @@ -86,7 +77,7 @@ describe("resolveBoundaryPath", () => { return; } - await withTempRoot("openclaw-boundary-path-", async (base) => { + await withTempDir({ prefix: "openclaw-boundary-path-" }, async (base) => { const root = path.join(base, "workspace"); const outside = path.join(base, "outside"); const outsideFile = path.join(outside, "target.txt"); @@ -122,7 +113,7 @@ describe("resolveBoundaryPath", () => { return; } - await withTempRoot("openclaw-boundary-path-", async (base) => { + await withTempDir({ prefix: "openclaw-boundary-path-" }, async (base) => { const root = path.join(base, "workspace"); const aliasRoot = path.join(base, "workspace-alias"); const fileName = "plugin.js"; @@ -153,7 +144,7 @@ describe("resolveBoundaryPath", () => { return; } - await 
withTempRoot("openclaw-boundary-path-fuzz-", async (base) => { + await withTempDir({ prefix: "openclaw-boundary-path-fuzz-" }, async (base) => { const root = path.join(base, "workspace"); const outside = path.join(base, "outside"); const safeTarget = path.join(root, "safe-target"); diff --git a/src/infra/canvas-host-url.test.ts b/src/infra/canvas-host-url.test.ts new file mode 100644 index 00000000000..2ca7401a2bb --- /dev/null +++ b/src/infra/canvas-host-url.test.ts @@ -0,0 +1,64 @@ +import { describe, expect, it } from "vitest"; +import { resolveCanvasHostUrl } from "./canvas-host-url.js"; + +describe("resolveCanvasHostUrl", () => { + it("returns undefined when no canvas port or usable host is available", () => { + expect(resolveCanvasHostUrl({})).toBeUndefined(); + expect(resolveCanvasHostUrl({ canvasPort: 3000, hostOverride: "127.0.0.1" })).toBeUndefined(); + }); + + it("prefers non-loopback host overrides and preserves explicit ports", () => { + expect( + resolveCanvasHostUrl({ + canvasPort: 3000, + hostOverride: " canvas.openclaw.ai ", + requestHost: "gateway.local:9000", + localAddress: "192.168.1.10", + }), + ).toBe("http://canvas.openclaw.ai:3000"); + }); + + it("falls back from rejected loopback overrides to request hosts", () => { + expect( + resolveCanvasHostUrl({ + canvasPort: 3000, + hostOverride: "127.0.0.1", + requestHost: "example.com:8443", + }), + ).toBe("http://example.com:3000"); + }); + + it("maps proxied default gateway ports to request-host ports or scheme defaults", () => { + expect( + resolveCanvasHostUrl({ + canvasPort: 18789, + requestHost: "gateway.example.com:9443", + forwardedProto: "https", + }), + ).toBe("https://gateway.example.com:9443"); + expect( + resolveCanvasHostUrl({ + canvasPort: 18789, + requestHost: "gateway.example.com", + forwardedProto: ["https", "http"], + }), + ).toBe("https://gateway.example.com:443"); + expect( + resolveCanvasHostUrl({ + canvasPort: 18789, + requestHost: "gateway.example.com", + }), + 
).toBe("http://gateway.example.com:80"); + }); + + it("brackets ipv6 hosts and can fall back to local addresses", () => { + expect( + resolveCanvasHostUrl({ + canvasPort: 3000, + requestHost: "not a host", + localAddress: "2001:db8::1", + scheme: "https", + }), + ).toBe("https://[2001:db8::1]:3000"); + }); +}); diff --git a/src/infra/channel-activity.test.ts b/src/infra/channel-activity.test.ts new file mode 100644 index 00000000000..17791056f5b --- /dev/null +++ b/src/infra/channel-activity.test.ts @@ -0,0 +1,72 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { + getChannelActivity, + recordChannelActivity, + resetChannelActivityForTest, +} from "./channel-activity.js"; + +describe("channel activity", () => { + beforeEach(() => { + resetChannelActivityForTest(); + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-01-08T00:00:00Z")); + }); + + afterEach(() => { + vi.useRealTimers(); + }); + + it("uses the default account for blank inputs and falls back to null timestamps", () => { + expect(getChannelActivity({ channel: "telegram" })).toEqual({ + inboundAt: null, + outboundAt: null, + }); + + recordChannelActivity({ + channel: "telegram", + accountId: " ", + direction: "inbound", + }); + + expect(getChannelActivity({ channel: "telegram", accountId: null })).toEqual({ + inboundAt: 1767830400000, + outboundAt: null, + }); + }); + + it("keeps inbound and outbound timestamps independent and trims account ids", () => { + recordChannelActivity({ + channel: "whatsapp", + accountId: " team-a ", + direction: "inbound", + at: 10, + }); + recordChannelActivity({ + channel: "whatsapp", + accountId: "team-a", + direction: "outbound", + at: 20, + }); + recordChannelActivity({ + channel: "whatsapp", + accountId: "team-a", + direction: "inbound", + at: 30, + }); + + expect(getChannelActivity({ channel: "whatsapp", accountId: " team-a " })).toEqual({ + inboundAt: 30, + outboundAt: 20, + }); + }); + + it("reset clears previously recorded 
activity", () => { + recordChannelActivity({ channel: "line", direction: "outbound", at: 7 }); + resetChannelActivityForTest(); + + expect(getChannelActivity({ channel: "line" })).toEqual({ + inboundAt: null, + outboundAt: null, + }); + }); +}); diff --git a/src/infra/channels-status-issues.test.ts b/src/infra/channels-status-issues.test.ts new file mode 100644 index 00000000000..92b4008707c --- /dev/null +++ b/src/infra/channels-status-issues.test.ts @@ -0,0 +1,49 @@ +import { describe, expect, it, vi } from "vitest"; + +const listChannelPluginsMock = vi.hoisted(() => vi.fn()); + +vi.mock("../channels/plugins/index.js", () => ({ + listChannelPlugins: () => listChannelPluginsMock(), +})); + +import { collectChannelStatusIssues } from "./channels-status-issues.js"; + +describe("collectChannelStatusIssues", () => { + it("returns no issues when payload accounts are missing or not arrays", () => { + const collectTelegramIssues = vi.fn(() => [{ code: "telegram" }]); + listChannelPluginsMock.mockReturnValue([ + { id: "telegram", status: { collectStatusIssues: collectTelegramIssues } }, + ]); + + expect(collectChannelStatusIssues({})).toEqual([]); + expect(collectChannelStatusIssues({ channelAccounts: { telegram: { bad: true } } })).toEqual( + [], + ); + expect(collectTelegramIssues).not.toHaveBeenCalled(); + }); + + it("skips plugins without collectors and concatenates collector output in plugin order", () => { + const collectTelegramIssues = vi.fn(() => [{ code: "telegram.down" }]); + const collectSlackIssues = vi.fn(() => [{ code: "slack.warn" }, { code: "slack.auth" }]); + const telegramAccounts = [{ id: "tg-1" }]; + const slackAccounts = [{ id: "sl-1" }]; + listChannelPluginsMock.mockReturnValueOnce([ + { id: "discord" }, + { id: "telegram", status: { collectStatusIssues: collectTelegramIssues } }, + { id: "slack", status: { collectStatusIssues: collectSlackIssues } }, + ]); + + expect( + collectChannelStatusIssues({ + channelAccounts: { + discord: [{ id: "dc-1" }], 
+ telegram: telegramAccounts, + slack: slackAccounts, + }, + }), + ).toEqual([{ code: "telegram.down" }, { code: "slack.warn" }, { code: "slack.auth" }]); + + expect(collectTelegramIssues).toHaveBeenCalledWith(telegramAccounts); + expect(collectSlackIssues).toHaveBeenCalledWith(slackAccounts); + }); +}); diff --git a/src/infra/cli-root-options.test.ts b/src/infra/cli-root-options.test.ts index 514548586f7..6d7461a39e5 100644 --- a/src/infra/cli-root-options.test.ts +++ b/src/infra/cli-root-options.test.ts @@ -1,16 +1,37 @@ import { describe, expect, it } from "vitest"; -import { consumeRootOptionToken } from "./cli-root-options.js"; +import { consumeRootOptionToken, isValueToken } from "./cli-root-options.js"; -describe("consumeRootOptionToken", () => { - it("consumes boolean and inline root options", () => { - expect(consumeRootOptionToken(["--dev"], 0)).toBe(1); - expect(consumeRootOptionToken(["--profile=work"], 0)).toBe(1); - expect(consumeRootOptionToken(["--log-level=debug"], 0)).toBe(1); - }); - - it("consumes split root value option only when next token is a value", () => { - expect(consumeRootOptionToken(["--profile", "work"], 0)).toBe(2); - expect(consumeRootOptionToken(["--profile", "--no-color"], 0)).toBe(1); - expect(consumeRootOptionToken(["--profile", "--"], 0)).toBe(1); +describe("isValueToken", () => { + it.each([ + { value: "work", expected: true }, + { value: "-1", expected: true }, + { value: "-1.5", expected: true }, + { value: "-0.5", expected: true }, + { value: "--", expected: false }, + { value: "--dev", expected: false }, + { value: "-", expected: false }, + { value: "", expected: false }, + { value: undefined, expected: false }, + ])("classifies %j", ({ value, expected }) => { + expect(isValueToken(value)).toBe(expected); + }); +}); + +describe("consumeRootOptionToken", () => { + it.each([ + { args: ["--dev"], index: 0, expected: 1 }, + { args: ["--profile=work"], index: 0, expected: 1 }, + { args: ["--log-level=debug"], index: 0, 
expected: 1 }, + { args: ["--profile", "work"], index: 0, expected: 2 }, + { args: ["--profile", "-1"], index: 0, expected: 2 }, + { args: ["--log-level", "-1.5"], index: 0, expected: 2 }, + { args: ["--profile", "--no-color"], index: 0, expected: 1 }, + { args: ["--profile", "--"], index: 0, expected: 1 }, + { args: ["x", "--profile", "work"], index: 1, expected: 2 }, + { args: ["--log-level", ""], index: 0, expected: 1 }, + { args: ["--unknown"], index: 0, expected: 0 }, + { args: [], index: 0, expected: 0 }, + ])("consumes %j at %d", ({ args, index, expected }) => { + expect(consumeRootOptionToken(args, index)).toBe(expected); }); }); diff --git a/src/infra/clipboard.test.ts b/src/infra/clipboard.test.ts new file mode 100644 index 00000000000..c511d430c3b --- /dev/null +++ b/src/infra/clipboard.test.ts @@ -0,0 +1,52 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const runCommandWithTimeoutMock = vi.hoisted(() => vi.fn()); + +vi.mock("../process/exec.js", () => ({ + runCommandWithTimeout: (...args: unknown[]) => runCommandWithTimeoutMock(...args), +})); + +const { copyToClipboard } = await import("./clipboard.js"); + +describe("copyToClipboard", () => { + beforeEach(() => { + runCommandWithTimeoutMock.mockReset(); + }); + + it("returns true on the first successful clipboard command", async () => { + runCommandWithTimeoutMock.mockResolvedValueOnce({ code: 0, killed: false }); + + await expect(copyToClipboard("hello")).resolves.toBe(true); + expect(runCommandWithTimeoutMock).toHaveBeenCalledWith(["pbcopy"], { + timeoutMs: 3000, + input: "hello", + }); + expect(runCommandWithTimeoutMock).toHaveBeenCalledTimes(1); + }); + + it("falls through failed attempts until a later command succeeds", async () => { + runCommandWithTimeoutMock + .mockRejectedValueOnce(new Error("missing pbcopy")) + .mockResolvedValueOnce({ code: 1, killed: false }) + .mockResolvedValueOnce({ code: 0, killed: false }); + + await 
expect(copyToClipboard("hello")).resolves.toBe(true); + expect(runCommandWithTimeoutMock.mock.calls.map((call) => call[0])).toEqual([ + ["pbcopy"], + ["xclip", "-selection", "clipboard"], + ["wl-copy"], + ]); + }); + + it("returns false when every clipboard backend fails or is killed", async () => { + runCommandWithTimeoutMock + .mockResolvedValueOnce({ code: 0, killed: true }) + .mockRejectedValueOnce(new Error("missing xclip")) + .mockResolvedValueOnce({ code: 1, killed: false }) + .mockRejectedValueOnce(new Error("missing clip.exe")) + .mockResolvedValueOnce({ code: 2, killed: false }); + + await expect(copyToClipboard("hello")).resolves.toBe(false); + expect(runCommandWithTimeoutMock).toHaveBeenCalledTimes(5); + }); +}); diff --git a/src/infra/dedupe.test.ts b/src/infra/dedupe.test.ts new file mode 100644 index 00000000000..035324e13c9 --- /dev/null +++ b/src/infra/dedupe.test.ts @@ -0,0 +1,57 @@ +import { describe, expect, it } from "vitest"; +import { createDedupeCache } from "./dedupe.js"; + +describe("createDedupeCache", () => { + it("ignores blank cache keys", () => { + const cache = createDedupeCache({ ttlMs: 1_000, maxSize: 10 }); + + expect(cache.check("", 100)).toBe(false); + expect(cache.check(undefined, 100)).toBe(false); + expect(cache.peek(null, 100)).toBe(false); + expect(cache.size()).toBe(0); + }); + + it("keeps entries indefinitely when ttlMs is zero or negative", () => { + const zeroTtlCache = createDedupeCache({ ttlMs: 0, maxSize: 10 }); + expect(zeroTtlCache.check("a", 100)).toBe(false); + expect(zeroTtlCache.check("a", 10_000)).toBe(true); + + const negativeTtlCache = createDedupeCache({ ttlMs: -100, maxSize: 10 }); + expect(negativeTtlCache.check("b", 100)).toBe(false); + expect(negativeTtlCache.peek("b", 10_000)).toBe(true); + }); + + it("touches duplicate reads so the newest key survives max-size pruning", () => { + const cache = createDedupeCache({ ttlMs: 10_000, maxSize: 2 }); + + expect(cache.check("a", 100)).toBe(false); + 
expect(cache.check("b", 200)).toBe(false); + expect(cache.check("a", 300)).toBe(true); + expect(cache.check("c", 400)).toBe(false); + + expect(cache.peek("a", 500)).toBe(true); + expect(cache.peek("b", 500)).toBe(false); + expect(cache.peek("c", 500)).toBe(true); + }); + + it("clears itself when maxSize floors to zero", () => { + const cache = createDedupeCache({ ttlMs: 1_000, maxSize: 0.9 }); + + expect(cache.check("a", 100)).toBe(false); + expect(cache.size()).toBe(0); + expect(cache.peek("a", 200)).toBe(false); + }); + + it("supports explicit reset", () => { + const cache = createDedupeCache({ ttlMs: 1_000, maxSize: 10 }); + + expect(cache.check("a", 100)).toBe(false); + expect(cache.check("b", 200)).toBe(false); + expect(cache.size()).toBe(2); + + cache.clear(); + + expect(cache.size()).toBe(0); + expect(cache.peek("a", 300)).toBe(false); + }); +}); diff --git a/src/infra/detect-package-manager.test.ts b/src/infra/detect-package-manager.test.ts new file mode 100644 index 00000000000..57e06cf1a67 --- /dev/null +++ b/src/infra/detect-package-manager.test.ts @@ -0,0 +1,41 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { detectPackageManager } from "./detect-package-manager.js"; + +describe("detectPackageManager", () => { + it("prefers packageManager from package.json when supported", async () => { + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-detect-pm-")); + await fs.writeFile( + path.join(root, "package.json"), + JSON.stringify({ packageManager: "pnpm@10.8.1" }), + "utf8", + ); + await fs.writeFile(path.join(root, "package-lock.json"), "", "utf8"); + + await expect(detectPackageManager(root)).resolves.toBe("pnpm"); + }); + + it("falls back to lockfiles when package.json is missing or unsupported", async () => { + const bunRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-detect-pm-")); + await fs.writeFile(path.join(bunRoot, 
"bun.lockb"), "", "utf8"); + await expect(detectPackageManager(bunRoot)).resolves.toBe("bun"); + + const npmRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-detect-pm-")); + await fs.writeFile( + path.join(npmRoot, "package.json"), + JSON.stringify({ packageManager: "yarn@4.0.0" }), + "utf8", + ); + await fs.writeFile(path.join(npmRoot, "package-lock.json"), "", "utf8"); + await expect(detectPackageManager(npmRoot)).resolves.toBe("npm"); + }); + + it("returns null when no package manager markers exist", async () => { + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-detect-pm-")); + await fs.writeFile(path.join(root, "package.json"), "{not-json}", "utf8"); + + await expect(detectPackageManager(root)).resolves.toBeNull(); + }); +}); diff --git a/src/infra/device-auth-store.test.ts b/src/infra/device-auth-store.test.ts new file mode 100644 index 00000000000..82a92492015 --- /dev/null +++ b/src/infra/device-auth-store.test.ts @@ -0,0 +1,109 @@ +import fs from "node:fs/promises"; +import path from "node:path"; +import { describe, expect, it, vi } from "vitest"; +import { withTempDir } from "../test-utils/temp-dir.js"; +import { + clearDeviceAuthToken, + loadDeviceAuthToken, + storeDeviceAuthToken, +} from "./device-auth-store.js"; + +function createEnv(stateDir: string): NodeJS.ProcessEnv { + return { + OPENCLAW_STATE_DIR: stateDir, + OPENCLAW_TEST_FAST: "1", + }; +} + +function deviceAuthFile(stateDir: string): string { + return path.join(stateDir, "identity", "device-auth.json"); +} + +describe("infra/device-auth-store", () => { + it("stores and loads device auth tokens under the configured state dir", async () => { + await withTempDir("openclaw-device-auth-", async (stateDir) => { + vi.spyOn(Date, "now").mockReturnValue(1234); + + const entry = storeDeviceAuthToken({ + deviceId: "device-1", + role: " operator ", + token: "secret", + scopes: [" operator.write ", "operator.read", "operator.read"], + env: createEnv(stateDir), + }); + + 
expect(entry).toEqual({ + token: "secret", + role: "operator", + scopes: ["operator.read", "operator.write"], + updatedAtMs: 1234, + }); + expect( + loadDeviceAuthToken({ + deviceId: "device-1", + role: "operator", + env: createEnv(stateDir), + }), + ).toEqual(entry); + + const raw = await fs.readFile(deviceAuthFile(stateDir), "utf8"); + expect(raw.endsWith("\n")).toBe(true); + expect(JSON.parse(raw)).toEqual({ + version: 1, + deviceId: "device-1", + tokens: { + operator: entry, + }, + }); + }); + }); + + it("returns null for missing, invalid, or mismatched stores", async () => { + await withTempDir("openclaw-device-auth-", async (stateDir) => { + const env = createEnv(stateDir); + + expect(loadDeviceAuthToken({ deviceId: "device-1", role: "operator", env })).toBeNull(); + + await fs.mkdir(path.dirname(deviceAuthFile(stateDir)), { recursive: true }); + await fs.writeFile(deviceAuthFile(stateDir), '{"version":2,"deviceId":"device-1"}\n', "utf8"); + expect(loadDeviceAuthToken({ deviceId: "device-1", role: "operator", env })).toBeNull(); + + await fs.writeFile( + deviceAuthFile(stateDir), + '{"version":1,"deviceId":"device-2","tokens":{"operator":{"token":"x","role":"operator","scopes":[],"updatedAtMs":1}}}\n', + "utf8", + ); + expect(loadDeviceAuthToken({ deviceId: "device-1", role: "operator", env })).toBeNull(); + }); + }); + + it("clears only the requested role and leaves unrelated tokens intact", async () => { + await withTempDir("openclaw-device-auth-", async (stateDir) => { + const env = createEnv(stateDir); + + storeDeviceAuthToken({ + deviceId: "device-1", + role: "operator", + token: "operator-token", + env, + }); + storeDeviceAuthToken({ + deviceId: "device-1", + role: "node", + token: "node-token", + env, + }); + + clearDeviceAuthToken({ + deviceId: "device-1", + role: " operator ", + env, + }); + + expect(loadDeviceAuthToken({ deviceId: "device-1", role: "operator", env })).toBeNull(); + expect(loadDeviceAuthToken({ deviceId: "device-1", role: "node", env 
})).toMatchObject({ + token: "node-token", + }); + }); + }); +}); diff --git a/src/infra/device-identity.state-dir.test.ts b/src/infra/device-identity.state-dir.test.ts index 71281344819..00929c26186 100644 --- a/src/infra/device-identity.state-dir.test.ts +++ b/src/infra/device-identity.state-dir.test.ts @@ -13,4 +13,61 @@ describe("device identity state dir defaults", () => { expect(raw.deviceId).toBe(identity.deviceId); }); }); + + it("reuses the stored identity on subsequent loads", async () => { + await withStateDirEnv("openclaw-identity-state-", async ({ stateDir }) => { + const first = loadOrCreateDeviceIdentity(); + const second = loadOrCreateDeviceIdentity(); + const identityPath = path.join(stateDir, "identity", "device.json"); + const raw = JSON.parse(await fs.readFile(identityPath, "utf8")) as { + deviceId?: string; + publicKeyPem?: string; + }; + + expect(second).toEqual(first); + expect(raw.deviceId).toBe(first.deviceId); + expect(raw.publicKeyPem).toBe(first.publicKeyPem); + }); + }); + + it("repairs stored device IDs that no longer match the public key", async () => { + await withStateDirEnv("openclaw-identity-state-", async ({ stateDir }) => { + const original = loadOrCreateDeviceIdentity(); + const identityPath = path.join(stateDir, "identity", "device.json"); + const raw = JSON.parse(await fs.readFile(identityPath, "utf8")) as Record; + + await fs.writeFile( + identityPath, + `${JSON.stringify({ ...raw, deviceId: "stale-device-id" }, null, 2)}\n`, + "utf8", + ); + + const repaired = loadOrCreateDeviceIdentity(); + const stored = JSON.parse(await fs.readFile(identityPath, "utf8")) as { deviceId?: string }; + + expect(repaired.deviceId).toBe(original.deviceId); + expect(stored.deviceId).toBe(original.deviceId); + }); + }); + + it("regenerates the identity when the stored file is invalid", async () => { + await withStateDirEnv("openclaw-identity-state-", async ({ stateDir }) => { + const identityPath = path.join(stateDir, "identity", "device.json"); 
+ await fs.mkdir(path.dirname(identityPath), { recursive: true }); + await fs.writeFile(identityPath, '{"version":1,"deviceId":"broken"}\n', "utf8"); + + const regenerated = loadOrCreateDeviceIdentity(); + const stored = JSON.parse(await fs.readFile(identityPath, "utf8")) as { + version?: number; + deviceId?: string; + publicKeyPem?: string; + privateKeyPem?: string; + }; + + expect(stored.version).toBe(1); + expect(stored.deviceId).toBe(regenerated.deviceId); + expect(stored.publicKeyPem).toBe(regenerated.publicKeyPem); + expect(stored.privateKeyPem).toBe(regenerated.privateKeyPem); + }); + }); }); diff --git a/src/infra/diagnostic-events.test.ts b/src/infra/diagnostic-events.test.ts new file mode 100644 index 00000000000..d2b2af1d04a --- /dev/null +++ b/src/infra/diagnostic-events.test.ts @@ -0,0 +1,121 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { + emitDiagnosticEvent, + isDiagnosticsEnabled, + onDiagnosticEvent, + resetDiagnosticEventsForTest, +} from "./diagnostic-events.js"; + +describe("diagnostic-events", () => { + beforeEach(() => { + resetDiagnosticEventsForTest(); + }); + + afterEach(() => { + resetDiagnosticEventsForTest(); + vi.restoreAllMocks(); + }); + + it("emits monotonic seq and timestamps to subscribers", () => { + vi.spyOn(Date, "now").mockReturnValueOnce(111).mockReturnValueOnce(222); + const events: Array<{ seq: number; ts: number; type: string }> = []; + const stop = onDiagnosticEvent((event) => { + events.push({ seq: event.seq, ts: event.ts, type: event.type }); + }); + + emitDiagnosticEvent({ + type: "model.usage", + usage: { total: 1 }, + }); + emitDiagnosticEvent({ + type: "session.state", + state: "processing", + }); + stop(); + + expect(events).toEqual([ + { seq: 1, ts: 111, type: "model.usage" }, + { seq: 2, ts: 222, type: "session.state" }, + ]); + }); + + it("isolates listener failures and logs them", () => { + const errorSpy = vi.spyOn(console, "error").mockImplementation(() => {}); + const 
seen: string[] = []; + onDiagnosticEvent(() => { + throw new Error("boom"); + }); + onDiagnosticEvent((event) => { + seen.push(event.type); + }); + + emitDiagnosticEvent({ + type: "message.queued", + source: "telegram", + }); + + expect(seen).toEqual(["message.queued"]); + expect(errorSpy).toHaveBeenCalledWith( + expect.stringContaining("listener error type=message.queued seq=1: Error: boom"), + ); + }); + + it("supports unsubscribe and full reset", () => { + const seen: string[] = []; + const stop = onDiagnosticEvent((event) => { + seen.push(event.type); + }); + + emitDiagnosticEvent({ + type: "webhook.received", + channel: "telegram", + }); + stop(); + emitDiagnosticEvent({ + type: "webhook.processed", + channel: "telegram", + }); + + expect(seen).toEqual(["webhook.received"]); + + resetDiagnosticEventsForTest(); + emitDiagnosticEvent({ + type: "webhook.error", + channel: "telegram", + error: "failed", + }); + expect(seen).toEqual(["webhook.received"]); + }); + + it("drops recursive emissions after the guard threshold", () => { + const errorSpy = vi.spyOn(console, "error").mockImplementation(() => {}); + let calls = 0; + onDiagnosticEvent(() => { + calls += 1; + emitDiagnosticEvent({ + type: "queue.lane.enqueue", + lane: "main", + queueSize: calls, + }); + }); + + emitDiagnosticEvent({ + type: "queue.lane.enqueue", + lane: "main", + queueSize: 0, + }); + + expect(calls).toBe(101); + expect(errorSpy).toHaveBeenCalledWith( + expect.stringContaining( + "recursion guard tripped at depth=101, dropping type=queue.lane.enqueue", + ), + ); + }); + + it("requires an explicit true diagnostics flag", () => { + expect(isDiagnosticsEnabled()).toBe(false); + expect(isDiagnosticsEnabled({ diagnostics: { enabled: false } } as never)).toBe(false); + expect(isDiagnosticsEnabled({ diagnostics: { enabled: true } } as never)).toBe(true); + }); +}); diff --git a/src/infra/diagnostic-flags.test.ts b/src/infra/diagnostic-flags.test.ts new file mode 100644 index 00000000000..7c4c3b0a62d 
--- /dev/null +++ b/src/infra/diagnostic-flags.test.ts @@ -0,0 +1,65 @@ +import { describe, expect, it } from "vitest"; +import type { OpenClawConfig } from "../config/config.js"; +import { + isDiagnosticFlagEnabled, + matchesDiagnosticFlag, + resolveDiagnosticFlags, +} from "./diagnostic-flags.js"; + +describe("resolveDiagnosticFlags", () => { + it("normalizes and dedupes config and env flags", () => { + const cfg = { + diagnostics: { flags: [" Telegram.Http ", "cache.*", "CACHE.*"] }, + } as OpenClawConfig; + const env = { + OPENCLAW_DIAGNOSTICS: " foo, Cache.* telegram.http ", + } as NodeJS.ProcessEnv; + + expect(resolveDiagnosticFlags(cfg, env)).toEqual(["telegram.http", "cache.*", "foo"]); + }); + + it("treats false-like env values as no extra flags", () => { + const cfg = { + diagnostics: { flags: ["telegram.http"] }, + } as OpenClawConfig; + + for (const raw of ["0", "false", "off", "none", " "]) { + expect( + resolveDiagnosticFlags(cfg, { + OPENCLAW_DIAGNOSTICS: raw, + } as NodeJS.ProcessEnv), + ).toEqual(["telegram.http"]); + } + }); +}); + +describe("matchesDiagnosticFlag", () => { + it("matches exact, namespace, prefix, and wildcard rules", () => { + expect(matchesDiagnosticFlag("telegram.http", ["telegram.http"])).toBe(true); + expect(matchesDiagnosticFlag("cache", ["cache.*"])).toBe(true); + expect(matchesDiagnosticFlag("cache.hit", ["cache.*"])).toBe(true); + expect(matchesDiagnosticFlag("tool.exec.fast", ["tool.exec*"])).toBe(true); + expect(matchesDiagnosticFlag("anything", ["all"])).toBe(true); + expect(matchesDiagnosticFlag("anything", ["*"])).toBe(true); + }); + + it("rejects blank and non-matching flags", () => { + expect(matchesDiagnosticFlag(" ", ["*"])).toBe(false); + expect(matchesDiagnosticFlag("cache.hit", ["cache.miss", "tool.*"])).toBe(false); + }); +}); + +describe("isDiagnosticFlagEnabled", () => { + it("resolves config and env together before matching", () => { + const cfg = { + diagnostics: { flags: ["gateway.*"] }, + } as 
OpenClawConfig; + const env = { + OPENCLAW_DIAGNOSTICS: "telegram.http", + } as NodeJS.ProcessEnv; + + expect(isDiagnosticFlagEnabled("gateway.ws", cfg, env)).toBe(true); + expect(isDiagnosticFlagEnabled("telegram.http", cfg, env)).toBe(true); + expect(isDiagnosticFlagEnabled("slack.http", cfg, env)).toBe(false); + }); +}); diff --git a/src/infra/env.test.ts b/src/infra/env.test.ts index 42eb0b921cf..5ee0af072fb 100644 --- a/src/infra/env.test.ts +++ b/src/infra/env.test.ts @@ -1,6 +1,17 @@ -import { describe, expect, it } from "vitest"; +import { describe, expect, it, vi } from "vitest"; import { withEnv } from "../test-utils/env.js"; -import { isTruthyEnvValue, normalizeZaiEnv } from "./env.js"; + +const loggerMocks = vi.hoisted(() => ({ + info: vi.fn(), +})); + +vi.mock("../logging/subsystem.js", () => ({ + createSubsystemLogger: () => ({ + info: loggerMocks.info, + }), +})); + +import { isTruthyEnvValue, logAcceptedEnvOption, normalizeEnv, normalizeZaiEnv } from "./env.js"; describe("normalizeZaiEnv", () => { it("copies Z_AI_API_KEY to ZAI_API_KEY when missing", () => { @@ -47,3 +58,77 @@ describe("isTruthyEnvValue", () => { expect(isTruthyEnvValue(undefined)).toBe(false); }); }); + +describe("logAcceptedEnvOption", () => { + it("logs accepted env options once with redaction and formatting", () => { + loggerMocks.info.mockClear(); + + withEnv( + { + VITEST: "", + NODE_ENV: "development", + OPENCLAW_TEST_ENV: " line one\nline two ", + }, + () => { + logAcceptedEnvOption({ + key: "OPENCLAW_TEST_ENV", + description: "test option", + redact: true, + }); + logAcceptedEnvOption({ + key: "OPENCLAW_TEST_ENV", + description: "test option", + redact: true, + }); + }, + ); + + expect(loggerMocks.info).toHaveBeenCalledTimes(1); + expect(loggerMocks.info).toHaveBeenCalledWith( + "env: OPENCLAW_TEST_ENV= (test option)", + ); + }); + + it("skips blank values and test-mode logging", () => { + loggerMocks.info.mockClear(); + + withEnv( + { + VITEST: "1", + NODE_ENV: 
"development", + OPENCLAW_BLANK_ENV: "value", + }, + () => { + logAcceptedEnvOption({ + key: "OPENCLAW_BLANK_ENV", + description: "skipped in vitest", + }); + }, + ); + + withEnv( + { + VITEST: "", + NODE_ENV: "development", + OPENCLAW_BLANK_ENV: " ", + }, + () => { + logAcceptedEnvOption({ + key: "OPENCLAW_BLANK_ENV", + description: "blank value", + }); + }, + ); + + expect(loggerMocks.info).not.toHaveBeenCalled(); + }); +}); + +describe("normalizeEnv", () => { + it("normalizes the legacy ZAI env alias", () => { + withEnv({ ZAI_API_KEY: "", Z_AI_API_KEY: "zai-legacy" }, () => { + normalizeEnv(); + expect(process.env.ZAI_API_KEY).toBe("zai-legacy"); + }); + }); +}); diff --git a/src/infra/errors.test.ts b/src/infra/errors.test.ts new file mode 100644 index 00000000000..45b6b73e395 --- /dev/null +++ b/src/infra/errors.test.ts @@ -0,0 +1,80 @@ +import { describe, expect, it } from "vitest"; +import { + collectErrorGraphCandidates, + extractErrorCode, + formatErrorMessage, + formatUncaughtError, + hasErrnoCode, + isErrno, + readErrorName, +} from "./errors.js"; + +describe("error helpers", () => { + it("extracts codes and names from string and numeric error metadata", () => { + expect(extractErrorCode({ code: "EADDRINUSE" })).toBe("EADDRINUSE"); + expect(extractErrorCode({ code: 429 })).toBe("429"); + expect(extractErrorCode({ code: false })).toBeUndefined(); + expect(extractErrorCode("boom")).toBeUndefined(); + + expect(readErrorName({ name: "AbortError" })).toBe("AbortError"); + expect(readErrorName({ name: 42 })).toBe(""); + expect(readErrorName(null)).toBe(""); + }); + + it("walks nested error graphs once in breadth-first order", () => { + const leaf = { name: "leaf" }; + const child = { name: "child" } as { + name: string; + cause?: unknown; + errors?: unknown[]; + }; + const root = { name: "root", cause: child, errors: [leaf, child] }; + child.cause = root; + + expect( + collectErrorGraphCandidates(root, (current) => [ + current.cause, + ...((current as { 
errors?: unknown[] }).errors ?? []), + ]), + ).toEqual([root, child, leaf]); + expect(collectErrorGraphCandidates(null)).toEqual([]); + }); + + it("matches errno-shaped errors by code", () => { + const err = Object.assign(new Error("busy"), { code: "EADDRINUSE" }); + expect(isErrno(err)).toBe(true); + expect(hasErrnoCode(err, "EADDRINUSE")).toBe(true); + expect(hasErrnoCode(err, "ENOENT")).toBe(false); + expect(isErrno("busy")).toBe(false); + }); + + it("formats primitives and circular objects without throwing", () => { + const circular: { self?: unknown } = {}; + circular.self = circular; + + expect(formatErrorMessage(123n)).toBe("123"); + expect(formatErrorMessage(false)).toBe("false"); + expect(formatErrorMessage(circular)).toBe("[object Object]"); + }); + + it("redacts sensitive tokens from formatted error messages", () => { + const token = "sk-abcdefghijklmnopqrstuv"; + const formatted = formatErrorMessage(new Error(`Authorization: Bearer ${token}`)); + expect(formatted).toContain("Authorization: Bearer"); + expect(formatted).not.toContain(token); + }); + + it("uses message-only formatting for INVALID_CONFIG and stack formatting otherwise", () => { + const invalidConfig = Object.assign(new Error("TOKEN=sk-abcdefghijklmnopqrstuv"), { + code: "INVALID_CONFIG", + stack: "Error: TOKEN=sk-abcdefghijklmnopqrstuv\n at ignored", + }); + expect(formatUncaughtError(invalidConfig)).not.toContain("at ignored"); + + const uncaught = new Error("boom"); + uncaught.stack = "Error: Authorization: Bearer sk-abcdefghijklmnopqrstuv\n at runTask"; + const formatted = formatUncaughtError(uncaught); + expect(formatted).toContain("at runTask"); + expect(formatted).not.toContain("sk-abcdefghijklmnopqrstuv"); + }); +}); diff --git a/src/infra/exec-allowlist-matching.test.ts b/src/infra/exec-allowlist-matching.test.ts new file mode 100644 index 00000000000..4376eefeff1 --- /dev/null +++ b/src/infra/exec-allowlist-matching.test.ts @@ -0,0 +1,60 @@ +import { describe, expect, it } from 
"vitest"; +import { matchAllowlist, type ExecAllowlistEntry } from "./exec-approvals.js"; + +describe("exec allowlist matching", () => { + const baseResolution = { + rawExecutable: "rg", + resolvedPath: "/opt/homebrew/bin/rg", + executableName: "rg", + }; + + it("handles wildcard and path matching semantics", () => { + const cases: Array<{ entries: ExecAllowlistEntry[]; expectedPattern: string | null }> = [ + { entries: [{ pattern: "RG" }], expectedPattern: null }, + { entries: [{ pattern: "/opt/**/rg" }], expectedPattern: "/opt/**/rg" }, + { entries: [{ pattern: "/opt/*/rg" }], expectedPattern: null }, + ]; + for (const testCase of cases) { + const match = matchAllowlist(testCase.entries, baseResolution); + expect(match?.pattern ?? null).toBe(testCase.expectedPattern); + } + }); + + it("matches bare wildcard patterns against arbitrary resolved executables", () => { + expect(matchAllowlist([{ pattern: "*" }], baseResolution)?.pattern).toBe("*"); + expect( + matchAllowlist([{ pattern: "*" }], { + rawExecutable: "python3", + resolvedPath: "/usr/bin/python3", + executableName: "python3", + })?.pattern, + ).toBe("*"); + }); + + it("matches absolute paths containing regex metacharacters literally", () => { + const plusPathCases = ["/usr/bin/g++", "/usr/bin/clang++"]; + for (const candidatePath of plusPathCases) { + const match = matchAllowlist([{ pattern: candidatePath }], { + rawExecutable: candidatePath, + resolvedPath: candidatePath, + executableName: candidatePath.split("/").at(-1) ?? 
candidatePath, + }); + expect(match?.pattern).toBe(candidatePath); + } + + expect( + matchAllowlist([{ pattern: "/usr/bin/*++" }], { + rawExecutable: "/usr/bin/g++", + resolvedPath: "/usr/bin/g++", + executableName: "g++", + })?.pattern, + ).toBe("/usr/bin/*++"); + expect( + matchAllowlist([{ pattern: "/opt/builds/tool[1](stable)" }], { + rawExecutable: "/opt/builds/tool[1](stable)", + resolvedPath: "/opt/builds/tool[1](stable)", + executableName: "tool[1](stable)", + })?.pattern, + ).toBe("/opt/builds/tool[1](stable)"); + }); +}); diff --git a/src/infra/exec-allowlist-pattern.test.ts b/src/infra/exec-allowlist-pattern.test.ts index 1ac34112311..f7834a4c9fc 100644 --- a/src/infra/exec-allowlist-pattern.test.ts +++ b/src/infra/exec-allowlist-pattern.test.ts @@ -2,13 +2,47 @@ import { describe, expect, it } from "vitest"; import { matchesExecAllowlistPattern } from "./exec-allowlist-pattern.js"; describe("matchesExecAllowlistPattern", () => { + it.each([ + { pattern: "", target: "/tmp/tool", expected: false }, + { pattern: " ", target: "/tmp/tool", expected: false }, + { pattern: "/tmp/tool", target: "/tmp/tool", expected: true }, + ])("handles literal patterns for %j", ({ pattern, target, expected }) => { + expect(matchesExecAllowlistPattern(pattern, target)).toBe(expected); + }); + it("does not let ? 
cross path separators", () => { expect(matchesExecAllowlistPattern("/tmp/a?b", "/tmp/a/b")).toBe(false); expect(matchesExecAllowlistPattern("/tmp/a?b", "/tmp/acb")).toBe(true); }); - it("keeps ** matching across path separators", () => { - expect(matchesExecAllowlistPattern("/tmp/**/tool", "/tmp/a/b/tool")).toBe(true); + it.each([ + { pattern: "/tmp/*/tool", target: "/tmp/a/tool", expected: true }, + { pattern: "/tmp/*/tool", target: "/tmp/a/b/tool", expected: false }, + { pattern: "/tmp/**/tool", target: "/tmp/a/b/tool", expected: true }, + ])("handles star patterns for %j", ({ pattern, target, expected }) => { + expect(matchesExecAllowlistPattern(pattern, target)).toBe(expected); + }); + + it("expands home-prefix patterns", () => { + const prevOpenClawHome = process.env.OPENCLAW_HOME; + const prevHome = process.env.HOME; + process.env.OPENCLAW_HOME = "/srv/openclaw-home"; + process.env.HOME = "/home/other"; + try { + expect(matchesExecAllowlistPattern("~/bin/tool", "/srv/openclaw-home/bin/tool")).toBe(true); + expect(matchesExecAllowlistPattern("~/bin/tool", "/home/other/bin/tool")).toBe(false); + } finally { + if (prevOpenClawHome === undefined) { + delete process.env.OPENCLAW_HOME; + } else { + process.env.OPENCLAW_HOME = prevOpenClawHome; + } + if (prevHome === undefined) { + delete process.env.HOME; + } else { + process.env.HOME = prevHome; + } + } }); it.runIf(process.platform !== "win32")("preserves case sensitivity on POSIX", () => { diff --git a/src/infra/exec-approval-command-display.test.ts b/src/infra/exec-approval-command-display.test.ts new file mode 100644 index 00000000000..9fefeec1aed --- /dev/null +++ b/src/infra/exec-approval-command-display.test.ts @@ -0,0 +1,66 @@ +import { describe, expect, it } from "vitest"; +import { + resolveExecApprovalCommandDisplay, + sanitizeExecApprovalDisplayText, +} from "./exec-approval-command-display.js"; + +describe("sanitizeExecApprovalDisplayText", () => { + it("escapes unicode format characters but leaves 
other text intact", () => { + expect(sanitizeExecApprovalDisplayText("echo hi\u200Bthere")).toBe("echo hi\\u{200B}there"); + }); +}); + +describe("resolveExecApprovalCommandDisplay", () => { + it("prefers explicit command fields and drops identical previews after trimming", () => { + expect( + resolveExecApprovalCommandDisplay({ + command: "echo hi", + commandPreview: " echo hi ", + host: "gateway", + }), + ).toEqual({ + commandText: "echo hi", + commandPreview: null, + }); + }); + + it("falls back to node systemRunPlan values and sanitizes preview text", () => { + expect( + resolveExecApprovalCommandDisplay({ + command: "", + host: "node", + systemRunPlan: { + argv: ["python3", "-c", "print(1)"], + cwd: null, + commandText: 'python3 -c "print(1)"', + commandPreview: "print\u200B(1)", + agentId: null, + sessionKey: null, + }, + }), + ).toEqual({ + commandText: 'python3 -c "print(1)"', + commandPreview: "print\\u{200B}(1)", + }); + }); + + it("ignores systemRunPlan fallback for non-node hosts", () => { + expect( + resolveExecApprovalCommandDisplay({ + command: "", + host: "sandbox", + systemRunPlan: { + argv: ["echo", "hi"], + cwd: null, + commandText: "echo hi", + commandPreview: "echo hi", + agentId: null, + sessionKey: null, + }, + }), + ).toEqual({ + commandText: "", + commandPreview: null, + }); + }); +}); diff --git a/src/infra/exec-approval-forwarder.test.ts b/src/infra/exec-approval-forwarder.test.ts index ca4d81e012e..d29856c3088 100644 --- a/src/infra/exec-approval-forwarder.test.ts +++ b/src/infra/exec-approval-forwarder.test.ts @@ -1,6 +1,3 @@ -import fs from "node:fs"; -import os from "node:os"; -import path from "node:path"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { telegramOutbound } from "../channels/plugins/outbound/telegram.js"; import type { OpenClawConfig } from "../config/config.js"; @@ -380,58 +377,6 @@ describe("exec approval forwarder", () => { }); }); - it("prefers turn-source routing over stale 
session last route", async () => { - vi.useFakeTimers(); - const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-exec-approval-forwarder-test-")); - try { - const storePath = path.join(tmpDir, "sessions.json"); - fs.writeFileSync( - storePath, - JSON.stringify({ - "agent:main:main": { - updatedAt: 1, - channel: "slack", - to: "U1", - lastChannel: "slack", - lastTo: "U1", - }, - }), - "utf-8", - ); - - const cfg = { - session: { store: storePath }, - approvals: { exec: { enabled: true, mode: "session" } }, - } as OpenClawConfig; - - const { deliver, forwarder } = createForwarder({ cfg }); - await expect( - forwarder.handleRequested({ - ...baseRequest, - request: { - ...baseRequest.request, - turnSourceChannel: "whatsapp", - turnSourceTo: "+15555550123", - turnSourceAccountId: "work", - turnSourceThreadId: "1739201675.123", - }, - }), - ).resolves.toBe(true); - - expect(deliver).toHaveBeenCalledTimes(1); - expect(deliver).toHaveBeenCalledWith( - expect.objectContaining({ - channel: "whatsapp", - to: "+15555550123", - accountId: "work", - threadId: "1739201675.123", - }), - ); - } finally { - fs.rmSync(tmpDir, { recursive: true, force: true }); - } - }); - it("can forward resolved notices without pending cache when request payload is present", async () => { vi.useFakeTimers(); const cfg = { diff --git a/src/infra/exec-approval-forwarder.ts b/src/infra/exec-approval-forwarder.ts index ca9abbc80b5..7a1672e3e76 100644 --- a/src/infra/exec-approval-forwarder.ts +++ b/src/infra/exec-approval-forwarder.ts @@ -1,7 +1,6 @@ import type { ReplyPayload } from "../auto-reply/types.js"; import type { OpenClawConfig } from "../config/config.js"; import { loadConfig } from "../config/config.js"; -import { loadSessionStore, resolveStorePath } from "../config/sessions.js"; import type { ExecApprovalForwardingConfig, ExecApprovalForwardTarget, @@ -18,13 +17,13 @@ import { } from "../utils/message-channel.js"; import { resolveExecApprovalCommandDisplay } from 
"./exec-approval-command-display.js"; import { buildExecApprovalPendingReplyPayload } from "./exec-approval-reply.js"; +import { resolveExecApprovalSessionTarget } from "./exec-approval-session-target.js"; import type { ExecApprovalDecision, ExecApprovalRequest, ExecApprovalResolved, } from "./exec-approvals.js"; import { deliverOutboundPayloads } from "./outbound/deliver.js"; -import { resolveSessionDeliveryTarget } from "./outbound/targets.js"; const log = createSubsystemLogger("gateway/exec-approvals"); export type { ExecApprovalRequest, ExecApprovalResolved }; @@ -281,37 +280,26 @@ function defaultResolveSessionTarget(params: { cfg: OpenClawConfig; request: ExecApprovalRequest; }): ExecApprovalForwardTarget | null { - const sessionKey = params.request.request.sessionKey?.trim(); - if (!sessionKey) { - return null; - } - const parsed = parseAgentSessionKey(sessionKey); - const agentId = parsed?.agentId ?? params.request.request.agentId ?? "main"; - const storePath = resolveStorePath(params.cfg.session?.store, { agentId }); - const store = loadSessionStore(storePath); - const entry = store[sessionKey]; - if (!entry) { - return null; - } - const target = resolveSessionDeliveryTarget({ - entry, - requestedChannel: "last", + const resolvedTarget = resolveExecApprovalSessionTarget({ + cfg: params.cfg, + request: params.request, turnSourceChannel: normalizeTurnSourceChannel(params.request.request.turnSourceChannel), turnSourceTo: params.request.request.turnSourceTo?.trim() || undefined, turnSourceAccountId: params.request.request.turnSourceAccountId?.trim() || undefined, turnSourceThreadId: params.request.request.turnSourceThreadId ?? 
undefined, }); - if (!target.channel || !target.to) { + if (!resolvedTarget?.channel || !resolvedTarget.to) { return null; } - if (!isDeliverableMessageChannel(target.channel)) { + const channel = resolvedTarget.channel; + if (!isDeliverableMessageChannel(channel)) { return null; } return { - channel: target.channel, - to: target.to, - accountId: target.accountId, - threadId: target.threadId, + channel, + to: resolvedTarget.to, + accountId: resolvedTarget.accountId, + threadId: resolvedTarget.threadId, }; } diff --git a/src/infra/exec-approval-reply.test.ts b/src/infra/exec-approval-reply.test.ts new file mode 100644 index 00000000000..c56cf996b62 --- /dev/null +++ b/src/infra/exec-approval-reply.test.ts @@ -0,0 +1,143 @@ +import { describe, expect, it } from "vitest"; +import type { ReplyPayload } from "../auto-reply/types.js"; +import { + buildExecApprovalPendingReplyPayload, + buildExecApprovalUnavailableReplyPayload, + getExecApprovalApproverDmNoticeText, + getExecApprovalReplyMetadata, +} from "./exec-approval-reply.js"; + +describe("exec approval reply helpers", () => { + it("returns the approver DM notice text", () => { + expect(getExecApprovalApproverDmNoticeText()).toBe( + "Approval required. 
I sent the allowed approvers DMs.", + ); + }); + + it("returns null for invalid reply metadata payloads", () => { + for (const payload of [ + {}, + { channelData: null }, + { channelData: [] }, + { channelData: { execApproval: null } }, + { channelData: { execApproval: [] } }, + { channelData: { execApproval: { approvalId: "req-1", approvalSlug: " " } } }, + { channelData: { execApproval: { approvalId: " ", approvalSlug: "slug-1" } } }, + ] as unknown[]) { + expect(getExecApprovalReplyMetadata(payload as ReplyPayload)).toBeNull(); + } + }); + + it("normalizes reply metadata and filters invalid decisions", () => { + expect( + getExecApprovalReplyMetadata({ + channelData: { + execApproval: { + approvalId: " req-1 ", + approvalSlug: " slug-1 ", + allowedDecisions: ["allow-once", "bad", "deny", "allow-always", 3], + }, + }, + }), + ).toEqual({ + approvalId: "req-1", + approvalSlug: "slug-1", + allowedDecisions: ["allow-once", "deny", "allow-always"], + }); + }); + + it("builds pending reply payloads with trimmed warning text and slug fallback", () => { + const payload = buildExecApprovalPendingReplyPayload({ + warningText: " Heads up. 
", + approvalId: "req-1", + approvalSlug: "slug-1", + command: "echo ok", + cwd: "/tmp/work", + host: "gateway", + nodeId: "node-1", + expiresAtMs: 2500, + nowMs: 1000, + }); + + expect(payload.channelData).toEqual({ + execApproval: { + approvalId: "req-1", + approvalSlug: "slug-1", + allowedDecisions: ["allow-once", "allow-always", "deny"], + }, + }); + expect(payload.text).toContain("Heads up."); + expect(payload.text).toContain("```txt\n/approve slug-1 allow-once\n```"); + expect(payload.text).toContain("```sh\necho ok\n```"); + expect(payload.text).toContain("Host: gateway\nNode: node-1\nCWD: /tmp/work\nExpires in: 2s"); + expect(payload.text).toContain("Full id: `req-1`"); + }); + + it("uses a longer fence for commands containing triple backticks", () => { + const payload = buildExecApprovalPendingReplyPayload({ + approvalId: "req-2", + approvalSlug: "slug-2", + approvalCommandId: " req-cmd-2 ", + command: "echo ```danger```", + host: "sandbox", + }); + + expect(payload.text).toContain("```txt\n/approve req-cmd-2 allow-once\n```"); + expect(payload.text).toContain("````sh\necho ```danger```\n````"); + expect(payload.text).not.toContain("Expires in:"); + }); + + it("clamps pending reply expiration to zero seconds", () => { + const payload = buildExecApprovalPendingReplyPayload({ + approvalId: "req-3", + approvalSlug: "slug-3", + command: "echo later", + host: "gateway", + expiresAtMs: 1000, + nowMs: 3000, + }); + + expect(payload.text).toContain("Expires in: 0s"); + }); + + it("builds unavailable payloads for approver DMs and each fallback reason", () => { + expect( + buildExecApprovalUnavailableReplyPayload({ + warningText: " Careful. ", + reason: "no-approval-route", + sentApproverDms: true, + }), + ).toEqual({ + text: "Careful.\n\nApproval required. 
I sent the allowed approvers DMs.", + }); + + const cases = [ + { + reason: "initiating-platform-disabled" as const, + channelLabel: "Slack", + expected: "Exec approval is required, but chat exec approvals are not enabled on Slack.", + }, + { + reason: "initiating-platform-unsupported" as const, + channelLabel: undefined, + expected: + "Exec approval is required, but this platform does not support chat exec approvals.", + }, + { + reason: "no-approval-route" as const, + channelLabel: undefined, + expected: + "Exec approval is required, but no interactive approval client is currently available.", + }, + ]; + + for (const testCase of cases) { + expect( + buildExecApprovalUnavailableReplyPayload({ + reason: testCase.reason, + channelLabel: testCase.channelLabel, + }).text, + ).toContain(testCase.expected); + } + }); +}); diff --git a/src/infra/exec-approval-reply.ts b/src/infra/exec-approval-reply.ts index c1a3cda4a69..3c3eaffd896 100644 --- a/src/infra/exec-approval-reply.ts +++ b/src/infra/exec-approval-reply.ts @@ -83,7 +83,7 @@ export function buildExecApprovalPendingReplyPayload( const lines: string[] = []; const warningText = params.warningText?.trim(); if (warningText) { - lines.push(warningText, ""); + lines.push(warningText); } lines.push("Approval required."); lines.push("Run:"); @@ -133,7 +133,7 @@ export function buildExecApprovalUnavailableReplyPayload( const lines: string[] = []; const warningText = params.warningText?.trim(); if (warningText) { - lines.push(warningText, ""); + lines.push(warningText); } if (params.sentApproverDms) { diff --git a/src/infra/exec-approval-session-target.test.ts b/src/infra/exec-approval-session-target.test.ts new file mode 100644 index 00000000000..aa249e02c0e --- /dev/null +++ b/src/infra/exec-approval-session-target.test.ts @@ -0,0 +1,187 @@ +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; +import { afterEach, describe, expect, it } from "vitest"; +import type { OpenClawConfig } from 
"../config/config.js"; +import type { SessionEntry } from "../config/sessions.js"; +import { resolveExecApprovalSessionTarget } from "./exec-approval-session-target.js"; +import type { ExecApprovalRequest } from "./exec-approvals.js"; + +const tempDirs: string[] = []; + +afterEach(() => { + for (const dir of tempDirs.splice(0)) { + fs.rmSync(dir, { recursive: true, force: true }); + } +}); + +const baseRequest: ExecApprovalRequest = { + id: "req-1", + request: { + command: "echo hello", + sessionKey: "agent:main:main", + }, + createdAtMs: 1000, + expiresAtMs: 6000, +}; + +function createTempDir(): string { + const dir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-exec-approval-session-target-")); + tempDirs.push(dir); + return dir; +} + +function writeStoreFile( + storePath: string, + entries: Record>, +): OpenClawConfig { + fs.mkdirSync(path.dirname(storePath), { recursive: true }); + fs.writeFileSync(storePath, JSON.stringify(entries), "utf-8"); + return { + session: { store: storePath }, + } as OpenClawConfig; +} + +describe("exec approval session target", () => { + it("returns null for blank session keys, missing entries, and unresolved targets", () => { + const tmpDir = createTempDir(); + const storePath = path.join(tmpDir, "sessions.json"); + const cfg = writeStoreFile(storePath, { + "agent:main:main": { + sessionId: "main", + updatedAt: 1, + lastChannel: "slack", + }, + }); + + const cases = [ + { + request: { + ...baseRequest, + request: { + ...baseRequest.request, + sessionKey: " ", + }, + }, + }, + { + request: { + ...baseRequest, + request: { + ...baseRequest.request, + sessionKey: "agent:main:missing", + }, + }, + }, + { + request: baseRequest, + }, + ]; + + for (const testCase of cases) { + expect( + resolveExecApprovalSessionTarget({ + cfg, + request: testCase.request, + }), + ).toBeNull(); + } + }); + + it("prefers turn-source routing over stale session delivery state", () => { + const tmpDir = createTempDir(); + const storePath = 
path.join(tmpDir, "sessions.json"); + const cfg = writeStoreFile(storePath, { + "agent:main:main": { + sessionId: "main", + updatedAt: 1, + lastChannel: "slack", + lastTo: "U1", + }, + }); + + expect( + resolveExecApprovalSessionTarget({ + cfg, + request: baseRequest, + turnSourceChannel: " whatsapp ", + turnSourceTo: " +15555550123 ", + turnSourceAccountId: " work ", + turnSourceThreadId: "1739201675.123", + }), + ).toEqual({ + channel: "whatsapp", + to: "+15555550123", + accountId: "work", + threadId: 1739201675, + }); + }); + + it("uses the parsed session-key agent id for store-path placeholders", () => { + const tmpDir = createTempDir(); + const storePath = path.join(tmpDir, "{agentId}", "sessions.json"); + const cfg = writeStoreFile(path.join(tmpDir, "helper", "sessions.json"), { + "agent:helper:main": { + sessionId: "main", + updatedAt: 1, + lastChannel: "discord", + lastTo: "channel:123", + lastAccountId: " Work ", + lastThreadId: "55", + }, + }); + cfg.session = { store: storePath }; + + expect( + resolveExecApprovalSessionTarget({ + cfg, + request: { + ...baseRequest, + request: { + ...baseRequest.request, + sessionKey: "agent:helper:main", + }, + }, + }), + ).toEqual({ + channel: "discord", + to: "channel:123", + accountId: "work", + threadId: 55, + }); + }); + + it("falls back to request agent id for legacy session keys", () => { + const tmpDir = createTempDir(); + const storePath = path.join(tmpDir, "{agentId}", "sessions.json"); + const cfg = writeStoreFile(path.join(tmpDir, "worker-1", "sessions.json"), { + "legacy-main": { + sessionId: "legacy-main", + updatedAt: 1, + lastChannel: "telegram", + lastTo: "-100123", + lastThreadId: 77, + }, + }); + cfg.session = { store: storePath }; + + expect( + resolveExecApprovalSessionTarget({ + cfg, + request: { + ...baseRequest, + request: { + ...baseRequest.request, + agentId: "Worker 1", + sessionKey: "legacy-main", + }, + }, + }), + ).toEqual({ + channel: "telegram", + to: "-100123", + accountId: undefined, + 
threadId: 77, + }); + }); +}); diff --git a/src/infra/exec-approval-session-target.ts b/src/infra/exec-approval-session-target.ts new file mode 100644 index 00000000000..71535914c38 --- /dev/null +++ b/src/infra/exec-approval-session-target.ts @@ -0,0 +1,69 @@ +import type { OpenClawConfig } from "../config/config.js"; +import { loadSessionStore, resolveStorePath } from "../config/sessions.js"; +import { parseAgentSessionKey } from "../routing/session-key.js"; +import type { ExecApprovalRequest } from "./exec-approvals.js"; +import { resolveSessionDeliveryTarget } from "./outbound/targets.js"; + +export type ExecApprovalSessionTarget = { + channel?: string; + to: string; + accountId?: string; + threadId?: number; +}; + +function normalizeOptionalString(value?: string | null): string | undefined { + const normalized = value?.trim(); + return normalized ? normalized : undefined; +} + +function normalizeOptionalThreadId(value?: string | number | null): number | undefined { + if (typeof value === "number") { + return Number.isFinite(value) ? value : undefined; + } + if (typeof value !== "string") { + return undefined; + } + const normalized = Number.parseInt(value, 10); + return Number.isFinite(normalized) ? normalized : undefined; +} + +export function resolveExecApprovalSessionTarget(params: { + cfg: OpenClawConfig; + request: ExecApprovalRequest; + turnSourceChannel?: string | null; + turnSourceTo?: string | null; + turnSourceAccountId?: string | null; + turnSourceThreadId?: string | number | null; +}): ExecApprovalSessionTarget | null { + const sessionKey = normalizeOptionalString(params.request.request.sessionKey); + if (!sessionKey) { + return null; + } + const parsed = parseAgentSessionKey(sessionKey); + const agentId = parsed?.agentId ?? params.request.request.agentId ?? 
"main"; + const storePath = resolveStorePath(params.cfg.session?.store, { agentId }); + const store = loadSessionStore(storePath); + const entry = store[sessionKey]; + if (!entry) { + return null; + } + + const target = resolveSessionDeliveryTarget({ + entry, + requestedChannel: "last", + turnSourceChannel: normalizeOptionalString(params.turnSourceChannel), + turnSourceTo: normalizeOptionalString(params.turnSourceTo), + turnSourceAccountId: normalizeOptionalString(params.turnSourceAccountId), + turnSourceThreadId: normalizeOptionalThreadId(params.turnSourceThreadId), + }); + if (!target.to) { + return null; + } + + return { + channel: normalizeOptionalString(target.channel), + to: target.to, + accountId: normalizeOptionalString(target.accountId), + threadId: normalizeOptionalThreadId(target.threadId), + }; +} diff --git a/src/infra/exec-approval-surface.test.ts b/src/infra/exec-approval-surface.test.ts new file mode 100644 index 00000000000..b263330104a --- /dev/null +++ b/src/infra/exec-approval-surface.test.ts @@ -0,0 +1,196 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const loadConfigMock = vi.hoisted(() => vi.fn()); +const listEnabledDiscordAccountsMock = vi.hoisted(() => vi.fn()); +const isDiscordExecApprovalClientEnabledMock = vi.hoisted(() => vi.fn()); +const listEnabledTelegramAccountsMock = vi.hoisted(() => vi.fn()); +const isTelegramExecApprovalClientEnabledMock = vi.hoisted(() => vi.fn()); +const normalizeMessageChannelMock = vi.hoisted(() => vi.fn()); + +vi.mock("../config/config.js", () => ({ + loadConfig: (...args: unknown[]) => loadConfigMock(...args), +})); + +vi.mock("../discord/accounts.js", () => ({ + listEnabledDiscordAccounts: (...args: unknown[]) => listEnabledDiscordAccountsMock(...args), +})); + +vi.mock("../discord/exec-approvals.js", () => ({ + isDiscordExecApprovalClientEnabled: (...args: unknown[]) => + isDiscordExecApprovalClientEnabledMock(...args), +})); + +vi.mock("../telegram/accounts.js", () => ({ + 
listEnabledTelegramAccounts: (...args: unknown[]) => listEnabledTelegramAccountsMock(...args), +})); + +vi.mock("../telegram/exec-approvals.js", () => ({ + isTelegramExecApprovalClientEnabled: (...args: unknown[]) => + isTelegramExecApprovalClientEnabledMock(...args), +})); + +vi.mock("../utils/message-channel.js", () => ({ + INTERNAL_MESSAGE_CHANNEL: "web", + normalizeMessageChannel: (...args: unknown[]) => normalizeMessageChannelMock(...args), +})); + +import { + hasConfiguredExecApprovalDmRoute, + resolveExecApprovalInitiatingSurfaceState, +} from "./exec-approval-surface.js"; + +describe("resolveExecApprovalInitiatingSurfaceState", () => { + beforeEach(() => { + loadConfigMock.mockReset(); + listEnabledDiscordAccountsMock.mockReset(); + isDiscordExecApprovalClientEnabledMock.mockReset(); + listEnabledTelegramAccountsMock.mockReset(); + isTelegramExecApprovalClientEnabledMock.mockReset(); + normalizeMessageChannelMock.mockReset(); + normalizeMessageChannelMock.mockImplementation((value?: string | null) => + typeof value === "string" ? 
value.trim().toLowerCase() : undefined, + ); + }); + + it("treats web UI, terminal UI, and missing channels as enabled", () => { + expect(resolveExecApprovalInitiatingSurfaceState({ channel: null })).toEqual({ + kind: "enabled", + channel: undefined, + channelLabel: "this platform", + }); + expect(resolveExecApprovalInitiatingSurfaceState({ channel: "tui" })).toEqual({ + kind: "enabled", + channel: "tui", + channelLabel: "terminal UI", + }); + expect(resolveExecApprovalInitiatingSurfaceState({ channel: "web" })).toEqual({ + kind: "enabled", + channel: "web", + channelLabel: "Web UI", + }); + }); + + it("uses the provided cfg for telegram and discord client enablement", () => { + isTelegramExecApprovalClientEnabledMock.mockReturnValueOnce(true); + isDiscordExecApprovalClientEnabledMock.mockReturnValueOnce(false); + const cfg = { channels: {} }; + + expect( + resolveExecApprovalInitiatingSurfaceState({ + channel: "telegram", + accountId: "main", + cfg: cfg as never, + }), + ).toEqual({ + kind: "enabled", + channel: "telegram", + channelLabel: "Telegram", + }); + expect( + resolveExecApprovalInitiatingSurfaceState({ + channel: "discord", + accountId: "main", + cfg: cfg as never, + }), + ).toEqual({ + kind: "disabled", + channel: "discord", + channelLabel: "Discord", + }); + + expect(loadConfigMock).not.toHaveBeenCalled(); + }); + + it("loads config lazily when cfg is omitted and marks unsupported channels", () => { + loadConfigMock.mockReturnValueOnce({ loaded: true }); + isTelegramExecApprovalClientEnabledMock.mockReturnValueOnce(false); + + expect( + resolveExecApprovalInitiatingSurfaceState({ + channel: "telegram", + accountId: "main", + }), + ).toEqual({ + kind: "disabled", + channel: "telegram", + channelLabel: "Telegram", + }); + expect(loadConfigMock).toHaveBeenCalledOnce(); + + expect(resolveExecApprovalInitiatingSurfaceState({ channel: "signal" })).toEqual({ + kind: "unsupported", + channel: "signal", + channelLabel: "Signal", + }); + }); +}); + 
+describe("hasConfiguredExecApprovalDmRoute", () => { + beforeEach(() => { + listEnabledDiscordAccountsMock.mockReset(); + listEnabledTelegramAccountsMock.mockReset(); + }); + + it("returns true when any enabled account routes approvals to DM or both", () => { + listEnabledDiscordAccountsMock.mockReturnValueOnce([ + { + config: { + execApprovals: { + enabled: true, + approvers: ["a"], + target: "channel", + }, + }, + }, + ]); + listEnabledTelegramAccountsMock.mockReturnValueOnce([ + { + config: { + execApprovals: { + enabled: true, + approvers: ["a"], + target: "both", + }, + }, + }, + ]); + + expect(hasConfiguredExecApprovalDmRoute({} as never)).toBe(true); + }); + + it("returns false when exec approvals are disabled or have no DM route", () => { + listEnabledDiscordAccountsMock.mockReturnValueOnce([ + { + config: { + execApprovals: { + enabled: false, + approvers: ["a"], + target: "dm", + }, + }, + }, + ]); + listEnabledTelegramAccountsMock.mockReturnValueOnce([ + { + config: { + execApprovals: { + enabled: true, + approvers: [], + target: "dm", + }, + }, + }, + { + config: { + execApprovals: { + enabled: true, + approvers: ["a"], + target: "channel", + }, + }, + }, + ]); + + expect(hasConfiguredExecApprovalDmRoute({} as never)).toBe(false); + }); +}); diff --git a/src/infra/exec-approval-surface.ts b/src/infra/exec-approval-surface.ts index bdefb933379..b20e31850b8 100644 --- a/src/infra/exec-approval-surface.ts +++ b/src/infra/exec-approval-surface.ts @@ -50,8 +50,18 @@ export function resolveExecApprovalInitiatingSurfaceState(params: { return { kind: "unsupported", channel, channelLabel }; } -export function hasConfiguredExecApprovalDmRoute(cfg: OpenClawConfig): boolean { - for (const account of listEnabledDiscordAccounts(cfg)) { +function hasExecApprovalDmRoute( + accounts: Array<{ + config: { + execApprovals?: { + enabled?: boolean; + approvers?: unknown[]; + target?: string; + }; + }; + }>, +): boolean { + for (const account of accounts) { const 
execApprovals = account.config.execApprovals; if (!execApprovals?.enabled || (execApprovals.approvers?.length ?? 0) === 0) { continue; @@ -61,17 +71,12 @@ export function hasConfiguredExecApprovalDmRoute(cfg: OpenClawConfig): boolean { return true; } } - - for (const account of listEnabledTelegramAccounts(cfg)) { - const execApprovals = account.config.execApprovals; - if (!execApprovals?.enabled || (execApprovals.approvers?.length ?? 0) === 0) { - continue; - } - const target = execApprovals.target ?? "dm"; - if (target === "dm" || target === "both") { - return true; - } - } - return false; } + +export function hasConfiguredExecApprovalDmRoute(cfg: OpenClawConfig): boolean { + return ( + hasExecApprovalDmRoute(listEnabledDiscordAccounts(cfg)) || + hasExecApprovalDmRoute(listEnabledTelegramAccounts(cfg)) + ); +} diff --git a/src/infra/exec-approvals-allow-always.test.ts b/src/infra/exec-approvals-allow-always.test.ts index 72db45a33ea..a0ba77ecb6b 100644 --- a/src/infra/exec-approvals-allow-always.test.ts +++ b/src/infra/exec-approvals-allow-always.test.ts @@ -18,6 +18,31 @@ describe("resolveAllowAlwaysPatterns", () => { return exe; } + function resolvePersistedPatterns(params: { + command: string; + dir: string; + env: Record; + safeBins: ReturnType; + }) { + const analysis = evaluateShellAllowlist({ + command: params.command, + allowlist: [], + safeBins: params.safeBins, + cwd: params.dir, + env: params.env, + platform: process.platform, + }); + return { + analysis, + persisted: resolveAllowAlwaysPatterns({ + segments: analysis.segments, + cwd: params.dir, + env: params.env, + platform: process.platform, + }), + }; + } + function expectAllowAlwaysBypassBlocked(params: { dir: string; firstCommand: string; @@ -26,19 +51,11 @@ describe("resolveAllowAlwaysPatterns", () => { persistedPattern: string; }) { const safeBins = resolveSafeBins(undefined); - const first = evaluateShellAllowlist({ + const { persisted } = resolvePersistedPatterns({ command: params.firstCommand, 
- allowlist: [], + dir: params.dir, + env: params.env, safeBins, - cwd: params.dir, - env: params.env, - platform: process.platform, - }); - const persisted = resolveAllowAlwaysPatterns({ - segments: first.segments, - cwd: params.dir, - env: params.env, - platform: process.platform, }); expect(persisted).toEqual([params.persistedPattern]); @@ -61,6 +78,43 @@ describe("resolveAllowAlwaysPatterns", () => { ).toBe(true); } + function createShellScriptFixture() { + const dir = makeTempDir(); + const scriptsDir = path.join(dir, "scripts"); + fs.mkdirSync(scriptsDir, { recursive: true }); + const script = path.join(scriptsDir, "save_crystal.sh"); + fs.writeFileSync(script, "echo ok\n"); + const env = { PATH: `${dir}${path.delimiter}${process.env.PATH ?? ""}` }; + const safeBins = resolveSafeBins(undefined); + return { dir, scriptsDir, script, env, safeBins }; + } + + function expectPersistedShellScriptMatch(params: { + command: string; + script: string; + dir: string; + env: Record; + safeBins: ReturnType; + }) { + const { persisted } = resolvePersistedPatterns({ + command: params.command, + dir: params.dir, + env: params.env, + safeBins: params.safeBins, + }); + expect(persisted).toEqual([params.script]); + + const second = evaluateShellAllowlist({ + command: params.command, + allowlist: [{ pattern: params.script }], + safeBins: params.safeBins, + cwd: params.dir, + env: params.env, + platform: process.platform, + }); + expect(second.allowlistSatisfied).toBe(true); + } + it("returns direct executable paths for non-shell segments", () => { const exe = path.join("/tmp", "openclaw-tool"); const patterns = resolveAllowAlwaysPatterns({ @@ -131,39 +185,14 @@ describe("resolveAllowAlwaysPatterns", () => { if (process.platform === "win32") { return; } - const dir = makeTempDir(); - const scriptsDir = path.join(dir, "scripts"); - fs.mkdirSync(scriptsDir, { recursive: true }); - const script = path.join(scriptsDir, "save_crystal.sh"); - fs.writeFileSync(script, "echo ok\n"); - - 
const safeBins = resolveSafeBins(undefined); - const env = { PATH: `${dir}${path.delimiter}${process.env.PATH ?? ""}` }; - const first = evaluateShellAllowlist({ + const { dir, scriptsDir, script, env, safeBins } = createShellScriptFixture(); + expectPersistedShellScriptMatch({ command: "bash scripts/save_crystal.sh", - allowlist: [], + script, + dir, + env, safeBins, - cwd: dir, - env, - platform: process.platform, }); - const persisted = resolveAllowAlwaysPatterns({ - segments: first.segments, - cwd: dir, - env, - platform: process.platform, - }); - expect(persisted).toEqual([script]); - - const second = evaluateShellAllowlist({ - command: "bash scripts/save_crystal.sh", - allowlist: [{ pattern: script }], - safeBins, - cwd: dir, - env, - platform: process.platform, - }); - expect(second.allowlistSatisfied).toBe(true); const other = path.join(scriptsDir, "other.sh"); fs.writeFileSync(other, "echo other\n"); @@ -182,51 +211,21 @@ describe("resolveAllowAlwaysPatterns", () => { if (process.platform === "win32") { return; } - const dir = makeTempDir(); - const scriptsDir = path.join(dir, "scripts"); - fs.mkdirSync(scriptsDir, { recursive: true }); - const script = path.join(scriptsDir, "save_crystal.sh"); - fs.writeFileSync(script, "echo ok\n"); - - const safeBins = resolveSafeBins(undefined); - const env = { PATH: `${dir}${path.delimiter}${process.env.PATH ?? 
""}` }; - const first = evaluateShellAllowlist({ + const { dir, script, env, safeBins } = createShellScriptFixture(); + expectPersistedShellScriptMatch({ command: "/usr/bin/nice bash scripts/save_crystal.sh", - allowlist: [], + script, + dir, + env, safeBins, - cwd: dir, - env, - platform: process.platform, }); - const persisted = resolveAllowAlwaysPatterns({ - segments: first.segments, - cwd: dir, - env, - platform: process.platform, - }); - expect(persisted).toEqual([script]); - - const second = evaluateShellAllowlist({ - command: "/usr/bin/nice bash scripts/save_crystal.sh", - allowlist: [{ pattern: script }], - safeBins, - cwd: dir, - env, - platform: process.platform, - }); - expect(second.allowlistSatisfied).toBe(true); }); it("does not treat inline shell commands as persisted script paths", () => { if (process.platform === "win32") { return; } - const dir = makeTempDir(); - const scriptsDir = path.join(dir, "scripts"); - fs.mkdirSync(scriptsDir, { recursive: true }); - const script = path.join(scriptsDir, "save_crystal.sh"); - fs.writeFileSync(script, "echo ok\n"); - const env = { PATH: `${dir}${path.delimiter}${process.env.PATH ?? ""}` }; + const { dir, script, env } = createShellScriptFixture(); expectAllowAlwaysBypassBlocked({ dir, firstCommand: "bash scripts/save_crystal.sh", @@ -240,12 +239,7 @@ describe("resolveAllowAlwaysPatterns", () => { if (process.platform === "win32") { return; } - const dir = makeTempDir(); - const scriptsDir = path.join(dir, "scripts"); - fs.mkdirSync(scriptsDir, { recursive: true }); - const script = path.join(scriptsDir, "save_crystal.sh"); - fs.writeFileSync(script, "echo ok\n"); - const env = { PATH: `${dir}${path.delimiter}${process.env.PATH ?? 
""}` }; + const { dir, script, env } = createShellScriptFixture(); expectAllowAlwaysBypassBlocked({ dir, firstCommand: "bash scripts/save_crystal.sh", diff --git a/src/infra/exec-approvals-analysis.test.ts b/src/infra/exec-approvals-analysis.test.ts new file mode 100644 index 00000000000..f1083eaa080 --- /dev/null +++ b/src/infra/exec-approvals-analysis.test.ts @@ -0,0 +1,330 @@ +import fs from "node:fs"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { evaluateShellAllowlist, normalizeSafeBins } from "./exec-approvals-allowlist.js"; +import { + analyzeArgvCommand, + analyzeShellCommand, + buildEnforcedShellCommand, + buildSafeBinsShellCommand, +} from "./exec-approvals-analysis.js"; +import { makePathEnv, makeTempDir } from "./exec-approvals-test-helpers.js"; +import type { ExecAllowlistEntry } from "./exec-approvals.js"; + +describe("exec approvals shell analysis", () => { + describe("safe shell command builder", () => { + it("quotes only safeBins segments (leaves other segments untouched)", () => { + if (process.platform === "win32") { + return; + } + + const analysis = analyzeShellCommand({ + command: "rg foo src/*.ts | head -n 5 && echo ok", + cwd: "/tmp", + env: { PATH: "/usr/bin:/bin" }, + platform: process.platform, + }); + expect(analysis.ok).toBe(true); + + const res = buildSafeBinsShellCommand({ + command: "rg foo src/*.ts | head -n 5 && echo ok", + segments: analysis.segments, + segmentSatisfiedBy: [null, "safeBins", null], + platform: process.platform, + }); + expect(res.ok).toBe(true); + expect(res.command).toContain("rg foo src/*.ts"); + expect(res.command).toMatch(/'[^']*\/head' '-n' '5'/); + }); + + it("fails closed on segment metadata mismatch", () => { + const analysis = analyzeShellCommand({ command: "echo ok" }); + expect(analysis.ok).toBe(true); + + expect( + buildSafeBinsShellCommand({ + command: "echo ok", + segments: analysis.segments, + segmentSatisfiedBy: [], + }), + ).toEqual({ ok: false, reason: 
"segment metadata mismatch" }); + }); + + it("enforces canonical planned argv for every approved segment", () => { + if (process.platform === "win32") { + return; + } + const analysis = analyzeShellCommand({ + command: "env rg -n needle", + cwd: "/tmp", + env: { PATH: "/usr/bin:/bin" }, + platform: process.platform, + }); + expect(analysis.ok).toBe(true); + const res = buildEnforcedShellCommand({ + command: "env rg -n needle", + segments: analysis.segments, + platform: process.platform, + }); + expect(res.ok).toBe(true); + expect(res.command).toMatch(/'(?:[^']*\/)?rg' '-n' 'needle'/); + expect(res.command).not.toContain("'env'"); + }); + }); + + describe("shell parsing", () => { + it("parses pipelines and chained commands", () => { + const cases = [ + { + name: "pipeline", + command: "echo ok | jq .foo", + expectedSegments: ["echo", "jq"], + }, + { + name: "chain", + command: "ls && rm -rf /", + expectedChainHeads: ["ls", "rm"], + }, + ] as const; + for (const testCase of cases) { + const res = analyzeShellCommand({ command: testCase.command }); + expect(res.ok, testCase.name).toBe(true); + if ("expectedSegments" in testCase) { + expect( + res.segments.map((seg) => seg.argv[0]), + testCase.name, + ).toEqual(testCase.expectedSegments); + } else { + expect( + res.chains?.map((chain) => chain[0]?.argv[0]), + testCase.name, + ).toEqual(testCase.expectedChainHeads); + } + } + }); + + it("parses argv commands", () => { + const res = analyzeArgvCommand({ argv: ["/bin/echo", "ok"] }); + expect(res.ok).toBe(true); + expect(res.segments[0]?.argv).toEqual(["/bin/echo", "ok"]); + }); + + it("rejects empty argv commands", () => { + expect(analyzeArgvCommand({ argv: ["", " "] })).toEqual({ + ok: false, + reason: "empty argv", + segments: [], + }); + }); + + it("rejects unsupported shell constructs", () => { + const cases: Array<{ command: string; reason: string; platform?: NodeJS.Platform }> = [ + { command: 'echo "output: $(whoami)"', reason: "unsupported shell token: $()" }, + 
{ command: 'echo "output: `id`"', reason: "unsupported shell token: `" }, + { command: "echo $(whoami)", reason: "unsupported shell token: $()" }, + { command: "cat < input.txt", reason: "unsupported shell token: <" }, + { command: "echo ok > output.txt", reason: "unsupported shell token: >" }, + { + command: "/usr/bin/echo first line\n/usr/bin/echo second line", + reason: "unsupported shell token: \n", + }, + { + command: 'echo "ok $\\\n(id -u)"', + reason: "unsupported shell token: newline", + }, + { + command: 'echo "ok $\\\r\n(id -u)"', + reason: "unsupported shell token: newline", + }, + { + command: "ping 127.0.0.1 -n 1 & whoami", + reason: "unsupported windows shell token: &", + platform: "win32", + }, + ]; + for (const testCase of cases) { + const res = analyzeShellCommand({ command: testCase.command, platform: testCase.platform }); + expect(res.ok).toBe(false); + expect(res.reason).toBe(testCase.reason); + } + }); + + it("accepts inert substitution-like syntax", () => { + const cases = ['echo "output: \\$(whoami)"', "echo 'output: $(whoami)'"]; + for (const command of cases) { + const res = analyzeShellCommand({ command }); + expect(res.ok).toBe(true); + expect(res.segments[0]?.argv[0]).toBe("echo"); + } + }); + + it("accepts safe heredoc forms", () => { + const cases: Array<{ command: string; expectedArgv: string[] }> = [ + { command: "/usr/bin/tee /tmp/file << 'EOF'\nEOF", expectedArgv: ["/usr/bin/tee"] }, + { command: "/usr/bin/tee /tmp/file < segment.argv[0])).toEqual(testCase.expectedArgv); + } + }); + + it("rejects unsafe or malformed heredoc forms", () => { + const cases: Array<{ command: string; reason: string }> = [ + { + command: "/usr/bin/cat < { + const res = analyzeShellCommand({ + command: '"C:\\Program Files\\Tool\\tool.exe" --version', + platform: "win32", + }); + expect(res.ok).toBe(true); + expect(res.segments[0]?.argv).toEqual(["C:\\Program Files\\Tool\\tool.exe", "--version"]); + }); + }); + + describe("shell allowlist (chained 
commands)", () => { + it("evaluates chained command allowlist scenarios", () => { + const cases: Array<{ + allowlist: ExecAllowlistEntry[]; + command: string; + expectedAnalysisOk: boolean; + expectedAllowlistSatisfied: boolean; + platform?: NodeJS.Platform; + }> = [ + { + allowlist: [{ pattern: "/usr/bin/obsidian-cli" }, { pattern: "/usr/bin/head" }], + command: + "/usr/bin/obsidian-cli print-default && /usr/bin/obsidian-cli search foo | /usr/bin/head", + expectedAnalysisOk: true, + expectedAllowlistSatisfied: true, + }, + { + allowlist: [{ pattern: "/usr/bin/obsidian-cli" }], + command: "/usr/bin/obsidian-cli print-default && /usr/bin/rm -rf /", + expectedAnalysisOk: true, + expectedAllowlistSatisfied: false, + }, + { + allowlist: [{ pattern: "/usr/bin/echo" }], + command: "/usr/bin/echo ok &&", + expectedAnalysisOk: false, + expectedAllowlistSatisfied: false, + }, + { + allowlist: [{ pattern: "/usr/bin/ping" }], + command: "ping 127.0.0.1 -n 1 & whoami", + expectedAnalysisOk: false, + expectedAllowlistSatisfied: false, + platform: "win32", + }, + ]; + for (const testCase of cases) { + const result = evaluateShellAllowlist({ + command: testCase.command, + allowlist: testCase.allowlist, + safeBins: new Set(), + cwd: "/tmp", + platform: testCase.platform, + }); + expect(result.analysisOk).toBe(testCase.expectedAnalysisOk); + expect(result.allowlistSatisfied).toBe(testCase.expectedAllowlistSatisfied); + } + }); + + it("respects quoted chain separators", () => { + const allowlist: ExecAllowlistEntry[] = [{ pattern: "/usr/bin/echo" }]; + const commands = ['/usr/bin/echo "foo && bar"', '/usr/bin/echo "foo\\" && bar"']; + for (const command of commands) { + const result = evaluateShellAllowlist({ + command, + allowlist, + safeBins: new Set(), + cwd: "/tmp", + }); + expect(result.analysisOk).toBe(true); + expect(result.allowlistSatisfied).toBe(true); + } + }); + + it("fails allowlist analysis for shell line continuations", () => { + const result = 
evaluateShellAllowlist({ + command: 'echo "ok $\\\n(id -u)"', + allowlist: [{ pattern: "/usr/bin/echo" }], + safeBins: new Set(), + cwd: "/tmp", + }); + expect(result.analysisOk).toBe(false); + expect(result.allowlistSatisfied).toBe(false); + }); + + it("satisfies allowlist when bare * wildcard is present", () => { + const dir = makeTempDir(); + const binPath = path.join(dir, "mybin"); + fs.writeFileSync(binPath, "#!/bin/sh\n", { mode: 0o755 }); + const env = makePathEnv(dir); + try { + const result = evaluateShellAllowlist({ + command: "mybin --flag", + allowlist: [{ pattern: "*" }], + safeBins: new Set(), + cwd: dir, + env, + }); + expect(result.analysisOk).toBe(true); + expect(result.allowlistSatisfied).toBe(true); + } finally { + fs.rmSync(dir, { recursive: true, force: true }); + } + }); + + it("normalizes safe bin names", () => { + expect([...normalizeSafeBins([" jq ", "", "JQ", " sort "])]).toEqual(["jq", "sort"]); + }); + }); +}); diff --git a/src/infra/exec-approvals-policy.test.ts b/src/infra/exec-approvals-policy.test.ts new file mode 100644 index 00000000000..b546d89d6c1 --- /dev/null +++ b/src/infra/exec-approvals-policy.test.ts @@ -0,0 +1,84 @@ +import { describe, expect, it } from "vitest"; +import { + maxAsk, + minSecurity, + normalizeExecAsk, + normalizeExecHost, + normalizeExecSecurity, + requiresExecApproval, +} from "./exec-approvals.js"; + +describe("exec approvals policy helpers", () => { + it("normalizes exec host values and rejects blanks or unknown values", () => { + expect(normalizeExecHost(" gateway ")).toBe("gateway"); + expect(normalizeExecHost("NODE")).toBe("node"); + expect(normalizeExecHost("")).toBeNull(); + expect(normalizeExecHost("ssh")).toBeNull(); + }); + + it("normalizes exec security and ask values", () => { + expect(normalizeExecSecurity(" allowlist ")).toBe("allowlist"); + expect(normalizeExecSecurity("FULL")).toBe("full"); + expect(normalizeExecSecurity("unknown")).toBeNull(); + + expect(normalizeExecAsk(" on-miss 
")).toBe("on-miss"); + expect(normalizeExecAsk("ALWAYS")).toBe("always"); + expect(normalizeExecAsk("maybe")).toBeNull(); + }); + + it("minSecurity returns the more restrictive value", () => { + expect(minSecurity("deny", "full")).toBe("deny"); + expect(minSecurity("allowlist", "full")).toBe("allowlist"); + expect(minSecurity("full", "allowlist")).toBe("allowlist"); + }); + + it("maxAsk returns the more aggressive ask mode", () => { + expect(maxAsk("off", "always")).toBe("always"); + expect(maxAsk("on-miss", "off")).toBe("on-miss"); + expect(maxAsk("always", "on-miss")).toBe("always"); + }); + + it("requiresExecApproval respects ask mode and allowlist satisfaction", () => { + const cases = [ + { + ask: "always" as const, + security: "allowlist" as const, + analysisOk: true, + allowlistSatisfied: true, + expected: true, + }, + { + ask: "off" as const, + security: "allowlist" as const, + analysisOk: true, + allowlistSatisfied: false, + expected: false, + }, + { + ask: "on-miss" as const, + security: "allowlist" as const, + analysisOk: true, + allowlistSatisfied: true, + expected: false, + }, + { + ask: "on-miss" as const, + security: "allowlist" as const, + analysisOk: false, + allowlistSatisfied: false, + expected: true, + }, + { + ask: "on-miss" as const, + security: "full" as const, + analysisOk: false, + allowlistSatisfied: false, + expected: false, + }, + ]; + + for (const testCase of cases) { + expect(requiresExecApproval(testCase)).toBe(testCase.expected); + } + }); +}); diff --git a/src/infra/exec-approvals-store.test.ts b/src/infra/exec-approvals-store.test.ts new file mode 100644 index 00000000000..d30b3263129 --- /dev/null +++ b/src/infra/exec-approvals-store.test.ts @@ -0,0 +1,235 @@ +import fs from "node:fs"; +import path from "node:path"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { makeTempDir } from "./exec-approvals-test-helpers.js"; + +const requestJsonlSocketMock = vi.hoisted(() => vi.fn()); + 
+vi.mock("./jsonl-socket.js", () => ({ + requestJsonlSocket: (...args: unknown[]) => requestJsonlSocketMock(...args), +})); + +import { + addAllowlistEntry, + ensureExecApprovals, + mergeExecApprovalsSocketDefaults, + normalizeExecApprovals, + readExecApprovalsSnapshot, + recordAllowlistUse, + requestExecApprovalViaSocket, + resolveExecApprovalsPath, + resolveExecApprovalsSocketPath, + type ExecApprovalsFile, +} from "./exec-approvals.js"; + +const tempDirs: string[] = []; +const originalOpenClawHome = process.env.OPENCLAW_HOME; + +beforeEach(() => { + requestJsonlSocketMock.mockReset(); +}); + +afterEach(() => { + vi.restoreAllMocks(); + if (originalOpenClawHome === undefined) { + delete process.env.OPENCLAW_HOME; + } else { + process.env.OPENCLAW_HOME = originalOpenClawHome; + } + for (const dir of tempDirs.splice(0)) { + fs.rmSync(dir, { recursive: true, force: true }); + } +}); + +function createHomeDir(): string { + const dir = makeTempDir(); + tempDirs.push(dir); + process.env.OPENCLAW_HOME = dir; + return dir; +} + +function approvalsFilePath(homeDir: string): string { + return path.join(homeDir, ".openclaw", "exec-approvals.json"); +} + +function readApprovalsFile(homeDir: string): ExecApprovalsFile { + return JSON.parse(fs.readFileSync(approvalsFilePath(homeDir), "utf8")) as ExecApprovalsFile; +} + +describe("exec approvals store helpers", () => { + it("expands home-prefixed default file and socket paths", () => { + const dir = createHomeDir(); + + expect(path.normalize(resolveExecApprovalsPath())).toBe( + path.normalize(path.join(dir, ".openclaw", "exec-approvals.json")), + ); + expect(path.normalize(resolveExecApprovalsSocketPath())).toBe( + path.normalize(path.join(dir, ".openclaw", "exec-approvals.sock")), + ); + }); + + it("merges socket defaults from normalized, current, and built-in fallback", () => { + const normalized = normalizeExecApprovals({ + version: 1, + agents: {}, + socket: { path: "/tmp/a.sock", token: "a" }, + }); + const current = 
normalizeExecApprovals({ + version: 1, + agents: {}, + socket: { path: "/tmp/b.sock", token: "b" }, + }); + + expect(mergeExecApprovalsSocketDefaults({ normalized, current }).socket).toEqual({ + path: "/tmp/a.sock", + token: "a", + }); + + const merged = mergeExecApprovalsSocketDefaults({ + normalized: normalizeExecApprovals({ version: 1, agents: {} }), + current, + }); + expect(merged.socket).toEqual({ + path: "/tmp/b.sock", + token: "b", + }); + + createHomeDir(); + expect( + mergeExecApprovalsSocketDefaults({ + normalized: normalizeExecApprovals({ version: 1, agents: {} }), + }).socket, + ).toEqual({ + path: resolveExecApprovalsSocketPath(), + token: "", + }); + }); + + it("returns normalized empty snapshots for missing and invalid approvals files", () => { + const dir = createHomeDir(); + + const missing = readExecApprovalsSnapshot(); + expect(missing.exists).toBe(false); + expect(missing.raw).toBeNull(); + expect(missing.file).toEqual(normalizeExecApprovals({ version: 1, agents: {} })); + expect(missing.path).toBe(approvalsFilePath(dir)); + + fs.mkdirSync(path.dirname(approvalsFilePath(dir)), { recursive: true }); + fs.writeFileSync(approvalsFilePath(dir), "{invalid", "utf8"); + + const invalid = readExecApprovalsSnapshot(); + expect(invalid.exists).toBe(true); + expect(invalid.raw).toBe("{invalid"); + expect(invalid.file).toEqual(normalizeExecApprovals({ version: 1, agents: {} })); + }); + + it("ensures approvals file with default socket path and generated token", () => { + const dir = createHomeDir(); + + const ensured = ensureExecApprovals(); + const raw = fs.readFileSync(approvalsFilePath(dir), "utf8"); + + expect(ensured.socket?.path).toBe(resolveExecApprovalsSocketPath()); + expect(ensured.socket?.token).toMatch(/^[A-Za-z0-9_-]{32}$/); + expect(raw.endsWith("\n")).toBe(true); + expect(readApprovalsFile(dir).socket).toEqual(ensured.socket); + }); + + it("adds trimmed allowlist entries once and persists generated ids", () => { + const dir = 
createHomeDir(); + vi.spyOn(Date, "now").mockReturnValue(123_456); + + const approvals = ensureExecApprovals(); + addAllowlistEntry(approvals, "worker", " /usr/bin/rg "); + addAllowlistEntry(approvals, "worker", "/usr/bin/rg"); + addAllowlistEntry(approvals, "worker", " "); + + expect(readApprovalsFile(dir).agents?.worker?.allowlist).toEqual([ + expect.objectContaining({ + pattern: "/usr/bin/rg", + lastUsedAt: 123_456, + }), + ]); + expect(readApprovalsFile(dir).agents?.worker?.allowlist?.[0]?.id).toMatch(/^[0-9a-f-]{36}$/i); + }); + + it("records allowlist usage on the matching entry and backfills missing ids", () => { + const dir = createHomeDir(); + vi.spyOn(Date, "now").mockReturnValue(999_000); + + const approvals: ExecApprovalsFile = { + version: 1, + agents: { + main: { + allowlist: [{ pattern: "/usr/bin/rg" }, { pattern: "/usr/bin/jq", id: "keep-id" }], + }, + }, + }; + fs.mkdirSync(path.dirname(approvalsFilePath(dir)), { recursive: true }); + fs.writeFileSync(approvalsFilePath(dir), JSON.stringify(approvals, null, 2), "utf8"); + + recordAllowlistUse( + approvals, + undefined, + { pattern: "/usr/bin/rg" }, + "rg needle", + "/opt/homebrew/bin/rg", + ); + + expect(readApprovalsFile(dir).agents?.main?.allowlist).toEqual([ + expect.objectContaining({ + pattern: "/usr/bin/rg", + lastUsedAt: 999_000, + lastUsedCommand: "rg needle", + lastResolvedPath: "/opt/homebrew/bin/rg", + }), + { pattern: "/usr/bin/jq", id: "keep-id" }, + ]); + expect(readApprovalsFile(dir).agents?.main?.allowlist?.[0]?.id).toMatch(/^[0-9a-f-]{36}$/i); + }); + + it("returns null when approval socket credentials are missing", async () => { + await expect( + requestExecApprovalViaSocket({ + socketPath: "", + token: "secret", + request: { command: "echo hi" }, + }), + ).resolves.toBeNull(); + await expect( + requestExecApprovalViaSocket({ + socketPath: "/tmp/socket", + token: "", + request: { command: "echo hi" }, + }), + ).resolves.toBeNull(); + 
expect(requestJsonlSocketMock).not.toHaveBeenCalled(); + }); + + it("builds approval socket payloads and accepts decision responses only", async () => { + requestJsonlSocketMock.mockImplementationOnce(async ({ payload, accept, timeoutMs }) => { + expect(timeoutMs).toBe(15_000); + const parsed = JSON.parse(payload) as { + type: string; + token: string; + id: string; + request: { command: string }; + }; + expect(parsed.type).toBe("request"); + expect(parsed.token).toBe("secret"); + expect(parsed.request).toEqual({ command: "echo hi" }); + expect(parsed.id).toMatch(/^[0-9a-f-]{36}$/i); + expect(accept({ type: "noop", decision: "allow-once" })).toBeUndefined(); + expect(accept({ type: "decision", decision: "allow-always" })).toBe("allow-always"); + return "deny"; + }); + + await expect( + requestExecApprovalViaSocket({ + socketPath: "/tmp/socket", + token: "secret", + request: { command: "echo hi" }, + }), + ).resolves.toBe("deny"); + }); +}); diff --git a/src/infra/exec-approvals.test.ts b/src/infra/exec-approvals.test.ts index 57290c07116..75cf2b115b6 100644 --- a/src/infra/exec-approvals.test.ts +++ b/src/infra/exec-approvals.test.ts @@ -1,654 +1,6 @@ -import fs from "node:fs"; -import path from "node:path"; import { describe, expect, it } from "vitest"; -import { makePathEnv, makeTempDir } from "./exec-approvals-test-helpers.js"; -import { - analyzeArgvCommand, - analyzeShellCommand, - buildEnforcedShellCommand, - buildSafeBinsShellCommand, - evaluateExecAllowlist, - evaluateShellAllowlist, - matchAllowlist, - maxAsk, - mergeExecApprovalsSocketDefaults, - minSecurity, - normalizeExecApprovals, - parseExecArgvToken, - normalizeSafeBins, - requiresExecApproval, - resolveCommandResolution, - resolveCommandResolutionFromArgv, - resolveExecApprovalsPath, - resolveExecApprovalsSocketPath, - type ExecAllowlistEntry, -} from "./exec-approvals.js"; - -function buildNestedEnvShellCommand(params: { - envExecutable: string; - depth: number; - payload: string; -}): string[] { - 
return [...Array(params.depth).fill(params.envExecutable), "/bin/sh", "-c", params.payload]; -} - -function analyzeEnvWrapperAllowlist(params: { argv: string[]; envPath: string; cwd: string }) { - const analysis = analyzeArgvCommand({ - argv: params.argv, - cwd: params.cwd, - env: makePathEnv(params.envPath), - }); - const allowlistEval = evaluateExecAllowlist({ - analysis, - allowlist: [{ pattern: params.envPath }], - safeBins: normalizeSafeBins([]), - cwd: params.cwd, - }); - return { analysis, allowlistEval }; -} - -function createPathExecutableFixture(params?: { executable?: string }): { - exeName: string; - exePath: string; - binDir: string; -} { - const dir = makeTempDir(); - const binDir = path.join(dir, "bin"); - fs.mkdirSync(binDir, { recursive: true }); - const baseName = params?.executable ?? "rg"; - const exeName = process.platform === "win32" ? `${baseName}.exe` : baseName; - const exePath = path.join(binDir, exeName); - fs.writeFileSync(exePath, ""); - fs.chmodSync(exePath, 0o755); - return { exeName, exePath, binDir }; -} - -describe("exec approvals allowlist matching", () => { - const baseResolution = { - rawExecutable: "rg", - resolvedPath: "/opt/homebrew/bin/rg", - executableName: "rg", - }; - - it("handles wildcard/path matching semantics", () => { - const cases: Array<{ entries: ExecAllowlistEntry[]; expectedPattern: string | null }> = [ - { entries: [{ pattern: "RG" }], expectedPattern: null }, - { entries: [{ pattern: "/opt/**/rg" }], expectedPattern: "/opt/**/rg" }, - { entries: [{ pattern: "/opt/*/rg" }], expectedPattern: null }, - ]; - for (const testCase of cases) { - const match = matchAllowlist(testCase.entries, baseResolution); - expect(match?.pattern ?? 
null).toBe(testCase.expectedPattern); - } - }); - - it("matches bare * wildcard pattern against any resolved path", () => { - const match = matchAllowlist([{ pattern: "*" }], baseResolution); - expect(match).not.toBeNull(); - expect(match?.pattern).toBe("*"); - }); - - it("matches bare * wildcard against arbitrary executables", () => { - const match = matchAllowlist([{ pattern: "*" }], { - rawExecutable: "python3", - resolvedPath: "/usr/bin/python3", - executableName: "python3", - }); - expect(match).not.toBeNull(); - expect(match?.pattern).toBe("*"); - }); - - it("matches absolute paths containing regex metacharacters", () => { - const plusPathCases = ["/usr/bin/g++", "/usr/bin/clang++"]; - for (const candidatePath of plusPathCases) { - const match = matchAllowlist([{ pattern: candidatePath }], { - rawExecutable: candidatePath, - resolvedPath: candidatePath, - executableName: candidatePath.split("/").at(-1) ?? candidatePath, - }); - expect(match?.pattern).toBe(candidatePath); - } - }); - - it("does not throw when wildcard globs are mixed with + in path", () => { - const match = matchAllowlist([{ pattern: "/usr/bin/*++" }], { - rawExecutable: "/usr/bin/g++", - resolvedPath: "/usr/bin/g++", - executableName: "g++", - }); - expect(match?.pattern).toBe("/usr/bin/*++"); - }); - - it("matches paths containing []() regex tokens literally", () => { - const literalPattern = "/opt/builds/tool[1](stable)"; - const match = matchAllowlist([{ pattern: literalPattern }], { - rawExecutable: literalPattern, - resolvedPath: literalPattern, - executableName: "tool[1](stable)", - }); - expect(match?.pattern).toBe(literalPattern); - }); -}); - -describe("mergeExecApprovalsSocketDefaults", () => { - it("prefers normalized socket, then current, then default path", () => { - const normalized = normalizeExecApprovals({ - version: 1, - agents: {}, - socket: { path: "/tmp/a.sock", token: "a" }, - }); - const current = normalizeExecApprovals({ - version: 1, - agents: {}, - socket: { path: 
"/tmp/b.sock", token: "b" }, - }); - const merged = mergeExecApprovalsSocketDefaults({ normalized, current }); - expect(merged.socket?.path).toBe("/tmp/a.sock"); - expect(merged.socket?.token).toBe("a"); - }); - - it("falls back to current token when missing in normalized", () => { - const normalized = normalizeExecApprovals({ version: 1, agents: {} }); - const current = normalizeExecApprovals({ - version: 1, - agents: {}, - socket: { path: "/tmp/b.sock", token: "b" }, - }); - const merged = mergeExecApprovalsSocketDefaults({ normalized, current }); - expect(merged.socket?.path).toBeTruthy(); - expect(merged.socket?.token).toBe("b"); - }); -}); - -describe("resolve exec approvals defaults", () => { - it("expands home-prefixed default file and socket paths", () => { - const dir = makeTempDir(); - const prevOpenClawHome = process.env.OPENCLAW_HOME; - try { - process.env.OPENCLAW_HOME = dir; - expect(path.normalize(resolveExecApprovalsPath())).toBe( - path.normalize(path.join(dir, ".openclaw", "exec-approvals.json")), - ); - expect(path.normalize(resolveExecApprovalsSocketPath())).toBe( - path.normalize(path.join(dir, ".openclaw", "exec-approvals.sock")), - ); - } finally { - if (prevOpenClawHome === undefined) { - delete process.env.OPENCLAW_HOME; - } else { - process.env.OPENCLAW_HOME = prevOpenClawHome; - } - } - }); -}); - -describe("exec approvals safe shell command builder", () => { - it("quotes only safeBins segments (leaves other segments untouched)", () => { - if (process.platform === "win32") { - return; - } - - const analysis = analyzeShellCommand({ - command: "rg foo src/*.ts | head -n 5 && echo ok", - cwd: "/tmp", - env: { PATH: "/usr/bin:/bin" }, - platform: process.platform, - }); - expect(analysis.ok).toBe(true); - - const res = buildSafeBinsShellCommand({ - command: "rg foo src/*.ts | head -n 5 && echo ok", - segments: analysis.segments, - segmentSatisfiedBy: [null, "safeBins", null], - platform: process.platform, - }); - expect(res.ok).toBe(true); - 
// Preserve non-safeBins segment raw (glob stays unquoted) - expect(res.command).toContain("rg foo src/*.ts"); - // SafeBins segment is fully quoted and pinned to its resolved absolute path. - expect(res.command).toMatch(/'[^']*\/head' '-n' '5'/); - }); - - it("enforces canonical planned argv for every approved segment", () => { - if (process.platform === "win32") { - return; - } - const analysis = analyzeShellCommand({ - command: "env rg -n needle", - cwd: "/tmp", - env: { PATH: "/usr/bin:/bin" }, - platform: process.platform, - }); - expect(analysis.ok).toBe(true); - const res = buildEnforcedShellCommand({ - command: "env rg -n needle", - segments: analysis.segments, - platform: process.platform, - }); - expect(res.ok).toBe(true); - expect(res.command).toMatch(/'(?:[^']*\/)?rg' '-n' 'needle'/); - expect(res.command).not.toContain("'env'"); - }); -}); - -describe("exec approvals command resolution", () => { - it("resolves PATH, relative, and quoted executables", () => { - const cases = [ - { - name: "PATH executable", - setup: () => { - const fixture = createPathExecutableFixture(); - return { - command: "rg -n foo", - cwd: undefined as string | undefined, - envPath: makePathEnv(fixture.binDir), - expectedPath: fixture.exePath, - expectedExecutableName: fixture.exeName, - }; - }, - }, - { - name: "relative executable", - setup: () => { - const dir = makeTempDir(); - const cwd = path.join(dir, "project"); - const script = path.join(cwd, "scripts", "run.sh"); - fs.mkdirSync(path.dirname(script), { recursive: true }); - fs.writeFileSync(script, ""); - fs.chmodSync(script, 0o755); - return { - command: "./scripts/run.sh --flag", - cwd, - envPath: undefined as NodeJS.ProcessEnv | undefined, - expectedPath: script, - expectedExecutableName: undefined, - }; - }, - }, - { - name: "quoted executable", - setup: () => { - const dir = makeTempDir(); - const cwd = path.join(dir, "project"); - const script = path.join(cwd, "bin", "tool"); - fs.mkdirSync(path.dirname(script), { 
recursive: true }); - fs.writeFileSync(script, ""); - fs.chmodSync(script, 0o755); - return { - command: '"./bin/tool" --version', - cwd, - envPath: undefined as NodeJS.ProcessEnv | undefined, - expectedPath: script, - expectedExecutableName: undefined, - }; - }, - }, - ] as const; - - for (const testCase of cases) { - const setup = testCase.setup(); - const res = resolveCommandResolution(setup.command, setup.cwd, setup.envPath); - expect(res?.resolvedPath, testCase.name).toBe(setup.expectedPath); - if (setup.expectedExecutableName) { - expect(res?.executableName, testCase.name).toBe(setup.expectedExecutableName); - } - } - }); - - it("unwraps transparent env wrapper argv to resolve the effective executable", () => { - const fixture = createPathExecutableFixture(); - - const resolution = resolveCommandResolutionFromArgv( - ["/usr/bin/env", "rg", "-n", "needle"], - undefined, - makePathEnv(fixture.binDir), - ); - expect(resolution?.resolvedPath).toBe(fixture.exePath); - expect(resolution?.executableName).toBe(fixture.exeName); - }); - - it("blocks semantic env wrappers from allowlist/safeBins auto-resolution", () => { - const resolution = resolveCommandResolutionFromArgv([ - "/usr/bin/env", - "FOO=bar", - "rg", - "-n", - "needle", - ]); - expect(resolution?.policyBlocked).toBe(true); - expect(resolution?.rawExecutable).toBe("/usr/bin/env"); - }); - - it("fails closed for env -S even when env itself is allowlisted", () => { - const dir = makeTempDir(); - const binDir = path.join(dir, "bin"); - fs.mkdirSync(binDir, { recursive: true }); - const envName = process.platform === "win32" ? "env.exe" : "env"; - const envPath = path.join(binDir, envName); - fs.writeFileSync(envPath, process.platform === "win32" ? 
"" : "#!/bin/sh\n"); - if (process.platform !== "win32") { - fs.chmodSync(envPath, 0o755); - } - const { analysis, allowlistEval } = analyzeEnvWrapperAllowlist({ - argv: [envPath, "-S", 'sh -c "echo pwned"'], - envPath: envPath, - cwd: dir, - }); - - expect(analysis.ok).toBe(true); - expect(analysis.segments[0]?.resolution?.policyBlocked).toBe(true); - expect(allowlistEval.allowlistSatisfied).toBe(false); - expect(allowlistEval.segmentSatisfiedBy).toEqual([null]); - }); - - it("fails closed when transparent env wrappers exceed unwrap depth", () => { - if (process.platform === "win32") { - return; - } - const dir = makeTempDir(); - const binDir = path.join(dir, "bin"); - fs.mkdirSync(binDir, { recursive: true }); - const envPath = path.join(binDir, "env"); - fs.writeFileSync(envPath, "#!/bin/sh\n"); - fs.chmodSync(envPath, 0o755); - const { analysis, allowlistEval } = analyzeEnvWrapperAllowlist({ - argv: buildNestedEnvShellCommand({ - envExecutable: envPath, - depth: 5, - payload: "echo pwned", - }), - envPath, - cwd: dir, - }); - - expect(analysis.ok).toBe(true); - expect(analysis.segments[0]?.resolution?.policyBlocked).toBe(true); - expect(analysis.segments[0]?.resolution?.blockedWrapper).toBe("env"); - expect(allowlistEval.allowlistSatisfied).toBe(false); - expect(allowlistEval.segmentSatisfiedBy).toEqual([null]); - }); - - it("unwraps env wrapper with shell inner executable", () => { - const resolution = resolveCommandResolutionFromArgv(["/usr/bin/env", "bash", "-lc", "echo hi"]); - expect(resolution?.rawExecutable).toBe("bash"); - expect(resolution?.executableName.toLowerCase()).toContain("bash"); - }); - - it("unwraps nice wrapper argv to resolve the effective executable", () => { - const resolution = resolveCommandResolutionFromArgv([ - "/usr/bin/nice", - "bash", - "-lc", - "echo hi", - ]); - expect(resolution?.rawExecutable).toBe("bash"); - expect(resolution?.executableName.toLowerCase()).toContain("bash"); - }); -}); - -describe("exec approvals shell 
parsing", () => { - it("parses pipelines and chained commands", () => { - const cases = [ - { - name: "pipeline", - command: "echo ok | jq .foo", - expectedSegments: ["echo", "jq"], - }, - { - name: "chain", - command: "ls && rm -rf /", - expectedChainHeads: ["ls", "rm"], - }, - ] as const; - for (const testCase of cases) { - const res = analyzeShellCommand({ command: testCase.command }); - expect(res.ok, testCase.name).toBe(true); - if ("expectedSegments" in testCase) { - expect( - res.segments.map((seg) => seg.argv[0]), - testCase.name, - ).toEqual(testCase.expectedSegments); - } else { - expect( - res.chains?.map((chain) => chain[0]?.argv[0]), - testCase.name, - ).toEqual(testCase.expectedChainHeads); - } - } - }); - - it("parses argv commands", () => { - const res = analyzeArgvCommand({ argv: ["/bin/echo", "ok"] }); - expect(res.ok).toBe(true); - expect(res.segments[0]?.argv).toEqual(["/bin/echo", "ok"]); - }); - - it("rejects unsupported shell constructs", () => { - const cases: Array<{ command: string; reason: string; platform?: NodeJS.Platform }> = [ - { command: 'echo "output: $(whoami)"', reason: "unsupported shell token: $()" }, - { command: 'echo "output: `id`"', reason: "unsupported shell token: `" }, - { command: "echo $(whoami)", reason: "unsupported shell token: $()" }, - { command: "cat < input.txt", reason: "unsupported shell token: <" }, - { command: "echo ok > output.txt", reason: "unsupported shell token: >" }, - { - command: "/usr/bin/echo first line\n/usr/bin/echo second line", - reason: "unsupported shell token: \n", - }, - { - command: 'echo "ok $\\\n(id -u)"', - reason: "unsupported shell token: newline", - }, - { - command: 'echo "ok $\\\r\n(id -u)"', - reason: "unsupported shell token: newline", - }, - { - command: "ping 127.0.0.1 -n 1 & whoami", - reason: "unsupported windows shell token: &", - platform: "win32", - }, - ]; - for (const testCase of cases) { - const res = analyzeShellCommand({ command: testCase.command, platform: 
testCase.platform }); - expect(res.ok).toBe(false); - expect(res.reason).toBe(testCase.reason); - } - }); - - it("accepts inert substitution-like syntax", () => { - const cases = ['echo "output: \\$(whoami)"', "echo 'output: $(whoami)'"]; - for (const command of cases) { - const res = analyzeShellCommand({ command }); - expect(res.ok).toBe(true); - expect(res.segments[0]?.argv[0]).toBe("echo"); - } - }); - - it("accepts safe heredoc forms", () => { - const cases: Array<{ command: string; expectedArgv: string[] }> = [ - { command: "/usr/bin/tee /tmp/file << 'EOF'\nEOF", expectedArgv: ["/usr/bin/tee"] }, - { command: "/usr/bin/tee /tmp/file < segment.argv[0])).toEqual(testCase.expectedArgv); - } - }); - - it("rejects unsafe or malformed heredoc forms", () => { - const cases: Array<{ command: string; reason: string }> = [ - { - command: "/usr/bin/cat < { - const res = analyzeShellCommand({ - command: '"C:\\Program Files\\Tool\\tool.exe" --version', - platform: "win32", - }); - expect(res.ok).toBe(true); - expect(res.segments[0]?.argv).toEqual(["C:\\Program Files\\Tool\\tool.exe", "--version"]); - }); - - it("normalizes short option clusters with attached payloads", () => { - const parsed = parseExecArgvToken("-oblocked.txt"); - expect(parsed.kind).toBe("option"); - if (parsed.kind !== "option" || parsed.style !== "short-cluster") { - throw new Error("expected short-cluster option"); - } - expect(parsed.flags[0]).toBe("-o"); - expect(parsed.cluster).toBe("oblocked.txt"); - }); - - it("normalizes long options with inline payloads", () => { - const parsed = parseExecArgvToken("--output=blocked.txt"); - expect(parsed.kind).toBe("option"); - if (parsed.kind !== "option" || parsed.style !== "long") { - throw new Error("expected long option"); - } - expect(parsed.flag).toBe("--output"); - expect(parsed.inlineValue).toBe("blocked.txt"); - }); -}); - -describe("exec approvals shell allowlist (chained commands)", () => { - it("evaluates chained command allowlist scenarios", () 
=> { - const cases: Array<{ - allowlist: ExecAllowlistEntry[]; - command: string; - expectedAnalysisOk: boolean; - expectedAllowlistSatisfied: boolean; - platform?: NodeJS.Platform; - }> = [ - { - allowlist: [{ pattern: "/usr/bin/obsidian-cli" }, { pattern: "/usr/bin/head" }], - command: - "/usr/bin/obsidian-cli print-default && /usr/bin/obsidian-cli search foo | /usr/bin/head", - expectedAnalysisOk: true, - expectedAllowlistSatisfied: true, - }, - { - allowlist: [{ pattern: "/usr/bin/obsidian-cli" }], - command: "/usr/bin/obsidian-cli print-default && /usr/bin/rm -rf /", - expectedAnalysisOk: true, - expectedAllowlistSatisfied: false, - }, - { - allowlist: [{ pattern: "/usr/bin/echo" }], - command: "/usr/bin/echo ok &&", - expectedAnalysisOk: false, - expectedAllowlistSatisfied: false, - }, - { - allowlist: [{ pattern: "/usr/bin/ping" }], - command: "ping 127.0.0.1 -n 1 & whoami", - expectedAnalysisOk: false, - expectedAllowlistSatisfied: false, - platform: "win32", - }, - ]; - for (const testCase of cases) { - const result = evaluateShellAllowlist({ - command: testCase.command, - allowlist: testCase.allowlist, - safeBins: new Set(), - cwd: "/tmp", - platform: testCase.platform, - }); - expect(result.analysisOk).toBe(testCase.expectedAnalysisOk); - expect(result.allowlistSatisfied).toBe(testCase.expectedAllowlistSatisfied); - } - }); - - it("respects quoted chain separators", () => { - const allowlist: ExecAllowlistEntry[] = [{ pattern: "/usr/bin/echo" }]; - const commands = ['/usr/bin/echo "foo && bar"', '/usr/bin/echo "foo\\" && bar"']; - for (const command of commands) { - const result = evaluateShellAllowlist({ - command, - allowlist, - safeBins: new Set(), - cwd: "/tmp", - }); - expect(result.analysisOk).toBe(true); - expect(result.allowlistSatisfied).toBe(true); - } - }); - - it("fails allowlist analysis for shell line continuations", () => { - const result = evaluateShellAllowlist({ - command: 'echo "ok $\\\n(id -u)"', - allowlist: [{ pattern: 
"/usr/bin/echo" }], - safeBins: new Set(), - cwd: "/tmp", - }); - expect(result.analysisOk).toBe(false); - expect(result.allowlistSatisfied).toBe(false); - }); - - it("satisfies allowlist when bare * wildcard is present", () => { - const dir = makeTempDir(); - const binPath = path.join(dir, "mybin"); - fs.writeFileSync(binPath, "#!/bin/sh\n", { mode: 0o755 }); - const env = makePathEnv(dir); - try { - const result = evaluateShellAllowlist({ - command: "mybin --flag", - allowlist: [{ pattern: "*" }], - safeBins: new Set(), - cwd: dir, - env, - }); - expect(result.analysisOk).toBe(true); - expect(result.allowlistSatisfied).toBe(true); - } finally { - fs.rmSync(dir, { recursive: true, force: true }); - } - }); -}); +import { normalizeSafeBins } from "./exec-approvals-allowlist.js"; +import { evaluateExecAllowlist, type ExecAllowlistEntry } from "./exec-approvals.js"; describe("exec approvals allowlist evaluation", () => { function evaluateAutoAllowSkills(params: { @@ -867,58 +219,3 @@ describe("exec approvals allowlist evaluation", () => { expect(result.segmentSatisfiedBy).toEqual(["allowlist", "safeBins"]); }); }); - -describe("exec approvals policy helpers", () => { - it("minSecurity returns the more restrictive value", () => { - expect(minSecurity("deny", "full")).toBe("deny"); - expect(minSecurity("allowlist", "full")).toBe("allowlist"); - }); - - it("maxAsk returns the more aggressive ask mode", () => { - expect(maxAsk("off", "always")).toBe("always"); - expect(maxAsk("on-miss", "off")).toBe("on-miss"); - }); - - it("requiresExecApproval respects ask mode and allowlist satisfaction", () => { - expect( - requiresExecApproval({ - ask: "always", - security: "allowlist", - analysisOk: true, - allowlistSatisfied: true, - }), - ).toBe(true); - expect( - requiresExecApproval({ - ask: "off", - security: "allowlist", - analysisOk: true, - allowlistSatisfied: false, - }), - ).toBe(false); - expect( - requiresExecApproval({ - ask: "on-miss", - security: "allowlist", - 
analysisOk: true, - allowlistSatisfied: true, - }), - ).toBe(false); - expect( - requiresExecApproval({ - ask: "on-miss", - security: "allowlist", - analysisOk: false, - allowlistSatisfied: false, - }), - ).toBe(true); - expect( - requiresExecApproval({ - ask: "on-miss", - security: "full", - analysisOk: false, - allowlistSatisfied: false, - }), - ).toBe(false); - }); -}); diff --git a/src/infra/exec-command-resolution.test.ts b/src/infra/exec-command-resolution.test.ts new file mode 100644 index 00000000000..4621383a547 --- /dev/null +++ b/src/infra/exec-command-resolution.test.ts @@ -0,0 +1,243 @@ +import fs from "node:fs"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { makePathEnv, makeTempDir } from "./exec-approvals-test-helpers.js"; +import { + evaluateExecAllowlist, + normalizeSafeBins, + parseExecArgvToken, + resolveAllowlistCandidatePath, + resolveCommandResolution, + resolveCommandResolutionFromArgv, +} from "./exec-approvals.js"; + +function buildNestedEnvShellCommand(params: { + envExecutable: string; + depth: number; + payload: string; +}): string[] { + return [...Array(params.depth).fill(params.envExecutable), "/bin/sh", "-c", params.payload]; +} + +function analyzeEnvWrapperAllowlist(params: { argv: string[]; envPath: string; cwd: string }) { + const analysis = { + ok: true as const, + segments: [ + { + raw: params.argv.join(" "), + argv: params.argv, + resolution: resolveCommandResolutionFromArgv( + params.argv, + params.cwd, + makePathEnv(params.envPath), + ), + }, + ], + }; + const allowlistEval = evaluateExecAllowlist({ + analysis, + allowlist: [{ pattern: params.envPath }], + safeBins: normalizeSafeBins([]), + cwd: params.cwd, + }); + return { analysis, allowlistEval }; +} + +function createPathExecutableFixture(params?: { executable?: string }): { + exeName: string; + exePath: string; + binDir: string; +} { + const dir = makeTempDir(); + const binDir = path.join(dir, "bin"); + fs.mkdirSync(binDir, { 
recursive: true }); + const baseName = params?.executable ?? "rg"; + const exeName = process.platform === "win32" ? `${baseName}.exe` : baseName; + const exePath = path.join(binDir, exeName); + fs.writeFileSync(exePath, ""); + fs.chmodSync(exePath, 0o755); + return { exeName, exePath, binDir }; +} + +describe("exec-command-resolution", () => { + it("resolves PATH, relative, and quoted executables", () => { + const cases = [ + { + name: "PATH executable", + setup: () => { + const fixture = createPathExecutableFixture(); + return { + command: "rg -n foo", + cwd: undefined as string | undefined, + envPath: makePathEnv(fixture.binDir), + expectedPath: fixture.exePath, + expectedExecutableName: fixture.exeName, + }; + }, + }, + { + name: "relative executable", + setup: () => { + const dir = makeTempDir(); + const cwd = path.join(dir, "project"); + const script = path.join(cwd, "scripts", "run.sh"); + fs.mkdirSync(path.dirname(script), { recursive: true }); + fs.writeFileSync(script, ""); + fs.chmodSync(script, 0o755); + return { + command: "./scripts/run.sh --flag", + cwd, + envPath: undefined as NodeJS.ProcessEnv | undefined, + expectedPath: script, + expectedExecutableName: undefined, + }; + }, + }, + { + name: "quoted executable", + setup: () => { + const dir = makeTempDir(); + const cwd = path.join(dir, "project"); + const script = path.join(cwd, "bin", "tool"); + fs.mkdirSync(path.dirname(script), { recursive: true }); + fs.writeFileSync(script, ""); + fs.chmodSync(script, 0o755); + return { + command: '"./bin/tool" --version', + cwd, + envPath: undefined as NodeJS.ProcessEnv | undefined, + expectedPath: script, + expectedExecutableName: undefined, + }; + }, + }, + ] as const; + + for (const testCase of cases) { + const setup = testCase.setup(); + const res = resolveCommandResolution(setup.command, setup.cwd, setup.envPath); + expect(res?.resolvedPath, testCase.name).toBe(setup.expectedPath); + if (setup.expectedExecutableName) { + expect(res?.executableName, 
testCase.name).toBe(setup.expectedExecutableName); + } + } + }); + + it("unwraps transparent env and nice wrappers to the effective executable", () => { + const fixture = createPathExecutableFixture(); + + const envResolution = resolveCommandResolutionFromArgv( + ["/usr/bin/env", "rg", "-n", "needle"], + undefined, + makePathEnv(fixture.binDir), + ); + expect(envResolution?.resolvedPath).toBe(fixture.exePath); + expect(envResolution?.executableName).toBe(fixture.exeName); + + const niceResolution = resolveCommandResolutionFromArgv([ + "/usr/bin/nice", + "bash", + "-lc", + "echo hi", + ]); + expect(niceResolution?.rawExecutable).toBe("bash"); + expect(niceResolution?.executableName.toLowerCase()).toContain("bash"); + }); + + it("blocks semantic env wrappers, env -S, and deep transparent-wrapper chains", () => { + const blockedEnv = resolveCommandResolutionFromArgv([ + "/usr/bin/env", + "FOO=bar", + "rg", + "-n", + "needle", + ]); + expect(blockedEnv?.policyBlocked).toBe(true); + expect(blockedEnv?.rawExecutable).toBe("/usr/bin/env"); + + if (process.platform === "win32") { + return; + } + + const dir = makeTempDir(); + const binDir = path.join(dir, "bin"); + fs.mkdirSync(binDir, { recursive: true }); + const envPath = path.join(binDir, "env"); + fs.writeFileSync(envPath, "#!/bin/sh\n"); + fs.chmodSync(envPath, 0o755); + + const envS = analyzeEnvWrapperAllowlist({ + argv: [envPath, "-S", 'sh -c "echo pwned"'], + envPath, + cwd: dir, + }); + expect(envS.analysis.segments[0]?.resolution?.policyBlocked).toBe(true); + expect(envS.allowlistEval.allowlistSatisfied).toBe(false); + + const deep = analyzeEnvWrapperAllowlist({ + argv: buildNestedEnvShellCommand({ + envExecutable: envPath, + depth: 5, + payload: "echo pwned", + }), + envPath, + cwd: dir, + }); + expect(deep.analysis.segments[0]?.resolution?.policyBlocked).toBe(true); + expect(deep.analysis.segments[0]?.resolution?.blockedWrapper).toBe("env"); + expect(deep.allowlistEval.allowlistSatisfied).toBe(false); + }); + 
+ it("resolves allowlist candidate paths from unresolved raw executables", () => { + expect( + resolveAllowlistCandidatePath( + { + rawExecutable: "~/bin/tool", + executableName: "tool", + }, + "/tmp", + ), + ).toContain("/bin/tool"); + + expect( + resolveAllowlistCandidatePath( + { + rawExecutable: "./scripts/run.sh", + executableName: "run.sh", + }, + "/repo", + ), + ).toBe(path.resolve("/repo", "./scripts/run.sh")); + + expect( + resolveAllowlistCandidatePath( + { + rawExecutable: "rg", + executableName: "rg", + }, + "/repo", + ), + ).toBeUndefined(); + }); + + it("normalizes argv tokens for short clusters, long options, and special sentinels", () => { + expect(parseExecArgvToken("")).toEqual({ kind: "empty", raw: "" }); + expect(parseExecArgvToken("--")).toEqual({ kind: "terminator", raw: "--" }); + expect(parseExecArgvToken("-")).toEqual({ kind: "stdin", raw: "-" }); + expect(parseExecArgvToken("echo")).toEqual({ kind: "positional", raw: "echo" }); + + const short = parseExecArgvToken("-oblocked.txt"); + expect(short.kind).toBe("option"); + if (short.kind === "option" && short.style === "short-cluster") { + expect(short.flags[0]).toBe("-o"); + expect(short.cluster).toBe("oblocked.txt"); + } + + const long = parseExecArgvToken("--output=blocked.txt"); + expect(long.kind).toBe("option"); + if (long.kind === "option" && long.style === "long") { + expect(long.flag).toBe("--output"); + expect(long.inlineValue).toBe("blocked.txt"); + } + }); +}); diff --git a/src/infra/exec-host.test.ts b/src/infra/exec-host.test.ts new file mode 100644 index 00000000000..08d3d8af3be --- /dev/null +++ b/src/infra/exec-host.test.ts @@ -0,0 +1,109 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const requestJsonlSocketMock = vi.hoisted(() => vi.fn()); + +vi.mock("./jsonl-socket.js", () => ({ + requestJsonlSocket: (...args: unknown[]) => requestJsonlSocketMock(...args), +})); + +import { requestExecHostViaSocket } from "./exec-host.js"; + 
+describe("requestExecHostViaSocket", () => { + beforeEach(() => { + requestJsonlSocketMock.mockReset(); + }); + + it("returns null when socket credentials are missing", async () => { + await expect( + requestExecHostViaSocket({ + socketPath: "", + token: "secret", + request: { command: ["echo", "hi"] }, + }), + ).resolves.toBeNull(); + await expect( + requestExecHostViaSocket({ + socketPath: "/tmp/socket", + token: "", + request: { command: ["echo", "hi"] }, + }), + ).resolves.toBeNull(); + expect(requestJsonlSocketMock).not.toHaveBeenCalled(); + }); + + it("builds an exec payload and forwards the default timeout", async () => { + requestJsonlSocketMock.mockResolvedValueOnce({ ok: true, payload: { success: true } }); + + await expect( + requestExecHostViaSocket({ + socketPath: "/tmp/socket", + token: "secret", + request: { + command: ["echo", "hi"], + cwd: "/tmp", + }, + }), + ).resolves.toEqual({ ok: true, payload: { success: true } }); + + const call = requestJsonlSocketMock.mock.calls[0]?.[0] as + | { + socketPath: string; + payload: string; + timeoutMs: number; + accept: (msg: unknown) => unknown; + } + | undefined; + if (!call) { + throw new Error("expected requestJsonlSocket call"); + } + + expect(call.socketPath).toBe("/tmp/socket"); + expect(call.timeoutMs).toBe(20_000); + const payload = JSON.parse(call.payload) as { + type: string; + id: string; + nonce: string; + ts: number; + hmac: string; + requestJson: string; + }; + expect(payload.type).toBe("exec"); + expect(payload.id).toMatch(/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i); + expect(payload.nonce).toMatch(/^[0-9a-f]{32}$/); + expect(typeof payload.ts).toBe("number"); + expect(payload.hmac).toMatch(/^[0-9a-f]{64}$/); + expect(JSON.parse(payload.requestJson)).toEqual({ + command: ["echo", "hi"], + cwd: "/tmp", + }); + }); + + it("accepts only exec response messages and maps malformed matches to null", async () => { + requestJsonlSocketMock.mockImplementationOnce(async ({ accept 
}) => { + expect(accept({ type: "ignore" })).toBeUndefined(); + expect(accept({ type: "exec-res", ok: true, payload: { success: true } })).toEqual({ + ok: true, + payload: { success: true }, + }); + expect(accept({ type: "exec-res", ok: false, error: { code: "DENIED" } })).toEqual({ + ok: false, + error: { code: "DENIED" }, + }); + expect(accept({ type: "exec-res", ok: true })).toBeNull(); + return null; + }); + + await expect( + requestExecHostViaSocket({ + socketPath: "/tmp/socket", + token: "secret", + timeoutMs: 123, + request: { command: ["echo", "hi"] }, + }), + ).resolves.toBeNull(); + + expect( + (requestJsonlSocketMock.mock.calls[0]?.[0] as { timeoutMs?: number } | undefined)?.timeoutMs, + ).toBe(123); + }); +}); diff --git a/src/infra/exec-safety.test.ts b/src/infra/exec-safety.test.ts new file mode 100644 index 00000000000..96dcdba357e --- /dev/null +++ b/src/infra/exec-safety.test.ts @@ -0,0 +1,22 @@ +import { describe, expect, it } from "vitest"; +import { isSafeExecutableValue } from "./exec-safety.js"; + +describe("isSafeExecutableValue", () => { + it("accepts bare executable names and likely paths", () => { + expect(isSafeExecutableValue("node")).toBe(true); + expect(isSafeExecutableValue("/usr/bin/node")).toBe(true); + expect(isSafeExecutableValue("./bin/openclaw")).toBe(true); + expect(isSafeExecutableValue("C:\\Tools\\openclaw.exe")).toBe(true); + expect(isSafeExecutableValue(" tool ")).toBe(true); + }); + + it("rejects blanks, flags, shell metacharacters, quotes, and control chars", () => { + expect(isSafeExecutableValue(undefined)).toBe(false); + expect(isSafeExecutableValue(" ")).toBe(false); + expect(isSafeExecutableValue("-rf")).toBe(false); + expect(isSafeExecutableValue("node;rm -rf /")).toBe(false); + expect(isSafeExecutableValue('node "arg"')).toBe(false); + expect(isSafeExecutableValue("node\nnext")).toBe(false); + expect(isSafeExecutableValue("node\0")).toBe(false); + }); +}); diff --git a/src/infra/exec-wrapper-resolution.test.ts 
b/src/infra/exec-wrapper-resolution.test.ts index b271c97ee8d..001d0ca2514 100644 --- a/src/infra/exec-wrapper-resolution.test.ts +++ b/src/infra/exec-wrapper-resolution.test.ts @@ -1,16 +1,228 @@ import { describe, expect, test } from "vitest"; -import { normalizeExecutableToken } from "./exec-wrapper-resolution.js"; +import { + basenameLower, + extractShellWrapperCommand, + extractShellWrapperInlineCommand, + hasEnvManipulationBeforeShellWrapper, + isDispatchWrapperExecutable, + isShellWrapperExecutable, + normalizeExecutableToken, + resolveDispatchWrapperExecutionPlan, + unwrapEnvInvocation, + unwrapKnownDispatchWrapperInvocation, + unwrapKnownShellMultiplexerInvocation, +} from "./exec-wrapper-resolution.js"; -describe("normalizeExecutableToken", () => { - test("strips common windows executable suffixes", () => { - expect(normalizeExecutableToken("bun.cmd")).toBe("bun"); - expect(normalizeExecutableToken("deno.bat")).toBe("deno"); - expect(normalizeExecutableToken("pwsh.com")).toBe("pwsh"); - expect(normalizeExecutableToken("cmd.exe")).toBe("cmd"); - }); - - test("normalizes path-qualified windows shims", () => { - expect(normalizeExecutableToken("C:\\tools\\bun.cmd")).toBe("bun"); - expect(normalizeExecutableToken("/tmp/deno.exe")).toBe("deno"); +describe("basenameLower", () => { + test.each([ + { token: " Bun.CMD ", expected: "bun.cmd" }, + { token: "C:\\tools\\PwSh.EXE", expected: "pwsh.exe" }, + { token: "/tmp/bash", expected: "bash" }, + ])("normalizes basenames for %j", ({ token, expected }) => { + expect(basenameLower(token)).toBe(expected); + }); +}); + +describe("normalizeExecutableToken", () => { + test.each([ + { token: "bun.cmd", expected: "bun" }, + { token: "deno.bat", expected: "deno" }, + { token: "pwsh.com", expected: "pwsh" }, + { token: "cmd.exe", expected: "cmd" }, + { token: "C:\\tools\\bun.cmd", expected: "bun" }, + { token: "/tmp/deno.exe", expected: "deno" }, + { token: " /tmp/bash ", expected: "bash" }, + ])("normalizes executable 
tokens for %j", ({ token, expected }) => { + expect(normalizeExecutableToken(token)).toBe(expected); + }); +}); + +describe("wrapper classification", () => { + test.each([ + { token: "sudo", dispatch: true, shell: false }, + { token: "timeout.exe", dispatch: true, shell: false }, + { token: "bash", dispatch: false, shell: true }, + { token: "pwsh.exe", dispatch: false, shell: true }, + { token: "node", dispatch: false, shell: false }, + ])("classifies wrappers for %j", ({ token, dispatch, shell }) => { + expect(isDispatchWrapperExecutable(token)).toBe(dispatch); + expect(isShellWrapperExecutable(token)).toBe(shell); + }); +}); + +describe("unwrapKnownShellMultiplexerInvocation", () => { + test.each([ + { argv: [], expected: { kind: "not-wrapper" } }, + { argv: ["node", "-e", "1"], expected: { kind: "not-wrapper" } }, + { argv: ["busybox"], expected: { kind: "blocked", wrapper: "busybox" } }, + { argv: ["busybox", "ls"], expected: { kind: "blocked", wrapper: "busybox" } }, + { + argv: ["busybox", "sh", "-lc", "echo hi"], + expected: { kind: "unwrapped", wrapper: "busybox", argv: ["sh", "-lc", "echo hi"] }, + }, + { + argv: ["toybox", "--", "pwsh.exe", "-Command", "Get-Date"], + expected: { + kind: "unwrapped", + wrapper: "toybox", + argv: ["pwsh.exe", "-Command", "Get-Date"], + }, + }, + ])("unwraps shell multiplexers for %j", ({ argv, expected }) => { + expect(unwrapKnownShellMultiplexerInvocation(argv)).toEqual(expected); + }); +}); + +describe("unwrapEnvInvocation", () => { + test.each([ + { + argv: ["env", "FOO=bar", "bash", "-lc", "echo hi"], + expected: ["bash", "-lc", "echo hi"], + }, + { + argv: ["env", "-i", "--unset", "PATH", "--", "sh", "-lc", "echo hi"], + expected: ["sh", "-lc", "echo hi"], + }, + { + argv: ["env", "--chdir=/tmp", "pwsh", "-Command", "Get-Date"], + expected: ["pwsh", "-Command", "Get-Date"], + }, + { + argv: ["env", "-", "bash", "-lc", "echo hi"], + expected: ["bash", "-lc", "echo hi"], + }, + { + argv: ["env", "--bogus", "bash", "-lc", 
"echo hi"], + expected: null, + }, + { + argv: ["env", "--unset"], + expected: null, + }, + ])("unwraps env invocations for %j", ({ argv, expected }) => { + expect(unwrapEnvInvocation(argv)).toEqual(expected); + }); +}); + +describe("unwrapKnownDispatchWrapperInvocation", () => { + test.each([ + { + argv: ["nice", "-n", "5", "bash", "-lc", "echo hi"], + expected: { kind: "unwrapped", wrapper: "nice", argv: ["bash", "-lc", "echo hi"] }, + }, + { + argv: ["nohup", "--", "bash", "-lc", "echo hi"], + expected: { kind: "unwrapped", wrapper: "nohup", argv: ["bash", "-lc", "echo hi"] }, + }, + { + argv: ["stdbuf", "-o", "L", "bash", "-lc", "echo hi"], + expected: { kind: "unwrapped", wrapper: "stdbuf", argv: ["bash", "-lc", "echo hi"] }, + }, + { + argv: ["timeout", "--signal=TERM", "5s", "bash", "-lc", "echo hi"], + expected: { kind: "unwrapped", wrapper: "timeout", argv: ["bash", "-lc", "echo hi"] }, + }, + { + argv: ["sudo", "bash", "-lc", "echo hi"], + expected: { kind: "blocked", wrapper: "sudo" }, + }, + { + argv: ["timeout", "--bogus", "5s", "bash", "-lc", "echo hi"], + expected: { kind: "blocked", wrapper: "timeout" }, + }, + ])("unwraps known dispatch wrappers for %j", ({ argv, expected }) => { + expect(unwrapKnownDispatchWrapperInvocation(argv)).toEqual(expected); + }); +}); + +describe("resolveDispatchWrapperExecutionPlan", () => { + test("unwraps transparent wrapper chains", () => { + expect( + resolveDispatchWrapperExecutionPlan(["nohup", "nice", "-n", "5", "bash", "-lc", "echo hi"]), + ).toEqual({ + argv: ["bash", "-lc", "echo hi"], + wrappers: ["nohup", "nice"], + policyBlocked: false, + }); + }); + + test("blocks semantic env usage even when it reaches a shell wrapper", () => { + expect( + resolveDispatchWrapperExecutionPlan(["env", "FOO=bar", "bash", "-lc", "echo hi"]), + ).toEqual({ + argv: ["env", "FOO=bar", "bash", "-lc", "echo hi"], + wrappers: ["env"], + policyBlocked: true, + blockedWrapper: "env", + }); + }); + + test("blocks wrapper overflow 
beyond the configured depth", () => { + expect( + resolveDispatchWrapperExecutionPlan(["nohup", "timeout", "5s", "bash", "-lc", "echo hi"], 1), + ).toEqual({ + argv: ["timeout", "5s", "bash", "-lc", "echo hi"], + wrappers: ["nohup"], + policyBlocked: true, + blockedWrapper: "timeout", + }); + }); +}); + +describe("hasEnvManipulationBeforeShellWrapper", () => { + test.each([ + { + argv: ["env", "FOO=bar", "bash", "-lc", "echo hi"], + expected: true, + }, + { + argv: ["timeout", "5s", "env", "--", "bash", "-lc", "echo hi"], + expected: false, + }, + { + argv: ["timeout", "5s", "env", "FOO=bar", "bash", "-lc", "echo hi"], + expected: true, + }, + { + argv: ["sudo", "bash", "-lc", "echo hi"], + expected: false, + }, + ])("detects env manipulation before shell wrappers for %j", ({ argv, expected }) => { + expect(hasEnvManipulationBeforeShellWrapper(argv)).toBe(expected); + }); +}); + +describe("extractShellWrapperCommand", () => { + test.each([ + { + argv: ["bash", "-lc", "echo hi"], + expectedInline: "echo hi", + expectedCommand: { isWrapper: true, command: "echo hi" }, + }, + { + argv: ["busybox", "sh", "-lc", "echo hi"], + expectedInline: "echo hi", + expectedCommand: { isWrapper: true, command: "echo hi" }, + }, + { + argv: ["env", "--", "pwsh", "-Command", "Get-Date"], + expectedInline: "Get-Date", + expectedCommand: { isWrapper: true, command: "Get-Date" }, + }, + { + argv: ["bash", "script.sh"], + expectedInline: null, + expectedCommand: { isWrapper: false, command: null }, + }, + ])("extracts inline commands for %j", ({ argv, expectedInline, expectedCommand }) => { + expect(extractShellWrapperInlineCommand(argv)).toBe(expectedInline); + expect(extractShellWrapperCommand(argv)).toEqual(expectedCommand); + }); + + test("prefers an explicit raw command override when provided", () => { + expect(extractShellWrapperCommand(["bash", "-lc", "echo hi"], " run this instead ")).toEqual({ + isWrapper: true, + command: "run this instead", + }); }); }); diff --git 
a/src/infra/executable-path.test.ts b/src/infra/executable-path.test.ts new file mode 100644 index 00000000000..731457ab183 --- /dev/null +++ b/src/infra/executable-path.test.ts @@ -0,0 +1,50 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { + isExecutableFile, + resolveExecutableFromPathEnv, + resolveExecutablePath, +} from "./executable-path.js"; + +describe("executable path helpers", () => { + it("detects executable files and rejects directories or non-executables", async () => { + const base = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-exec-path-")); + const execPath = path.join(base, "tool"); + const filePath = path.join(base, "plain.txt"); + const dirPath = path.join(base, "dir"); + await fs.writeFile(execPath, "#!/bin/sh\nexit 0\n", "utf8"); + await fs.chmod(execPath, 0o755); + await fs.writeFile(filePath, "nope", "utf8"); + await fs.mkdir(dirPath); + + expect(isExecutableFile(execPath)).toBe(true); + expect(isExecutableFile(filePath)).toBe(false); + expect(isExecutableFile(dirPath)).toBe(false); + expect(isExecutableFile(path.join(base, "missing"))).toBe(false); + }); + + it("resolves executables from PATH entries and cwd-relative paths", async () => { + const base = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-exec-path-")); + const binDir = path.join(base, "bin"); + const cwd = path.join(base, "cwd"); + await fs.mkdir(binDir, { recursive: true }); + await fs.mkdir(cwd, { recursive: true }); + + const pathTool = path.join(binDir, "runner"); + const cwdTool = path.join(cwd, "local-tool"); + await fs.writeFile(pathTool, "#!/bin/sh\nexit 0\n", "utf8"); + await fs.writeFile(cwdTool, "#!/bin/sh\nexit 0\n", "utf8"); + await fs.chmod(pathTool, 0o755); + await fs.chmod(cwdTool, 0o755); + + expect(resolveExecutableFromPathEnv("runner", `${binDir}${path.delimiter}/usr/bin`)).toBe( + pathTool, + ); + expect(resolveExecutableFromPathEnv("missing", 
binDir)).toBeUndefined(); + expect(resolveExecutablePath("./local-tool", { cwd })).toBe(cwdTool); + expect(resolveExecutablePath("runner", { env: { PATH: binDir } })).toBe(pathTool); + expect(resolveExecutablePath("missing", { env: { PATH: binDir } })).toBeUndefined(); + }); +}); diff --git a/src/infra/file-identity.test.ts b/src/infra/file-identity.test.ts index 12b3029cda1..2a28255a1ac 100644 --- a/src/infra/file-identity.test.ts +++ b/src/infra/file-identity.test.ts @@ -6,28 +6,64 @@ function stat(dev: number | bigint, ino: number | bigint): FileIdentityStat { } describe("sameFileIdentity", () => { - it("accepts exact dev+ino match", () => { - expect(sameFileIdentity(stat(7, 11), stat(7, 11), "linux")).toBe(true); - }); - - it("rejects inode mismatch", () => { - expect(sameFileIdentity(stat(7, 11), stat(7, 12), "linux")).toBe(false); - }); - - it("rejects dev mismatch on non-windows", () => { - expect(sameFileIdentity(stat(7, 11), stat(8, 11), "linux")).toBe(false); - }); - - it("accepts win32 dev mismatch when either side is 0", () => { - expect(sameFileIdentity(stat(0, 11), stat(8, 11), "win32")).toBe(true); - expect(sameFileIdentity(stat(7, 11), stat(0, 11), "win32")).toBe(true); - }); - - it("keeps dev strictness on win32 when both dev values are non-zero", () => { - expect(sameFileIdentity(stat(7, 11), stat(8, 11), "win32")).toBe(false); - }); - - it("handles bigint stats", () => { - expect(sameFileIdentity(stat(0n, 11n), stat(8n, 11n), "win32")).toBe(true); + it.each([ + { + name: "accepts exact dev+ino match", + left: stat(7, 11), + right: stat(7, 11), + platform: "linux" as const, + expected: true, + }, + { + name: "rejects inode mismatch", + left: stat(7, 11), + right: stat(7, 12), + platform: "linux" as const, + expected: false, + }, + { + name: "rejects dev mismatch on non-windows", + left: stat(7, 11), + right: stat(8, 11), + platform: "linux" as const, + expected: false, + }, + { + name: "keeps dev strictness on linux when one side is zero", + left: 
stat(0, 11), + right: stat(8, 11), + platform: "linux" as const, + expected: false, + }, + { + name: "accepts win32 dev mismatch when either side is 0", + left: stat(0, 11), + right: stat(8, 11), + platform: "win32" as const, + expected: true, + }, + { + name: "accepts win32 dev mismatch when right side is 0", + left: stat(7, 11), + right: stat(0, 11), + platform: "win32" as const, + expected: true, + }, + { + name: "keeps dev strictness on win32 when both dev values are non-zero", + left: stat(7, 11), + right: stat(8, 11), + platform: "win32" as const, + expected: false, + }, + { + name: "handles bigint stats", + left: stat(0n, 11n), + right: stat(8n, 11n), + platform: "win32" as const, + expected: true, + }, + ])("$name", ({ left, right, platform, expected }) => { + expect(sameFileIdentity(left, right, platform)).toBe(expected); }); }); diff --git a/src/infra/fixed-window-rate-limit.test.ts b/src/infra/fixed-window-rate-limit.test.ts index 1afc50974d0..8290c1d4176 100644 --- a/src/infra/fixed-window-rate-limit.test.ts +++ b/src/infra/fixed-window-rate-limit.test.ts @@ -18,6 +18,35 @@ describe("fixed-window rate limiter", () => { expect(limiter.consume()).toMatchObject({ allowed: true, remaining: 1 }); }); + it("clamps maxRequests and windowMs to at least one", () => { + let nowMs = 100; + const limiter = createFixedWindowRateLimiter({ + maxRequests: 0.2, + windowMs: 0.4, + now: () => nowMs, + }); + + expect(limiter.consume()).toMatchObject({ allowed: true, remaining: 0, retryAfterMs: 0 }); + expect(limiter.consume()).toMatchObject({ allowed: false, remaining: 0, retryAfterMs: 1 }); + + nowMs += 1; + expect(limiter.consume()).toMatchObject({ allowed: true, remaining: 0 }); + }); + + it("reports the remaining retry window after later blocked attempts", () => { + let nowMs = 1_000; + const limiter = createFixedWindowRateLimiter({ + maxRequests: 1, + windowMs: 1_000, + now: () => nowMs, + }); + + expect(limiter.consume()).toMatchObject({ allowed: true, remaining: 0 
}); + + nowMs += 250; + expect(limiter.consume()).toMatchObject({ allowed: false, retryAfterMs: 750 }); + }); + it("supports explicit reset", () => { const limiter = createFixedWindowRateLimiter({ maxRequests: 1, diff --git a/src/infra/format-time/format-datetime.ts b/src/infra/format-time/format-datetime.ts index d7ed13f5c24..37cdf713f8d 100644 --- a/src/infra/format-time/format-datetime.ts +++ b/src/infra/format-time/format-datetime.ts @@ -59,36 +59,40 @@ export function formatZonedTimestamp( date: Date, options?: FormatZonedTimestampOptions, ): string | undefined { - const intlOptions: Intl.DateTimeFormatOptions = { - timeZone: options?.timeZone, - year: "numeric", - month: "2-digit", - day: "2-digit", - hour: "2-digit", - minute: "2-digit", - hourCycle: "h23", - timeZoneName: "short", - }; - if (options?.displaySeconds) { - intlOptions.second = "2-digit"; - } - const parts = new Intl.DateTimeFormat("en-US", intlOptions).formatToParts(date); - const pick = (type: string) => parts.find((part) => part.type === type)?.value; - const yyyy = pick("year"); - const mm = pick("month"); - const dd = pick("day"); - const hh = pick("hour"); - const min = pick("minute"); - const sec = options?.displaySeconds ? 
pick("second") : undefined; - const tz = [...parts] - .toReversed() - .find((part) => part.type === "timeZoneName") - ?.value?.trim(); - if (!yyyy || !mm || !dd || !hh || !min) { + try { + const intlOptions: Intl.DateTimeFormatOptions = { + timeZone: options?.timeZone, + year: "numeric", + month: "2-digit", + day: "2-digit", + hour: "2-digit", + minute: "2-digit", + hourCycle: "h23", + timeZoneName: "short", + }; + if (options?.displaySeconds) { + intlOptions.second = "2-digit"; + } + const parts = new Intl.DateTimeFormat("en-US", intlOptions).formatToParts(date); + const pick = (type: string) => parts.find((part) => part.type === type)?.value; + const yyyy = pick("year"); + const mm = pick("month"); + const dd = pick("day"); + const hh = pick("hour"); + const min = pick("minute"); + const sec = options?.displaySeconds ? pick("second") : undefined; + const tz = [...parts] + .toReversed() + .find((part) => part.type === "timeZoneName") + ?.value?.trim(); + if (!yyyy || !mm || !dd || !hh || !min) { + return undefined; + } + if (options?.displaySeconds && sec) { + return `${yyyy}-${mm}-${dd} ${hh}:${min}:${sec}${tz ? ` ${tz}` : ""}`; + } + return `${yyyy}-${mm}-${dd} ${hh}:${min}${tz ? ` ${tz}` : ""}`; + } catch { return undefined; } - if (options?.displaySeconds && sec) { - return `${yyyy}-${mm}-${dd} ${hh}:${min}:${sec}${tz ? ` ${tz}` : ""}`; - } - return `${yyyy}-${mm}-${dd} ${hh}:${min}${tz ? 
` ${tz}` : ""}`; } diff --git a/src/infra/format-time/format-time.test.ts b/src/infra/format-time/format-time.test.ts index e9a25578edd..f3fddff7f6d 100644 --- a/src/infra/format-time/format-time.test.ts +++ b/src/infra/format-time/format-time.test.ts @@ -1,4 +1,4 @@ -import { describe, expect, it } from "vitest"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { formatUtcTimestamp, formatZonedTimestamp, resolveTimezone } from "./format-datetime.js"; import { formatDurationCompact, @@ -8,6 +8,12 @@ import { } from "./format-duration.js"; import { formatTimeAgo, formatRelativeTimestamp } from "./format-relative.js"; +const invalidDurationInputs = [null, undefined, -100] as const; + +afterEach(() => { + vi.restoreAllMocks(); +}); + describe("format-duration", () => { describe("formatDurationCompact", () => { it("returns undefined for null/undefined/non-positive", () => { @@ -55,7 +61,7 @@ describe("format-duration", () => { describe("formatDurationHuman", () => { it("returns fallback for invalid duration input", () => { - for (const value of [null, undefined, -100]) { + for (const value of invalidDurationInputs) { expect(formatDurationHuman(value)).toBe("n/a"); } expect(formatDurationHuman(null, "unknown")).toBe("unknown"); @@ -106,6 +112,12 @@ describe("format-duration", () => { it("supports seconds unit", () => { expect(formatDurationSeconds(2000, { unit: "seconds" })).toBe("2 seconds"); }); + + it("clamps negative values and rejects non-finite input", () => { + expect(formatDurationSeconds(-1500, { decimals: 1 })).toBe("0s"); + expect(formatDurationSeconds(NaN)).toBe("unknown"); + expect(formatDurationSeconds(Infinity)).toBe("unknown"); + }); }); }); @@ -152,13 +164,52 @@ describe("format-datetime", () => { const result = formatZonedTimestamp(date, options); expect(result).toMatch(expected); }); + + it("returns undefined when required Intl parts are missing", () => { + function MissingPartsDateTimeFormat() { + return { + 
formatToParts: () => [ + { type: "month", value: "01" }, + { type: "day", value: "15" }, + { type: "hour", value: "14" }, + { type: "minute", value: "30" }, + ], + } as Intl.DateTimeFormat; + } + + vi.spyOn(Intl, "DateTimeFormat").mockImplementation( + MissingPartsDateTimeFormat as unknown as typeof Intl.DateTimeFormat, + ); + + expect(formatZonedTimestamp(new Date("2024-01-15T14:30:00.000Z"), { timeZone: "UTC" })).toBe( + undefined, + ); + }); + + it("returns undefined when Intl formatting throws", () => { + function ThrowingDateTimeFormat() { + return { + formatToParts: () => { + throw new Error("boom"); + }, + } as unknown as Intl.DateTimeFormat; + } + + vi.spyOn(Intl, "DateTimeFormat").mockImplementation( + ThrowingDateTimeFormat as unknown as typeof Intl.DateTimeFormat, + ); + + expect(formatZonedTimestamp(new Date("2024-01-15T14:30:00.000Z"), { timeZone: "UTC" })).toBe( + undefined, + ); + }); }); }); describe("format-relative", () => { describe("formatTimeAgo", () => { it("returns fallback for invalid elapsed input", () => { - for (const value of [null, undefined, -100]) { + for (const value of invalidDurationInputs) { expect(formatTimeAgo(value)).toBe("unknown"); } expect(formatTimeAgo(null, { fallback: "n/a" })).toBe("n/a"); @@ -188,6 +239,15 @@ describe("format-relative", () => { }); describe("formatRelativeTimestamp", () => { + beforeEach(() => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2024-02-10T12:00:00.000Z")); + }); + + afterEach(() => { + vi.useRealTimers(); + }); + it("returns fallback for invalid timestamp input", () => { for (const value of [null, undefined]) { expect(formatRelativeTimestamp(value)).toBe("n/a"); @@ -197,21 +257,48 @@ describe("format-relative", () => { it.each([ { offsetMs: -10000, expected: "just now" }, + { offsetMs: -30000, expected: "just now" }, { offsetMs: -300000, expected: "5m ago" }, { offsetMs: -7200000, expected: "2h ago" }, + { offsetMs: -(47 * 3600000), expected: "47h ago" }, + { offsetMs: -(48 * 3600000), 
expected: "2d ago" }, { offsetMs: 30000, expected: "in <1m" }, { offsetMs: 300000, expected: "in 5m" }, { offsetMs: 7200000, expected: "in 2h" }, ])("formats relative timestamp for offset $offsetMs", ({ offsetMs, expected }) => { - const now = Date.now(); - expect(formatRelativeTimestamp(now + offsetMs)).toBe(expected); + expect(formatRelativeTimestamp(Date.now() + offsetMs)).toBe(expected); }); - it("falls back to date for old timestamps when enabled", () => { - const oldDate = Date.now() - 30 * 24 * 3600000; // 30 days ago - const result = formatRelativeTimestamp(oldDate, { dateFallback: true }); - // Should be a short date like "Jan 9" not "30d ago" - expect(result).toMatch(/[A-Z][a-z]{2} \d{1,2}/); + it.each([ + { + name: "keeps 7-day-old timestamps relative", + offsetMs: -7 * 24 * 3600000, + options: { dateFallback: true, timezone: "UTC" }, + expected: "7d ago", + }, + { + name: "falls back to a short date once the timestamp is older than 7 days", + offsetMs: -8 * 24 * 3600000, + options: { dateFallback: true, timezone: "UTC" }, + expected: "Feb 2", + }, + { + name: "keeps relative output when date fallback is disabled", + offsetMs: -8 * 24 * 3600000, + options: { timezone: "UTC" }, + expected: "8d ago", + }, + ])("$name", ({ offsetMs, options, expected }) => { + expect(formatRelativeTimestamp(Date.now() + offsetMs, options)).toBe(expected); + }); + + it("falls back to relative days when date formatting throws", () => { + expect( + formatRelativeTimestamp(Date.now() - 8 * 24 * 3600000, { + dateFallback: true, + timezone: "Invalid/Timezone", + }), + ).toBe("8d ago"); }); }); }); diff --git a/src/infra/gateway-process-argv.test.ts b/src/infra/gateway-process-argv.test.ts new file mode 100644 index 00000000000..f3570316860 --- /dev/null +++ b/src/infra/gateway-process-argv.test.ts @@ -0,0 +1,34 @@ +import { describe, expect, it } from "vitest"; +import { isGatewayArgv, parseProcCmdline } from "./gateway-process-argv.js"; + +describe("parseProcCmdline", () => { + 
it("splits null-delimited argv and trims empty entries", () => { + expect(parseProcCmdline(" node \0 gateway \0\0 --port \0 18789 \0")).toEqual([ + "node", + "gateway", + "--port", + "18789", + ]); + }); +}); + +describe("isGatewayArgv", () => { + it("requires a gateway token", () => { + expect(isGatewayArgv(["node", "dist/index.js", "--port", "18789"])).toBe(false); + }); + + it("matches known entrypoints across slash and case variants", () => { + expect(isGatewayArgv(["NODE", "C:\\OpenClaw\\DIST\\ENTRY.JS", "gateway"])).toBe(true); + expect(isGatewayArgv(["bun", "/srv/openclaw/scripts/run-node.mjs", "gateway"])).toBe(true); + }); + + it("matches the openclaw executable but gates the gateway binary behind the opt-in flag", () => { + expect(isGatewayArgv(["C:\\bin\\openclaw.cmd", "gateway"])).toBe(true); + expect(isGatewayArgv(["/usr/local/bin/openclaw-gateway", "gateway"])).toBe(false); + expect( + isGatewayArgv(["/usr/local/bin/openclaw-gateway", "gateway"], { + allowGatewayBinary: true, + }), + ).toBe(true); + }); +}); diff --git a/src/infra/gateway-processes.test.ts b/src/infra/gateway-processes.test.ts new file mode 100644 index 00000000000..5eb2fbd1113 --- /dev/null +++ b/src/infra/gateway-processes.test.ts @@ -0,0 +1,165 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; + +const spawnSyncMock = vi.hoisted(() => vi.fn()); +const readFileSyncMock = vi.hoisted(() => vi.fn()); +const parseCmdScriptCommandLineMock = vi.hoisted(() => vi.fn()); +const parseProcCmdlineMock = vi.hoisted(() => vi.fn()); +const isGatewayArgvMock = vi.hoisted(() => vi.fn()); +const findGatewayPidsOnPortSyncMock = vi.hoisted(() => vi.fn()); + +vi.mock("node:child_process", () => ({ + spawnSync: (...args: unknown[]) => spawnSyncMock(...args), +})); + +vi.mock("node:fs", () => ({ + default: { + readFileSync: (...args: unknown[]) => readFileSyncMock(...args), + }, +})); + +vi.mock("../daemon/cmd-argv.js", () => ({ + parseCmdScriptCommandLine: (...args: unknown[]) 
=> parseCmdScriptCommandLineMock(...args), +})); + +vi.mock("./gateway-process-argv.js", () => ({ + parseProcCmdline: (...args: unknown[]) => parseProcCmdlineMock(...args), + isGatewayArgv: (...args: unknown[]) => isGatewayArgvMock(...args), +})); + +vi.mock("./restart-stale-pids.js", () => ({ + findGatewayPidsOnPortSync: (...args: unknown[]) => findGatewayPidsOnPortSyncMock(...args), +})); + +const { + findVerifiedGatewayListenerPidsOnPortSync, + formatGatewayPidList, + readGatewayProcessArgsSync, + signalVerifiedGatewayPidSync, +} = await import("./gateway-processes.js"); + +const originalPlatformDescriptor = Object.getOwnPropertyDescriptor(process, "platform"); + +function setPlatform(platform: NodeJS.Platform): void { + Object.defineProperty(process, "platform", { + value: platform, + configurable: true, + }); +} + +describe("gateway-processes", () => { + beforeEach(() => { + spawnSyncMock.mockReset(); + readFileSyncMock.mockReset(); + parseCmdScriptCommandLineMock.mockReset(); + parseProcCmdlineMock.mockReset(); + isGatewayArgvMock.mockReset(); + findGatewayPidsOnPortSyncMock.mockReset(); + }); + + afterEach(() => { + vi.restoreAllMocks(); + if (originalPlatformDescriptor) { + Object.defineProperty(process, "platform", originalPlatformDescriptor); + } + }); + + it("reads linux process args from /proc and parses cmdlines", () => { + setPlatform("linux"); + readFileSyncMock.mockReturnValue("node\0dist/index.js\0gateway\0run\0"); + parseProcCmdlineMock.mockReturnValue(["node", "dist/index.js", "gateway", "run"]); + + expect(readGatewayProcessArgsSync(4242)).toEqual(["node", "dist/index.js", "gateway", "run"]); + expect(readFileSyncMock).toHaveBeenCalledWith("/proc/4242/cmdline", "utf8"); + expect(parseProcCmdlineMock).toHaveBeenCalledWith("node\0dist/index.js\0gateway\0run\0"); + }); + + it("reads darwin process args from ps output and returns null on ps failure", () => { + setPlatform("darwin"); + spawnSyncMock + .mockReturnValueOnce({ + error: null, + status: 
0, + stdout: "node /repo/dist/index.js gateway run\n", + }) + .mockReturnValueOnce({ + error: null, + status: 1, + stdout: "", + }); + + expect(readGatewayProcessArgsSync(123)).toEqual([ + "node", + "/repo/dist/index.js", + "gateway", + "run", + ]); + expect(readGatewayProcessArgsSync(124)).toBeNull(); + }); + + it("falls back from powershell to wmic for windows process args", () => { + setPlatform("win32"); + spawnSyncMock + .mockReturnValueOnce({ + error: new Error("powershell missing"), + status: null, + stdout: "", + }) + .mockReturnValueOnce({ + error: null, + status: 0, + stdout: "CommandLine=node.exe gateway run\r\n", + }); + parseCmdScriptCommandLineMock.mockReturnValue(["node.exe", "gateway", "run"]); + + expect(readGatewayProcessArgsSync(77)).toEqual(["node.exe", "gateway", "run"]); + expect(parseCmdScriptCommandLineMock).toHaveBeenCalledWith("node.exe gateway run"); + }); + + it("signals only verified gateway processes", () => { + setPlatform("linux"); + readFileSyncMock.mockReturnValue("node\0gateway\0"); + parseProcCmdlineMock.mockReturnValue(["node", "gateway"]); + isGatewayArgvMock.mockReturnValueOnce(true).mockReturnValueOnce(false); + const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true); + + signalVerifiedGatewayPidSync(500, "SIGTERM"); + expect(killSpy).toHaveBeenCalledWith(500, "SIGTERM"); + + expect(() => signalVerifiedGatewayPidSync(501, "SIGUSR1")).toThrow( + /refusing to signal non-gateway process pid 501/, + ); + }); + + it("dedupes and filters verified gateway listener pids on unix and windows", () => { + setPlatform("linux"); + findGatewayPidsOnPortSyncMock.mockReturnValue([process.pid, 200, 200, 300, -1]); + readFileSyncMock.mockReturnValueOnce("openclaw-gateway\0gateway\0"); + readFileSyncMock.mockReturnValueOnce("python\0-m\0http.server\0"); + parseProcCmdlineMock + .mockReturnValueOnce(["openclaw-gateway", "gateway"]) + .mockReturnValueOnce(["python", "-m", "http.server"]); + 
isGatewayArgvMock.mockReturnValueOnce(true).mockReturnValueOnce(false); + + expect(findVerifiedGatewayListenerPidsOnPortSync(18789)).toEqual([200]); + setPlatform("win32"); + spawnSyncMock + .mockReturnValueOnce({ + error: null, + status: 0, + stdout: "200\r\n200\r\n0\r\n", + }) + .mockReturnValueOnce({ + error: null, + status: 0, + stdout: "node.exe gateway run", + }); + parseCmdScriptCommandLineMock.mockReturnValue(["node.exe", "gateway", "run"]); + isGatewayArgvMock.mockReturnValue(true); + + expect(findVerifiedGatewayListenerPidsOnPortSync(18789)).toEqual([200]); + }); + + it("formats pid lists as comma-separated output", () => { + expect(formatGatewayPidList([1, 2, 3])).toBe("1, 2, 3"); + }); +}); diff --git a/src/infra/gateway-processes.ts b/src/infra/gateway-processes.ts new file mode 100644 index 00000000000..340b54a259f --- /dev/null +++ b/src/infra/gateway-processes.ts @@ -0,0 +1,162 @@ +import { spawnSync } from "node:child_process"; +import fsSync from "node:fs"; +import { parseCmdScriptCommandLine } from "../daemon/cmd-argv.js"; +import { isGatewayArgv, parseProcCmdline } from "./gateway-process-argv.js"; +import { findGatewayPidsOnPortSync as findUnixGatewayPidsOnPortSync } from "./restart-stale-pids.js"; + +const WINDOWS_GATEWAY_DISCOVERY_TIMEOUT_MS = 5_000; + +function extractWindowsCommandLine(raw: string): string | null { + const lines = raw + .split(/\r?\n/) + .map((line) => line.trim()) + .filter(Boolean); + for (const line of lines) { + if (!line.toLowerCase().startsWith("commandline=")) { + continue; + } + const value = line.slice("commandline=".length).trim(); + return value || null; + } + return lines.find((line) => line.toLowerCase() !== "commandline") ?? 
null; +} + +function readWindowsProcessArgsViaPowerShell(pid: number): string[] | null { + const ps = spawnSync( + "powershell", + [ + "-NoProfile", + "-Command", + `(Get-CimInstance Win32_Process -Filter "ProcessId = ${pid}" | Select-Object -ExpandProperty CommandLine)`, + ], + { + encoding: "utf8", + timeout: WINDOWS_GATEWAY_DISCOVERY_TIMEOUT_MS, + windowsHide: true, + }, + ); + if (ps.error || ps.status !== 0) { + return null; + } + const command = ps.stdout.trim(); + return command ? parseCmdScriptCommandLine(command) : null; +} + +function readWindowsProcessArgsViaWmic(pid: number): string[] | null { + const wmic = spawnSync( + "wmic", + ["process", "where", `ProcessId=${pid}`, "get", "CommandLine", "/value"], + { + encoding: "utf8", + timeout: WINDOWS_GATEWAY_DISCOVERY_TIMEOUT_MS, + windowsHide: true, + }, + ); + if (wmic.error || wmic.status !== 0) { + return null; + } + const command = extractWindowsCommandLine(wmic.stdout); + return command ? parseCmdScriptCommandLine(command) : null; +} + +function readWindowsListeningPidsViaPowerShell(port: number): number[] | null { + const ps = spawnSync( + "powershell", + [ + "-NoProfile", + "-Command", + `(Get-NetTCPConnection -LocalPort ${port} -State Listen -ErrorAction SilentlyContinue | Select-Object -ExpandProperty OwningProcess)`, + ], + { + encoding: "utf8", + timeout: WINDOWS_GATEWAY_DISCOVERY_TIMEOUT_MS, + windowsHide: true, + }, + ); + if (ps.error || ps.status !== 0) { + return null; + } + return ps.stdout + .split(/\r?\n/) + .map((line) => Number.parseInt(line.trim(), 10)) + .filter((pid) => Number.isFinite(pid) && pid > 0); +} + +function readWindowsListeningPidsViaNetstat(port: number): number[] { + const netstat = spawnSync("netstat", ["-ano", "-p", "tcp"], { + encoding: "utf8", + timeout: WINDOWS_GATEWAY_DISCOVERY_TIMEOUT_MS, + windowsHide: true, + }); + if (netstat.error || netstat.status !== 0) { + return []; + } + const pids = new Set(); + for (const line of netstat.stdout.split(/\r?\n/)) { + const 
match = line.match(/^\s*TCP\s+(\S+):(\d+)\s+\S+\s+LISTENING\s+(\d+)\s*$/i); + if (!match) { + continue; + } + const parsedPort = Number.parseInt(match[2] ?? "", 10); + const pid = Number.parseInt(match[3] ?? "", 10); + if (parsedPort === port && Number.isFinite(pid) && pid > 0) { + pids.add(pid); + } + } + return [...pids]; +} + +function readWindowsListeningPidsOnPortSync(port: number): number[] { + return readWindowsListeningPidsViaPowerShell(port) ?? readWindowsListeningPidsViaNetstat(port); +} + +export function readGatewayProcessArgsSync(pid: number): string[] | null { + if (process.platform === "linux") { + try { + return parseProcCmdline(fsSync.readFileSync(`/proc/${pid}/cmdline`, "utf8")); + } catch { + return null; + } + } + if (process.platform === "darwin") { + const ps = spawnSync("ps", ["-o", "command=", "-p", String(pid)], { + encoding: "utf8", + timeout: 1000, + }); + if (ps.error || ps.status !== 0) { + return null; + } + const command = ps.stdout.trim(); + return command ? command.split(/\s+/) : null; + } + if (process.platform === "win32") { + return readWindowsProcessArgsViaPowerShell(pid) ?? readWindowsProcessArgsViaWmic(pid); + } + return null; +} + +export function signalVerifiedGatewayPidSync(pid: number, signal: "SIGTERM" | "SIGUSR1"): void { + const args = readGatewayProcessArgsSync(pid); + if (!args || !isGatewayArgv(args, { allowGatewayBinary: true })) { + throw new Error(`refusing to signal non-gateway process pid ${pid}`); + } + process.kill(pid, signal); +} + +export function findVerifiedGatewayListenerPidsOnPortSync(port: number): number[] { + const rawPids = + process.platform === "win32" + ? 
readWindowsListeningPidsOnPortSync(port) + : findUnixGatewayPidsOnPortSync(port); + + return Array.from(new Set(rawPids)) + .filter((pid): pid is number => Number.isFinite(pid) && pid > 0 && pid !== process.pid) + .filter((pid) => { + const args = readGatewayProcessArgsSync(pid); + return args != null && isGatewayArgv(args, { allowGatewayBinary: true }); + }); +} + +export function formatGatewayPidList(pids: number[]): string { + return pids.join(", "); +} diff --git a/src/infra/gemini-auth.test.ts b/src/infra/gemini-auth.test.ts new file mode 100644 index 00000000000..6e496f75ec2 --- /dev/null +++ b/src/infra/gemini-auth.test.ts @@ -0,0 +1,34 @@ +import { describe, expect, it } from "vitest"; +import { parseGeminiAuth } from "./gemini-auth.js"; + +describe("parseGeminiAuth", () => { + it("returns bearer auth for OAuth JSON tokens", () => { + expect(parseGeminiAuth('{"token":"oauth-token","projectId":"demo"}')).toEqual({ + headers: { + Authorization: "Bearer oauth-token", + "Content-Type": "application/json", + }, + }); + }); + + it("falls back to API key auth for invalid or unusable OAuth payloads", () => { + expect(parseGeminiAuth('{"token":"","projectId":"demo"}')).toEqual({ + headers: { + "x-goog-api-key": '{"token":"","projectId":"demo"}', + "Content-Type": "application/json", + }, + }); + expect(parseGeminiAuth("{not-json}")).toEqual({ + headers: { + "x-goog-api-key": "{not-json}", + "Content-Type": "application/json", + }, + }); + expect(parseGeminiAuth(' {"token":"oauth-token"}')).toEqual({ + headers: { + "x-goog-api-key": ' {"token":"oauth-token"}', + "Content-Type": "application/json", + }, + }); + }); +}); diff --git a/src/infra/git-root.test.ts b/src/infra/git-root.test.ts index ed313ac9f0d..7f5d8e95eb5 100644 --- a/src/infra/git-root.test.ts +++ b/src/infra/git-root.test.ts @@ -9,6 +9,15 @@ async function makeTempDir(label: string): Promise { } describe("git-root", () => { + it("finds git root when starting at the repo root itself", async () => { + 
const temp = await makeTempDir("git-root-self"); + const repoRoot = path.join(temp, "repo"); + await fs.mkdir(path.join(repoRoot, ".git"), { recursive: true }); + + expect(findGitRoot(repoRoot)).toBe(repoRoot); + expect(resolveGitHeadPath(repoRoot)).toBe(path.join(repoRoot, ".git", "HEAD")); + }); + it("finds git root and HEAD path when .git is a directory", async () => { const temp = await makeTempDir("git-root-dir"); const repoRoot = path.join(temp, "repo"); @@ -56,4 +65,15 @@ describe("git-root", () => { expect(findGitRoot(nested, { maxDepth: 2 })).toBeNull(); expect(resolveGitHeadPath(nested, { maxDepth: 2 })).toBeNull(); }); + + it("returns null for HEAD lookup when only an invalid .git file exists", async () => { + const temp = await makeTempDir("git-root-invalid-only"); + const repoRoot = path.join(temp, "repo"); + const nested = path.join(repoRoot, "nested"); + await fs.mkdir(nested, { recursive: true }); + await fs.writeFile(path.join(repoRoot, ".git"), "not-a-gitdir-pointer\n", "utf-8"); + + expect(findGitRoot(nested)).toBe(repoRoot); + expect(resolveGitHeadPath(nested)).toBeNull(); + }); }); diff --git a/src/infra/hardlink-guards.test.ts b/src/infra/hardlink-guards.test.ts new file mode 100644 index 00000000000..e96d826c1d8 --- /dev/null +++ b/src/infra/hardlink-guards.test.ts @@ -0,0 +1,67 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it, vi } from "vitest"; +import { withTempDir } from "../test-helpers/temp-dir.js"; +import { assertNoHardlinkedFinalPath } from "./hardlink-guards.js"; + +describe("assertNoHardlinkedFinalPath", () => { + it("allows missing paths, directories, and explicit unlink opt-in", async () => { + await withTempDir({ prefix: "openclaw-hardlink-guards-" }, async (root) => { + const dirPath = path.join(root, "dir"); + await fs.mkdir(dirPath); + + await expect( + assertNoHardlinkedFinalPath({ + filePath: path.join(root, "missing.txt"), + root, + 
boundaryLabel: "workspace", + }), + ).resolves.toBeUndefined(); + + await expect( + assertNoHardlinkedFinalPath({ + filePath: dirPath, + root, + boundaryLabel: "workspace", + }), + ).resolves.toBeUndefined(); + + const source = path.join(root, "source.txt"); + const linked = path.join(root, "linked.txt"); + await fs.writeFile(source, "hello", "utf8"); + await fs.link(source, linked); + + await expect( + assertNoHardlinkedFinalPath({ + filePath: linked, + root, + boundaryLabel: "workspace", + allowFinalHardlinkForUnlink: true, + }), + ).resolves.toBeUndefined(); + }); + }); + + it("rejects hardlinked files and shortens home-relative paths in the error", async () => { + await withTempDir({ prefix: "openclaw-hardlink-guards-" }, async (root) => { + const source = path.join(root, "source.txt"); + const linked = path.join(root, "linked.txt"); + await fs.writeFile(source, "hello", "utf8"); + await fs.link(source, linked); + const homedirSpy = vi.spyOn(os, "homedir").mockReturnValue(root); + + try { + await expect( + assertNoHardlinkedFinalPath({ + filePath: linked, + root, + boundaryLabel: "workspace", + }), + ).rejects.toThrow("Hardlinked path is not allowed under workspace (~): ~/linked.txt"); + } finally { + homedirSpy.mockRestore(); + } + }); + }); +}); diff --git a/src/infra/heartbeat-events-filter.test.ts b/src/infra/heartbeat-events-filter.test.ts index dab2250dd0e..9cff6652537 100644 --- a/src/infra/heartbeat-events-filter.test.ts +++ b/src/infra/heartbeat-events-filter.test.ts @@ -1,21 +1,93 @@ import { describe, expect, it } from "vitest"; -import { buildCronEventPrompt, buildExecEventPrompt } from "./heartbeat-events-filter.js"; +import { + buildCronEventPrompt, + buildExecEventPrompt, + isCronSystemEvent, + isExecCompletionEvent, +} from "./heartbeat-events-filter.js"; describe("heartbeat event prompts", () => { - it("builds user-relay cron prompt by default", () => { - const prompt = buildCronEventPrompt(["Cron: rotate logs"]); - 
expect(prompt).toContain("Please relay this reminder to the user"); + it.each([ + { + name: "builds user-relay cron prompt by default", + events: ["Cron: rotate logs"], + expected: ["Cron: rotate logs", "Please relay this reminder to the user"], + unexpected: ["Handle this reminder internally", "Reply HEARTBEAT_OK."], + }, + { + name: "builds internal-only cron prompt when delivery is disabled", + events: ["Cron: rotate logs"], + opts: { deliverToUser: false }, + expected: ["Cron: rotate logs", "Handle this reminder internally"], + unexpected: ["Please relay this reminder to the user"], + }, + { + name: "falls back to bare heartbeat reply when cron content is empty", + events: ["", " "], + expected: ["Reply HEARTBEAT_OK."], + unexpected: ["Handle this reminder internally"], + }, + { + name: "uses internal empty-content fallback when delivery is disabled", + events: ["", " "], + opts: { deliverToUser: false }, + expected: ["Handle this internally", "HEARTBEAT_OK when nothing needs user-facing follow-up"], + unexpected: ["Please relay this reminder to the user"], + }, + ])("$name", ({ events, opts, expected, unexpected }) => { + const prompt = buildCronEventPrompt(events, opts); + for (const part of expected) { + expect(prompt).toContain(part); + } + for (const part of unexpected) { + expect(prompt).not.toContain(part); + } }); - it("builds internal-only cron prompt when delivery is disabled", () => { - const prompt = buildCronEventPrompt(["Cron: rotate logs"], { deliverToUser: false }); - expect(prompt).toContain("Handle this reminder internally"); - expect(prompt).not.toContain("Please relay this reminder to the user"); - }); - - it("builds internal-only exec prompt when delivery is disabled", () => { - const prompt = buildExecEventPrompt({ deliverToUser: false }); - expect(prompt).toContain("Handle the result internally"); - expect(prompt).not.toContain("Please relay the command output to the user"); + it.each([ + { + name: "builds user-relay exec prompt by 
default", + opts: undefined, + expected: ["Please relay the command output to the user", "If it failed"], + unexpected: ["Handle the result internally"], + }, + { + name: "builds internal-only exec prompt when delivery is disabled", + opts: { deliverToUser: false }, + expected: ["Handle the result internally"], + unexpected: ["Please relay the command output to the user"], + }, + ])("$name", ({ opts, expected, unexpected }) => { + const prompt = buildExecEventPrompt(opts); + for (const part of expected) { + expect(prompt).toContain(part); + } + for (const part of unexpected) { + expect(prompt).not.toContain(part); + } + }); +}); + +describe("heartbeat event classification", () => { + it.each([ + { value: "exec finished: ok", expected: true }, + { value: "Exec Finished: failed", expected: true }, + { value: "cron finished", expected: false }, + ])("classifies exec completion events for %j", ({ value, expected }) => { + expect(isExecCompletionEvent(value)).toBe(expected); + }); + + it.each([ + { value: "Cron: rotate logs", expected: true }, + { value: " Cron: rotate logs ", expected: true }, + { value: "", expected: false }, + { value: " ", expected: false }, + { value: "HEARTBEAT_OK", expected: false }, + { value: "heartbeat_ok: already handled", expected: false }, + { value: "heartbeat poll: noop", expected: false }, + { value: "heartbeat wake: noop", expected: false }, + { value: "exec finished: ok", expected: false }, + ])("classifies cron system events for %j", ({ value, expected }) => { + expect(isCronSystemEvent(value)).toBe(expected); }); }); diff --git a/src/infra/heartbeat-events.test.ts b/src/infra/heartbeat-events.test.ts new file mode 100644 index 00000000000..d1583f8080a --- /dev/null +++ b/src/infra/heartbeat-events.test.ts @@ -0,0 +1,59 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { + emitHeartbeatEvent, + getLastHeartbeatEvent, + onHeartbeatEvent, + resolveIndicatorType, +} from "./heartbeat-events.js"; + 
+describe("resolveIndicatorType", () => { + it("maps heartbeat statuses to indicator types", () => { + expect(resolveIndicatorType("ok-empty")).toBe("ok"); + expect(resolveIndicatorType("ok-token")).toBe("ok"); + expect(resolveIndicatorType("sent")).toBe("alert"); + expect(resolveIndicatorType("failed")).toBe("error"); + expect(resolveIndicatorType("skipped")).toBeUndefined(); + }); +}); + +describe("heartbeat events", () => { + beforeEach(() => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-01-09T12:00:00Z")); + }); + + afterEach(() => { + vi.useRealTimers(); + }); + + it("stores the last event and timestamps emitted payloads", () => { + emitHeartbeatEvent({ status: "sent", to: "+123", preview: "ping" }); + + expect(getLastHeartbeatEvent()).toEqual({ + ts: 1767960000000, + status: "sent", + to: "+123", + preview: "ping", + }); + }); + + it("delivers events to listeners, isolates listener failures, and supports unsubscribe", () => { + const seen: string[] = []; + const unsubscribeFirst = onHeartbeatEvent((evt) => { + seen.push(`first:${evt.status}`); + }); + onHeartbeatEvent(() => { + throw new Error("boom"); + }); + const unsubscribeThird = onHeartbeatEvent((evt) => { + seen.push(`third:${evt.status}`); + }); + + emitHeartbeatEvent({ status: "ok-empty" }); + unsubscribeFirst(); + unsubscribeThird(); + emitHeartbeatEvent({ status: "failed" }); + + expect(seen).toEqual(["first:ok-empty", "third:ok-empty"]); + }); +}); diff --git a/src/infra/heartbeat-reason.test.ts b/src/infra/heartbeat-reason.test.ts index 69d23e3219d..ab0fe94ec06 100644 --- a/src/infra/heartbeat-reason.test.ts +++ b/src/infra/heartbeat-reason.test.ts @@ -7,48 +7,54 @@ import { } from "./heartbeat-reason.js"; describe("heartbeat-reason", () => { - it("normalizes wake reasons with trim + requested fallback", () => { - expect(normalizeHeartbeatWakeReason(" cron:job-1 ")).toBe("cron:job-1"); - expect(normalizeHeartbeatWakeReason(" ")).toBe("requested"); - 
expect(normalizeHeartbeatWakeReason(undefined)).toBe("requested"); + it.each([ + { value: " cron:job-1 ", expected: "cron:job-1" }, + { value: " ", expected: "requested" }, + { value: undefined, expected: "requested" }, + ])("normalizes wake reasons for %j", ({ value, expected }) => { + expect(normalizeHeartbeatWakeReason(value)).toBe(expected); }); - it("classifies known reason kinds", () => { - expect(resolveHeartbeatReasonKind("retry")).toBe("retry"); - expect(resolveHeartbeatReasonKind("interval")).toBe("interval"); - expect(resolveHeartbeatReasonKind("manual")).toBe("manual"); - expect(resolveHeartbeatReasonKind("exec-event")).toBe("exec-event"); - expect(resolveHeartbeatReasonKind("wake")).toBe("wake"); - expect(resolveHeartbeatReasonKind("acp:spawn:stream")).toBe("wake"); - expect(resolveHeartbeatReasonKind("cron:job-1")).toBe("cron"); - expect(resolveHeartbeatReasonKind("hook:wake")).toBe("hook"); - expect(resolveHeartbeatReasonKind(" hook:wake ")).toBe("hook"); + it.each([ + { value: "retry", expected: "retry" }, + { value: "interval", expected: "interval" }, + { value: "manual", expected: "manual" }, + { value: "exec-event", expected: "exec-event" }, + { value: "wake", expected: "wake" }, + { value: "acp:spawn:stream", expected: "wake" }, + { value: "acp:spawn:", expected: "wake" }, + { value: "cron:job-1", expected: "cron" }, + { value: "hook:wake", expected: "hook" }, + { value: " hook:wake ", expected: "hook" }, + { value: "requested", expected: "other" }, + { value: "slow", expected: "other" }, + { value: "", expected: "other" }, + { value: undefined, expected: "other" }, + ])("classifies reason kinds for %j", ({ value, expected }) => { + expect(resolveHeartbeatReasonKind(value)).toBe(expected); }); - it("classifies unknown reasons as other", () => { - expect(resolveHeartbeatReasonKind("requested")).toBe("other"); - expect(resolveHeartbeatReasonKind("slow")).toBe("other"); - expect(resolveHeartbeatReasonKind("")).toBe("other"); - 
expect(resolveHeartbeatReasonKind(undefined)).toBe("other"); + it.each([ + { value: "exec-event", expected: true }, + { value: "cron:job-1", expected: true }, + { value: "wake", expected: true }, + { value: "acp:spawn:stream", expected: true }, + { value: "hook:gmail:sync", expected: true }, + { value: "interval", expected: false }, + { value: "manual", expected: false }, + { value: "other", expected: false }, + ])("matches event-driven behavior for %j", ({ value, expected }) => { + expect(isHeartbeatEventDrivenReason(value)).toBe(expected); }); - it("matches event-driven behavior used by heartbeat preflight", () => { - expect(isHeartbeatEventDrivenReason("exec-event")).toBe(true); - expect(isHeartbeatEventDrivenReason("cron:job-1")).toBe(true); - expect(isHeartbeatEventDrivenReason("wake")).toBe(true); - expect(isHeartbeatEventDrivenReason("acp:spawn:stream")).toBe(true); - expect(isHeartbeatEventDrivenReason("hook:gmail:sync")).toBe(true); - expect(isHeartbeatEventDrivenReason("interval")).toBe(false); - expect(isHeartbeatEventDrivenReason("manual")).toBe(false); - expect(isHeartbeatEventDrivenReason("other")).toBe(false); - }); - - it("matches action-priority wake behavior", () => { - expect(isHeartbeatActionWakeReason("manual")).toBe(true); - expect(isHeartbeatActionWakeReason("exec-event")).toBe(true); - expect(isHeartbeatActionWakeReason("hook:wake")).toBe(true); - expect(isHeartbeatActionWakeReason("interval")).toBe(false); - expect(isHeartbeatActionWakeReason("cron:job-1")).toBe(false); - expect(isHeartbeatActionWakeReason("retry")).toBe(false); + it.each([ + { value: "manual", expected: true }, + { value: "exec-event", expected: true }, + { value: "hook:wake", expected: true }, + { value: "interval", expected: false }, + { value: "cron:job-1", expected: false }, + { value: "retry", expected: false }, + ])("matches action-priority wake behavior for %j", ({ value, expected }) => { + expect(isHeartbeatActionWakeReason(value)).toBe(expected); }); }); diff 
--git a/src/infra/home-dir.test.ts b/src/infra/home-dir.test.ts index f1f721cd7fe..3096dd1b0b4 100644 --- a/src/infra/home-dir.test.ts +++ b/src/infra/home-dir.test.ts @@ -3,37 +3,64 @@ import { describe, expect, it } from "vitest"; import { expandHomePrefix, resolveEffectiveHomeDir, resolveRequiredHomeDir } from "./home-dir.js"; describe("resolveEffectiveHomeDir", () => { - it("prefers OPENCLAW_HOME over HOME and USERPROFILE", () => { - const env = { - OPENCLAW_HOME: "/srv/openclaw-home", - HOME: "/home/other", - USERPROFILE: "C:/Users/other", - } as NodeJS.ProcessEnv; - - expect(resolveEffectiveHomeDir(env, () => "/fallback")).toBe( - path.resolve("/srv/openclaw-home"), - ); + it.each([ + { + name: "prefers OPENCLAW_HOME over HOME and USERPROFILE", + env: { + OPENCLAW_HOME: " /srv/openclaw-home ", + HOME: "/home/other", + USERPROFILE: "C:/Users/other", + } as NodeJS.ProcessEnv, + homedir: () => "/fallback", + expected: "/srv/openclaw-home", + }, + { + name: "falls back to HOME", + env: { HOME: " /home/alice " } as NodeJS.ProcessEnv, + expected: "/home/alice", + }, + { + name: "falls back to USERPROFILE when HOME is blank", + env: { + HOME: " ", + USERPROFILE: " C:/Users/alice ", + } as NodeJS.ProcessEnv, + expected: "C:/Users/alice", + }, + { + name: "falls back to homedir when env values are blank", + env: { + OPENCLAW_HOME: " ", + HOME: " ", + USERPROFILE: "\t", + } as NodeJS.ProcessEnv, + homedir: () => " /fallback ", + expected: "/fallback", + }, + ])("$name", ({ env, homedir, expected }) => { + expect(resolveEffectiveHomeDir(env, homedir)).toBe(path.resolve(expected)); }); - it("falls back to HOME then USERPROFILE then homedir", () => { - expect(resolveEffectiveHomeDir({ HOME: "/home/alice" } as NodeJS.ProcessEnv)).toBe( - path.resolve("/home/alice"), - ); - expect(resolveEffectiveHomeDir({ USERPROFILE: "C:/Users/alice" } as NodeJS.ProcessEnv)).toBe( - path.resolve("C:/Users/alice"), - ); - expect(resolveEffectiveHomeDir({} as NodeJS.ProcessEnv, () => 
"/fallback")).toBe( - path.resolve("/fallback"), - ); - }); - - it("expands OPENCLAW_HOME when set to ~", () => { - const env = { - OPENCLAW_HOME: "~/svc", - HOME: "/home/alice", - } as NodeJS.ProcessEnv; - - expect(resolveEffectiveHomeDir(env)).toBe(path.resolve("/home/alice/svc")); + it.each([ + { + name: "expands ~/ using HOME", + env: { + OPENCLAW_HOME: "~/svc", + HOME: "/home/alice", + } as NodeJS.ProcessEnv, + expected: "/home/alice/svc", + }, + { + name: "expands ~\\\\ using USERPROFILE", + env: { + OPENCLAW_HOME: "~\\svc", + HOME: " ", + USERPROFILE: "C:/Users/alice", + } as NodeJS.ProcessEnv, + expected: "C:/Users/alice\\svc", + }, + ])("$name", ({ env, expected }) => { + expect(resolveEffectiveHomeDir(env)).toBe(path.resolve(expected)); }); }); @@ -64,14 +91,35 @@ describe("resolveRequiredHomeDir", () => { }); describe("expandHomePrefix", () => { - it("expands tilde using effective home", () => { - const value = expandHomePrefix("~/x", { - env: { OPENCLAW_HOME: "/srv/openclaw-home" } as NodeJS.ProcessEnv, - }); - expect(value).toBe(`${path.resolve("/srv/openclaw-home")}/x`); - }); - - it("keeps non-tilde values unchanged", () => { - expect(expandHomePrefix("/tmp/x")).toBe("/tmp/x"); + it.each([ + { + name: "expands ~/ using effective home", + input: "~/x", + opts: { + env: { OPENCLAW_HOME: "/srv/openclaw-home" } as NodeJS.ProcessEnv, + }, + expected: `${path.resolve("/srv/openclaw-home")}/x`, + }, + { + name: "expands exact ~ using explicit home", + input: "~", + opts: { home: " /srv/openclaw-home " }, + expected: path.resolve("/srv/openclaw-home"), + }, + { + name: "expands ~\\\\ using resolved env home", + input: "~\\x", + opts: { + env: { HOME: "/home/alice" } as NodeJS.ProcessEnv, + }, + expected: `${path.resolve("/home/alice")}\\x`, + }, + { + name: "keeps non-tilde values unchanged", + input: "/tmp/x", + expected: "/tmp/x", + }, + ])("$name", ({ input, opts, expected }) => { + expect(expandHomePrefix(input, opts)).toBe(expected); }); }); diff --git 
a/src/infra/host-env-security.test.ts b/src/infra/host-env-security.test.ts index 08f1a3d65fb..acb756b62a2 100644 --- a/src/infra/host-env-security.test.ts +++ b/src/infra/host-env-security.test.ts @@ -12,6 +12,30 @@ import { } from "./host-env-security.js"; import { OPENCLAW_CLI_ENV_VALUE } from "./openclaw-exec-env.js"; +function getSystemGitPath() { + if (process.platform === "win32") { + return null; + } + const gitPath = "/usr/bin/git"; + return fs.existsSync(gitPath) ? gitPath : null; +} + +function clearMarker(marker: string) { + try { + fs.unlinkSync(marker); + } catch { + // no-op + } +} + +async function runGitLsRemote(gitPath: string, target: string, env: NodeJS.ProcessEnv) { + await new Promise((resolve) => { + const child = spawn(gitPath, ["ls-remote", target], { env, stdio: "ignore" }); + child.once("error", () => resolve()); + child.once("close", () => resolve()); + }); +} + describe("isDangerousHostEnvVarName", () => { it("matches dangerous keys and prefixes case-insensitively", () => { expect(isDangerousHostEnvVarName("BASH_ENV")).toBe(true); @@ -120,6 +144,39 @@ describe("sanitizeHostExecEnv", () => { expect(env[" BAD KEY"]).toBeUndefined(); expect(env["NOT-PORTABLE"]).toBeUndefined(); }); + + it("can allow PATH overrides when explicitly opted out of blocking", () => { + const env = sanitizeHostExecEnv({ + baseEnv: { + PATH: "/usr/bin:/bin", + }, + overrides: { + PATH: "/custom/bin", + }, + blockPathOverrides: false, + }); + + expect(env.PATH).toBe("/custom/bin"); + expect(env.OPENCLAW_CLI).toBe(OPENCLAW_CLI_ENV_VALUE); + }); + + it("drops non-string inherited values and non-portable inherited keys", () => { + const env = sanitizeHostExecEnv({ + baseEnv: { + PATH: "/usr/bin:/bin", + GOOD: "1", + // oxlint-disable-next-line typescript/no-explicit-any + BAD_NUMBER: 1 as any, + "NOT-PORTABLE": "x", + }, + }); + + expect(env).toEqual({ + OPENCLAW_CLI: OPENCLAW_CLI_ENV_VALUE, + PATH: "/usr/bin:/bin", + GOOD: "1", + }); + }); }); 
describe("isDangerousHostEnvOverrideVarName", () => { @@ -174,6 +231,33 @@ describe("sanitizeSystemRunEnvOverrides", () => { LC_ALL: "C", }); }); + + it("returns undefined when no shell-wrapper overrides survive", () => { + expect( + sanitizeSystemRunEnvOverrides({ + shellWrapper: true, + overrides: { + TOKEN: "abc", + }, + }), + ).toBeUndefined(); + expect(sanitizeSystemRunEnvOverrides({ shellWrapper: true })).toBeUndefined(); + }); + + it("keeps allowlisted shell-wrapper overrides case-insensitively", () => { + expect( + sanitizeSystemRunEnvOverrides({ + shellWrapper: true, + overrides: { + lang: "C", + ColorTerm: "truecolor", + }, + }), + ).toEqual({ + lang: "C", + ColorTerm: "truecolor", + }); + }); }); describe("shell wrapper exploit regression", () => { @@ -215,11 +299,8 @@ describe("shell wrapper exploit regression", () => { describe("git env exploit regression", () => { it("blocks inherited GIT_EXEC_PATH so git cannot execute helper payloads", async () => { - if (process.platform === "win32") { - return; - } - const gitPath = "/usr/bin/git"; - if (!fs.existsSync(gitPath)) { + const gitPath = getSystemGitPath(); + if (!gitPath) { return; } @@ -232,11 +313,7 @@ describe("git env exploit regression", () => { `openclaw-git-exec-path-marker-${process.pid}-${Date.now()}`, ); try { - try { - fs.unlinkSync(marker); - } catch { - // no-op - } + clearMarker(marker); fs.writeFileSync(helperPath, `#!/bin/sh\ntouch ${JSON.stringify(marker)}\nexit 1\n`, "utf8"); fs.chmodSync(helperPath, 0o755); @@ -247,24 +324,16 @@ describe("git env exploit regression", () => { GIT_TERMINAL_PROMPT: "0", }; - await new Promise((resolve) => { - const child = spawn(gitPath, ["ls-remote", target], { env: unsafeEnv, stdio: "ignore" }); - child.once("error", () => resolve()); - child.once("close", () => resolve()); - }); + await runGitLsRemote(gitPath, target, unsafeEnv); expect(fs.existsSync(marker)).toBe(true); - fs.unlinkSync(marker); + clearMarker(marker); const safeEnv = 
sanitizeHostExecEnv({ baseEnv: unsafeEnv, }); - await new Promise((resolve) => { - const child = spawn(gitPath, ["ls-remote", target], { env: safeEnv, stdio: "ignore" }); - child.once("error", () => resolve()); - child.once("close", () => resolve()); - }); + await runGitLsRemote(gitPath, target, safeEnv); expect(fs.existsSync(marker)).toBe(false); } finally { @@ -274,20 +343,13 @@ describe("git env exploit regression", () => { }); it("blocks GIT_SSH_COMMAND override so git cannot execute helper payloads", async () => { - if (process.platform === "win32") { - return; - } - const gitPath = "/usr/bin/git"; - if (!fs.existsSync(gitPath)) { + const gitPath = getSystemGitPath(); + if (!gitPath) { return; } const marker = path.join(os.tmpdir(), `openclaw-git-ssh-command-${process.pid}-${Date.now()}`); - try { - fs.unlinkSync(marker); - } catch { - // no-op - } + clearMarker(marker); const target = "ssh://127.0.0.1:1/does-not-matter"; const exploitValue = `touch ${JSON.stringify(marker)}; false`; @@ -301,14 +363,10 @@ describe("git env exploit regression", () => { GIT_SSH_COMMAND: exploitValue, }; - await new Promise((resolve) => { - const child = spawn(gitPath, ["ls-remote", target], { env: unsafeEnv, stdio: "ignore" }); - child.once("error", () => resolve()); - child.once("close", () => resolve()); - }); + await runGitLsRemote(gitPath, target, unsafeEnv); expect(fs.existsSync(marker)).toBe(true); - fs.unlinkSync(marker); + clearMarker(marker); const safeEnv = sanitizeHostExecEnv({ baseEnv, @@ -317,11 +375,7 @@ describe("git env exploit regression", () => { }, }); - await new Promise((resolve) => { - const child = spawn(gitPath, ["ls-remote", target], { env: safeEnv, stdio: "ignore" }); - child.once("error", () => resolve()); - child.once("close", () => resolve()); - }); + await runGitLsRemote(gitPath, target, safeEnv); expect(fs.existsSync(marker)).toBe(false); }); diff --git a/src/infra/http-body.test.ts b/src/infra/http-body.test.ts index bfb14b92dca..b80169a0602 
100644 --- a/src/infra/http-body.test.ts +++ b/src/infra/http-body.test.ts @@ -19,6 +19,49 @@ async function waitForMicrotaskTurn(): Promise { await new Promise((resolve) => queueMicrotask(resolve)); } +async function expectReadPayloadTooLarge(params: { + chunks?: string[]; + headers?: Record; + maxBytes: number; +}) { + const req = createMockRequest({ + chunks: params.chunks, + headers: params.headers, + emitEnd: false, + }); + await expect(readRequestBodyWithLimit(req, { maxBytes: params.maxBytes })).rejects.toMatchObject({ + message: "PayloadTooLarge", + }); + await waitForMicrotaskTurn(); + expect(req.__unhandledDestroyError).toBeUndefined(); +} + +async function expectGuardPayloadTooLarge(params: { + chunks?: string[]; + headers?: Record; + maxBytes: number; + responseFormat?: "json" | "text"; + responseText?: { PAYLOAD_TOO_LARGE?: string }; +}) { + const req = createMockRequest({ + chunks: params.chunks, + headers: params.headers, + emitEnd: false, + }); + const res = createMockServerResponse(); + const guard = installRequestBodyLimitGuard(req, res, { + maxBytes: params.maxBytes, + ...(params.responseFormat ? { responseFormat: params.responseFormat } : {}), + ...(params.responseText ? 
{ responseText: params.responseText } : {}), + }); + await waitForMicrotaskTurn(); + expect(guard.isTripped()).toBe(true); + expect(guard.code()).toBe("PAYLOAD_TOO_LARGE"); + expect(res.statusCode).toBe(413); + expect(req.__unhandledDestroyError).toBeUndefined(); + return { req, res, guard }; +} + function createMockRequest(params: { chunks?: string[]; headers?: Record; @@ -66,12 +109,19 @@ describe("http body limits", () => { await expect(readRequestBodyWithLimit(req, { maxBytes: 1024 })).resolves.toBe('{"ok":true}'); }); - it("rejects oversized body", async () => { - const req = createMockRequest({ chunks: ["x".repeat(512)] }); - await expect(readRequestBodyWithLimit(req, { maxBytes: 64 })).rejects.toMatchObject({ - message: "PayloadTooLarge", - }); - expect(req.__unhandledDestroyError).toBeUndefined(); + it.each([ + { + name: "rejects oversized streamed body", + chunks: ["x".repeat(512)], + maxBytes: 64, + }, + { + name: "declared oversized content-length does not emit unhandled error", + headers: { "content-length": "9999" }, + maxBytes: 128, + }, + ])("$name", async ({ chunks, headers, maxBytes }) => { + await expectReadPayloadTooLarge({ chunks, headers, maxBytes }); }); it("returns json parse error when body is invalid", async () => { @@ -83,34 +133,49 @@ describe("http body limits", () => { } }); + it("returns empty object for an empty body by default", async () => { + const req = createMockRequest({ chunks: [" "] }); + const result = await readJsonBodyWithLimit(req, { maxBytes: 1024 }); + expect(result).toEqual({ ok: true, value: {} }); + }); + it("returns payload-too-large for json body", async () => { const req = createMockRequest({ chunks: ["x".repeat(1024)] }); const result = await readJsonBodyWithLimit(req, { maxBytes: 10 }); expect(result).toEqual({ ok: false, code: "PAYLOAD_TOO_LARGE", error: "Payload too large" }); }); - it("guard rejects oversized declared content-length", () => { - const req = createMockRequest({ + it.each([ + { + name: "guard 
rejects oversized declared content-length", headers: { "content-length": "9999" }, - emitEnd: false, + maxBytes: 128, + expectedBody: '{"error":"Payload too large"}', + }, + { + name: "guard rejects streamed oversized body", + chunks: ["small", "x".repeat(256)], + maxBytes: 128, + responseFormat: "text" as const, + expectedBody: "Payload too large", + }, + { + name: "guard uses custom response text for payload-too-large", + chunks: ["small", "x".repeat(256)], + maxBytes: 128, + responseFormat: "text" as const, + responseText: { PAYLOAD_TOO_LARGE: "Too much" }, + expectedBody: "Too much", + }, + ])("$name", async ({ chunks, headers, maxBytes, responseFormat, responseText, expectedBody }) => { + const { res } = await expectGuardPayloadTooLarge({ + chunks, + headers, + maxBytes, + ...(responseFormat ? { responseFormat } : {}), + ...(responseText ? { responseText } : {}), }); - const res = createMockServerResponse(); - const guard = installRequestBodyLimitGuard(req, res, { maxBytes: 128 }); - expect(guard.isTripped()).toBe(true); - expect(guard.code()).toBe("PAYLOAD_TOO_LARGE"); - expect(res.statusCode).toBe(413); - }); - - it("guard rejects streamed oversized body", async () => { - const req = createMockRequest({ chunks: ["small", "x".repeat(256)], emitEnd: false }); - const res = createMockServerResponse(); - const guard = installRequestBodyLimitGuard(req, res, { maxBytes: 128, responseFormat: "text" }); - await waitForMicrotaskTurn(); - expect(guard.isTripped()).toBe(true); - expect(guard.code()).toBe("PAYLOAD_TOO_LARGE"); - expect(res.statusCode).toBe(413); - expect(res.body).toBe("Payload too large"); - expect(req.__unhandledDestroyError).toBeUndefined(); + expect(res.body).toBe(expectedBody); }); it("timeout surfaces typed error when timeoutMs is clamped", async () => { @@ -123,29 +188,20 @@ describe("http body limits", () => { }); it("guard clamps invalid maxBytes to one byte", async () => { - const req = createMockRequest({ chunks: ["ab"], emitEnd: false }); - 
const res = createMockServerResponse(); - const guard = installRequestBodyLimitGuard(req, res, { + const { res } = await expectGuardPayloadTooLarge({ + chunks: ["ab"], maxBytes: Number.NaN, responseFormat: "text", }); - await waitForMicrotaskTurn(); - expect(guard.isTripped()).toBe(true); - expect(guard.code()).toBe("PAYLOAD_TOO_LARGE"); - expect(res.statusCode).toBe(413); - expect(req.__unhandledDestroyError).toBeUndefined(); + expect(res.body).toBe("Payload too large"); }); - it("declared oversized content-length does not emit unhandled error", async () => { - const req = createMockRequest({ - headers: { "content-length": "9999" }, - emitEnd: false, - }); - await expect(readRequestBodyWithLimit(req, { maxBytes: 128 })).rejects.toMatchObject({ - message: "PayloadTooLarge", - }); - // Wait a tick for any async destroy(err) emission. - await waitForMicrotaskTurn(); - expect(req.__unhandledDestroyError).toBeUndefined(); + it("surfaces connection-closed as a typed limit error", async () => { + const req = createMockRequest({ emitEnd: false }); + const promise = readRequestBodyWithLimit(req, { maxBytes: 128 }); + queueMicrotask(() => req.emit("close")); + await expect(promise).rejects.toSatisfy((error: unknown) => + isRequestBodyLimitError(error, "CONNECTION_CLOSED"), + ); }); }); diff --git a/src/infra/infra-parsing.test.ts b/src/infra/infra-parsing.test.ts deleted file mode 100644 index 10590c96790..00000000000 --- a/src/infra/infra-parsing.test.ts +++ /dev/null @@ -1,166 +0,0 @@ -import { describe, expect, it } from "vitest"; -import type { OpenClawConfig } from "../config/config.js"; -import { isDiagnosticFlagEnabled, resolveDiagnosticFlags } from "./diagnostic-flags.js"; -import { isMainModule } from "./is-main.js"; -import { buildNodeShellCommand } from "./node-shell.js"; -import { parseSshTarget } from "./ssh-tunnel.js"; - -describe("infra parsing", () => { - describe("diagnostic flags", () => { - it("merges config + env flags", () => { - const cfg = { - 
diagnostics: { flags: ["telegram.http", "cache.*"] }, - } as OpenClawConfig; - const env = { - OPENCLAW_DIAGNOSTICS: "foo,bar", - } as NodeJS.ProcessEnv; - - const flags = resolveDiagnosticFlags(cfg, env); - expect(flags).toEqual(expect.arrayContaining(["telegram.http", "cache.*", "foo", "bar"])); - expect(isDiagnosticFlagEnabled("telegram.http", cfg, env)).toBe(true); - expect(isDiagnosticFlagEnabled("cache.hit", cfg, env)).toBe(true); - expect(isDiagnosticFlagEnabled("foo", cfg, env)).toBe(true); - }); - - it("treats env true as wildcard", () => { - const env = { OPENCLAW_DIAGNOSTICS: "1" } as NodeJS.ProcessEnv; - expect(isDiagnosticFlagEnabled("anything.here", undefined, env)).toBe(true); - }); - - it("treats env false as disabled", () => { - const env = { OPENCLAW_DIAGNOSTICS: "0" } as NodeJS.ProcessEnv; - expect(isDiagnosticFlagEnabled("telegram.http", undefined, env)).toBe(false); - }); - }); - - describe("isMainModule", () => { - it("returns true when argv[1] matches current file", () => { - expect( - isMainModule({ - currentFile: "/repo/dist/index.js", - argv: ["node", "/repo/dist/index.js"], - cwd: "/repo", - env: {}, - }), - ).toBe(true); - }); - - it("returns true under PM2 when pm_exec_path matches current file", () => { - expect( - isMainModule({ - currentFile: "/repo/dist/index.js", - argv: ["node", "/pm2/lib/ProcessContainerFork.js"], - cwd: "/repo", - env: { pm_exec_path: "/repo/dist/index.js", pm_id: "0" }, - }), - ).toBe(true); - }); - - it("returns true for dist/entry.js when launched via openclaw.mjs wrapper", () => { - expect( - isMainModule({ - currentFile: "/repo/dist/entry.js", - argv: ["node", "/repo/openclaw.mjs"], - cwd: "/repo", - env: {}, - wrapperEntryPairs: [{ wrapperBasename: "openclaw.mjs", entryBasename: "entry.js" }], - }), - ).toBe(true); - }); - - it("returns false for wrapper launches when wrapper pair is not configured", () => { - expect( - isMainModule({ - currentFile: "/repo/dist/entry.js", - argv: ["node", 
"/repo/openclaw.mjs"], - cwd: "/repo", - env: {}, - }), - ).toBe(false); - }); - - it("returns false when wrapper pair targets a different entry basename", () => { - expect( - isMainModule({ - currentFile: "/repo/dist/index.js", - argv: ["node", "/repo/openclaw.mjs"], - cwd: "/repo", - env: {}, - wrapperEntryPairs: [{ wrapperBasename: "openclaw.mjs", entryBasename: "entry.js" }], - }), - ).toBe(false); - }); - - it("returns false when running under PM2 but this module is imported", () => { - expect( - isMainModule({ - currentFile: "/repo/node_modules/openclaw/dist/index.js", - argv: ["node", "/repo/app.js"], - cwd: "/repo", - env: { pm_exec_path: "/repo/app.js", pm_id: "0" }, - }), - ).toBe(false); - }); - }); - - describe("buildNodeShellCommand", () => { - it("uses cmd.exe for win32", () => { - expect(buildNodeShellCommand("echo hi", "win32")).toEqual([ - "cmd.exe", - "/d", - "/s", - "/c", - "echo hi", - ]); - }); - - it("uses cmd.exe for windows labels", () => { - expect(buildNodeShellCommand("echo hi", "windows")).toEqual([ - "cmd.exe", - "/d", - "/s", - "/c", - "echo hi", - ]); - expect(buildNodeShellCommand("echo hi", "Windows 11")).toEqual([ - "cmd.exe", - "/d", - "/s", - "/c", - "echo hi", - ]); - }); - - it("uses /bin/sh for darwin", () => { - expect(buildNodeShellCommand("echo hi", "darwin")).toEqual(["/bin/sh", "-lc", "echo hi"]); - }); - - it("uses /bin/sh when platform missing", () => { - expect(buildNodeShellCommand("echo hi")).toEqual(["/bin/sh", "-lc", "echo hi"]); - }); - }); - - describe("parseSshTarget", () => { - it("parses user@host:port targets", () => { - expect(parseSshTarget("me@example.com:2222")).toEqual({ - user: "me", - host: "example.com", - port: 2222, - }); - }); - - it("parses host-only targets with default port", () => { - expect(parseSshTarget("example.com")).toEqual({ - user: undefined, - host: "example.com", - port: 22, - }); - }); - - it("rejects hostnames that start with '-'", () => { - expect(parseSshTarget("-V")).toBeNull(); 
- expect(parseSshTarget("me@-badhost")).toBeNull(); - expect(parseSshTarget("-oProxyCommand=echo")).toBeNull(); - }); - }); -}); diff --git a/src/infra/infra-runtime.test.ts b/src/infra/infra-runtime.test.ts index e7656de974f..2072f8f2da3 100644 --- a/src/infra/infra-runtime.test.ts +++ b/src/infra/infra-runtime.test.ts @@ -1,8 +1,5 @@ import os from "node:os"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; -import type { runExec } from "../process/exec.js"; -import type { RuntimeEnv } from "../runtime.js"; -import { ensureBinary } from "./binaries.js"; import { __testing, consumeGatewaySigusr1RestartAuthorization, @@ -13,7 +10,6 @@ import { setGatewaySigusr1RestartPolicy, setPreRestartDeferralCheck, } from "./restart.js"; -import { createTelegramRetryRunner } from "./retry-policy.js"; import { listTailnetAddresses } from "./tailnet.js"; describe("infra runtime", () => { @@ -32,56 +28,6 @@ describe("infra runtime", () => { }); } - describe("ensureBinary", () => { - it("passes through when binary exists", async () => { - const exec: typeof runExec = vi.fn().mockResolvedValue({ - stdout: "", - stderr: "", - }); - const runtime: RuntimeEnv = { - log: vi.fn(), - error: vi.fn(), - exit: vi.fn(), - }; - await ensureBinary("node", exec, runtime); - expect(exec).toHaveBeenCalledWith("which", ["node"]); - }); - - it("logs and exits when missing", async () => { - const exec: typeof runExec = vi.fn().mockRejectedValue(new Error("missing")); - const error = vi.fn(); - const exit = vi.fn(() => { - throw new Error("exit"); - }); - await expect(ensureBinary("ghost", exec, { log: vi.fn(), error, exit })).rejects.toThrow( - "exit", - ); - expect(error).toHaveBeenCalledWith("Missing required binary: ghost. 
Please install it."); - expect(exit).toHaveBeenCalledWith(1); - }); - }); - - describe("createTelegramRetryRunner", () => { - afterEach(() => { - vi.useRealTimers(); - }); - - it("retries when custom shouldRetry matches non-telegram error", async () => { - vi.useFakeTimers(); - const runner = createTelegramRetryRunner({ - retry: { attempts: 2, minDelayMs: 0, maxDelayMs: 0, jitter: 0 }, - shouldRetry: (err) => err instanceof Error && err.message === "boom", - }); - const fn = vi.fn().mockRejectedValueOnce(new Error("boom")).mockResolvedValue("ok"); - - const promise = runner(fn, "request"); - await vi.runAllTimersAsync(); - - await expect(promise).resolves.toBe("ok"); - expect(fn).toHaveBeenCalledTimes(2); - }); - }); - describe("restart authorization", () => { setupRestartSignalSuite(); diff --git a/src/infra/infra-store.test.ts b/src/infra/infra-store.test.ts index 1f65b005652..dfa6b1715c4 100644 --- a/src/infra/infra-store.test.ts +++ b/src/infra/infra-store.test.ts @@ -1,24 +1,8 @@ import fs from "node:fs/promises"; import path from "node:path"; -import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { describe, expect, it } from "vitest"; import { withTempDir } from "../test-utils/temp-dir.js"; -import { - getChannelActivity, - recordChannelActivity, - resetChannelActivityForTest, -} from "./channel-activity.js"; -import { createDedupeCache } from "./dedupe.js"; -import { - emitDiagnosticEvent, - onDiagnosticEvent, - resetDiagnosticEventsForTest, -} from "./diagnostic-events.js"; import { readSessionStoreJson5 } from "./state-migrations.fs.js"; -import { - defaultVoiceWakeTriggers, - loadVoiceWakeConfig, - setVoiceWakeTriggers, -} from "./voicewake.js"; describe("infra store", () => { describe("state migrations fs", () => { @@ -49,182 +33,4 @@ describe("infra store", () => { }); }); }); - - describe("voicewake store", () => { - it("returns defaults when missing", async () => { - await withTempDir("openclaw-voicewake-", async (baseDir) 
=> { - const cfg = await loadVoiceWakeConfig(baseDir); - expect(cfg.triggers).toEqual(defaultVoiceWakeTriggers()); - expect(cfg.updatedAtMs).toBe(0); - }); - }); - - it("sanitizes and persists triggers", async () => { - await withTempDir("openclaw-voicewake-", async (baseDir) => { - const saved = await setVoiceWakeTriggers([" hi ", "", " there "], baseDir); - expect(saved.triggers).toEqual(["hi", "there"]); - expect(saved.updatedAtMs).toBeGreaterThan(0); - - const loaded = await loadVoiceWakeConfig(baseDir); - expect(loaded.triggers).toEqual(["hi", "there"]); - expect(loaded.updatedAtMs).toBeGreaterThan(0); - }); - }); - - it("falls back to defaults when triggers empty", async () => { - await withTempDir("openclaw-voicewake-", async (baseDir) => { - const saved = await setVoiceWakeTriggers(["", " "], baseDir); - expect(saved.triggers).toEqual(defaultVoiceWakeTriggers()); - }); - }); - - it("sanitizes malformed persisted config values", async () => { - await withTempDir("openclaw-voicewake-", async (baseDir) => { - await fs.mkdir(path.join(baseDir, "settings"), { recursive: true }); - await fs.writeFile( - path.join(baseDir, "settings", "voicewake.json"), - JSON.stringify({ - triggers: [" wake ", "", 42, null], - updatedAtMs: -1, - }), - "utf-8", - ); - - const loaded = await loadVoiceWakeConfig(baseDir); - expect(loaded.triggers).toEqual(["wake"]); - expect(loaded.updatedAtMs).toBe(0); - }); - }); - }); - - describe("diagnostic-events", () => { - it("emits monotonic seq", async () => { - resetDiagnosticEventsForTest(); - const seqs: number[] = []; - const stop = onDiagnosticEvent((evt) => seqs.push(evt.seq)); - - emitDiagnosticEvent({ - type: "model.usage", - usage: { total: 1 }, - }); - emitDiagnosticEvent({ - type: "model.usage", - usage: { total: 2 }, - }); - - stop(); - - expect(seqs).toEqual([1, 2]); - }); - - it("emits message-flow events", async () => { - resetDiagnosticEventsForTest(); - const types: string[] = []; - const stop = onDiagnosticEvent((evt) => 
types.push(evt.type)); - - emitDiagnosticEvent({ - type: "webhook.received", - channel: "telegram", - updateType: "telegram-post", - }); - emitDiagnosticEvent({ - type: "message.queued", - channel: "telegram", - source: "telegram", - queueDepth: 1, - }); - emitDiagnosticEvent({ - type: "session.state", - state: "processing", - reason: "run_started", - }); - - stop(); - - expect(types).toEqual(["webhook.received", "message.queued", "session.state"]); - }); - }); - - describe("channel activity", () => { - beforeEach(() => { - resetChannelActivityForTest(); - vi.useFakeTimers(); - vi.setSystemTime(new Date("2026-01-08T00:00:00Z")); - }); - - afterEach(() => { - vi.useRealTimers(); - }); - - it("records inbound/outbound separately", () => { - recordChannelActivity({ channel: "telegram", direction: "inbound" }); - vi.advanceTimersByTime(1000); - recordChannelActivity({ channel: "telegram", direction: "outbound" }); - const res = getChannelActivity({ channel: "telegram" }); - expect(res.inboundAt).toBe(1767830400000); - expect(res.outboundAt).toBe(1767830401000); - }); - - it("isolates accounts", () => { - recordChannelActivity({ - channel: "whatsapp", - accountId: "a", - direction: "inbound", - at: 1, - }); - recordChannelActivity({ - channel: "whatsapp", - accountId: "b", - direction: "inbound", - at: 2, - }); - expect(getChannelActivity({ channel: "whatsapp", accountId: "a" })).toEqual({ - inboundAt: 1, - outboundAt: null, - }); - expect(getChannelActivity({ channel: "whatsapp", accountId: "b" })).toEqual({ - inboundAt: 2, - outboundAt: null, - }); - }); - }); - - describe("createDedupeCache", () => { - it("marks duplicates within TTL", () => { - const cache = createDedupeCache({ ttlMs: 1000, maxSize: 10 }); - expect(cache.check("a", 100)).toBe(false); - expect(cache.check("a", 500)).toBe(true); - }); - - it("expires entries after TTL", () => { - const cache = createDedupeCache({ ttlMs: 1000, maxSize: 10 }); - expect(cache.check("a", 100)).toBe(false); - 
expect(cache.check("a", 1501)).toBe(false); - }); - - it("evicts oldest entries when over max size", () => { - const cache = createDedupeCache({ ttlMs: 10_000, maxSize: 2 }); - expect(cache.check("a", 100)).toBe(false); - expect(cache.check("b", 200)).toBe(false); - expect(cache.check("c", 300)).toBe(false); - expect(cache.check("a", 400)).toBe(false); - }); - - it("prunes expired entries even when refreshed keys are older in insertion order", () => { - const cache = createDedupeCache({ ttlMs: 100, maxSize: 10 }); - expect(cache.check("a", 0)).toBe(false); - expect(cache.check("b", 50)).toBe(false); - expect(cache.check("a", 120)).toBe(false); - expect(cache.check("c", 200)).toBe(false); - expect(cache.size()).toBe(2); - }); - - it("supports non-mutating existence checks via peek()", () => { - const cache = createDedupeCache({ ttlMs: 1000, maxSize: 10 }); - expect(cache.peek("a", 100)).toBe(false); - expect(cache.check("a", 100)).toBe(false); - expect(cache.peek("a", 200)).toBe(true); - expect(cache.peek("a", 1201)).toBe(false); - }); - }); }); diff --git a/src/infra/install-from-npm-spec.test.ts b/src/infra/install-from-npm-spec.test.ts new file mode 100644 index 00000000000..f2e5132f96f --- /dev/null +++ b/src/infra/install-from-npm-spec.test.ts @@ -0,0 +1,77 @@ +import { describe, expect, it, vi } from "vitest"; + +const validateRegistryNpmSpecMock = vi.hoisted(() => vi.fn()); +const installFromNpmSpecArchiveWithInstallerMock = vi.hoisted(() => vi.fn()); +const finalizeNpmSpecArchiveInstallMock = vi.hoisted(() => vi.fn()); + +vi.mock("./npm-registry-spec.js", () => ({ + validateRegistryNpmSpec: (...args: unknown[]) => validateRegistryNpmSpecMock(...args), +})); + +vi.mock("./npm-pack-install.js", () => ({ + installFromNpmSpecArchiveWithInstaller: (...args: unknown[]) => + installFromNpmSpecArchiveWithInstallerMock(...args), + finalizeNpmSpecArchiveInstall: (...args: unknown[]) => finalizeNpmSpecArchiveInstallMock(...args), +})); + +import { 
installFromValidatedNpmSpecArchive } from "./install-from-npm-spec.js"; + +describe("installFromValidatedNpmSpecArchive", () => { + it("trims the spec and returns validation errors before running the installer", async () => { + validateRegistryNpmSpecMock.mockReturnValueOnce("unsupported npm spec"); + + await expect( + installFromValidatedNpmSpecArchive({ + spec: " nope ", + timeoutMs: 30_000, + tempDirPrefix: "openclaw-npm-", + installFromArchive: vi.fn(), + archiveInstallParams: {}, + }), + ).resolves.toEqual({ ok: false, error: "unsupported npm spec" }); + + expect(validateRegistryNpmSpecMock).toHaveBeenCalledWith("nope"); + expect(installFromNpmSpecArchiveWithInstallerMock).not.toHaveBeenCalled(); + expect(finalizeNpmSpecArchiveInstallMock).not.toHaveBeenCalled(); + }); + + it("passes the trimmed spec through the archive installer and finalizer", async () => { + const installFromArchive = vi.fn(); + const warn = vi.fn(); + const onIntegrityDrift = vi.fn(); + const flowResult = { + ok: true, + installResult: { ok: true }, + npmResolution: { version: "1.2.3" }, + }; + const finalized = { ok: true, archivePath: "/tmp/pkg.tgz" }; + validateRegistryNpmSpecMock.mockReturnValueOnce(null); + installFromNpmSpecArchiveWithInstallerMock.mockResolvedValueOnce(flowResult); + finalizeNpmSpecArchiveInstallMock.mockReturnValueOnce(finalized); + + await expect( + installFromValidatedNpmSpecArchive({ + spec: " @openclaw/demo@beta ", + timeoutMs: 45_000, + tempDirPrefix: "openclaw-npm-", + expectedIntegrity: "sha512-demo", + onIntegrityDrift, + warn, + installFromArchive, + archiveInstallParams: { destination: "/tmp/demo" }, + }), + ).resolves.toBe(finalized); + + expect(installFromNpmSpecArchiveWithInstallerMock).toHaveBeenCalledWith({ + tempDirPrefix: "openclaw-npm-", + spec: "@openclaw/demo@beta", + timeoutMs: 45_000, + expectedIntegrity: "sha512-demo", + onIntegrityDrift, + warn, + installFromArchive, + archiveInstallParams: { destination: "/tmp/demo" }, + }); + 
expect(finalizeNpmSpecArchiveInstallMock).toHaveBeenCalledWith(flowResult); + }); +}); diff --git a/src/infra/install-mode-options.test.ts b/src/infra/install-mode-options.test.ts index fe9cfa1a64c..6fd450ee370 100644 --- a/src/infra/install-mode-options.test.ts +++ b/src/infra/install-mode-options.test.ts @@ -4,48 +4,83 @@ import { resolveTimedInstallModeOptions, } from "./install-mode-options.js"; +type LoggerKey = "default" | "explicit"; + describe("install mode option helpers", () => { - it("applies logger, mode, and dryRun defaults", () => { - const logger = { warn: (_message: string) => {} }; - const result = resolveInstallModeOptions({}, logger); + it.each([ + { + name: "applies logger, mode, and dryRun defaults", + params: {}, + expected: { loggerKey: "default", mode: "install", dryRun: false }, + }, + { + name: "preserves explicit mode and dryRun values", + params: { loggerKey: "explicit", mode: "update" as const, dryRun: true }, + expected: { loggerKey: "explicit", mode: "update", dryRun: true }, + }, + { + name: "preserves explicit false dryRun values", + params: { mode: "update" as const, dryRun: false }, + expected: { loggerKey: "default", mode: "update", dryRun: false }, + }, + ] satisfies Array<{ + name: string; + params: { loggerKey?: LoggerKey; mode?: "install" | "update"; dryRun?: boolean }; + expected: { loggerKey: LoggerKey; mode: "install" | "update"; dryRun: boolean }; + }>)("$name", ({ params, expected }) => { + const loggers = { + default: { warn: (_message: string) => {} }, + explicit: { warn: (_message: string) => {} }, + } satisfies Record void }>; - expect(result).toEqual({ - logger, - mode: "install", - dryRun: false, + expect( + resolveInstallModeOptions( + { + logger: params.loggerKey ? 
loggers[params.loggerKey] : undefined, + mode: params.mode, + dryRun: params.dryRun, + }, + loggers.default, + ), + ).toEqual({ + logger: loggers[expected.loggerKey], + mode: expected.mode, + dryRun: expected.dryRun, }); }); - it("preserves explicit mode and dryRun values", () => { + it.each([ + { + name: "uses default timeout when not provided", + params: {}, + defaultTimeoutMs: undefined, + expectedTimeoutMs: 120_000, + expectedMode: "install", + expectedDryRun: false, + }, + { + name: "honors custom timeout default override", + params: {}, + defaultTimeoutMs: 5000, + expectedTimeoutMs: 5000, + expectedMode: "install", + expectedDryRun: false, + }, + { + name: "preserves explicit timeout values", + params: { timeoutMs: 0, mode: "update" as const, dryRun: true }, + defaultTimeoutMs: 5000, + expectedTimeoutMs: 0, + expectedMode: "update", + expectedDryRun: true, + }, + ])("$name", ({ params, defaultTimeoutMs, expectedTimeoutMs, expectedMode, expectedDryRun }) => { const logger = { warn: (_message: string) => {} }; - const result = resolveInstallModeOptions( - { - logger, - mode: "update", - dryRun: true, - }, - { warn: () => {} }, - ); + const result = resolveTimedInstallModeOptions(params, logger, defaultTimeoutMs); - expect(result).toEqual({ - logger, - mode: "update", - dryRun: true, - }); - }); - - it("uses default timeout when not provided", () => { - const logger = { warn: (_message: string) => {} }; - const result = resolveTimedInstallModeOptions({}, logger); - - expect(result.timeoutMs).toBe(120_000); - expect(result.mode).toBe("install"); - expect(result.dryRun).toBe(false); - }); - - it("honors custom timeout default override", () => { - const result = resolveTimedInstallModeOptions({}, { warn: () => {} }, 5000); - - expect(result.timeoutMs).toBe(5000); + expect(result.timeoutMs).toBe(expectedTimeoutMs); + expect(result.mode).toBe(expectedMode); + expect(result.dryRun).toBe(expectedDryRun); + expect(result.logger).toBe(logger); }); }); diff --git 
a/src/infra/install-package-dir.test.ts b/src/infra/install-package-dir.test.ts index 1386f6074fa..cacbcadf5cc 100644 --- a/src/infra/install-package-dir.test.ts +++ b/src/infra/install-package-dir.test.ts @@ -3,8 +3,17 @@ import fs from "node:fs/promises"; import os from "node:os"; import path from "node:path"; import { afterEach, describe, expect, it, vi } from "vitest"; +import { runCommandWithTimeout } from "../process/exec.js"; import { installPackageDir } from "./install-package-dir.js"; +vi.mock("../process/exec.js", async () => { + const actual = await vi.importActual("../process/exec.js"); + return { + ...actual, + runCommandWithTimeout: vi.fn(actual.runCommandWithTimeout), + }; +}); + async function listMatchingDirs(root: string, prefix: string): Promise { const entries = await fs.readdir(root, { withFileTypes: true }); return entries @@ -263,4 +272,49 @@ describe("installPackageDir", () => { const backupRoot = path.join(preservedInstallRoot, ".openclaw-install-backups"); await expect(fs.readdir(backupRoot)).resolves.toHaveLength(1); }); + + it("installs peer dependencies for isolated plugin package installs", async () => { + fixtureRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-install-package-dir-")); + const sourceDir = path.join(fixtureRoot, "source"); + const targetDir = path.join(fixtureRoot, "plugins", "demo"); + await fs.mkdir(sourceDir, { recursive: true }); + await fs.writeFile( + path.join(sourceDir, "package.json"), + JSON.stringify({ + name: "demo-plugin", + version: "1.0.0", + dependencies: { + zod: "^4.0.0", + }, + }), + "utf-8", + ); + + vi.mocked(runCommandWithTimeout).mockResolvedValue({ + stdout: "", + stderr: "", + code: 0, + signal: null, + killed: false, + termination: "exit", + }); + + const result = await installPackageDir({ + sourceDir, + targetDir, + mode: "install", + timeoutMs: 1_000, + copyErrorPrefix: "failed to copy plugin", + hasDeps: true, + depsLogMessage: "Installing deps…", + }); + + expect(result).toEqual({ 
ok: true }); + expect(vi.mocked(runCommandWithTimeout)).toHaveBeenCalledWith( + ["npm", "install", "--omit=dev", "--silent", "--ignore-scripts"], + expect.objectContaining({ + cwd: expect.stringContaining(".openclaw-install-stage-"), + }), + ); + }); }); diff --git a/src/infra/install-package-dir.ts b/src/infra/install-package-dir.ts index 17878599160..45611b17ffe 100644 --- a/src/infra/install-package-dir.ts +++ b/src/infra/install-package-dir.ts @@ -189,7 +189,9 @@ export async function installPackageDir(params: { await sanitizeManifestForNpmInstall(stageDir); params.logger?.info?.(params.depsLogMessage); const npmRes = await runCommandWithTimeout( - ["npm", "install", "--omit=dev", "--omit=peer", "--silent", "--ignore-scripts"], + // Plugins install into isolated directories, so omitting peer deps can strip + // runtime requirements that npm would otherwise materialize for the package. + ["npm", "install", "--omit=dev", "--silent", "--ignore-scripts"], { timeoutMs: Math.max(params.timeoutMs, 300_000), cwd: stageDir, diff --git a/src/infra/install-safe-path.test.ts b/src/infra/install-safe-path.test.ts index 3ec0679c6cf..61ac64a2126 100644 --- a/src/infra/install-safe-path.test.ts +++ b/src/infra/install-safe-path.test.ts @@ -2,7 +2,33 @@ import fs from "node:fs/promises"; import os from "node:os"; import path from "node:path"; import { describe, expect, it } from "vitest"; -import { assertCanonicalPathWithinBase, safePathSegmentHashed } from "./install-safe-path.js"; +import { + assertCanonicalPathWithinBase, + resolveSafeInstallDir, + safeDirName, + safePathSegmentHashed, + unscopedPackageName, +} from "./install-safe-path.js"; + +describe("unscopedPackageName", () => { + it.each([ + { value: "@openclaw/matrix", expected: "matrix" }, + { value: " matrix ", expected: "matrix" }, + { value: "", expected: "" }, + ])("normalizes package names for %j", ({ value, expected }) => { + expect(unscopedPackageName(value)).toBe(expected); + }); +}); + 
+describe("safeDirName", () => { + it.each([ + { value: " matrix ", expected: "matrix" }, + { value: "../matrix/plugin", expected: "..__matrix__plugin" }, + { value: "dir\\plugin", expected: "dir__plugin" }, + ])("normalizes install dir names for %j", ({ value, expected }) => { + expect(safeDirName(value)).toBe(expected); + }); +}); describe("safePathSegmentHashed", () => { it("keeps safe names unchanged", () => { @@ -24,6 +50,34 @@ describe("safePathSegmentHashed", () => { }); }); +describe("resolveSafeInstallDir", () => { + it("resolves install dirs under the base directory", () => { + expect( + resolveSafeInstallDir({ + baseDir: "/tmp/plugins", + id: "@openclaw/matrix", + invalidNameMessage: "invalid plugin name", + }), + ).toEqual({ + ok: true, + path: path.join("/tmp/plugins", "@openclaw__matrix"), + }); + }); + + it("rejects ids that resolve to the base directory itself", () => { + expect( + resolveSafeInstallDir({ + baseDir: "/tmp/plugins", + id: " ", + invalidNameMessage: "invalid plugin name", + }), + ).toEqual({ + ok: false, + error: "invalid plugin name", + }); + }); +}); + describe("assertCanonicalPathWithinBase", () => { it("accepts in-base directories", async () => { const baseDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-install-safe-")); diff --git a/src/infra/install-source-utils.test.ts b/src/infra/install-source-utils.test.ts index bbcc17cb968..95215530bb2 100644 --- a/src/infra/install-source-utils.test.ts +++ b/src/infra/install-source-utils.test.ts @@ -56,7 +56,10 @@ async function runPack(spec: string, cwd: string, timeoutMs = 1000) { }); } -async function expectPackFallsBackToDetectedArchive(params: { stdout: string }) { +async function expectPackFallsBackToDetectedArchive(params: { + stdout: string; + expectedMetadata?: Record; +}) { const cwd = await createTempDir("openclaw-install-source-utils-"); const archivePath = path.join(cwd, "openclaw-plugin-1.2.3.tgz"); await fs.writeFile(archivePath, "", "utf-8"); @@ -77,7 +80,7 @@ 
async function expectPackFallsBackToDetectedArchive(params: { stdout: string }) expect(result).toEqual({ ok: true, archivePath, - metadata: {}, + metadata: params.expectedMetadata ?? {}, }); } @@ -134,15 +137,18 @@ describe("resolveArchiveSourcePath", () => { } }); - it("accepts supported archive extensions", async () => { - const { filePath } = await createFixtureFile({ - fileName: "plugin.zip", - contents: "", - }); + it.each(["plugin.zip", "plugin.tgz", "plugin.tar.gz"])( + "accepts supported archive extension %s", + async (fileName) => { + const { filePath } = await createFixtureFile({ + fileName, + contents: "", + }); - const result = await resolveArchiveSourcePath(filePath); - expect(result).toEqual({ ok: true, path: filePath }); - }); + const result = await resolveArchiveSourcePath(filePath); + expect(result).toEqual({ ok: true, path: filePath }); + }, + ); }); describe("packNpmSpecToArchive", () => { @@ -219,12 +225,29 @@ describe("packNpmSpecToArchive", () => { } }); - it("falls back to archive detected in cwd when npm pack stdout is empty", async () => { - await expectPackFallsBackToDetectedArchive({ stdout: " \n\n" }); - }); - - it("falls back to archive detected in cwd when stdout does not contain a tgz", async () => { - await expectPackFallsBackToDetectedArchive({ stdout: "npm pack completed successfully\n" }); + it.each([ + { + name: "falls back to archive detected in cwd when npm pack stdout is empty", + stdout: " \n\n", + }, + { + name: "falls back to archive detected in cwd when stdout does not contain a tgz", + stdout: "npm pack completed successfully\n", + }, + { + name: "falls back to cwd archive when logged JSON metadata omits filename", + stdout: + 'npm notice using cache\n[{"id":"openclaw-plugin@1.2.3","name":"openclaw-plugin","version":"1.2.3","integrity":"sha512-test-integrity","shasum":"abc123"}]\n', + expectedMetadata: { + name: "openclaw-plugin", + version: "1.2.3", + resolvedSpec: "openclaw-plugin@1.2.3", + integrity: 
"sha512-test-integrity", + shasum: "abc123", + }, + }, + ])("$name", async ({ stdout, expectedMetadata }) => { + await expectPackFallsBackToDetectedArchive({ stdout, expectedMetadata }); }); it("returns friendly error for 404 (package not on npm)", async () => { diff --git a/src/infra/install-target.test.ts b/src/infra/install-target.test.ts new file mode 100644 index 00000000000..211d5c1a99d --- /dev/null +++ b/src/infra/install-target.test.ts @@ -0,0 +1,129 @@ +import fs from "node:fs/promises"; +import path from "node:path"; +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { withTempDir } from "../test-helpers/temp-dir.js"; + +const fileExistsMock = vi.hoisted(() => vi.fn()); +const resolveSafeInstallDirMock = vi.hoisted(() => vi.fn()); +const assertCanonicalPathWithinBaseMock = vi.hoisted(() => vi.fn()); + +vi.mock("./archive.js", () => ({ + fileExists: (...args: unknown[]) => fileExistsMock(...args), +})); + +vi.mock("./install-safe-path.js", () => ({ + resolveSafeInstallDir: (...args: unknown[]) => resolveSafeInstallDirMock(...args), + assertCanonicalPathWithinBase: (...args: unknown[]) => assertCanonicalPathWithinBaseMock(...args), +})); + +import { ensureInstallTargetAvailable, resolveCanonicalInstallTarget } from "./install-target.js"; + +beforeEach(() => { + fileExistsMock.mockReset(); + resolveSafeInstallDirMock.mockReset(); + assertCanonicalPathWithinBaseMock.mockReset(); +}); + +describe("resolveCanonicalInstallTarget", () => { + it("creates the base dir and returns early for invalid install ids", async () => { + await withTempDir({ prefix: "openclaw-install-target-" }, async (root) => { + const baseDir = path.join(root, "plugins"); + resolveSafeInstallDirMock.mockReturnValueOnce({ + ok: false, + error: "bad id", + }); + + await expect( + resolveCanonicalInstallTarget({ + baseDir, + id: "../oops", + invalidNameMessage: "bad id", + boundaryLabel: "plugin dir", + }), + ).resolves.toEqual({ ok: false, error: "bad id" }); + + await 
expect(fs.stat(baseDir)).resolves.toMatchObject({ isDirectory: expect.any(Function) }); + expect(assertCanonicalPathWithinBaseMock).not.toHaveBeenCalled(); + }); + }); + + it("returns canonical boundary errors for Error and non-Error throws", async () => { + await withTempDir({ prefix: "openclaw-install-target-" }, async (baseDir) => { + const targetDir = path.join(baseDir, "demo"); + resolveSafeInstallDirMock.mockReturnValue({ + ok: true, + path: targetDir, + }); + assertCanonicalPathWithinBaseMock.mockRejectedValueOnce(new Error("escaped")); + assertCanonicalPathWithinBaseMock.mockRejectedValueOnce("boom"); + + await expect( + resolveCanonicalInstallTarget({ + baseDir, + id: "demo", + invalidNameMessage: "bad id", + boundaryLabel: "plugin dir", + }), + ).resolves.toEqual({ ok: false, error: "escaped" }); + + await expect( + resolveCanonicalInstallTarget({ + baseDir, + id: "demo", + invalidNameMessage: "bad id", + boundaryLabel: "plugin dir", + }), + ).resolves.toEqual({ ok: false, error: "boom" }); + }); + }); + + it("returns the resolved target path on success", async () => { + await withTempDir({ prefix: "openclaw-install-target-" }, async (baseDir) => { + const targetDir = path.join(baseDir, "demo"); + resolveSafeInstallDirMock.mockReturnValueOnce({ + ok: true, + path: targetDir, + }); + + await expect( + resolveCanonicalInstallTarget({ + baseDir, + id: "demo", + invalidNameMessage: "bad id", + boundaryLabel: "plugin dir", + }), + ).resolves.toEqual({ ok: true, targetDir }); + }); + }); +}); + +describe("ensureInstallTargetAvailable", () => { + it("blocks only install mode when the target already exists", async () => { + fileExistsMock.mockResolvedValueOnce(true); + fileExistsMock.mockResolvedValueOnce(false); + + await expect( + ensureInstallTargetAvailable({ + mode: "install", + targetDir: "/tmp/demo", + alreadyExistsError: "already there", + }), + ).resolves.toEqual({ ok: false, error: "already there" }); + + await expect( + ensureInstallTargetAvailable({ + 
mode: "update", + targetDir: "/tmp/demo", + alreadyExistsError: "already there", + }), + ).resolves.toEqual({ ok: true }); + + await expect( + ensureInstallTargetAvailable({ + mode: "install", + targetDir: "/tmp/demo", + alreadyExistsError: "already there", + }), + ).resolves.toEqual({ ok: true }); + }); +}); diff --git a/src/infra/is-main.test.ts b/src/infra/is-main.test.ts new file mode 100644 index 00000000000..b2f6197ad24 --- /dev/null +++ b/src/infra/is-main.test.ts @@ -0,0 +1,80 @@ +import { describe, expect, it } from "vitest"; +import { isMainModule } from "./is-main.js"; + +describe("isMainModule", () => { + it("returns true when argv[1] matches current file", () => { + expect( + isMainModule({ + currentFile: "/repo/dist/index.js", + argv: ["node", "/repo/dist/index.js"], + cwd: "/repo", + env: {}, + }), + ).toBe(true); + }); + + it("returns true under PM2 when pm_exec_path matches current file", () => { + expect( + isMainModule({ + currentFile: "/repo/dist/index.js", + argv: ["node", "/pm2/lib/ProcessContainerFork.js"], + cwd: "/repo", + env: { pm_exec_path: "/repo/dist/index.js", pm_id: "0" }, + }), + ).toBe(true); + }); + + it("returns true for configured wrapper-to-entry pairs", () => { + expect( + isMainModule({ + currentFile: "/repo/dist/entry.js", + argv: ["node", "/repo/openclaw.mjs"], + cwd: "/repo", + env: {}, + wrapperEntryPairs: [{ wrapperBasename: "openclaw.mjs", entryBasename: "entry.js" }], + }), + ).toBe(true); + }); + + it("returns false for unmatched wrapper launches", () => { + expect( + isMainModule({ + currentFile: "/repo/dist/entry.js", + argv: ["node", "/repo/openclaw.mjs"], + cwd: "/repo", + env: {}, + }), + ).toBe(false); + expect( + isMainModule({ + currentFile: "/repo/dist/index.js", + argv: ["node", "/repo/openclaw.mjs"], + cwd: "/repo", + env: {}, + wrapperEntryPairs: [{ wrapperBasename: "openclaw.mjs", entryBasename: "entry.js" }], + }), + ).toBe(false); + }); + + it("returns false when this module is only imported under PM2", 
() => { + expect( + isMainModule({ + currentFile: "/repo/node_modules/openclaw/dist/index.js", + argv: ["node", "/repo/app.js"], + cwd: "/repo", + env: { pm_exec_path: "/repo/app.js", pm_id: "0" }, + }), + ).toBe(false); + }); + + it("falls back to basename matching for relative or symlinked entrypoints", () => { + expect( + isMainModule({ + currentFile: "/repo/dist/index.js", + argv: ["node", "../other/index.js"], + cwd: "/repo/dist", + env: {}, + }), + ).toBe(true); + }); +}); diff --git a/src/infra/json-file.test.ts b/src/infra/json-file.test.ts new file mode 100644 index 00000000000..95def5fa54a --- /dev/null +++ b/src/infra/json-file.test.ts @@ -0,0 +1,33 @@ +import fs from "node:fs"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { withTempDir } from "../test-helpers/temp-dir.js"; +import { loadJsonFile, saveJsonFile } from "./json-file.js"; + +describe("json-file helpers", () => { + it("returns undefined for missing and invalid JSON files", async () => { + await withTempDir({ prefix: "openclaw-json-file-" }, async (root) => { + const pathname = path.join(root, "config.json"); + expect(loadJsonFile(pathname)).toBeUndefined(); + + fs.writeFileSync(pathname, "{", "utf8"); + expect(loadJsonFile(pathname)).toBeUndefined(); + }); + }); + + it("creates parent dirs, writes a trailing newline, and loads the saved object", async () => { + await withTempDir({ prefix: "openclaw-json-file-" }, async (root) => { + const pathname = path.join(root, "nested", "config.json"); + saveJsonFile(pathname, { enabled: true, count: 2 }); + + const raw = fs.readFileSync(pathname, "utf8"); + expect(raw.endsWith("\n")).toBe(true); + expect(loadJsonFile(pathname)).toEqual({ enabled: true, count: 2 }); + + const fileMode = fs.statSync(pathname).mode & 0o777; + const dirMode = fs.statSync(path.dirname(pathname)).mode & 0o777; + expect(fileMode).toBe(0o600); + expect(dirMode).toBe(0o700); + }); + }); +}); diff --git a/src/infra/json-files.test.ts 
b/src/infra/json-files.test.ts new file mode 100644 index 00000000000..d2d0fa600f5 --- /dev/null +++ b/src/infra/json-files.test.ts @@ -0,0 +1,68 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { createAsyncLock, readJsonFile, writeJsonAtomic, writeTextAtomic } from "./json-files.js"; + +describe("json file helpers", () => { + it("reads valid json and returns null for missing or invalid files", async () => { + const base = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-json-files-")); + const validPath = path.join(base, "valid.json"); + const invalidPath = path.join(base, "invalid.json"); + + await fs.writeFile(validPath, '{"ok":true}', "utf8"); + await fs.writeFile(invalidPath, "{not-json}", "utf8"); + + await expect(readJsonFile<{ ok: boolean }>(validPath)).resolves.toEqual({ ok: true }); + await expect(readJsonFile(invalidPath)).resolves.toBeNull(); + await expect(readJsonFile(path.join(base, "missing.json"))).resolves.toBeNull(); + }); + + it("writes json atomically with pretty formatting and optional trailing newline", async () => { + const base = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-json-files-")); + const filePath = path.join(base, "nested", "config.json"); + + await writeJsonAtomic( + filePath, + { ok: true, nested: { value: 1 } }, + { trailingNewline: true, ensureDirMode: 0o755 }, + ); + + await expect(fs.readFile(filePath, "utf8")).resolves.toBe( + '{\n "ok": true,\n "nested": {\n "value": 1\n }\n}\n', + ); + }); + + it("writes text atomically and avoids duplicate trailing newlines", async () => { + const base = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-json-files-")); + const filePath = path.join(base, "nested", "note.txt"); + + await writeTextAtomic(filePath, "hello", { appendTrailingNewline: true }); + await expect(fs.readFile(filePath, "utf8")).resolves.toBe("hello\n"); + + await writeTextAtomic(filePath, "hello\n", { 
appendTrailingNewline: true }); + await expect(fs.readFile(filePath, "utf8")).resolves.toBe("hello\n"); + }); + + it("serializes async lock callers even across rejections", async () => { + const withLock = createAsyncLock(); + const events: string[] = []; + + const first = withLock(async () => { + events.push("first:start"); + await new Promise((resolve) => setTimeout(resolve, 20)); + events.push("first:end"); + throw new Error("boom"); + }); + + const second = withLock(async () => { + events.push("second:start"); + events.push("second:end"); + return "ok"; + }); + + await expect(first).rejects.toThrow("boom"); + await expect(second).resolves.toBe("ok"); + expect(events).toEqual(["first:start", "first:end", "second:start", "second:end"]); + }); +}); diff --git a/src/infra/json-utf8-bytes.test.ts b/src/infra/json-utf8-bytes.test.ts index 3418359ae5f..5009301ffd6 100644 --- a/src/infra/json-utf8-bytes.test.ts +++ b/src/infra/json-utf8-bytes.test.ts @@ -2,10 +2,29 @@ import { describe, expect, it } from "vitest"; import { jsonUtf8Bytes } from "./json-utf8-bytes.js"; describe("jsonUtf8Bytes", () => { - it("returns utf8 byte length for serializable values", () => { - expect(jsonUtf8Bytes({ a: "x", b: [1, 2, 3] })).toBe( - Buffer.byteLength(JSON.stringify({ a: "x", b: [1, 2, 3] }), "utf8"), - ); + it.each([ + { + name: "object payloads", + value: { a: "x", b: [1, 2, 3] }, + expected: Buffer.byteLength(JSON.stringify({ a: "x", b: [1, 2, 3] }), "utf8"), + }, + { + name: "strings", + value: "hello", + expected: Buffer.byteLength(JSON.stringify("hello"), "utf8"), + }, + { + name: "undefined via string fallback", + value: undefined, + expected: Buffer.byteLength("undefined", "utf8"), + }, + { + name: "unicode strings", + value: "🙂", + expected: Buffer.byteLength(JSON.stringify("🙂"), "utf8"), + }, + ])("returns utf8 byte length for $name", ({ value, expected }) => { + expect(jsonUtf8Bytes(value)).toBe(expected); }); it("falls back to string conversion when JSON serialization 
throws", () => { @@ -13,4 +32,12 @@ describe("jsonUtf8Bytes", () => { circular.self = circular; expect(jsonUtf8Bytes(circular)).toBe(Buffer.byteLength("[object Object]", "utf8")); }); + + it("uses string conversion for BigInt serialization failures", () => { + expect(jsonUtf8Bytes(12n)).toBe(Buffer.byteLength("12", "utf8")); + }); + + it("uses string conversion for symbol serialization failures", () => { + expect(jsonUtf8Bytes(Symbol("token"))).toBe(Buffer.byteLength("Symbol(token)", "utf8")); + }); }); diff --git a/src/infra/jsonl-socket.test.ts b/src/infra/jsonl-socket.test.ts new file mode 100644 index 00000000000..af8bf0fdaed --- /dev/null +++ b/src/infra/jsonl-socket.test.ts @@ -0,0 +1,69 @@ +import net from "node:net"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { withTempDir } from "../test-helpers/temp-dir.js"; +import { requestJsonlSocket } from "./jsonl-socket.js"; + +describe.runIf(process.platform !== "win32")("requestJsonlSocket", () => { + it("ignores malformed and non-accepted lines until one is accepted", async () => { + await withTempDir({ prefix: "openclaw-jsonl-socket-" }, async (dir) => { + const socketPath = path.join(dir, "socket.sock"); + const server = net.createServer((socket) => { + socket.on("data", () => { + socket.write("{bad json}\n"); + socket.write('{"type":"ignore"}\n'); + socket.write('{"type":"done","value":42}\n'); + }); + }); + await new Promise((resolve) => server.listen(socketPath, resolve)); + + try { + await expect( + requestJsonlSocket({ + socketPath, + payload: '{"hello":"world"}', + timeoutMs: 500, + accept: (msg) => { + const value = msg as { type?: string; value?: number }; + return value.type === "done" ? (value.value ?? 
null) : undefined; + }, + }), + ).resolves.toBe(42); + } finally { + server.close(); + } + }); + }); + + it("returns null on timeout and on socket errors", async () => { + await withTempDir({ prefix: "openclaw-jsonl-socket-" }, async (dir) => { + const socketPath = path.join(dir, "socket.sock"); + const server = net.createServer(() => { + // Intentionally never reply. + }); + await new Promise((resolve) => server.listen(socketPath, resolve)); + + try { + await expect( + requestJsonlSocket({ + socketPath, + payload: "{}", + timeoutMs: 50, + accept: () => undefined, + }), + ).resolves.toBeNull(); + } finally { + server.close(); + } + + await expect( + requestJsonlSocket({ + socketPath, + payload: "{}", + timeoutMs: 50, + accept: () => undefined, + }), + ).resolves.toBeNull(); + }); + }); +}); diff --git a/src/infra/machine-name.test.ts b/src/infra/machine-name.test.ts new file mode 100644 index 00000000000..f36efd6ceee --- /dev/null +++ b/src/infra/machine-name.test.ts @@ -0,0 +1,54 @@ +import os from "node:os"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import { importFreshModule } from "../../test/helpers/import-fresh.js"; + +const execFileMock = vi.hoisted(() => vi.fn()); + +vi.mock("node:child_process", () => ({ + execFile: (...args: unknown[]) => execFileMock(...args), +})); + +const originalVitest = process.env.VITEST; +const originalNodeEnv = process.env.NODE_ENV; + +async function importMachineName(scope: string) { + return await importFreshModule( + import.meta.url, + `./machine-name.js?scope=${scope}`, + ); +} + +afterEach(() => { + execFileMock.mockReset(); + vi.restoreAllMocks(); + if (originalVitest === undefined) { + delete process.env.VITEST; + } else { + process.env.VITEST = originalVitest; + } + if (originalNodeEnv === undefined) { + delete process.env.NODE_ENV; + } else { + process.env.NODE_ENV = originalNodeEnv; + } +}); + +describe("getMachineDisplayName", () => { + it("uses the hostname fallback in test mode and trims 
.local", async () => { + const hostnameSpy = vi.spyOn(os, "hostname").mockReturnValue(" clawbox.local "); + const machineName = await importMachineName("test-fallback"); + + await expect(machineName.getMachineDisplayName()).resolves.toBe("clawbox.local"); + await expect(machineName.getMachineDisplayName()).resolves.toBe("clawbox.local"); + expect(hostnameSpy).toHaveBeenCalledTimes(1); + expect(execFileMock).not.toHaveBeenCalled(); + }); + + it("falls back to the default product name when hostname is blank", async () => { + vi.spyOn(os, "hostname").mockReturnValue(" "); + const machineName = await importMachineName("blank-hostname"); + + await expect(machineName.getMachineDisplayName()).resolves.toBe("openclaw"); + expect(execFileMock).not.toHaveBeenCalled(); + }); +}); diff --git a/src/infra/map-size.test.ts b/src/infra/map-size.test.ts new file mode 100644 index 00000000000..82fbe8a52ac --- /dev/null +++ b/src/infra/map-size.test.ts @@ -0,0 +1,39 @@ +import { describe, expect, it } from "vitest"; +import { pruneMapToMaxSize } from "./map-size.js"; + +describe("pruneMapToMaxSize", () => { + it("keeps the newest entries after flooring fractional limits", () => { + const map = new Map([ + ["a", 1], + ["b", 2], + ["c", 3], + ]); + + pruneMapToMaxSize(map, 2.9); + + expect([...map.entries()]).toEqual([ + ["b", 2], + ["c", 3], + ]); + }); + + it("clears maps for zero or negative limits and leaves undersized maps untouched", () => { + const cleared = new Map([ + ["a", 1], + ["b", 2], + ]); + pruneMapToMaxSize(cleared, 0); + expect([...cleared.entries()]).toEqual([]); + + const alsoCleared = new Map([ + ["a", 1], + ["b", 2], + ]); + pruneMapToMaxSize(alsoCleared, -4); + expect([...alsoCleared.entries()]).toEqual([]); + + const unchanged = new Map([["a", 1]]); + pruneMapToMaxSize(unchanged, 5); + expect([...unchanged.entries()]).toEqual([["a", 1]]); + }); +}); diff --git a/src/infra/net/fetch-guard.ssrf.test.ts b/src/infra/net/fetch-guard.ssrf.test.ts index 
1817cc7e7d6..f90df5271f1 100644 --- a/src/infra/net/fetch-guard.ssrf.test.ts +++ b/src/infra/net/fetch-guard.ssrf.test.ts @@ -13,6 +13,34 @@ function okResponse(body = "ok"): Response { return new Response(body, { status: 200 }); } +function getSecondRequestHeaders(fetchImpl: ReturnType): Headers { + const [, secondInit] = fetchImpl.mock.calls[1] as [string, RequestInit]; + return new Headers(secondInit.headers); +} + +async function expectRedirectFailure(params: { + url: string; + responses: Response[]; + expectedError: RegExp; + lookupFn?: NonNullable[0]["lookupFn"]>; + maxRedirects?: number; +}) { + const fetchImpl = vi.fn(); + for (const response of params.responses) { + fetchImpl.mockResolvedValueOnce(response); + } + + await expect( + fetchWithSsrFGuard({ + url: params.url, + fetchImpl, + ...(params.lookupFn ? { lookupFn: params.lookupFn } : {}), + ...(params.maxRedirects === undefined ? {} : { maxRedirects: params.maxRedirects }), + }), + ).rejects.toThrow(params.expectedError); + return fetchImpl; +} + describe("fetchWithSsrFGuard hardening", () => { type LookupFn = NonNullable[0]["lookupFn"]>; const CROSS_ORIGIN_REDIRECT_STRIPPED_HEADERS = [ @@ -33,11 +61,6 @@ describe("fetchWithSsrFGuard hardening", () => { const createPublicLookup = (): LookupFn => vi.fn(async () => [{ address: "93.184.216.34", family: 4 }]) as unknown as LookupFn; - const getSecondRequestHeaders = (fetchImpl: ReturnType): Headers => { - const [, secondInit] = fetchImpl.mock.calls[1] as [string, RequestInit]; - return new Headers(secondInit.headers); - }; - async function runProxyModeDispatcherTest(params: { mode: (typeof GUARDED_FETCH_MODE)[keyof typeof GUARDED_FETCH_MODE]; expectEnvProxy: boolean; @@ -112,15 +135,12 @@ describe("fetchWithSsrFGuard hardening", () => { it("blocks redirect chains that hop to private hosts", async () => { const lookupFn = createPublicLookup(); - const fetchImpl = vi.fn().mockResolvedValueOnce(redirectResponse("http://127.0.0.1:6379/")); - - await expect( - 
fetchWithSsrFGuard({ - url: "https://public.example/start", - fetchImpl, - lookupFn, - }), - ).rejects.toThrow(/private|internal|blocked/i); + const fetchImpl = await expectRedirectFailure({ + url: "https://public.example/start", + responses: [redirectResponse("http://127.0.0.1:6379/")], + expectedError: /private|internal|blocked/i, + lookupFn, + }); expect(fetchImpl).toHaveBeenCalledTimes(1); }); @@ -136,6 +156,18 @@ describe("fetchWithSsrFGuard hardening", () => { expect(fetchImpl).not.toHaveBeenCalled(); }); + it("does not let wildcard allowlists match the apex host", async () => { + const fetchImpl = vi.fn(); + await expect( + fetchWithSsrFGuard({ + url: "https://assets.example.com/pic.png", + fetchImpl, + policy: { hostnameAllowlist: ["*.assets.example.com"] }, + }), + ).rejects.toThrow(/allowlist/i); + expect(fetchImpl).not.toHaveBeenCalled(); + }); + it("allows wildcard allowlisted hosts", async () => { const lookupFn = createPublicLookup(); const fetchImpl = vi.fn(async () => new Response("ok", { status: 200 })); @@ -211,6 +243,41 @@ describe("fetchWithSsrFGuard hardening", () => { await result.release(); }); + it.each([ + { + name: "rejects redirects without a location header", + responses: [new Response(null, { status: 302 })], + expectedError: /missing location header/i, + maxRedirects: undefined, + }, + { + name: "rejects redirect loops", + responses: [ + redirectResponse("https://public.example/next"), + redirectResponse("https://public.example/next"), + ], + expectedError: /redirect loop/i, + maxRedirects: undefined, + }, + { + name: "rejects too many redirects", + responses: [ + redirectResponse("https://public.example/one"), + redirectResponse("https://public.example/two"), + ], + expectedError: /too many redirects/i, + maxRedirects: 1, + }, + ])("$name", async ({ responses, expectedError, maxRedirects }) => { + await expectRedirectFailure({ + url: "https://public.example/start", + responses, + expectedError, + lookupFn: createPublicLookup(), + 
maxRedirects, + }); + }); + it("ignores env proxy by default to preserve DNS-pinned destination binding", async () => { await runProxyModeDispatcherTest({ mode: GUARDED_FETCH_MODE.STRICT, diff --git a/src/infra/net/hostname.test.ts b/src/infra/net/hostname.test.ts new file mode 100644 index 00000000000..90e4c939e91 --- /dev/null +++ b/src/infra/net/hostname.test.ts @@ -0,0 +1,17 @@ +import { describe, expect, it } from "vitest"; +import { normalizeHostname } from "./hostname.js"; + +describe("normalizeHostname", () => { + it("trims, lowercases, and strips a trailing dot", () => { + expect(normalizeHostname(" Example.COM. ")).toBe("example.com"); + }); + + it("unwraps bracketed ipv6 hosts after normalization", () => { + expect(normalizeHostname(" [FD7A:115C:A1E0::1] ")).toBe("fd7a:115c:a1e0::1"); + }); + + it("leaves non-fully-bracketed values otherwise unchanged", () => { + expect(normalizeHostname("[fd7a:115c:a1e0::1")).toBe("[fd7a:115c:a1e0::1"); + expect(normalizeHostname("fd7a:115c:a1e0::1]")).toBe("fd7a:115c:a1e0::1]"); + }); +}); diff --git a/src/infra/net/proxy-env.test.ts b/src/infra/net/proxy-env.test.ts index 37b910f1769..3f3031f028a 100644 --- a/src/infra/net/proxy-env.test.ts +++ b/src/infra/net/proxy-env.test.ts @@ -1,5 +1,31 @@ import { describe, expect, it } from "vitest"; -import { hasEnvHttpProxyConfigured, resolveEnvHttpProxyUrl } from "./proxy-env.js"; +import { + hasEnvHttpProxyConfigured, + hasProxyEnvConfigured, + resolveEnvHttpProxyUrl, +} from "./proxy-env.js"; + +describe("hasProxyEnvConfigured", () => { + it.each([ + { + name: "detects upper-case HTTP proxy values", + env: { HTTP_PROXY: "http://upper-http.test:8080" } as NodeJS.ProcessEnv, + expected: true, + }, + { + name: "detects lower-case all_proxy values", + env: { all_proxy: "socks5://proxy.test:1080" } as NodeJS.ProcessEnv, + expected: true, + }, + { + name: "ignores blank proxy values", + env: { HTTP_PROXY: " ", all_proxy: "" } as NodeJS.ProcessEnv, + expected: false, + }, + 
])("$name", ({ env, expected }) => { + expect(hasProxyEnvConfigured(env)).toBe(expected); + }); +}); describe("resolveEnvHttpProxyUrl", () => { it("uses lower-case https_proxy before upper-case HTTPS_PROXY", () => { @@ -39,4 +65,24 @@ describe("resolveEnvHttpProxyUrl", () => { expect(resolveEnvHttpProxyUrl("https", env)).toBe("http://upper-http.test:8080"); expect(hasEnvHttpProxyConfigured("https", env)).toBe(true); }); + + it("does not use ALL_PROXY for EnvHttpProxyAgent-style resolution", () => { + const env = { + ALL_PROXY: "http://all-proxy.test:8080", + all_proxy: "http://lower-all-proxy.test:8080", + } as NodeJS.ProcessEnv; + + expect(resolveEnvHttpProxyUrl("https", env)).toBeUndefined(); + expect(resolveEnvHttpProxyUrl("http", env)).toBeUndefined(); + expect(hasEnvHttpProxyConfigured("https", env)).toBe(false); + }); + + it("returns only HTTP proxies for http requests", () => { + const env = { + https_proxy: "http://lower-https.test:8080", + http_proxy: "http://lower-http.test:8080", + } as NodeJS.ProcessEnv; + + expect(resolveEnvHttpProxyUrl("http", env)).toBe("http://lower-http.test:8080"); + }); }); diff --git a/src/infra/net/ssrf.test.ts b/src/infra/net/ssrf.test.ts index 2698bf3db9e..637bd5c2e9e 100644 --- a/src/infra/net/ssrf.test.ts +++ b/src/infra/net/ssrf.test.ts @@ -111,19 +111,23 @@ describe("normalizeFingerprint", () => { }); describe("isBlockedHostnameOrIp", () => { - it("blocks localhost.localdomain and metadata hostname aliases", () => { - expect(isBlockedHostnameOrIp("localhost.localdomain")).toBe(true); - expect(isBlockedHostnameOrIp("metadata.google.internal")).toBe(true); + it.each([ + "localhost.localdomain", + "metadata.google.internal", + "api.localhost", + "svc.local", + "db.internal", + ])("blocks reserved hostname %s", (hostname) => { + expect(isBlockedHostnameOrIp(hostname)).toBe(true); }); - it("blocks private transition addresses via shared IP classifier", () => { - 
expect(isBlockedHostnameOrIp("2001:db8:1234::5efe:127.0.0.1")).toBe(true); - expect(isBlockedHostnameOrIp("2001:db8::1")).toBe(false); - }); - - it("blocks IPv4 special-use ranges but allows adjacent public ranges", () => { - expect(isBlockedHostnameOrIp("198.18.0.1")).toBe(true); - expect(isBlockedHostnameOrIp("198.20.0.1")).toBe(false); + it.each([ + ["2001:db8:1234::5efe:127.0.0.1", true], + ["2001:db8::1", false], + ["198.18.0.1", true], + ["198.20.0.1", false], + ])("returns %s => %s", (value, expected) => { + expect(isBlockedHostnameOrIp(value)).toBe(expected); }); it("supports opt-in policy to allow RFC2544 benchmark range", () => { @@ -134,10 +138,15 @@ describe("isBlockedHostnameOrIp", () => { expect(isBlockedHostnameOrIp("198.51.100.1", policy)).toBe(true); }); - it("blocks legacy IPv4 literal representations", () => { - expect(isBlockedHostnameOrIp("0177.0.0.1")).toBe(true); - expect(isBlockedHostnameOrIp("8.8.2056")).toBe(true); - expect(isBlockedHostnameOrIp("127.1")).toBe(true); - expect(isBlockedHostnameOrIp("2130706433")).toBe(true); + it.each(["0177.0.0.1", "8.8.2056", "127.1", "2130706433"])( + "blocks legacy IPv4 literal %s", + (address) => { + expect(isBlockedHostnameOrIp(address)).toBe(true); + }, + ); + + it("does not block ordinary hostnames", () => { + expect(isBlockedHostnameOrIp("example.com")).toBe(false); + expect(isBlockedHostnameOrIp("api.example.net")).toBe(false); }); }); diff --git a/src/infra/node-shell.test.ts b/src/infra/node-shell.test.ts new file mode 100644 index 00000000000..8a0dc72bde1 --- /dev/null +++ b/src/infra/node-shell.test.ts @@ -0,0 +1,35 @@ +import { describe, expect, it } from "vitest"; +import { buildNodeShellCommand } from "./node-shell.js"; + +describe("buildNodeShellCommand", () => { + it("uses cmd.exe for win-prefixed platform labels", () => { + expect(buildNodeShellCommand("echo hi", "win32")).toEqual([ + "cmd.exe", + "/d", + "/s", + "/c", + "echo hi", + ]); + expect(buildNodeShellCommand("echo hi", 
"windows")).toEqual([ + "cmd.exe", + "/d", + "/s", + "/c", + "echo hi", + ]); + expect(buildNodeShellCommand("echo hi", " Windows 11 ")).toEqual([ + "cmd.exe", + "/d", + "/s", + "/c", + "echo hi", + ]); + }); + + it("uses /bin/sh for non-windows and missing platform values", () => { + expect(buildNodeShellCommand("echo hi", "darwin")).toEqual(["/bin/sh", "-lc", "echo hi"]); + expect(buildNodeShellCommand("echo hi", "linux")).toEqual(["/bin/sh", "-lc", "echo hi"]); + expect(buildNodeShellCommand("echo hi")).toEqual(["/bin/sh", "-lc", "echo hi"]); + expect(buildNodeShellCommand("echo hi", null)).toEqual(["/bin/sh", "-lc", "echo hi"]); + }); +}); diff --git a/src/infra/npm-integrity.test.ts b/src/infra/npm-integrity.test.ts index e7e40b46413..aa96da76fab 100644 --- a/src/infra/npm-integrity.test.ts +++ b/src/infra/npm-integrity.test.ts @@ -6,23 +6,34 @@ import { describe("resolveNpmIntegrityDrift", () => { it("returns proceed=true when integrity is missing or unchanged", async () => { - await expect( - resolveNpmIntegrityDrift({ - spec: "@openclaw/test@1.0.0", - expectedIntegrity: "sha512-same", + const createPayload = vi.fn(() => "unused"); + const cases = [ + { + expectedIntegrity: undefined, resolution: { integrity: "sha512-same", resolvedAt: "2026-01-01T00:00:00.000Z" }, - createPayload: () => "unused", - }), - ).resolves.toEqual({ proceed: true }); - - await expect( - resolveNpmIntegrityDrift({ - spec: "@openclaw/test@1.0.0", + }, + { expectedIntegrity: "sha512-same", resolution: { resolvedAt: "2026-01-01T00:00:00.000Z" }, - createPayload: () => "unused", - }), - ).resolves.toEqual({ proceed: true }); + }, + { + expectedIntegrity: "sha512-same", + resolution: { integrity: "sha512-same", resolvedAt: "2026-01-01T00:00:00.000Z" }, + }, + ]; + + for (const testCase of cases) { + await expect( + resolveNpmIntegrityDrift({ + spec: "@openclaw/test@1.0.0", + expectedIntegrity: testCase.expectedIntegrity, + resolution: testCase.resolution, + createPayload, + }), + 
).resolves.toEqual({ proceed: true }); + } + + expect(createPayload).not.toHaveBeenCalled(); }); it("uses callback on integrity drift", async () => { @@ -52,6 +63,31 @@ describe("resolveNpmIntegrityDrift", () => { }); }); + it("returns payload when the drift callback allows continuing", async () => { + const result = await resolveNpmIntegrityDrift({ + spec: "@openclaw/test@1.0.0", + expectedIntegrity: "sha512-old", + resolution: { + integrity: "sha512-new", + resolvedAt: "2026-01-01T00:00:00.000Z", + }, + createPayload: ({ spec, actualIntegrity }) => ({ spec, actualIntegrity }), + onIntegrityDrift: async () => true, + }); + + expect(result).toEqual({ + integrityDrift: { + expectedIntegrity: "sha512-old", + actualIntegrity: "sha512-new", + }, + payload: { + spec: "@openclaw/test@1.0.0", + actualIntegrity: "sha512-new", + }, + proceed: true, + }); + }); + it("warns by default when no callback is provided", async () => { const warn = vi.fn(); const result = await resolveNpmIntegrityDrift({ @@ -100,4 +136,22 @@ describe("resolveNpmIntegrityDrift", () => { "aborted: npm package integrity drift detected for @openclaw/test@1.0.0", ); }); + + it("falls back to the original spec when resolvedSpec is missing", async () => { + const warn = vi.fn(); + + await resolveNpmIntegrityDriftWithDefaultMessage({ + spec: "@openclaw/test@1.0.0", + expectedIntegrity: "sha512-old", + resolution: { + integrity: "sha512-new", + resolvedAt: "2026-01-01T00:00:00.000Z", + }, + warn, + }); + + expect(warn).toHaveBeenCalledWith( + "Integrity drift detected for @openclaw/test@1.0.0: expected sha512-old, got sha512-new", + ); + }); }); diff --git a/src/infra/npm-pack-install.test.ts b/src/infra/npm-pack-install.test.ts index c0428ec03c5..94d732deef6 100644 --- a/src/infra/npm-pack-install.test.ts +++ b/src/infra/npm-pack-install.test.ts @@ -91,6 +91,24 @@ describe("installFromNpmSpecArchive", () => { expect(withTempDir).toHaveBeenCalledWith("openclaw-test-", expect.any(Function)); }); + it("rejects 
unsupported npm specs before packing", async () => { + const installFromArchive = vi.fn(async () => ({ ok: true as const })); + + const result = await installFromNpmSpecArchive({ + tempDirPrefix: "openclaw-test-", + spec: "file:/tmp/openclaw.tgz", + timeoutMs: 1000, + installFromArchive, + }); + + expect(result).toEqual({ + ok: false, + error: "unsupported npm spec", + }); + expect(packNpmSpecToArchive).not.toHaveBeenCalled(); + expect(installFromArchive).not.toHaveBeenCalled(); + }); + it("returns resolution metadata and installer result on success", async () => { mockPackedSuccess({ name: "@openclaw/test", version: "1.0.0" }); const installFromArchive = vi.fn(async () => ({ ok: true as const, target: "done" })); @@ -176,6 +194,56 @@ describe("installFromNpmSpecArchive", () => { const okResult = expectWrappedOkResult(result, { ok: false, error: "install failed" }); expect(okResult.integrityDrift).toBeUndefined(); }); + + it("rejects prerelease resolutions unless explicitly requested", async () => { + vi.mocked(packNpmSpecToArchive).mockResolvedValue({ + ok: true, + archivePath: baseArchivePath, + metadata: { + resolvedSpec: "@openclaw/test@latest", + integrity: "sha512-same", + version: "1.1.0-beta.1", + }, + }); + const installFromArchive = vi.fn(async () => ({ ok: true as const })); + + const result = await installFromNpmSpecArchive({ + tempDirPrefix: "openclaw-test-", + spec: "@openclaw/test@latest", + timeoutMs: 1000, + installFromArchive, + }); + + expect(result.ok).toBe(false); + if (result.ok) { + throw new Error("expected prerelease rejection"); + } + expect(result.error).toContain("prerelease version 1.1.0-beta.1"); + expect(installFromArchive).not.toHaveBeenCalled(); + }); + + it("allows prerelease resolutions when explicitly requested by tag", async () => { + vi.mocked(packNpmSpecToArchive).mockResolvedValue({ + ok: true, + archivePath: baseArchivePath, + metadata: { + resolvedSpec: "@openclaw/test@beta", + integrity: "sha512-same", + version: 
"1.1.0-beta.1", + }, + }); + const installFromArchive = vi.fn(async () => ({ ok: true as const, pluginId: "beta-plugin" })); + + const result = await installFromNpmSpecArchive({ + tempDirPrefix: "openclaw-test-", + spec: "@openclaw/test@beta", + timeoutMs: 1000, + installFromArchive, + }); + + const okResult = expectWrappedOkResult(result, { ok: true, pluginId: "beta-plugin" }); + expect(okResult.npmResolution.version).toBe("1.1.0-beta.1"); + }); }); describe("installFromNpmSpecArchiveWithInstaller", () => { diff --git a/src/infra/npm-registry-spec.test.ts b/src/infra/npm-registry-spec.test.ts index 8c0b62c5667..fe7058dc5b7 100644 --- a/src/infra/npm-registry-spec.test.ts +++ b/src/infra/npm-registry-spec.test.ts @@ -1,5 +1,8 @@ import { describe, expect, it } from "vitest"; import { + formatPrereleaseResolutionError, + isExactSemverVersion, + isPrereleaseSemverVersion, isPrereleaseResolutionAllowed, parseRegistryNpmSpec, validateRegistryNpmSpec, @@ -22,6 +25,51 @@ describe("npm registry spec validation", () => { "exact version or dist-tag", ); }); + + it("rejects unsupported registry protocols and malformed selectors", () => { + expect(validateRegistryNpmSpec("https://npmjs.org/pkg.tgz")).toContain("URLs are not allowed"); + expect(validateRegistryNpmSpec("git+ssh://github.com/openclaw/openclaw")).toContain( + "URLs are not allowed", + ); + expect(validateRegistryNpmSpec("@openclaw/voice-call@")).toContain( + "missing version/tag after @", + ); + expect(validateRegistryNpmSpec("@openclaw/voice-call@../beta")).toContain( + "invalid version/tag", + ); + }); +}); + +describe("npm registry spec parsing helpers", () => { + it("parses bare, tag, and exact prerelease specs", () => { + expect(parseRegistryNpmSpec("@openclaw/voice-call")).toEqual({ + name: "@openclaw/voice-call", + raw: "@openclaw/voice-call", + selectorKind: "none", + selectorIsPrerelease: false, + }); + expect(parseRegistryNpmSpec("@openclaw/voice-call@beta")).toEqual({ + name: "@openclaw/voice-call", + 
raw: "@openclaw/voice-call@beta", + selector: "beta", + selectorKind: "tag", + selectorIsPrerelease: false, + }); + expect(parseRegistryNpmSpec("@openclaw/voice-call@1.2.3-beta.1")).toEqual({ + name: "@openclaw/voice-call", + raw: "@openclaw/voice-call@1.2.3-beta.1", + selector: "1.2.3-beta.1", + selectorKind: "exact-version", + selectorIsPrerelease: true, + }); + }); + + it("detects exact and prerelease semver versions", () => { + expect(isExactSemverVersion("v1.2.3")).toBe(true); + expect(isExactSemverVersion("1.2")).toBe(false); + expect(isPrereleaseSemverVersion("1.2.3-beta.1")).toBe(true); + expect(isPrereleaseSemverVersion("1.2.3")).toBe(false); + }); }); describe("npm prerelease resolution policy", () => { @@ -66,4 +114,44 @@ describe("npm prerelease resolution policy", () => { }), ).toBe(true); }); + + it("allows stable resolutions even for bare and latest specs", () => { + const bareSpec = parseRegistryNpmSpec("@openclaw/voice-call"); + const latestSpec = parseRegistryNpmSpec("@openclaw/voice-call@latest"); + + expect(bareSpec).not.toBeNull(); + expect(latestSpec).not.toBeNull(); + expect( + isPrereleaseResolutionAllowed({ + spec: bareSpec!, + resolvedVersion: "1.2.3", + }), + ).toBe(true); + expect( + isPrereleaseResolutionAllowed({ + spec: latestSpec!, + resolvedVersion: undefined, + }), + ).toBe(true); + }); + + it("formats prerelease resolution guidance based on selector intent", () => { + const bareSpec = parseRegistryNpmSpec("@openclaw/voice-call"); + const tagSpec = parseRegistryNpmSpec("@openclaw/voice-call@beta"); + + expect(bareSpec).not.toBeNull(); + expect(tagSpec).not.toBeNull(); + expect( + formatPrereleaseResolutionError({ + spec: bareSpec!, + resolvedVersion: "1.2.3-beta.1", + }), + ).toContain(`Use "@openclaw/voice-call@beta"`); + expect( + formatPrereleaseResolutionError({ + spec: tagSpec!, + resolvedVersion: "1.2.3-rc.1", + }), + ).toContain("Use an explicit prerelease tag or exact prerelease version"); + }); }); diff --git 
a/src/infra/openclaw-exec-env.test.ts b/src/infra/openclaw-exec-env.test.ts new file mode 100644 index 00000000000..488fa1dd5ef --- /dev/null +++ b/src/infra/openclaw-exec-env.test.ts @@ -0,0 +1,30 @@ +import { describe, expect, it } from "vitest"; +import { + ensureOpenClawExecMarkerOnProcess, + markOpenClawExecEnv, + OPENCLAW_CLI_ENV_VALUE, + OPENCLAW_CLI_ENV_VAR, +} from "./openclaw-exec-env.js"; + +describe("markOpenClawExecEnv", () => { + it("returns a cloned env object with the exec marker set", () => { + const env = { PATH: "/usr/bin", OPENCLAW_CLI: "0" }; + const marked = markOpenClawExecEnv(env); + + expect(marked).toEqual({ + PATH: "/usr/bin", + OPENCLAW_CLI: OPENCLAW_CLI_ENV_VALUE, + }); + expect(marked).not.toBe(env); + expect(env.OPENCLAW_CLI).toBe("0"); + }); +}); + +describe("ensureOpenClawExecMarkerOnProcess", () => { + it("mutates and returns the provided process env", () => { + const env: NodeJS.ProcessEnv = { PATH: "/usr/bin" }; + + expect(ensureOpenClawExecMarkerOnProcess(env)).toBe(env); + expect(env[OPENCLAW_CLI_ENV_VAR]).toBe(OPENCLAW_CLI_ENV_VALUE); + }); +}); diff --git a/src/infra/openclaw-root.test.ts b/src/infra/openclaw-root.test.ts index 85d24512468..e12b2d77f64 100644 --- a/src/infra/openclaw-root.test.ts +++ b/src/infra/openclaw-root.test.ts @@ -141,6 +141,19 @@ describe("resolveOpenClawPackageRoot", () => { expect(resolveOpenClawPackageRootSync({ moduleUrl })).toBe(pkgRoot); }); + it("falls through from a non-openclaw moduleUrl candidate to cwd", async () => { + const wrongPkgRoot = fx("moduleurl-fallthrough", "wrong"); + const cwdPkgRoot = fx("moduleurl-fallthrough", "cwd"); + setFile(path.join(wrongPkgRoot, "package.json"), JSON.stringify({ name: "not-openclaw" })); + setFile(path.join(cwdPkgRoot, "package.json"), JSON.stringify({ name: "openclaw" })); + const moduleUrl = pathToFileURL(path.join(wrongPkgRoot, "dist", "index.js")).toString(); + + expect(resolveOpenClawPackageRootSync({ moduleUrl, cwd: cwdPkgRoot 
})).toBe(cwdPkgRoot); + await expect(resolveOpenClawPackageRoot({ moduleUrl, cwd: cwdPkgRoot })).resolves.toBe( + cwdPkgRoot, + ); + }); + it("ignores invalid moduleUrl values and falls back to cwd", async () => { const pkgRoot = fx("invalid-moduleurl"); setFile(path.join(pkgRoot, "package.json"), JSON.stringify({ name: "openclaw" })); @@ -160,6 +173,16 @@ describe("resolveOpenClawPackageRoot", () => { expect(resolveOpenClawPackageRootSync({ cwd: pkgRoot })).toBeNull(); }); + it("falls back from a symlinked argv1 to the node_modules package root", () => { + const project = fx("symlink-node-modules-fallback"); + const argv1 = path.join(project, "node_modules", ".bin", "openclaw"); + state.realpaths.set(abs(argv1), abs(path.join(project, "versions", "current", "openclaw.mjs"))); + const pkgRoot = path.join(project, "node_modules", "openclaw"); + setFile(path.join(pkgRoot, "package.json"), JSON.stringify({ name: "openclaw" })); + + expect(resolveOpenClawPackageRootSync({ argv1 })).toBe(pkgRoot); + }); + it("async resolver matches sync behavior", async () => { const pkgRoot = fx("async"); setFile(path.join(pkgRoot, "package.json"), JSON.stringify({ name: "openclaw" })); diff --git a/src/infra/os-summary.test.ts b/src/infra/os-summary.test.ts new file mode 100644 index 00000000000..533321f8dba --- /dev/null +++ b/src/infra/os-summary.test.ts @@ -0,0 +1,64 @@ +import os from "node:os"; +import { afterEach, describe, expect, it, vi } from "vitest"; + +const spawnSyncMock = vi.hoisted(() => vi.fn()); + +vi.mock("node:child_process", () => ({ + spawnSync: (...args: unknown[]) => spawnSyncMock(...args), +})); + +import { resolveOsSummary } from "./os-summary.js"; + +describe("resolveOsSummary", () => { + afterEach(() => { + vi.restoreAllMocks(); + }); + + it("formats darwin labels from sw_vers output", () => { + vi.spyOn(os, "platform").mockReturnValue("darwin"); + vi.spyOn(os, "release").mockReturnValue("24.0.0"); + vi.spyOn(os, "arch").mockReturnValue("arm64"); + 
spawnSyncMock.mockReturnValue({ + stdout: " 15.4 \n", + stderr: "", + pid: 1, + output: [], + status: 0, + signal: null, + }); + + expect(resolveOsSummary()).toEqual({ + platform: "darwin", + arch: "arm64", + release: "24.0.0", + label: "macos 15.4 (arm64)", + }); + }); + + it("falls back to os.release when sw_vers output is blank", () => { + vi.spyOn(os, "platform").mockReturnValue("darwin"); + vi.spyOn(os, "release").mockReturnValue("24.1.0"); + vi.spyOn(os, "arch").mockReturnValue("x64"); + spawnSyncMock.mockReturnValue({ + stdout: " ", + stderr: "", + pid: 1, + output: [], + status: 0, + signal: null, + }); + + expect(resolveOsSummary().label).toBe("macos 24.1.0 (x64)"); + }); + + it("formats windows and non-darwin labels from os metadata", () => { + vi.spyOn(os, "release").mockReturnValue("10.0.26100"); + vi.spyOn(os, "arch").mockReturnValue("x64"); + + vi.spyOn(os, "platform").mockReturnValue("win32"); + expect(resolveOsSummary().label).toBe("windows 10.0.26100 (x64)"); + + vi.spyOn(os, "platform").mockReturnValue("linux"); + expect(resolveOsSummary().label).toBe("linux 10.0.26100 (x64)"); + }); +}); diff --git a/src/infra/outbound/abort.test.ts b/src/infra/outbound/abort.test.ts new file mode 100644 index 00000000000..794615b2a28 --- /dev/null +++ b/src/infra/outbound/abort.test.ts @@ -0,0 +1,21 @@ +import { describe, expect, it } from "vitest"; +import { throwIfAborted } from "./abort.js"; + +describe("throwIfAborted", () => { + it("does nothing when the signal is missing or not aborted", () => { + expect(() => throwIfAborted()).not.toThrow(); + expect(() => throwIfAborted(new AbortController().signal)).not.toThrow(); + }); + + it("throws a standard AbortError when the signal is aborted", () => { + const controller = new AbortController(); + controller.abort(); + + expect(() => throwIfAborted(controller.signal)).toThrowError( + expect.objectContaining({ + name: "AbortError", + message: "Operation aborted", + }), + ); + }); +}); diff --git 
a/src/infra/outbound/channel-adapters.test.ts b/src/infra/outbound/channel-adapters.test.ts new file mode 100644 index 00000000000..ee2b5fe6dc8 --- /dev/null +++ b/src/infra/outbound/channel-adapters.test.ts @@ -0,0 +1,48 @@ +import { Separator, TextDisplay } from "@buape/carbon"; +import { describe, expect, it } from "vitest"; +import { DiscordUiContainer } from "../../discord/ui.js"; +import { getChannelMessageAdapter } from "./channel-adapters.js"; + +describe("getChannelMessageAdapter", () => { + it("returns the default adapter for non-discord channels", () => { + expect(getChannelMessageAdapter("telegram")).toEqual({ + supportsComponentsV2: false, + }); + }); + + it("returns the discord adapter with a cross-context component builder", () => { + const adapter = getChannelMessageAdapter("discord"); + + expect(adapter.supportsComponentsV2).toBe(true); + expect(adapter.buildCrossContextComponents).toBeTypeOf("function"); + + const components = adapter.buildCrossContextComponents?.({ + originLabel: "Telegram", + message: "Hello from chat", + cfg: {} as never, + accountId: "primary", + }); + const container = components?.[0] as DiscordUiContainer | undefined; + + expect(components).toHaveLength(1); + expect(container).toBeInstanceOf(DiscordUiContainer); + expect(container?.components).toEqual([ + expect.any(TextDisplay), + expect.any(Separator), + expect.any(TextDisplay), + ]); + }); + + it("omits the message body block when the cross-context message is blank", () => { + const adapter = getChannelMessageAdapter("discord"); + const components = adapter.buildCrossContextComponents?.({ + originLabel: "Signal", + message: " ", + cfg: {} as never, + }); + const container = components?.[0] as DiscordUiContainer | undefined; + + expect(components).toHaveLength(1); + expect(container?.components).toEqual([expect.any(TextDisplay)]); + }); +}); diff --git a/src/infra/outbound/channel-resolution.test.ts b/src/infra/outbound/channel-resolution.test.ts new file mode 100644 index 
00000000000..407994b152f --- /dev/null +++ b/src/infra/outbound/channel-resolution.test.ts @@ -0,0 +1,156 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const resolveDefaultAgentIdMock = vi.hoisted(() => vi.fn()); +const resolveAgentWorkspaceDirMock = vi.hoisted(() => vi.fn()); +const getChannelPluginMock = vi.hoisted(() => vi.fn()); +const applyPluginAutoEnableMock = vi.hoisted(() => vi.fn()); +const loadOpenClawPluginsMock = vi.hoisted(() => vi.fn()); +const getActivePluginRegistryMock = vi.hoisted(() => vi.fn()); +const getActivePluginRegistryKeyMock = vi.hoisted(() => vi.fn()); +const normalizeMessageChannelMock = vi.hoisted(() => vi.fn()); +const isDeliverableMessageChannelMock = vi.hoisted(() => vi.fn()); + +vi.mock("../../agents/agent-scope.js", () => ({ + resolveDefaultAgentId: (...args: unknown[]) => resolveDefaultAgentIdMock(...args), + resolveAgentWorkspaceDir: (...args: unknown[]) => resolveAgentWorkspaceDirMock(...args), +})); + +vi.mock("../../channels/plugins/index.js", () => ({ + getChannelPlugin: (...args: unknown[]) => getChannelPluginMock(...args), +})); + +vi.mock("../../config/plugin-auto-enable.js", () => ({ + applyPluginAutoEnable: (...args: unknown[]) => applyPluginAutoEnableMock(...args), +})); + +vi.mock("../../plugins/loader.js", () => ({ + loadOpenClawPlugins: (...args: unknown[]) => loadOpenClawPluginsMock(...args), +})); + +vi.mock("../../plugins/runtime.js", () => ({ + getActivePluginRegistry: (...args: unknown[]) => getActivePluginRegistryMock(...args), + getActivePluginRegistryKey: (...args: unknown[]) => getActivePluginRegistryKeyMock(...args), +})); + +vi.mock("../../utils/message-channel.js", () => ({ + normalizeMessageChannel: (...args: unknown[]) => normalizeMessageChannelMock(...args), + isDeliverableMessageChannel: (...args: unknown[]) => isDeliverableMessageChannelMock(...args), +})); + +import { importFreshModule } from "../../../test/helpers/import-fresh.js"; + +async function 
importChannelResolution(scope: string) { + return await importFreshModule( + import.meta.url, + `./channel-resolution.js?scope=${scope}`, + ); +} + +describe("outbound channel resolution", () => { + beforeEach(() => { + resolveDefaultAgentIdMock.mockReset(); + resolveAgentWorkspaceDirMock.mockReset(); + getChannelPluginMock.mockReset(); + applyPluginAutoEnableMock.mockReset(); + loadOpenClawPluginsMock.mockReset(); + getActivePluginRegistryMock.mockReset(); + getActivePluginRegistryKeyMock.mockReset(); + normalizeMessageChannelMock.mockReset(); + isDeliverableMessageChannelMock.mockReset(); + + normalizeMessageChannelMock.mockImplementation((value?: string | null) => + typeof value === "string" ? value.trim().toLowerCase() : undefined, + ); + isDeliverableMessageChannelMock.mockImplementation((value?: string) => + ["telegram", "discord", "slack"].includes(String(value)), + ); + getActivePluginRegistryMock.mockReturnValue({ channels: [] }); + getActivePluginRegistryKeyMock.mockReturnValue("registry-key"); + applyPluginAutoEnableMock.mockReturnValue({ config: { autoEnabled: true } }); + resolveDefaultAgentIdMock.mockReturnValue("main"); + resolveAgentWorkspaceDirMock.mockReturnValue("/tmp/workspace"); + }); + + it("normalizes deliverable channels and rejects unknown ones", async () => { + const channelResolution = await importChannelResolution("normalize"); + + expect(channelResolution.normalizeDeliverableOutboundChannel(" Telegram ")).toBe("telegram"); + expect(channelResolution.normalizeDeliverableOutboundChannel("unknown")).toBeUndefined(); + expect(channelResolution.normalizeDeliverableOutboundChannel(null)).toBeUndefined(); + }); + + it("returns the already-registered plugin without bootstrapping", async () => { + const plugin = { id: "telegram" }; + getChannelPluginMock.mockReturnValueOnce(plugin); + const channelResolution = await importChannelResolution("existing-plugin"); + + expect( + channelResolution.resolveOutboundChannelPlugin({ + channel: "telegram", + 
cfg: {} as never, + }), + ).toBe(plugin); + expect(loadOpenClawPluginsMock).not.toHaveBeenCalled(); + }); + + it("falls back to the active registry when getChannelPlugin misses", async () => { + const plugin = { id: "telegram" }; + getChannelPluginMock.mockReturnValue(undefined); + getActivePluginRegistryMock.mockReturnValue({ + channels: [{ plugin }], + }); + const channelResolution = await importChannelResolution("direct-registry"); + + expect( + channelResolution.resolveOutboundChannelPlugin({ + channel: "telegram", + cfg: {} as never, + }), + ).toBe(plugin); + }); + + it("bootstraps plugins once per registry key and returns the newly loaded plugin", async () => { + const plugin = { id: "telegram" }; + getChannelPluginMock.mockReturnValueOnce(undefined).mockReturnValueOnce(plugin); + const channelResolution = await importChannelResolution("bootstrap-success"); + + expect( + channelResolution.resolveOutboundChannelPlugin({ + channel: "telegram", + cfg: { channels: {} } as never, + }), + ).toBe(plugin); + expect(loadOpenClawPluginsMock).toHaveBeenCalledWith({ + config: { autoEnabled: true }, + workspaceDir: "/tmp/workspace", + }); + + getChannelPluginMock.mockReturnValue(undefined); + channelResolution.resolveOutboundChannelPlugin({ + channel: "telegram", + cfg: { channels: {} } as never, + }); + expect(loadOpenClawPluginsMock).toHaveBeenCalledTimes(1); + }); + + it("retries bootstrap after a transient load failure", async () => { + getChannelPluginMock.mockReturnValue(undefined); + loadOpenClawPluginsMock.mockImplementationOnce(() => { + throw new Error("transient"); + }); + const channelResolution = await importChannelResolution("bootstrap-retry"); + + expect( + channelResolution.resolveOutboundChannelPlugin({ + channel: "telegram", + cfg: { channels: {} } as never, + }), + ).toBeUndefined(); + + channelResolution.resolveOutboundChannelPlugin({ + channel: "telegram", + cfg: { channels: {} } as never, + }); + 
expect(loadOpenClawPluginsMock).toHaveBeenCalledTimes(2); + }); +}); diff --git a/src/infra/outbound/channel-selection.test.ts b/src/infra/outbound/channel-selection.test.ts index 15642a33bb1..da605dcdb63 100644 --- a/src/infra/outbound/channel-selection.test.ts +++ b/src/infra/outbound/channel-selection.test.ts @@ -8,7 +8,83 @@ vi.mock("../../channels/plugins/index.js", () => ({ listChannelPlugins: mocks.listChannelPlugins, })); -import { resolveMessageChannelSelection } from "./channel-selection.js"; +import { + listConfiguredMessageChannels, + resolveMessageChannelSelection, +} from "./channel-selection.js"; + +function makePlugin(params: { + id: string; + accountIds?: string[]; + resolveAccount?: (accountId: string) => unknown; + isEnabled?: (account: unknown) => boolean; + isConfigured?: (account: unknown) => boolean | Promise; +}) { + return { + id: params.id, + config: { + listAccountIds: () => params.accountIds ?? ["default"], + resolveAccount: (_cfg: unknown, accountId: string) => + params.resolveAccount ? params.resolveAccount(accountId) : {}, + ...(params.isEnabled ? { isEnabled: params.isEnabled } : {}), + ...(params.isConfigured ? 
{ isConfigured: params.isConfigured } : {}), + }, + }; +} + +describe("listConfiguredMessageChannels", () => { + beforeEach(() => { + mocks.listChannelPlugins.mockReset(); + mocks.listChannelPlugins.mockReturnValue([]); + }); + + it("skips unknown plugin ids and plugins without accounts", async () => { + mocks.listChannelPlugins.mockReturnValue([ + makePlugin({ id: "not-a-channel" }), + makePlugin({ id: "slack", accountIds: [] }), + ]); + + await expect(listConfiguredMessageChannels({} as never)).resolves.toEqual([]); + }); + + it("includes plugins without isConfigured when an enabled account exists", async () => { + mocks.listChannelPlugins.mockReturnValue([ + makePlugin({ + id: "discord", + resolveAccount: () => ({ enabled: true }), + }), + ]); + + await expect(listConfiguredMessageChannels({} as never)).resolves.toEqual(["discord"]); + }); + + it("skips disabled accounts and keeps later configured accounts", async () => { + mocks.listChannelPlugins.mockReturnValue([ + makePlugin({ + id: "telegram", + accountIds: ["disabled", "enabled"], + resolveAccount: (accountId) => + accountId === "disabled" ? 
{ enabled: false } : { enabled: true }, + isConfigured: (account) => (account as { enabled?: boolean }).enabled === true, + }), + ]); + + await expect(listConfiguredMessageChannels({} as never)).resolves.toEqual(["telegram"]); + }); + + it("respects custom isEnabled checks", async () => { + mocks.listChannelPlugins.mockReturnValue([ + makePlugin({ + id: "signal", + resolveAccount: () => ({ token: "x" }), + isEnabled: () => false, + isConfigured: () => true, + }), + ]); + + await expect(listConfiguredMessageChannels({} as never)).resolves.toEqual([]); + }); +}); describe("resolveMessageChannelSelection", () => { beforeEach(() => { @@ -58,14 +134,7 @@ describe("resolveMessageChannelSelection", () => { it("selects single configured channel when no explicit/fallback channel exists", async () => { mocks.listChannelPlugins.mockReturnValue([ - { - id: "discord", - config: { - listAccountIds: () => ["default"], - resolveAccount: () => ({}), - isConfigured: async () => true, - }, - }, + makePlugin({ id: "discord", isConfigured: async () => true }), ]); const selection = await resolveMessageChannelSelection({ @@ -88,4 +157,27 @@ describe("resolveMessageChannelSelection", () => { }), ).rejects.toThrow("Unknown channel: channel:c123"); }); + + it("throws when no channel is provided and nothing is configured", async () => { + await expect( + resolveMessageChannelSelection({ + cfg: {} as never, + }), + ).rejects.toThrow("Channel is required (no configured channels detected)."); + }); + + it("throws when multiple channels are configured and no channel is selected", async () => { + mocks.listChannelPlugins.mockReturnValue([ + makePlugin({ id: "discord", isConfigured: async () => true }), + makePlugin({ id: "telegram", isConfigured: async () => true }), + ]); + + await expect( + resolveMessageChannelSelection({ + cfg: {} as never, + }), + ).rejects.toThrow( + "Channel is required when multiple channels are configured: discord, telegram", + ); + }); }); diff --git 
a/src/infra/outbound/channel-target.test.ts b/src/infra/outbound/channel-target.test.ts new file mode 100644 index 00000000000..5d1f290d8f5 --- /dev/null +++ b/src/infra/outbound/channel-target.test.ts @@ -0,0 +1,63 @@ +import { describe, expect, it } from "vitest"; +import { applyTargetToParams } from "./channel-target.js"; + +describe("applyTargetToParams", () => { + it("maps trimmed target values into the configured target field", () => { + const toParams = { + action: "send", + args: { target: " channel:C1 " } as Record, + }; + applyTargetToParams(toParams); + expect(toParams.args.to).toBe("channel:C1"); + + const channelIdParams = { + action: "channel-info", + args: { target: " C123 " } as Record, + }; + applyTargetToParams(channelIdParams); + expect(channelIdParams.args.channelId).toBe("C123"); + }); + + it("throws on legacy destination fields when the action has canonical target support", () => { + expect(() => + applyTargetToParams({ + action: "send", + args: { + target: "channel:C1", + to: "legacy", + }, + }), + ).toThrow("Use `target` instead of `to`/`channelId`."); + }); + + it("throws when a no-target action receives target or legacy destination fields", () => { + expect(() => + applyTargetToParams({ + action: "broadcast", + args: { + to: "legacy", + }, + }), + ).toThrow("Use `target` for actions that accept a destination."); + + expect(() => + applyTargetToParams({ + action: "broadcast", + args: { + target: "channel:C1", + }, + }), + ).toThrow("Action broadcast does not accept a target."); + }); + + it("does nothing when target is blank", () => { + const params = { + action: "send", + args: { target: " " } as Record, + }; + + applyTargetToParams(params); + + expect(params.args).toEqual({ target: " " }); + }); +}); diff --git a/src/infra/outbound/conversation-id.test.ts b/src/infra/outbound/conversation-id.test.ts index b35c8e2e4a1..68865219c37 100644 --- a/src/infra/outbound/conversation-id.test.ts +++ b/src/infra/outbound/conversation-id.test.ts @@ 
-2,39 +2,58 @@ import { describe, expect, it } from "vitest"; import { resolveConversationIdFromTargets } from "./conversation-id.js"; describe("resolveConversationIdFromTargets", () => { - it("prefers explicit thread id when present", () => { - const resolved = resolveConversationIdFromTargets({ - threadId: "123456789", - targets: ["channel:987654321"], - }); - expect(resolved).toBe("123456789"); + it.each([ + { + name: "prefers explicit thread id strings", + params: { threadId: "123456789", targets: ["channel:987654321"] }, + expected: "123456789", + }, + { + name: "normalizes numeric thread ids", + params: { threadId: 123456789, targets: ["channel:987654321"] }, + expected: "123456789", + }, + { + name: "falls back when the thread id is blank", + params: { threadId: " ", targets: ["channel:987654321"] }, + expected: "987654321", + }, + ])("$name", ({ params, expected }) => { + expect(resolveConversationIdFromTargets(params)).toBe(expected); }); - it("extracts channel ids from channel: targets", () => { - const resolved = resolveConversationIdFromTargets({ + it.each([ + { + name: "extracts channel ids from channel targets", targets: ["channel:987654321"], - }); - expect(resolved).toBe("987654321"); - }); - - it("extracts ids from Discord channel mentions", () => { - const resolved = resolveConversationIdFromTargets({ + expected: "987654321", + }, + { + name: "trims channel target ids", + targets: ["channel: 987654321 "], + expected: "987654321", + }, + { + name: "extracts ids from Discord channel mentions", targets: ["<#1475250310120214812>"], - }); - expect(resolved).toBe("1475250310120214812"); - }); - - it("accepts raw numeric ids", () => { - const resolved = resolveConversationIdFromTargets({ + expected: "1475250310120214812", + }, + { + name: "accepts raw numeric ids", targets: ["1475250310120214812"], - }); - expect(resolved).toBe("1475250310120214812"); - }); - - it("returns undefined for non-channel targets", () => { - const resolved = 
resolveConversationIdFromTargets({ + expected: "1475250310120214812", + }, + { + name: "returns undefined for non-channel targets", targets: ["user:alice", "general"], - }); - expect(resolved).toBeUndefined(); + expected: undefined, + }, + { + name: "skips blank and malformed targets", + targets: [undefined, null, " ", "channel: ", "<#not-a-number>"], + expected: undefined, + }, + ])("$name", ({ targets, expected }) => { + expect(resolveConversationIdFromTargets({ targets })).toBe(expected); }); }); diff --git a/src/infra/outbound/deliver.lifecycle.test.ts b/src/infra/outbound/deliver.lifecycle.test.ts new file mode 100644 index 00000000000..00d696162d8 --- /dev/null +++ b/src/infra/outbound/deliver.lifecycle.test.ts @@ -0,0 +1,415 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { signalOutbound } from "../../channels/plugins/outbound/signal.js"; +import { telegramOutbound } from "../../channels/plugins/outbound/telegram.js"; +import { whatsappOutbound } from "../../channels/plugins/outbound/whatsapp.js"; +import type { OpenClawConfig } from "../../config/config.js"; +import { setActivePluginRegistry } from "../../plugins/runtime.js"; +import { createOutboundTestPlugin, createTestRegistry } from "../../test-utils/channel-plugins.js"; +import { createIMessageTestPlugin } from "../../test-utils/imessage-test-plugin.js"; +import { createInternalHookEventPayload } from "../../test-utils/internal-hook-event-payload.js"; + +const mocks = vi.hoisted(() => ({ + appendAssistantMessageToSessionTranscript: vi.fn(async () => ({ ok: true, sessionFile: "x" })), +})); +const hookMocks = vi.hoisted(() => ({ + runner: { + hasHooks: vi.fn(() => false), + runMessageSent: vi.fn(async () => {}), + }, +})); +const internalHookMocks = vi.hoisted(() => ({ + createInternalHookEvent: vi.fn(), + triggerInternalHook: vi.fn(async () => {}), +})); +const queueMocks = vi.hoisted(() => ({ + enqueueDelivery: vi.fn(async () => "mock-queue-id"), + ackDelivery: 
vi.fn(async () => {}), + failDelivery: vi.fn(async () => {}), +})); +const logMocks = vi.hoisted(() => ({ + warn: vi.fn(), +})); + +vi.mock("../../config/sessions.js", async () => { + const actual = await vi.importActual( + "../../config/sessions.js", + ); + return { + ...actual, + appendAssistantMessageToSessionTranscript: mocks.appendAssistantMessageToSessionTranscript, + }; +}); +vi.mock("../../plugins/hook-runner-global.js", () => ({ + getGlobalHookRunner: () => hookMocks.runner, +})); +vi.mock("../../hooks/internal-hooks.js", () => ({ + createInternalHookEvent: internalHookMocks.createInternalHookEvent, + triggerInternalHook: internalHookMocks.triggerInternalHook, +})); +vi.mock("./delivery-queue.js", () => ({ + enqueueDelivery: queueMocks.enqueueDelivery, + ackDelivery: queueMocks.ackDelivery, + failDelivery: queueMocks.failDelivery, +})); +vi.mock("../../logging/subsystem.js", () => ({ + createSubsystemLogger: () => { + const makeLogger = () => ({ + warn: logMocks.warn, + info: vi.fn(), + error: vi.fn(), + debug: vi.fn(), + child: vi.fn(() => makeLogger()), + }); + return makeLogger(); + }, +})); + +const { deliverOutboundPayloads } = await import("./deliver.js"); + +const whatsappChunkConfig: OpenClawConfig = { + channels: { whatsapp: { textChunkLimit: 4000 } }, +}; + +async function runChunkedWhatsAppDelivery(params?: { + mirror?: Parameters[0]["mirror"]; +}) { + const sendWhatsApp = vi + .fn() + .mockResolvedValueOnce({ messageId: "w1", toJid: "jid" }) + .mockResolvedValueOnce({ messageId: "w2", toJid: "jid" }); + const cfg: OpenClawConfig = { + channels: { whatsapp: { textChunkLimit: 2 } }, + }; + const results = await deliverOutboundPayloads({ + cfg, + channel: "whatsapp", + to: "+1555", + payloads: [{ text: "abcd" }], + deps: { sendWhatsApp }, + ...(params?.mirror ? 
{ mirror: params.mirror } : {}), + }); + return { sendWhatsApp, results }; +} + +async function deliverSingleWhatsAppForHookTest(params?: { sessionKey?: string }) { + const sendWhatsApp = vi.fn().mockResolvedValue({ messageId: "w1", toJid: "jid" }); + await deliverOutboundPayloads({ + cfg: whatsappChunkConfig, + channel: "whatsapp", + to: "+1555", + payloads: [{ text: "hello" }], + deps: { sendWhatsApp }, + ...(params?.sessionKey ? { session: { key: params.sessionKey } } : {}), + }); +} + +async function runBestEffortPartialFailureDelivery() { + const sendWhatsApp = vi + .fn() + .mockRejectedValueOnce(new Error("fail")) + .mockResolvedValueOnce({ messageId: "w2", toJid: "jid" }); + const onError = vi.fn(); + const cfg: OpenClawConfig = {}; + const results = await deliverOutboundPayloads({ + cfg, + channel: "whatsapp", + to: "+1555", + payloads: [{ text: "a" }, { text: "b" }], + deps: { sendWhatsApp }, + bestEffort: true, + onError, + }); + return { sendWhatsApp, onError, results }; +} + +function expectSuccessfulWhatsAppInternalHookPayload( + expected: Partial<{ + content: string; + messageId: string; + isGroup: boolean; + groupId: string; + }>, +) { + return expect.objectContaining({ + to: "+1555", + success: true, + channelId: "whatsapp", + conversationId: "+1555", + ...expected, + }); +} + +describe("deliverOutboundPayloads lifecycle", () => { + beforeEach(() => { + setActivePluginRegistry(defaultRegistry); + hookMocks.runner.hasHooks.mockClear(); + hookMocks.runner.hasHooks.mockReturnValue(false); + hookMocks.runner.runMessageSent.mockClear(); + hookMocks.runner.runMessageSent.mockResolvedValue(undefined); + internalHookMocks.createInternalHookEvent.mockClear(); + internalHookMocks.createInternalHookEvent.mockImplementation(createInternalHookEventPayload); + internalHookMocks.triggerInternalHook.mockClear(); + queueMocks.enqueueDelivery.mockClear(); + queueMocks.enqueueDelivery.mockResolvedValue("mock-queue-id"); + queueMocks.ackDelivery.mockClear(); + 
queueMocks.ackDelivery.mockResolvedValue(undefined); + queueMocks.failDelivery.mockClear(); + queueMocks.failDelivery.mockResolvedValue(undefined); + logMocks.warn.mockClear(); + mocks.appendAssistantMessageToSessionTranscript.mockClear(); + }); + + afterEach(() => { + setActivePluginRegistry(emptyRegistry); + }); + + it("continues on errors when bestEffort is enabled", async () => { + const { sendWhatsApp, onError, results } = await runBestEffortPartialFailureDelivery(); + + expect(sendWhatsApp).toHaveBeenCalledTimes(2); + expect(onError).toHaveBeenCalledTimes(1); + expect(results).toEqual([{ channel: "whatsapp", messageId: "w2", toJid: "jid" }]); + }); + + it("calls failDelivery instead of ackDelivery on bestEffort partial failure", async () => { + const { onError } = await runBestEffortPartialFailureDelivery(); + + expect(onError).toHaveBeenCalledTimes(1); + expect(queueMocks.ackDelivery).not.toHaveBeenCalled(); + expect(queueMocks.failDelivery).toHaveBeenCalledWith( + "mock-queue-id", + "partial delivery failure (bestEffort)", + ); + }); + + it("passes normalized payload to onError", async () => { + const sendWhatsApp = vi.fn().mockRejectedValue(new Error("boom")); + const onError = vi.fn(); + + await deliverOutboundPayloads({ + cfg: {}, + channel: "whatsapp", + to: "+1555", + payloads: [{ text: "hi", mediaUrl: "https://x.test/a.jpg" }], + deps: { sendWhatsApp }, + bestEffort: true, + onError, + }); + + expect(onError).toHaveBeenCalledTimes(1); + expect(onError).toHaveBeenCalledWith( + expect.any(Error), + expect.objectContaining({ text: "hi", mediaUrls: ["https://x.test/a.jpg"] }), + ); + }); + + it("acks the queue entry when delivery is aborted", async () => { + const sendWhatsApp = vi.fn().mockResolvedValue({ messageId: "w1", toJid: "jid" }); + const abortController = new AbortController(); + abortController.abort(); + + await expect( + deliverOutboundPayloads({ + cfg: {}, + channel: "whatsapp", + to: "+1555", + payloads: [{ text: "a" }], + deps: { 
sendWhatsApp }, + abortSignal: abortController.signal, + }), + ).rejects.toThrow("Operation aborted"); + + expect(queueMocks.ackDelivery).toHaveBeenCalledWith("mock-queue-id"); + expect(queueMocks.failDelivery).not.toHaveBeenCalled(); + expect(sendWhatsApp).not.toHaveBeenCalled(); + }); + + it("emits internal message:sent hook with success=true for chunked payload delivery", async () => { + const { sendWhatsApp } = await runChunkedWhatsAppDelivery({ + mirror: { + sessionKey: "agent:main:main", + isGroup: true, + groupId: "whatsapp:group:123", + }, + }); + expect(sendWhatsApp).toHaveBeenCalledTimes(2); + + expect(internalHookMocks.createInternalHookEvent).toHaveBeenCalledTimes(1); + expect(internalHookMocks.createInternalHookEvent).toHaveBeenCalledWith( + "message", + "sent", + "agent:main:main", + expectSuccessfulWhatsAppInternalHookPayload({ + content: "abcd", + messageId: "w2", + isGroup: true, + groupId: "whatsapp:group:123", + }), + ); + expect(internalHookMocks.triggerInternalHook).toHaveBeenCalledTimes(1); + }); + + it("does not emit internal message:sent hook when neither mirror nor sessionKey is provided", async () => { + await deliverSingleWhatsAppForHookTest(); + + expect(internalHookMocks.createInternalHookEvent).not.toHaveBeenCalled(); + expect(internalHookMocks.triggerInternalHook).not.toHaveBeenCalled(); + }); + + it("emits internal message:sent hook when sessionKey is provided without mirror", async () => { + await deliverSingleWhatsAppForHookTest({ sessionKey: "agent:main:main" }); + + expect(internalHookMocks.createInternalHookEvent).toHaveBeenCalledTimes(1); + expect(internalHookMocks.createInternalHookEvent).toHaveBeenCalledWith( + "message", + "sent", + "agent:main:main", + expectSuccessfulWhatsAppInternalHookPayload({ content: "hello", messageId: "w1" }), + ); + expect(internalHookMocks.triggerInternalHook).toHaveBeenCalledTimes(1); + }); + + it("warns when session.agentId is set without a session key", async () => { + const sendWhatsApp = 
vi.fn().mockResolvedValue({ messageId: "w1", toJid: "jid" }); + hookMocks.runner.hasHooks.mockReturnValue(true); + + await deliverOutboundPayloads({ + cfg: whatsappChunkConfig, + channel: "whatsapp", + to: "+1555", + payloads: [{ text: "hello" }], + deps: { sendWhatsApp }, + session: { agentId: "agent-main" }, + }); + + expect(logMocks.warn).toHaveBeenCalledWith( + "deliverOutboundPayloads: session.agentId present without session key; internal message:sent hook will be skipped", + expect.objectContaining({ channel: "whatsapp", to: "+1555", agentId: "agent-main" }), + ); + }); + + it("mirrors delivered output when mirror options are provided", async () => { + const sendTelegram = vi.fn().mockResolvedValue({ messageId: "m1", chatId: "c1" }); + + await deliverOutboundPayloads({ + cfg: { + channels: { telegram: { botToken: "tok-1", textChunkLimit: 2 } }, + }, + channel: "telegram", + to: "123", + payloads: [{ text: "caption", mediaUrl: "https://example.com/files/report.pdf?sig=1" }], + deps: { sendTelegram }, + mirror: { + sessionKey: "agent:main:main", + text: "caption", + mediaUrls: ["https://example.com/files/report.pdf?sig=1"], + idempotencyKey: "idem-deliver-1", + }, + }); + + expect(mocks.appendAssistantMessageToSessionTranscript).toHaveBeenCalledWith( + expect.objectContaining({ + text: "report.pdf", + idempotencyKey: "idem-deliver-1", + }), + ); + }); + + it("emits message_sent success for text-only deliveries", async () => { + hookMocks.runner.hasHooks.mockReturnValue(true); + const sendWhatsApp = vi.fn().mockResolvedValue({ messageId: "w1", toJid: "jid" }); + + await deliverOutboundPayloads({ + cfg: {}, + channel: "whatsapp", + to: "+1555", + payloads: [{ text: "hello" }], + deps: { sendWhatsApp }, + }); + + expect(hookMocks.runner.runMessageSent).toHaveBeenCalledWith( + expect.objectContaining({ to: "+1555", content: "hello", success: true }), + expect.objectContaining({ channelId: "whatsapp" }), + ); + }); + + it("emits message_sent success for sendPayload 
deliveries", async () => { + hookMocks.runner.hasHooks.mockReturnValue(true); + const sendPayload = vi.fn().mockResolvedValue({ channel: "matrix", messageId: "mx-1" }); + const sendText = vi.fn(); + const sendMedia = vi.fn(); + setActivePluginRegistry( + createTestRegistry([ + { + pluginId: "matrix", + source: "test", + plugin: createOutboundTestPlugin({ + id: "matrix", + outbound: { deliveryMode: "direct", sendPayload, sendText, sendMedia }, + }), + }, + ]), + ); + + await deliverOutboundPayloads({ + cfg: {}, + channel: "matrix", + to: "!room:1", + payloads: [{ text: "payload text", channelData: { mode: "custom" } }], + }); + + expect(hookMocks.runner.runMessageSent).toHaveBeenCalledWith( + expect.objectContaining({ to: "!room:1", content: "payload text", success: true }), + expect.objectContaining({ channelId: "matrix" }), + ); + }); + + it("emits message_sent failure when delivery errors", async () => { + hookMocks.runner.hasHooks.mockReturnValue(true); + const sendWhatsApp = vi.fn().mockRejectedValue(new Error("downstream failed")); + + await expect( + deliverOutboundPayloads({ + cfg: {}, + channel: "whatsapp", + to: "+1555", + payloads: [{ text: "hi" }], + deps: { sendWhatsApp }, + }), + ).rejects.toThrow("downstream failed"); + + expect(hookMocks.runner.runMessageSent).toHaveBeenCalledWith( + expect.objectContaining({ + to: "+1555", + content: "hi", + success: false, + error: "downstream failed", + }), + expect.objectContaining({ channelId: "whatsapp" }), + ); + }); +}); + +const emptyRegistry = createTestRegistry([]); +const defaultRegistry = createTestRegistry([ + { + pluginId: "telegram", + plugin: createOutboundTestPlugin({ id: "telegram", outbound: telegramOutbound }), + source: "test", + }, + { + pluginId: "signal", + plugin: createOutboundTestPlugin({ id: "signal", outbound: signalOutbound }), + source: "test", + }, + { + pluginId: "whatsapp", + plugin: createOutboundTestPlugin({ id: "whatsapp", outbound: whatsappOutbound }), + source: "test", + }, + { 
+ pluginId: "imessage", + plugin: createIMessageTestPlugin(), + source: "test", + }, +]); diff --git a/src/infra/outbound/deliver.test.ts b/src/infra/outbound/deliver.test.ts index 8e5383ea055..223b984382b 100644 --- a/src/infra/outbound/deliver.test.ts +++ b/src/infra/outbound/deliver.test.ts @@ -117,75 +117,6 @@ async function deliverTelegramPayload(params: { }); } -async function runChunkedWhatsAppDelivery(params?: { - mirror?: Parameters[0]["mirror"]; -}) { - const sendWhatsApp = vi - .fn() - .mockResolvedValueOnce({ messageId: "w1", toJid: "jid" }) - .mockResolvedValueOnce({ messageId: "w2", toJid: "jid" }); - const cfg: OpenClawConfig = { - channels: { whatsapp: { textChunkLimit: 2 } }, - }; - const results = await deliverOutboundPayloads({ - cfg, - channel: "whatsapp", - to: "+1555", - payloads: [{ text: "abcd" }], - deps: { sendWhatsApp }, - ...(params?.mirror ? { mirror: params.mirror } : {}), - }); - return { sendWhatsApp, results }; -} - -async function deliverSingleWhatsAppForHookTest(params?: { sessionKey?: string }) { - const sendWhatsApp = vi.fn().mockResolvedValue({ messageId: "w1", toJid: "jid" }); - await deliverOutboundPayloads({ - cfg: whatsappChunkConfig, - channel: "whatsapp", - to: "+1555", - payloads: [{ text: "hello" }], - deps: { sendWhatsApp }, - ...(params?.sessionKey ? 
{ session: { key: params.sessionKey } } : {}), - }); -} - -async function runBestEffortPartialFailureDelivery() { - const sendWhatsApp = vi - .fn() - .mockRejectedValueOnce(new Error("fail")) - .mockResolvedValueOnce({ messageId: "w2", toJid: "jid" }); - const onError = vi.fn(); - const cfg: OpenClawConfig = {}; - const results = await deliverOutboundPayloads({ - cfg, - channel: "whatsapp", - to: "+1555", - payloads: [{ text: "a" }, { text: "b" }], - deps: { sendWhatsApp }, - bestEffort: true, - onError, - }); - return { sendWhatsApp, onError, results }; -} - -function expectSuccessfulWhatsAppInternalHookPayload( - expected: Partial<{ - content: string; - messageId: string; - isGroup: boolean; - groupId: string; - }>, -) { - return expect.objectContaining({ - to: "+1555", - success: true, - channelId: "whatsapp", - conversationId: "+1555", - ...expected, - }); -} - describe("deliverOutboundPayloads", () => { beforeEach(() => { setActivePluginRegistry(defaultRegistry); @@ -529,7 +460,20 @@ describe("deliverOutboundPayloads", () => { }); it("chunks WhatsApp text and returns all results", async () => { - const { sendWhatsApp, results } = await runChunkedWhatsAppDelivery(); + const sendWhatsApp = vi + .fn() + .mockResolvedValueOnce({ messageId: "w1", toJid: "jid" }) + .mockResolvedValueOnce({ messageId: "w2", toJid: "jid" }); + const cfg: OpenClawConfig = { + channels: { whatsapp: { textChunkLimit: 2 } }, + }; + const results = await deliverOutboundPayloads({ + cfg, + channel: "whatsapp", + to: "+1555", + payloads: [{ text: "abcd" }], + deps: { sendWhatsApp }, + }); expect(sendWhatsApp).toHaveBeenCalledTimes(2); expect(results.map((r) => r.messageId)).toEqual(["w1", "w2"]); @@ -725,211 +669,6 @@ describe("deliverOutboundPayloads", () => { ]); }); - it("continues on errors when bestEffort is enabled", async () => { - const { sendWhatsApp, onError, results } = await runBestEffortPartialFailureDelivery(); - - expect(sendWhatsApp).toHaveBeenCalledTimes(2); - 
expect(onError).toHaveBeenCalledTimes(1); - expect(results).toEqual([{ channel: "whatsapp", messageId: "w2", toJid: "jid" }]); - }); - - it("emits internal message:sent hook with success=true for chunked payload delivery", async () => { - const { sendWhatsApp } = await runChunkedWhatsAppDelivery({ - mirror: { - sessionKey: "agent:main:main", - isGroup: true, - groupId: "whatsapp:group:123", - }, - }); - expect(sendWhatsApp).toHaveBeenCalledTimes(2); - - expect(internalHookMocks.createInternalHookEvent).toHaveBeenCalledTimes(1); - expect(internalHookMocks.createInternalHookEvent).toHaveBeenCalledWith( - "message", - "sent", - "agent:main:main", - expectSuccessfulWhatsAppInternalHookPayload({ - content: "abcd", - messageId: "w2", - isGroup: true, - groupId: "whatsapp:group:123", - }), - ); - expect(internalHookMocks.triggerInternalHook).toHaveBeenCalledTimes(1); - }); - - it("does not emit internal message:sent hook when neither mirror nor sessionKey is provided", async () => { - await deliverSingleWhatsAppForHookTest(); - - expect(internalHookMocks.createInternalHookEvent).not.toHaveBeenCalled(); - expect(internalHookMocks.triggerInternalHook).not.toHaveBeenCalled(); - }); - - it("emits internal message:sent hook when sessionKey is provided without mirror", async () => { - await deliverSingleWhatsAppForHookTest({ sessionKey: "agent:main:main" }); - - expect(internalHookMocks.createInternalHookEvent).toHaveBeenCalledTimes(1); - expect(internalHookMocks.createInternalHookEvent).toHaveBeenCalledWith( - "message", - "sent", - "agent:main:main", - expectSuccessfulWhatsAppInternalHookPayload({ content: "hello", messageId: "w1" }), - ); - expect(internalHookMocks.triggerInternalHook).toHaveBeenCalledTimes(1); - }); - - it("warns when session.agentId is set without a session key", async () => { - const sendWhatsApp = vi.fn().mockResolvedValue({ messageId: "w1", toJid: "jid" }); - hookMocks.runner.hasHooks.mockReturnValue(true); - - await deliverOutboundPayloads({ - cfg: 
whatsappChunkConfig, - channel: "whatsapp", - to: "+1555", - payloads: [{ text: "hello" }], - deps: { sendWhatsApp }, - session: { agentId: "agent-main" }, - }); - - expect(logMocks.warn).toHaveBeenCalledWith( - "deliverOutboundPayloads: session.agentId present without session key; internal message:sent hook will be skipped", - expect.objectContaining({ channel: "whatsapp", to: "+1555", agentId: "agent-main" }), - ); - }); - - it("calls failDelivery instead of ackDelivery on bestEffort partial failure", async () => { - const { onError } = await runBestEffortPartialFailureDelivery(); - - // onError was called for the first payload's failure. - expect(onError).toHaveBeenCalledTimes(1); - - // Queue entry should NOT be acked — failDelivery should be called instead. - expect(queueMocks.ackDelivery).not.toHaveBeenCalled(); - expect(queueMocks.failDelivery).toHaveBeenCalledWith( - "mock-queue-id", - "partial delivery failure (bestEffort)", - ); - }); - - it("acks the queue entry when delivery is aborted", async () => { - const sendWhatsApp = vi.fn().mockResolvedValue({ messageId: "w1", toJid: "jid" }); - const abortController = new AbortController(); - abortController.abort(); - const cfg: OpenClawConfig = {}; - - await expect( - deliverOutboundPayloads({ - cfg, - channel: "whatsapp", - to: "+1555", - payloads: [{ text: "a" }], - deps: { sendWhatsApp }, - abortSignal: abortController.signal, - }), - ).rejects.toThrow("Operation aborted"); - - expect(queueMocks.ackDelivery).toHaveBeenCalledWith("mock-queue-id"); - expect(queueMocks.failDelivery).not.toHaveBeenCalled(); - expect(sendWhatsApp).not.toHaveBeenCalled(); - }); - - it("passes normalized payload to onError", async () => { - const sendWhatsApp = vi.fn().mockRejectedValue(new Error("boom")); - const onError = vi.fn(); - const cfg: OpenClawConfig = {}; - - await deliverOutboundPayloads({ - cfg, - channel: "whatsapp", - to: "+1555", - payloads: [{ text: "hi", mediaUrl: "https://x.test/a.jpg" }], - deps: { 
sendWhatsApp }, - bestEffort: true, - onError, - }); - - expect(onError).toHaveBeenCalledTimes(1); - expect(onError).toHaveBeenCalledWith( - expect.any(Error), - expect.objectContaining({ text: "hi", mediaUrls: ["https://x.test/a.jpg"] }), - ); - }); - - it("mirrors delivered output when mirror options are provided", async () => { - const sendTelegram = vi.fn().mockResolvedValue({ messageId: "m1", chatId: "c1" }); - mocks.appendAssistantMessageToSessionTranscript.mockClear(); - - await deliverOutboundPayloads({ - cfg: telegramChunkConfig, - channel: "telegram", - to: "123", - payloads: [{ text: "caption", mediaUrl: "https://example.com/files/report.pdf?sig=1" }], - deps: { sendTelegram }, - mirror: { - sessionKey: "agent:main:main", - text: "caption", - mediaUrls: ["https://example.com/files/report.pdf?sig=1"], - idempotencyKey: "idem-deliver-1", - }, - }); - - expect(mocks.appendAssistantMessageToSessionTranscript).toHaveBeenCalledWith( - expect.objectContaining({ - text: "report.pdf", - idempotencyKey: "idem-deliver-1", - }), - ); - }); - - it("emits message_sent success for text-only deliveries", async () => { - hookMocks.runner.hasHooks.mockReturnValue(true); - const sendWhatsApp = vi.fn().mockResolvedValue({ messageId: "w1", toJid: "jid" }); - - await deliverOutboundPayloads({ - cfg: {}, - channel: "whatsapp", - to: "+1555", - payloads: [{ text: "hello" }], - deps: { sendWhatsApp }, - }); - - expect(hookMocks.runner.runMessageSent).toHaveBeenCalledWith( - expect.objectContaining({ to: "+1555", content: "hello", success: true }), - expect.objectContaining({ channelId: "whatsapp" }), - ); - }); - - it("emits message_sent success for sendPayload deliveries", async () => { - hookMocks.runner.hasHooks.mockReturnValue(true); - const sendPayload = vi.fn().mockResolvedValue({ channel: "matrix", messageId: "mx-1" }); - const sendText = vi.fn(); - const sendMedia = vi.fn(); - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "matrix", - source: "test", - 
plugin: createOutboundTestPlugin({ - id: "matrix", - outbound: { deliveryMode: "direct", sendPayload, sendText, sendMedia }, - }), - }, - ]), - ); - - await deliverOutboundPayloads({ - cfg: {}, - channel: "matrix", - to: "!room:1", - payloads: [{ text: "payload text", channelData: { mode: "custom" } }], - }); - - expect(hookMocks.runner.runMessageSent).toHaveBeenCalledWith( - expect.objectContaining({ to: "!room:1", content: "payload text", success: true }), - expect.objectContaining({ channelId: "matrix" }), - ); - }); - it("preserves channelData-only payloads with empty text for non-WhatsApp sendPayload channels", async () => { const sendPayload = vi.fn().mockResolvedValue({ channel: "line", messageId: "ln-1" }); const sendText = vi.fn(); @@ -1090,31 +829,6 @@ describe("deliverOutboundPayloads", () => { expect.objectContaining({ channelId: "matrix" }), ); }); - - it("emits message_sent failure when delivery errors", async () => { - hookMocks.runner.hasHooks.mockReturnValue(true); - const sendWhatsApp = vi.fn().mockRejectedValue(new Error("downstream failed")); - - await expect( - deliverOutboundPayloads({ - cfg: {}, - channel: "whatsapp", - to: "+1555", - payloads: [{ text: "hi" }], - deps: { sendWhatsApp }, - }), - ).rejects.toThrow("downstream failed"); - - expect(hookMocks.runner.runMessageSent).toHaveBeenCalledWith( - expect.objectContaining({ - to: "+1555", - content: "hi", - success: false, - error: "downstream failed", - }), - expect.objectContaining({ channelId: "whatsapp" }), - ); - }); }); const emptyRegistry = createTestRegistry([]); diff --git a/src/infra/outbound/delivery-queue.test.ts b/src/infra/outbound/delivery-queue.test.ts new file mode 100644 index 00000000000..da102d2904c --- /dev/null +++ b/src/infra/outbound/delivery-queue.test.ts @@ -0,0 +1,580 @@ +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; +import { afterAll, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import { + ackDelivery, 
+ computeBackoffMs, + type DeliverFn, + enqueueDelivery, + failDelivery, + isEntryEligibleForRecoveryRetry, + isPermanentDeliveryError, + loadPendingDeliveries, + MAX_RETRIES, + moveToFailed, + recoverPendingDeliveries, +} from "./delivery-queue.js"; + +describe("delivery-queue", () => { + let tmpDir: string; + let fixtureRoot = ""; + let fixtureCount = 0; + + beforeAll(() => { + fixtureRoot = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-dq-suite-")); + }); + + beforeEach(() => { + tmpDir = path.join(fixtureRoot, `case-${fixtureCount++}`); + fs.mkdirSync(tmpDir, { recursive: true }); + }); + + afterAll(() => { + if (!fixtureRoot) { + return; + } + fs.rmSync(fixtureRoot, { recursive: true, force: true }); + fixtureRoot = ""; + }); + + describe("enqueue + ack lifecycle", () => { + it("creates and removes a queue entry", async () => { + const id = await enqueueDelivery( + { + channel: "whatsapp", + to: "+1555", + payloads: [{ text: "hello" }], + bestEffort: true, + gifPlayback: true, + silent: true, + mirror: { + sessionKey: "agent:main:main", + text: "hello", + mediaUrls: ["https://example.com/file.png"], + }, + }, + tmpDir, + ); + + const queueDir = path.join(tmpDir, "delivery-queue"); + const files = fs.readdirSync(queueDir).filter((f) => f.endsWith(".json")); + expect(files).toHaveLength(1); + expect(files[0]).toBe(`${id}.json`); + + const entry = JSON.parse(fs.readFileSync(path.join(queueDir, files[0]), "utf-8")); + expect(entry).toMatchObject({ + id, + channel: "whatsapp", + to: "+1555", + bestEffort: true, + gifPlayback: true, + silent: true, + mirror: { + sessionKey: "agent:main:main", + text: "hello", + mediaUrls: ["https://example.com/file.png"], + }, + retryCount: 0, + }); + expect(entry.payloads).toEqual([{ text: "hello" }]); + + await ackDelivery(id, tmpDir); + const remaining = fs.readdirSync(queueDir).filter((f) => f.endsWith(".json")); + expect(remaining).toHaveLength(0); + }); + + it("ack is idempotent (no error on missing file)", async () => { + 
await expect(ackDelivery("nonexistent-id", tmpDir)).resolves.toBeUndefined(); + }); + + it("ack cleans up leftover .delivered marker when .json is already gone", async () => { + const id = await enqueueDelivery( + { channel: "whatsapp", to: "+1", payloads: [{ text: "stale-marker" }] }, + tmpDir, + ); + const queueDir = path.join(tmpDir, "delivery-queue"); + + fs.renameSync(path.join(queueDir, `${id}.json`), path.join(queueDir, `${id}.delivered`)); + await expect(ackDelivery(id, tmpDir)).resolves.toBeUndefined(); + + expect(fs.existsSync(path.join(queueDir, `${id}.delivered`))).toBe(false); + }); + + it("ack removes .delivered marker so recovery does not replay", async () => { + const id = await enqueueDelivery( + { channel: "whatsapp", to: "+1", payloads: [{ text: "ack-test" }] }, + tmpDir, + ); + const queueDir = path.join(tmpDir, "delivery-queue"); + + await ackDelivery(id, tmpDir); + + expect(fs.existsSync(path.join(queueDir, `${id}.json`))).toBe(false); + expect(fs.existsSync(path.join(queueDir, `${id}.delivered`))).toBe(false); + }); + + it("loadPendingDeliveries cleans up stale .delivered markers without replaying", async () => { + const id = await enqueueDelivery( + { channel: "telegram", to: "99", payloads: [{ text: "stale" }] }, + tmpDir, + ); + const queueDir = path.join(tmpDir, "delivery-queue"); + + fs.renameSync(path.join(queueDir, `${id}.json`), path.join(queueDir, `${id}.delivered`)); + + const entries = await loadPendingDeliveries(tmpDir); + + expect(entries).toHaveLength(0); + expect(fs.existsSync(path.join(queueDir, `${id}.delivered`))).toBe(false); + }); + }); + + describe("failDelivery", () => { + it("increments retryCount, records attempt time, and sets lastError", async () => { + const id = await enqueueDelivery( + { + channel: "telegram", + to: "123", + payloads: [{ text: "test" }], + }, + tmpDir, + ); + + await failDelivery(id, "connection refused", tmpDir); + + const queueDir = path.join(tmpDir, "delivery-queue"); + const entry = 
JSON.parse(fs.readFileSync(path.join(queueDir, `${id}.json`), "utf-8")); + expect(entry.retryCount).toBe(1); + expect(typeof entry.lastAttemptAt).toBe("number"); + expect(entry.lastAttemptAt).toBeGreaterThan(0); + expect(entry.lastError).toBe("connection refused"); + }); + }); + + describe("moveToFailed", () => { + it("moves entry to failed/ subdirectory", async () => { + const id = await enqueueDelivery( + { + channel: "slack", + to: "#general", + payloads: [{ text: "hi" }], + }, + tmpDir, + ); + + await moveToFailed(id, tmpDir); + + const queueDir = path.join(tmpDir, "delivery-queue"); + const failedDir = path.join(queueDir, "failed"); + expect(fs.existsSync(path.join(queueDir, `${id}.json`))).toBe(false); + expect(fs.existsSync(path.join(failedDir, `${id}.json`))).toBe(true); + }); + }); + + describe("isPermanentDeliveryError", () => { + it.each([ + "No conversation reference found for user:abc", + "Telegram send failed: chat not found (chat_id=user:123)", + "user not found", + "Bot was blocked by the user", + "Forbidden: bot was kicked from the group chat", + "chat_id is empty", + "Outbound not configured for channel: msteams", + ])("returns true for permanent error: %s", (msg) => { + expect(isPermanentDeliveryError(msg)).toBe(true); + }); + + it.each([ + "network down", + "ETIMEDOUT", + "socket hang up", + "rate limited", + "500 Internal Server Error", + ])("returns false for transient error: %s", (msg) => { + expect(isPermanentDeliveryError(msg)).toBe(false); + }); + }); + + describe("loadPendingDeliveries", () => { + it("returns empty array when queue directory does not exist", async () => { + const nonexistent = path.join(tmpDir, "no-such-dir"); + const entries = await loadPendingDeliveries(nonexistent); + expect(entries).toEqual([]); + }); + + it("loads multiple entries", async () => { + await enqueueDelivery({ channel: "whatsapp", to: "+1", payloads: [{ text: "a" }] }, tmpDir); + await enqueueDelivery({ channel: "telegram", to: "2", payloads: [{ text: "b" 
}] }, tmpDir); + + const entries = await loadPendingDeliveries(tmpDir); + expect(entries).toHaveLength(2); + }); + + it("backfills lastAttemptAt for legacy retry entries during load", async () => { + const id = await enqueueDelivery( + { channel: "whatsapp", to: "+1", payloads: [{ text: "legacy" }] }, + tmpDir, + ); + const filePath = path.join(tmpDir, "delivery-queue", `${id}.json`); + const legacyEntry = JSON.parse(fs.readFileSync(filePath, "utf-8")); + legacyEntry.retryCount = 2; + delete legacyEntry.lastAttemptAt; + fs.writeFileSync(filePath, JSON.stringify(legacyEntry), "utf-8"); + + const entries = await loadPendingDeliveries(tmpDir); + expect(entries).toHaveLength(1); + expect(entries[0]?.lastAttemptAt).toBe(entries[0]?.enqueuedAt); + + const persisted = JSON.parse(fs.readFileSync(filePath, "utf-8")); + expect(persisted.lastAttemptAt).toBe(persisted.enqueuedAt); + }); + }); + + describe("computeBackoffMs", () => { + it("returns scheduled backoff values and clamps at max retry", () => { + const cases = [ + { retryCount: 0, expected: 0 }, + { retryCount: 1, expected: 5_000 }, + { retryCount: 2, expected: 25_000 }, + { retryCount: 3, expected: 120_000 }, + { retryCount: 4, expected: 600_000 }, + { retryCount: 5, expected: 600_000 }, + ] as const; + + for (const testCase of cases) { + expect(computeBackoffMs(testCase.retryCount), String(testCase.retryCount)).toBe( + testCase.expected, + ); + } + }); + }); + + describe("isEntryEligibleForRecoveryRetry", () => { + it("allows first replay after crash for retryCount=0 without lastAttemptAt", () => { + const now = Date.now(); + const result = isEntryEligibleForRecoveryRetry( + { + id: "entry-1", + channel: "whatsapp", + to: "+1", + payloads: [{ text: "a" }], + enqueuedAt: now, + retryCount: 0, + }, + now, + ); + expect(result).toEqual({ eligible: true }); + }); + + it("defers retry entries until backoff window elapses", () => { + const now = Date.now(); + const result = isEntryEligibleForRecoveryRetry( + { + id: 
"entry-2", + channel: "whatsapp", + to: "+1", + payloads: [{ text: "a" }], + enqueuedAt: now - 30_000, + retryCount: 3, + lastAttemptAt: now, + }, + now, + ); + expect(result.eligible).toBe(false); + if (result.eligible) { + throw new Error("Expected ineligible retry entry"); + } + expect(result.remainingBackoffMs).toBeGreaterThan(0); + }); + }); + + describe("recoverPendingDeliveries", () => { + const baseCfg = {}; + const createLog = () => ({ info: vi.fn(), warn: vi.fn(), error: vi.fn() }); + const enqueueCrashRecoveryEntries = async () => { + await enqueueDelivery({ channel: "whatsapp", to: "+1", payloads: [{ text: "a" }] }, tmpDir); + await enqueueDelivery({ channel: "telegram", to: "2", payloads: [{ text: "b" }] }, tmpDir); + }; + const setEntryState = ( + id: string, + state: { retryCount: number; lastAttemptAt?: number; enqueuedAt?: number }, + ) => { + const filePath = path.join(tmpDir, "delivery-queue", `${id}.json`); + const entry = JSON.parse(fs.readFileSync(filePath, "utf-8")); + entry.retryCount = state.retryCount; + if (state.lastAttemptAt === undefined) { + delete entry.lastAttemptAt; + } else { + entry.lastAttemptAt = state.lastAttemptAt; + } + if (state.enqueuedAt !== undefined) { + entry.enqueuedAt = state.enqueuedAt; + } + fs.writeFileSync(filePath, JSON.stringify(entry), "utf-8"); + }; + const runRecovery = async ({ + deliver, + log = createLog(), + maxRecoveryMs, + }: { + deliver: ReturnType<typeof vi.fn>; + log?: ReturnType<typeof createLog>; + maxRecoveryMs?: number; + }) => { + const result = await recoverPendingDeliveries({ + deliver: deliver as DeliverFn, + log, + cfg: baseCfg, + stateDir: tmpDir, + ...(maxRecoveryMs === undefined ? 
{} : { maxRecoveryMs }), + }); + return { result, log }; + }; + + it("recovers entries from a simulated crash", async () => { + await enqueueCrashRecoveryEntries(); + const deliver = vi.fn().mockResolvedValue([]); + const { result } = await runRecovery({ deliver }); + + expect(deliver).toHaveBeenCalledTimes(2); + expect(result.recovered).toBe(2); + expect(result.failed).toBe(0); + expect(result.skippedMaxRetries).toBe(0); + expect(result.deferredBackoff).toBe(0); + + const remaining = await loadPendingDeliveries(tmpDir); + expect(remaining).toHaveLength(0); + }); + + it("moves entries that exceeded max retries to failed/", async () => { + const id = await enqueueDelivery( + { channel: "whatsapp", to: "+1", payloads: [{ text: "a" }] }, + tmpDir, + ); + setEntryState(id, { retryCount: MAX_RETRIES }); + + const deliver = vi.fn(); + const { result } = await runRecovery({ deliver }); + + expect(deliver).not.toHaveBeenCalled(); + expect(result.skippedMaxRetries).toBe(1); + expect(result.deferredBackoff).toBe(0); + + const failedDir = path.join(tmpDir, "delivery-queue", "failed"); + expect(fs.existsSync(path.join(failedDir, `${id}.json`))).toBe(true); + }); + + it("increments retryCount on failed recovery attempt", async () => { + await enqueueDelivery({ channel: "slack", to: "#ch", payloads: [{ text: "x" }] }, tmpDir); + + const deliver = vi.fn().mockRejectedValue(new Error("network down")); + const { result } = await runRecovery({ deliver }); + + expect(result.failed).toBe(1); + expect(result.recovered).toBe(0); + + const entries = await loadPendingDeliveries(tmpDir); + expect(entries).toHaveLength(1); + expect(entries[0].retryCount).toBe(1); + expect(entries[0].lastError).toBe("network down"); + }); + + it("moves entries to failed/ immediately on permanent delivery errors", async () => { + const id = await enqueueDelivery( + { channel: "msteams", to: "user:abc", payloads: [{ text: "hi" }] }, + tmpDir, + ); + const deliver = vi + .fn() + .mockRejectedValue(new Error("No 
conversation reference found for user:abc")); + const log = createLog(); + const { result } = await runRecovery({ deliver, log }); + + expect(result.failed).toBe(1); + expect(result.recovered).toBe(0); + const remaining = await loadPendingDeliveries(tmpDir); + expect(remaining).toHaveLength(0); + const failedDir = path.join(tmpDir, "delivery-queue", "failed"); + expect(fs.existsSync(path.join(failedDir, `${id}.json`))).toBe(true); + expect(log.warn).toHaveBeenCalledWith(expect.stringContaining("permanent error")); + }); + + it("passes skipQueue: true to prevent re-enqueueing during recovery", async () => { + await enqueueDelivery({ channel: "whatsapp", to: "+1", payloads: [{ text: "a" }] }, tmpDir); + + const deliver = vi.fn().mockResolvedValue([]); + await runRecovery({ deliver }); + + expect(deliver).toHaveBeenCalledWith(expect.objectContaining({ skipQueue: true })); + }); + + it("replays stored delivery options during recovery", async () => { + await enqueueDelivery( + { + channel: "whatsapp", + to: "+1", + payloads: [{ text: "a" }], + bestEffort: true, + gifPlayback: true, + silent: true, + mirror: { + sessionKey: "agent:main:main", + text: "a", + mediaUrls: ["https://example.com/a.png"], + }, + }, + tmpDir, + ); + + const deliver = vi.fn().mockResolvedValue([]); + await runRecovery({ deliver }); + + expect(deliver).toHaveBeenCalledWith( + expect.objectContaining({ + bestEffort: true, + gifPlayback: true, + silent: true, + mirror: { + sessionKey: "agent:main:main", + text: "a", + mediaUrls: ["https://example.com/a.png"], + }, + }), + ); + }); + + it("respects maxRecoveryMs time budget", async () => { + await enqueueCrashRecoveryEntries(); + await enqueueDelivery({ channel: "slack", to: "#c", payloads: [{ text: "c" }] }, tmpDir); + + const deliver = vi.fn().mockResolvedValue([]); + const { result, log } = await runRecovery({ + deliver, + maxRecoveryMs: 0, + }); + + expect(deliver).not.toHaveBeenCalled(); + expect(result.recovered).toBe(0); + 
expect(result.failed).toBe(0); + expect(result.skippedMaxRetries).toBe(0); + expect(result.deferredBackoff).toBe(0); + + const remaining = await loadPendingDeliveries(tmpDir); + expect(remaining).toHaveLength(3); + + expect(log.warn).toHaveBeenCalledWith(expect.stringContaining("deferred to next restart")); + }); + + it("defers entries until backoff becomes eligible", async () => { + const id = await enqueueDelivery( + { channel: "whatsapp", to: "+1", payloads: [{ text: "a" }] }, + tmpDir, + ); + setEntryState(id, { retryCount: 3, lastAttemptAt: Date.now() }); + + const deliver = vi.fn().mockResolvedValue([]); + const { result, log } = await runRecovery({ + deliver, + maxRecoveryMs: 60_000, + }); + + expect(deliver).not.toHaveBeenCalled(); + expect(result).toEqual({ + recovered: 0, + failed: 0, + skippedMaxRetries: 0, + deferredBackoff: 1, + }); + + const remaining = await loadPendingDeliveries(tmpDir); + expect(remaining).toHaveLength(1); + + expect(log.info).toHaveBeenCalledWith(expect.stringContaining("not ready for retry yet")); + }); + + it("continues past high-backoff entries and recovers ready entries behind them", async () => { + const now = Date.now(); + const blockedId = await enqueueDelivery( + { channel: "whatsapp", to: "+1", payloads: [{ text: "blocked" }] }, + tmpDir, + ); + const readyId = await enqueueDelivery( + { channel: "telegram", to: "2", payloads: [{ text: "ready" }] }, + tmpDir, + ); + + setEntryState(blockedId, { retryCount: 3, lastAttemptAt: now, enqueuedAt: now - 30_000 }); + setEntryState(readyId, { retryCount: 0, enqueuedAt: now - 10_000 }); + + const deliver = vi.fn().mockResolvedValue([]); + const { result } = await runRecovery({ deliver, maxRecoveryMs: 60_000 }); + + expect(result).toEqual({ + recovered: 1, + failed: 0, + skippedMaxRetries: 0, + deferredBackoff: 1, + }); + expect(deliver).toHaveBeenCalledTimes(1); + expect(deliver).toHaveBeenCalledWith( + expect.objectContaining({ channel: "telegram", to: "2", skipQueue: true }), + 
); + + const remaining = await loadPendingDeliveries(tmpDir); + expect(remaining).toHaveLength(1); + expect(remaining[0]?.id).toBe(blockedId); + }); + + it("recovers deferred entries on a later restart once backoff elapsed", async () => { + vi.useFakeTimers(); + const start = new Date("2026-01-01T00:00:00.000Z"); + vi.setSystemTime(start); + + const id = await enqueueDelivery( + { channel: "whatsapp", to: "+1", payloads: [{ text: "later" }] }, + tmpDir, + ); + setEntryState(id, { retryCount: 3, lastAttemptAt: start.getTime() }); + + const firstDeliver = vi.fn().mockResolvedValue([]); + const firstRun = await runRecovery({ deliver: firstDeliver, maxRecoveryMs: 60_000 }); + expect(firstRun.result).toEqual({ + recovered: 0, + failed: 0, + skippedMaxRetries: 0, + deferredBackoff: 1, + }); + expect(firstDeliver).not.toHaveBeenCalled(); + + vi.setSystemTime(new Date(start.getTime() + 600_000 + 1)); + const secondDeliver = vi.fn().mockResolvedValue([]); + const secondRun = await runRecovery({ deliver: secondDeliver, maxRecoveryMs: 60_000 }); + expect(secondRun.result).toEqual({ + recovered: 1, + failed: 0, + skippedMaxRetries: 0, + deferredBackoff: 0, + }); + expect(secondDeliver).toHaveBeenCalledTimes(1); + + const remaining = await loadPendingDeliveries(tmpDir); + expect(remaining).toHaveLength(0); + + vi.useRealTimers(); + }); + + it("returns zeros when queue is empty", async () => { + const deliver = vi.fn(); + const { result } = await runRecovery({ deliver }); + + expect(result).toEqual({ + recovered: 0, + failed: 0, + skippedMaxRetries: 0, + deferredBackoff: 0, + }); + expect(deliver).not.toHaveBeenCalled(); + }); + }); +}); diff --git a/src/infra/outbound/directory-cache.test.ts b/src/infra/outbound/directory-cache.test.ts new file mode 100644 index 00000000000..5234662b6cf --- /dev/null +++ b/src/infra/outbound/directory-cache.test.ts @@ -0,0 +1,65 @@ +import { describe, expect, it, vi } from "vitest"; +import type { OpenClawConfig } from "../../config/config.js"; 
+import { DirectoryCache, buildDirectoryCacheKey } from "./directory-cache.js"; + +describe("buildDirectoryCacheKey", () => { + it("includes account and signature fallbacks", () => { + expect( + buildDirectoryCacheKey({ + channel: "slack", + kind: "channel", + source: "cache", + }), + ).toBe("slack:default:channel:cache:default"); + + expect( + buildDirectoryCacheKey({ + channel: "discord", + accountId: "work", + kind: "user", + source: "live", + signature: "v2", + }), + ).toBe("discord:work:user:live:v2"); + }); +}); + +describe("DirectoryCache", () => { + it("expires entries after ttl and resets when config ref changes", () => { + vi.useFakeTimers(); + const cache = new DirectoryCache(1_000); + const cfgA = {} as OpenClawConfig; + const cfgB = {} as OpenClawConfig; + + cache.set("a", "first", cfgA); + expect(cache.get("a", cfgA)).toBe("first"); + + vi.advanceTimersByTime(1_001); + expect(cache.get("a", cfgA)).toBeUndefined(); + + cache.set("b", "second", cfgA); + expect(cache.get("b", cfgB)).toBeUndefined(); + + vi.useRealTimers(); + }); + + it("evicts least-recent entries, refreshes insertion order, and clears matches", () => { + const cache = new DirectoryCache(60_000, 2); + const cfg = {} as OpenClawConfig; + + cache.set("a", "A", cfg); + cache.set("b", "B", cfg); + cache.set("a", "A2", cfg); + cache.set("c", "C", cfg); + + expect(cache.get("a", cfg)).toBe("A2"); + expect(cache.get("b", cfg)).toBeUndefined(); + expect(cache.get("c", cfg)).toBe("C"); + + cache.clearMatching((key) => key.startsWith("c")); + expect(cache.get("c", cfg)).toBeUndefined(); + + cache.clear(cfg); + expect(cache.get("a", cfg)).toBeUndefined(); + }); +}); diff --git a/src/infra/outbound/envelope.test.ts b/src/infra/outbound/envelope.test.ts new file mode 100644 index 00000000000..68b2aa28b96 --- /dev/null +++ b/src/infra/outbound/envelope.test.ts @@ -0,0 +1,55 @@ +import { describe, expect, it } from "vitest"; +import type { ReplyPayload } from "../../auto-reply/types.js"; +import { 
buildOutboundResultEnvelope } from "./envelope.js"; +import type { OutboundDeliveryJson } from "./format.js"; + +describe("buildOutboundResultEnvelope", () => { + const delivery: OutboundDeliveryJson = { + channel: "telegram", + via: "direct", + to: "123", + messageId: "m1", + mediaUrl: null, + chatId: "c1", + }; + + it("flattens delivery by default when nothing else is present", () => { + expect(buildOutboundResultEnvelope({ delivery })).toEqual(delivery); + }); + + it("keeps pre-normalized payload JSON entries but clones the array", () => { + const payloads = [{ text: "hi", mediaUrl: null, mediaUrls: undefined }]; + + const envelope = buildOutboundResultEnvelope({ + payloads, + meta: { ok: true }, + }); + + expect(envelope).toEqual({ + payloads: [{ text: "hi", mediaUrl: null, mediaUrls: undefined }], + meta: { ok: true }, + }); + expect((envelope as { payloads: unknown[] }).payloads).not.toBe(payloads); + }); + + it("normalizes reply payloads and keeps wrapped delivery when flattening is disabled", () => { + const payloads: ReplyPayload[] = [{ text: "hello" }]; + + expect( + buildOutboundResultEnvelope({ + payloads, + delivery, + flattenDelivery: false, + }), + ).toEqual({ + payloads: [ + { + text: "hello", + mediaUrl: null, + channelData: undefined, + }, + ], + delivery, + }); + }); +}); diff --git a/src/infra/outbound/format.test.ts b/src/infra/outbound/format.test.ts new file mode 100644 index 00000000000..db30cd4c511 --- /dev/null +++ b/src/infra/outbound/format.test.ts @@ -0,0 +1,179 @@ +import { describe, expect, it } from "vitest"; +import { + buildOutboundDeliveryJson, + formatGatewaySummary, + formatOutboundDeliverySummary, +} from "./format.js"; + +describe("formatOutboundDeliverySummary", () => { + it("formats fallback and provider-specific detail variants", () => { + const cases = [ + { + name: "fallback telegram", + channel: "telegram" as const, + result: undefined, + expected: "✅ Sent via Telegram. 
Message ID: unknown", + }, + { + name: "fallback imessage", + channel: "imessage" as const, + result: undefined, + expected: "✅ Sent via iMessage. Message ID: unknown", + }, + { + name: "telegram with chat detail", + channel: "telegram" as const, + result: { + channel: "telegram" as const, + messageId: "m1", + chatId: "c1", + }, + expected: "✅ Sent via Telegram. Message ID: m1 (chat c1)", + }, + { + name: "discord with channel detail", + channel: "discord" as const, + result: { + channel: "discord" as const, + messageId: "d1", + channelId: "chan", + }, + expected: "✅ Sent via Discord. Message ID: d1 (channel chan)", + }, + { + name: "slack with room detail", + channel: "slack" as const, + result: { + channel: "slack" as const, + messageId: "s1", + roomId: "room-1", + }, + expected: "✅ Sent via Slack. Message ID: s1 (room room-1)", + }, + { + name: "msteams with conversation detail", + channel: "msteams" as const, + result: { + channel: "msteams" as const, + messageId: "t1", + conversationId: "conv-1", + }, + expected: "✅ Sent via msteams. 
Message ID: t1 (conversation conv-1)", + }, + ]; + + for (const testCase of cases) { + expect(formatOutboundDeliverySummary(testCase.channel, testCase.result), testCase.name).toBe( + testCase.expected, + ); + } + }); +}); + +describe("buildOutboundDeliveryJson", () => { + it("builds delivery payloads across provider-specific fields", () => { + const cases = [ + { + name: "telegram direct payload", + input: { + channel: "telegram" as const, + to: "123", + result: { channel: "telegram" as const, messageId: "m1", chatId: "c1" }, + mediaUrl: "https://example.com/a.png", + }, + expected: { + channel: "telegram", + via: "direct", + to: "123", + messageId: "m1", + mediaUrl: "https://example.com/a.png", + chatId: "c1", + }, + }, + { + name: "whatsapp metadata", + input: { + channel: "whatsapp" as const, + to: "+1", + result: { channel: "whatsapp" as const, messageId: "w1", toJid: "jid" }, + }, + expected: { + channel: "whatsapp", + via: "direct", + to: "+1", + messageId: "w1", + mediaUrl: null, + toJid: "jid", + }, + }, + { + name: "signal timestamp", + input: { + channel: "signal" as const, + to: "+1", + result: { channel: "signal" as const, messageId: "s1", timestamp: 123 }, + }, + expected: { + channel: "signal", + via: "direct", + to: "+1", + messageId: "s1", + mediaUrl: null, + timestamp: 123, + }, + }, + { + name: "gateway payload with meta and explicit via", + input: { + channel: "discord" as const, + to: "channel:1", + via: "gateway" as const, + result: { + messageId: "g1", + channelId: "1", + meta: { thread: "2" }, + }, + }, + expected: { + channel: "discord", + via: "gateway", + to: "channel:1", + messageId: "g1", + mediaUrl: null, + channelId: "1", + meta: { thread: "2" }, + }, + }, + ]; + + for (const testCase of cases) { + expect(buildOutboundDeliveryJson(testCase.input), testCase.name).toEqual(testCase.expected); + } + }); +}); + +describe("formatGatewaySummary", () => { + it("formats default and custom gateway action summaries", () => { + const cases = [ + { 
+ name: "default send action", + input: { channel: "whatsapp", messageId: "m1" }, + expected: "✅ Sent via gateway (whatsapp). Message ID: m1", + }, + { + name: "custom action", + input: { action: "Poll sent", channel: "discord", messageId: "p1" }, + expected: "✅ Poll sent via gateway (discord). Message ID: p1", + }, + { + name: "missing channel and message id", + input: {}, + expected: "✅ Sent via gateway. Message ID: unknown", + }, + ]; + + for (const testCase of cases) { + expect(formatGatewaySummary(testCase.input), testCase.name).toBe(testCase.expected); + } + }); +}); diff --git a/src/infra/outbound/identity.test.ts b/src/infra/outbound/identity.test.ts new file mode 100644 index 00000000000..ea1c3623fbc --- /dev/null +++ b/src/infra/outbound/identity.test.ts @@ -0,0 +1,69 @@ +import { describe, expect, it, vi } from "vitest"; + +const resolveAgentIdentityMock = vi.hoisted(() => vi.fn()); +const resolveAgentAvatarMock = vi.hoisted(() => vi.fn()); + +vi.mock("../../agents/identity.js", () => ({ + resolveAgentIdentity: (...args: unknown[]) => resolveAgentIdentityMock(...args), +})); + +vi.mock("../../agents/identity-avatar.js", () => ({ + resolveAgentAvatar: (...args: unknown[]) => resolveAgentAvatarMock(...args), +})); + +import { normalizeOutboundIdentity, resolveAgentOutboundIdentity } from "./identity.js"; + +describe("normalizeOutboundIdentity", () => { + it("trims fields and drops empty identities", () => { + expect( + normalizeOutboundIdentity({ + name: " Demo Bot ", + avatarUrl: " https://example.com/a.png ", + emoji: " 🤖 ", + }), + ).toEqual({ + name: "Demo Bot", + avatarUrl: "https://example.com/a.png", + emoji: "🤖", + }); + expect( + normalizeOutboundIdentity({ + name: " ", + avatarUrl: "\n", + emoji: "", + }), + ).toBeUndefined(); + }); +}); + +describe("resolveAgentOutboundIdentity", () => { + it("builds normalized identity data and keeps only remote avatars", () => { + resolveAgentIdentityMock.mockReturnValueOnce({ + name: " Agent Smith ", + emoji: 
" 🕶️ ", + }); + resolveAgentAvatarMock.mockReturnValueOnce({ + kind: "remote", + url: "https://example.com/avatar.png", + }); + + expect(resolveAgentOutboundIdentity({} as never, "main")).toEqual({ + name: "Agent Smith", + emoji: "🕶️", + avatarUrl: "https://example.com/avatar.png", + }); + }); + + it("drops blank and non-remote avatar values after normalization", () => { + resolveAgentIdentityMock.mockReturnValueOnce({ + name: " ", + emoji: "", + }); + resolveAgentAvatarMock.mockReturnValueOnce({ + kind: "data", + dataUrl: "data:image/png;base64,abc", + }); + + expect(resolveAgentOutboundIdentity({} as never, "main")).toBeUndefined(); + }); +}); diff --git a/src/infra/outbound/message-action-normalization.test.ts b/src/infra/outbound/message-action-normalization.test.ts index 5f0647b955c..87fa7a8503c 100644 --- a/src/infra/outbound/message-action-normalization.test.ts +++ b/src/infra/outbound/message-action-normalization.test.ts @@ -72,6 +72,62 @@ describe("normalizeMessageActionInput", () => { expect(normalized.channel).toBe("slack"); }); + it("does not infer a target for actions that do not accept one", () => { + const normalized = normalizeMessageActionInput({ + action: "broadcast", + args: {}, + toolContext: { + currentChannelId: "channel:C1", + }, + }); + + expect("target" in normalized).toBe(false); + expect("to" in normalized).toBe(false); + }); + + it("does not backfill a non-deliverable tool-context channel", () => { + const normalized = normalizeMessageActionInput({ + action: "send", + args: { + target: "channel:C1", + }, + toolContext: { + currentChannelProvider: "webchat", + }, + }); + + expect("channel" in normalized).toBe(false); + }); + + it("keeps alias-based targets without inferring the current channel", () => { + const normalized = normalizeMessageActionInput({ + action: "edit", + args: { + messageId: "msg_123", + }, + toolContext: { + currentChannelId: "channel:C1", + }, + }); + + expect(normalized.messageId).toBe("msg_123"); + expect("target" 
in normalized).toBe(false); + expect("to" in normalized).toBe(false); + }); + + it("maps legacy channelId inputs through canonical target for channel-id actions", () => { + const normalized = normalizeMessageActionInput({ + action: "channel-info", + args: { + channelId: "C123", + }, + }); + + expect(normalized.target).toBe("C123"); + expect(normalized.channelId).toBe("C123"); + expect("to" in normalized).toBe(false); + }); + it("throws when required target remains unresolved", () => { expect(() => normalizeMessageActionInput({ diff --git a/src/infra/outbound/message-action-params.test.ts b/src/infra/outbound/message-action-params.test.ts index 996db9682b0..f72bd2d26aa 100644 --- a/src/infra/outbound/message-action-params.test.ts +++ b/src/infra/outbound/message-action-params.test.ts @@ -2,15 +2,145 @@ import fs from "node:fs/promises"; import os from "node:os"; import path from "node:path"; import { describe, expect, it } from "vitest"; +import type { ChannelThreadingToolContext } from "../../channels/plugins/types.js"; import type { OpenClawConfig } from "../../config/config.js"; import { hydrateAttachmentParamsForAction, + normalizeSandboxMediaList, normalizeSandboxMediaParams, + resolveAttachmentMediaPolicy, + resolveSlackAutoThreadId, + resolveTelegramAutoThreadId, } from "./message-action-params.js"; const cfg = {} as OpenClawConfig; const maybeIt = process.platform === "win32" ? 
it.skip : it; +function createToolContext( + overrides: Partial = {}, +): ChannelThreadingToolContext { + return { + currentChannelId: "C123", + currentThreadTs: "thread-1", + replyToMode: "all", + ...overrides, + }; +} + +describe("message action threading helpers", () => { + it("resolves Slack auto-thread ids only for matching active channels", () => { + expect( + resolveSlackAutoThreadId({ + to: "#c123", + toolContext: createToolContext(), + }), + ).toBe("thread-1"); + expect( + resolveSlackAutoThreadId({ + to: "channel:C999", + toolContext: createToolContext(), + }), + ).toBeUndefined(); + expect( + resolveSlackAutoThreadId({ + to: "user:U123", + toolContext: createToolContext(), + }), + ).toBeUndefined(); + }); + + it("skips Slack auto-thread ids when reply mode or context blocks them", () => { + expect( + resolveSlackAutoThreadId({ + to: "C123", + toolContext: createToolContext({ + replyToMode: "first", + hasRepliedRef: { value: true }, + }), + }), + ).toBeUndefined(); + expect( + resolveSlackAutoThreadId({ + to: "C123", + toolContext: createToolContext({ replyToMode: "off" }), + }), + ).toBeUndefined(); + expect( + resolveSlackAutoThreadId({ + to: "C123", + toolContext: createToolContext({ currentThreadTs: undefined }), + }), + ).toBeUndefined(); + }); + + it("resolves Telegram auto-thread ids for matching chats across target formats", () => { + expect( + resolveTelegramAutoThreadId({ + to: "telegram:group:-100123:topic:77", + toolContext: createToolContext({ + currentChannelId: "tg:group:-100123", + }), + }), + ).toBe("thread-1"); + expect( + resolveTelegramAutoThreadId({ + to: "-100999:77", + toolContext: createToolContext({ + currentChannelId: "-100123", + }), + }), + ).toBeUndefined(); + expect( + resolveTelegramAutoThreadId({ + to: "-100123", + toolContext: createToolContext({ currentChannelId: undefined }), + }), + ).toBeUndefined(); + }); +}); + +describe("message action media helpers", () => { + it("prefers sandbox media policy when sandbox roots are 
non-blank", () => { + expect( + resolveAttachmentMediaPolicy({ + sandboxRoot: " /tmp/workspace ", + mediaLocalRoots: ["/tmp/a"], + }), + ).toEqual({ + mode: "sandbox", + sandboxRoot: "/tmp/workspace", + }); + expect( + resolveAttachmentMediaPolicy({ + sandboxRoot: " ", + mediaLocalRoots: ["/tmp/a"], + }), + ).toEqual({ + mode: "host", + localRoots: ["/tmp/a"], + }); + }); + + maybeIt("normalizes sandbox media lists and dedupes resolved workspace paths", async () => { + const sandboxRoot = await fs.mkdtemp(path.join(os.tmpdir(), "msg-params-list-")); + try { + await expect( + normalizeSandboxMediaList({ + values: [" data:text/plain;base64,QQ== "], + }), + ).rejects.toThrow(/data:/i); + await expect( + normalizeSandboxMediaList({ + values: [" file:///workspace/assets/photo.png ", "/workspace/assets/photo.png", " "], + sandboxRoot: ` ${sandboxRoot} `, + }), + ).resolves.toEqual([path.join(sandboxRoot, "assets", "photo.png")]); + } finally { + await fs.rm(sandboxRoot, { recursive: true, force: true }); + } + }); +}); + describe("message action sandbox media hydration", () => { maybeIt("rejects symlink retarget escapes after sandbox media normalization", async () => { const sandboxRoot = await fs.mkdtemp(path.join(os.tmpdir(), "msg-params-sandbox-")); diff --git a/src/infra/outbound/message-action-runner.context.test.ts b/src/infra/outbound/message-action-runner.context.test.ts new file mode 100644 index 00000000000..185ff2bf648 --- /dev/null +++ b/src/infra/outbound/message-action-runner.context.test.ts @@ -0,0 +1,447 @@ +import { afterEach, beforeAll, beforeEach, describe, expect, it } from "vitest"; +import { slackPlugin } from "../../../extensions/slack/src/channel.js"; +import { telegramPlugin } from "../../../extensions/telegram/src/channel.js"; +import { whatsappPlugin } from "../../../extensions/whatsapp/src/channel.js"; +import type { OpenClawConfig } from "../../config/config.js"; +import { setActivePluginRegistry } from "../../plugins/runtime.js"; +import { 
createTestRegistry } from "../../test-utils/channel-plugins.js"; +import { createIMessageTestPlugin } from "../../test-utils/imessage-test-plugin.js"; +import { runMessageAction } from "./message-action-runner.js"; + +const slackConfig = { + channels: { + slack: { + botToken: "xoxb-test", + appToken: "xapp-test", + }, + }, +} as OpenClawConfig; + +const whatsappConfig = { + channels: { + whatsapp: { + allowFrom: ["*"], + }, + }, +} as OpenClawConfig; + +const runDryAction = (params: { + cfg: OpenClawConfig; + action: "send" | "thread-reply" | "broadcast"; + actionParams: Record; + toolContext?: Record; + abortSignal?: AbortSignal; + sandboxRoot?: string; +}) => + runMessageAction({ + cfg: params.cfg, + action: params.action, + params: params.actionParams as never, + toolContext: params.toolContext as never, + dryRun: true, + abortSignal: params.abortSignal, + sandboxRoot: params.sandboxRoot, + }); + +const runDrySend = (params: { + cfg: OpenClawConfig; + actionParams: Record; + toolContext?: Record; + abortSignal?: AbortSignal; + sandboxRoot?: string; +}) => + runDryAction({ + ...params, + action: "send", + }); + +let createPluginRuntime: typeof import("../../plugins/runtime/index.js").createPluginRuntime; +let setSlackRuntime: typeof import("../../../extensions/slack/src/runtime.js").setSlackRuntime; +let setTelegramRuntime: typeof import("../../../extensions/telegram/src/runtime.js").setTelegramRuntime; +let setWhatsAppRuntime: typeof import("../../../extensions/whatsapp/src/runtime.js").setWhatsAppRuntime; + +function installChannelRuntimes(params?: { includeTelegram?: boolean; includeWhatsApp?: boolean }) { + const runtime = createPluginRuntime(); + setSlackRuntime(runtime); + if (params?.includeTelegram !== false) { + setTelegramRuntime(runtime); + } + if (params?.includeWhatsApp !== false) { + setWhatsAppRuntime(runtime); + } +} + +describe("runMessageAction context isolation", () => { + beforeAll(async () => { + ({ createPluginRuntime } = await 
import("../../plugins/runtime/index.js")); + ({ setSlackRuntime } = await import("../../../extensions/slack/src/runtime.js")); + ({ setTelegramRuntime } = await import("../../../extensions/telegram/src/runtime.js")); + ({ setWhatsAppRuntime } = await import("../../../extensions/whatsapp/src/runtime.js")); + }); + + beforeEach(() => { + installChannelRuntimes(); + setActivePluginRegistry( + createTestRegistry([ + { + pluginId: "slack", + source: "test", + plugin: slackPlugin, + }, + { + pluginId: "whatsapp", + source: "test", + plugin: whatsappPlugin, + }, + { + pluginId: "telegram", + source: "test", + plugin: telegramPlugin, + }, + { + pluginId: "imessage", + source: "test", + plugin: createIMessageTestPlugin(), + }, + ]), + ); + }); + + afterEach(() => { + setActivePluginRegistry(createTestRegistry([])); + }); + + it.each([ + { + name: "allows send when target matches current channel", + cfg: slackConfig, + actionParams: { + channel: "slack", + target: "#C12345678", + message: "hi", + }, + toolContext: { currentChannelId: "C12345678" }, + }, + { + name: "accepts legacy to parameter for send", + cfg: slackConfig, + actionParams: { + channel: "slack", + to: "#C12345678", + message: "hi", + }, + }, + { + name: "defaults to current channel when target is omitted", + cfg: slackConfig, + actionParams: { + channel: "slack", + message: "hi", + }, + toolContext: { currentChannelId: "C12345678" }, + }, + { + name: "allows media-only send when target matches current channel", + cfg: slackConfig, + actionParams: { + channel: "slack", + target: "#C12345678", + media: "https://example.com/note.ogg", + }, + toolContext: { currentChannelId: "C12345678" }, + }, + { + name: "allows send when poll booleans are explicitly false", + cfg: slackConfig, + actionParams: { + channel: "slack", + target: "#C12345678", + message: "hi", + pollMulti: false, + pollAnonymous: false, + pollPublic: false, + }, + toolContext: { currentChannelId: "C12345678" }, + }, + ])("$name", async ({ cfg, 
actionParams, toolContext }) => { + const result = await runDrySend({ + cfg, + actionParams, + ...(toolContext ? { toolContext } : {}), + }); + + expect(result.kind).toBe("send"); + }); + + it("requires message when no media hint is provided", async () => { + await expect( + runDrySend({ + cfg: slackConfig, + actionParams: { + channel: "slack", + target: "#C12345678", + }, + toolContext: { currentChannelId: "C12345678" }, + }), + ).rejects.toThrow(/message required/i); + }); + + it.each([ + { + name: "structured poll params", + actionParams: { + channel: "slack", + target: "#C12345678", + message: "hi", + pollQuestion: "Ready?", + pollOption: ["Yes", "No"], + }, + }, + { + name: "string-encoded poll params", + actionParams: { + channel: "slack", + target: "#C12345678", + message: "hi", + pollDurationSeconds: "60", + pollPublic: "true", + }, + }, + { + name: "snake_case poll params", + actionParams: { + channel: "slack", + target: "#C12345678", + message: "hi", + poll_question: "Ready?", + poll_option: ["Yes", "No"], + poll_public: "true", + }, + }, + ])("rejects send actions that include $name", async ({ actionParams }) => { + await expect( + runDrySend({ + cfg: slackConfig, + actionParams, + toolContext: { currentChannelId: "C12345678" }, + }), + ).rejects.toThrow(/use action "poll" instead of "send"/i); + }); + + it.each([ + { + name: "send when target differs from current slack channel", + run: () => + runDrySend({ + cfg: slackConfig, + actionParams: { + channel: "slack", + target: "channel:C99999999", + message: "hi", + }, + toolContext: { currentChannelId: "C12345678", currentChannelProvider: "slack" }, + }), + expectedKind: "send", + }, + { + name: "thread-reply when channelId differs from current slack channel", + run: () => + runDryAction({ + cfg: slackConfig, + action: "thread-reply", + actionParams: { + channel: "slack", + target: "C99999999", + message: "hi", + }, + toolContext: { currentChannelId: "C12345678", currentChannelProvider: "slack" }, + }), + 
expectedKind: "action", + }, + ])("blocks cross-context UI handoff for $name", async ({ run, expectedKind }) => { + const result = await run(); + expect(result.kind).toBe(expectedKind); + }); + + it.each([ + { + name: "whatsapp match", + channel: "whatsapp", + target: "123@g.us", + currentChannelId: "123@g.us", + }, + { + name: "imessage match", + channel: "imessage", + target: "imessage:+15551234567", + currentChannelId: "imessage:+15551234567", + }, + { + name: "whatsapp mismatch", + channel: "whatsapp", + target: "456@g.us", + currentChannelId: "123@g.us", + currentChannelProvider: "whatsapp", + }, + { + name: "imessage mismatch", + channel: "imessage", + target: "imessage:+15551230000", + currentChannelId: "imessage:+15551234567", + currentChannelProvider: "imessage", + }, + ] as const)("$name", async (testCase) => { + const result = await runDrySend({ + cfg: whatsappConfig, + actionParams: { + channel: testCase.channel, + target: testCase.target, + message: "hi", + }, + toolContext: { + currentChannelId: testCase.currentChannelId, + ...(testCase.currentChannelProvider + ? 
{ currentChannelProvider: testCase.currentChannelProvider } + : {}), + }, + }); + + expect(result.kind).toBe("send"); + }); + + it.each([ + { + name: "infers channel + target from tool context when missing", + cfg: { + channels: { + slack: { + botToken: "xoxb-test", + appToken: "xapp-test", + }, + telegram: { + token: "tg-test", + }, + }, + } as OpenClawConfig, + action: "send" as const, + actionParams: { + message: "hi", + }, + toolContext: { currentChannelId: "C12345678", currentChannelProvider: "slack" }, + expectedKind: "send", + expectedChannel: "slack", + }, + { + name: "falls back to tool-context provider when channel param is an id", + cfg: slackConfig, + action: "send" as const, + actionParams: { + channel: "C12345678", + target: "#C12345678", + message: "hi", + }, + toolContext: { currentChannelId: "C12345678", currentChannelProvider: "slack" }, + expectedKind: "send", + expectedChannel: "slack", + }, + { + name: "falls back to tool-context provider for broadcast channel ids", + cfg: slackConfig, + action: "broadcast" as const, + actionParams: { + targets: ["channel:C12345678"], + channel: "C12345678", + message: "hi", + }, + toolContext: { currentChannelProvider: "slack" }, + expectedKind: "broadcast", + expectedChannel: "slack", + }, + ])("$name", async ({ cfg, action, actionParams, toolContext, expectedKind, expectedChannel }) => { + const result = await runDryAction({ + cfg, + action, + actionParams, + toolContext, + }); + + expect(result.kind).toBe(expectedKind); + expect(result.channel).toBe(expectedChannel); + }); + + it.each([ + { + name: "blocks cross-provider sends by default", + cfg: slackConfig, + actionParams: { + channel: "telegram", + target: "@opsbot", + message: "hi", + }, + toolContext: { currentChannelId: "C12345678", currentChannelProvider: "slack" }, + message: /Cross-context messaging denied/, + }, + { + name: "blocks same-provider cross-context when disabled", + cfg: { + ...slackConfig, + tools: { + message: { + crossContext: { + 
allowWithinProvider: false, + }, + }, + }, + } as OpenClawConfig, + actionParams: { + channel: "slack", + target: "channel:C99999999", + message: "hi", + }, + toolContext: { currentChannelId: "C12345678", currentChannelProvider: "slack" }, + message: /Cross-context messaging denied/, + }, + ])("$name", async ({ cfg, actionParams, toolContext, message }) => { + await expect( + runDrySend({ + cfg, + actionParams, + toolContext, + }), + ).rejects.toThrow(message); + }); + + it.each([ + { + name: "send", + run: (abortSignal: AbortSignal) => + runDrySend({ + cfg: slackConfig, + actionParams: { + channel: "slack", + target: "#C12345678", + message: "hi", + }, + abortSignal, + }), + }, + { + name: "broadcast", + run: (abortSignal: AbortSignal) => + runDryAction({ + cfg: slackConfig, + action: "broadcast", + actionParams: { + targets: ["channel:C12345678"], + channel: "slack", + message: "hi", + }, + abortSignal, + }), + }, + ])("aborts $name when abortSignal is already aborted", async ({ run }) => { + const controller = new AbortController(); + controller.abort(); + await expect(run(controller.signal)).rejects.toMatchObject({ name: "AbortError" }); + }); +}); diff --git a/src/infra/outbound/message-action-runner.media.test.ts b/src/infra/outbound/message-action-runner.media.test.ts new file mode 100644 index 00000000000..287f8e3c677 --- /dev/null +++ b/src/infra/outbound/message-action-runner.media.test.ts @@ -0,0 +1,422 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import { slackPlugin } from "../../../extensions/slack/src/channel.js"; +import { jsonResult } from "../../agents/tools/common.js"; +import type { ChannelPlugin } from "../../channels/plugins/types.js"; +import type { OpenClawConfig } from "../../config/config.js"; +import { setActivePluginRegistry } from "../../plugins/runtime.js"; +import { createTestRegistry } from 
"../../test-utils/channel-plugins.js"; +import { loadWebMedia } from "../../web/media.js"; +import { resolvePreferredOpenClawTmpDir } from "../tmp-openclaw-dir.js"; +import { runMessageAction } from "./message-action-runner.js"; + +vi.mock("../../web/media.js", async () => { + const actual = await vi.importActual("../../web/media.js"); + return { + ...actual, + loadWebMedia: vi.fn(actual.loadWebMedia), + }; +}); + +const slackConfig = { + channels: { + slack: { + botToken: "xoxb-test", + appToken: "xapp-test", + }, + }, +} as OpenClawConfig; + +async function withSandbox(test: (sandboxDir: string) => Promise) { + const sandboxDir = await fs.mkdtemp(path.join(os.tmpdir(), "msg-sandbox-")); + try { + await test(sandboxDir); + } finally { + await fs.rm(sandboxDir, { recursive: true, force: true }); + } +} + +const runDrySend = (params: { + cfg: OpenClawConfig; + actionParams: Record; + sandboxRoot?: string; +}) => + runMessageAction({ + cfg: params.cfg, + action: "send", + params: params.actionParams as never, + dryRun: true, + sandboxRoot: params.sandboxRoot, + }); + +async function expectSandboxMediaRewrite(params: { + sandboxDir: string; + media?: string; + message?: string; + expectedRelativePath: string; +}) { + const result = await runDrySend({ + cfg: slackConfig, + actionParams: { + channel: "slack", + target: "#C12345678", + ...(params.media ? { media: params.media } : {}), + ...(params.message ? 
{ message: params.message } : {}), + }, + sandboxRoot: params.sandboxDir, + }); + + expect(result.kind).toBe("send"); + if (result.kind !== "send") { + throw new Error("expected send result"); + } + expect(result.sendResult?.mediaUrl).toBe( + path.join(params.sandboxDir, params.expectedRelativePath), + ); +} + +let createPluginRuntime: typeof import("../../plugins/runtime/index.js").createPluginRuntime; +let setSlackRuntime: typeof import("../../../extensions/slack/src/runtime.js").setSlackRuntime; + +function installSlackRuntime() { + const runtime = createPluginRuntime(); + setSlackRuntime(runtime); +} + +describe("runMessageAction media behavior", () => { + beforeAll(async () => { + ({ createPluginRuntime } = await import("../../plugins/runtime/index.js")); + ({ setSlackRuntime } = await import("../../../extensions/slack/src/runtime.js")); + }); + + describe("sendAttachment hydration", () => { + const cfg = { + channels: { + bluebubbles: { + enabled: true, + serverUrl: "http://localhost:1234", + password: "test-password", + }, + }, + } as OpenClawConfig; + const attachmentPlugin: ChannelPlugin = { + id: "bluebubbles", + meta: { + id: "bluebubbles", + label: "BlueBubbles", + selectionLabel: "BlueBubbles", + docsPath: "/channels/bluebubbles", + blurb: "BlueBubbles test plugin.", + }, + capabilities: { chatTypes: ["direct", "group"], media: true }, + config: { + listAccountIds: () => ["default"], + resolveAccount: () => ({ enabled: true }), + isConfigured: () => true, + }, + actions: { + listActions: () => ["sendAttachment", "setGroupIcon"], + supportsAction: ({ action }) => action === "sendAttachment" || action === "setGroupIcon", + handleAction: async ({ params }) => + jsonResult({ + ok: true, + buffer: params.buffer, + filename: params.filename, + caption: params.caption, + contentType: params.contentType, + }), + }, + }; + + beforeEach(() => { + setActivePluginRegistry( + createTestRegistry([ + { + pluginId: "bluebubbles", + source: "test", + plugin: 
attachmentPlugin, + }, + ]), + ); + vi.mocked(loadWebMedia).mockResolvedValue({ + buffer: Buffer.from("hello"), + contentType: "image/png", + kind: "image", + fileName: "pic.png", + }); + }); + + afterEach(() => { + setActivePluginRegistry(createTestRegistry([])); + vi.clearAllMocks(); + }); + + async function restoreRealMediaLoader() { + const actual = + await vi.importActual("../../web/media.js"); + vi.mocked(loadWebMedia).mockImplementation(actual.loadWebMedia); + } + + async function expectRejectsLocalAbsolutePathWithoutSandbox(params: { + action: "sendAttachment" | "setGroupIcon"; + target: string; + message?: string; + tempPrefix: string; + }) { + await restoreRealMediaLoader(); + + const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), params.tempPrefix)); + try { + const outsidePath = path.join(tempDir, "secret.txt"); + await fs.writeFile(outsidePath, "secret", "utf8"); + + const actionParams: Record = { + channel: "bluebubbles", + target: params.target, + media: outsidePath, + }; + if (params.message) { + actionParams.message = params.message; + } + + await expect( + runMessageAction({ + cfg, + action: params.action, + params: actionParams, + }), + ).rejects.toThrow(/allowed directory|path-not-allowed/i); + } finally { + await fs.rm(tempDir, { recursive: true, force: true }); + } + } + + it("hydrates buffer and filename from media for sendAttachment", async () => { + const result = await runMessageAction({ + cfg, + action: "sendAttachment", + params: { + channel: "bluebubbles", + target: "+15551234567", + media: "https://example.com/pic.png", + message: "caption", + }, + }); + + expect(result.kind).toBe("action"); + expect(result.payload).toMatchObject({ + ok: true, + filename: "pic.png", + caption: "caption", + contentType: "image/png", + }); + expect((result.payload as { buffer?: string }).buffer).toBe( + Buffer.from("hello").toString("base64"), + ); + const call = vi.mocked(loadWebMedia).mock.calls[0]; + expect(call?.[1]).toEqual( + 
expect.objectContaining({ + localRoots: expect.any(Array), + }), + ); + expect((call?.[1] as { sandboxValidated?: boolean } | undefined)?.sandboxValidated).not.toBe( + true, + ); + }); + + it("rewrites sandboxed media paths for sendAttachment", async () => { + await withSandbox(async (sandboxDir) => { + await runMessageAction({ + cfg, + action: "sendAttachment", + params: { + channel: "bluebubbles", + target: "+15551234567", + media: "./data/pic.png", + message: "caption", + }, + sandboxRoot: sandboxDir, + }); + + const call = vi.mocked(loadWebMedia).mock.calls[0]; + expect(call?.[0]).toBe(path.join(sandboxDir, "data", "pic.png")); + expect(call?.[1]).toEqual( + expect.objectContaining({ + sandboxValidated: true, + }), + ); + }); + }); + + it("rewrites sandboxed media paths for setGroupIcon", async () => { + await withSandbox(async (sandboxDir) => { + await runMessageAction({ + cfg, + action: "setGroupIcon", + params: { + channel: "bluebubbles", + target: "group:123", + media: "./icons/group.png", + }, + sandboxRoot: sandboxDir, + }); + + const call = vi.mocked(loadWebMedia).mock.calls[0]; + expect(call?.[0]).toBe(path.join(sandboxDir, "icons", "group.png")); + expect(call?.[1]).toEqual( + expect.objectContaining({ + sandboxValidated: true, + }), + ); + }); + }); + + it("rejects local absolute path for sendAttachment when sandboxRoot is missing", async () => { + await expectRejectsLocalAbsolutePathWithoutSandbox({ + action: "sendAttachment", + target: "+15551234567", + message: "caption", + tempPrefix: "msg-attachment-", + }); + }); + + it("rejects local absolute path for setGroupIcon when sandboxRoot is missing", async () => { + await expectRejectsLocalAbsolutePathWithoutSandbox({ + action: "setGroupIcon", + target: "group:123", + tempPrefix: "msg-group-icon-", + }); + }); + }); + + describe("sandboxed media validation", () => { + beforeEach(() => { + installSlackRuntime(); + setActivePluginRegistry( + createTestRegistry([ + { + pluginId: "slack", + source: 
"test", + plugin: slackPlugin, + }, + ]), + ); + }); + + afterEach(() => { + setActivePluginRegistry(createTestRegistry([])); + }); + + it.each(["/etc/passwd", "file:///etc/passwd"])( + "rejects out-of-sandbox media reference: %s", + async (media) => { + await withSandbox(async (sandboxDir) => { + await expect( + runDrySend({ + cfg: slackConfig, + actionParams: { + channel: "slack", + target: "#C12345678", + media, + message: "", + }, + sandboxRoot: sandboxDir, + }), + ).rejects.toThrow(/sandbox/i); + }); + }, + ); + + it("rejects data URLs in media params", async () => { + await expect( + runDrySend({ + cfg: slackConfig, + actionParams: { + channel: "slack", + target: "#C12345678", + media: "data:image/png;base64,abcd", + message: "", + }, + }), + ).rejects.toThrow(/data:/i); + }); + + it("rewrites sandbox-relative media paths", async () => { + await withSandbox(async (sandboxDir) => { + await expectSandboxMediaRewrite({ + sandboxDir, + media: "./data/file.txt", + message: "", + expectedRelativePath: path.join("data", "file.txt"), + }); + }); + }); + + it("rewrites /workspace media paths to host sandbox root", async () => { + await withSandbox(async (sandboxDir) => { + await expectSandboxMediaRewrite({ + sandboxDir, + media: "/workspace/data/file.txt", + message: "", + expectedRelativePath: path.join("data", "file.txt"), + }); + }); + }); + + it("rewrites MEDIA directives under sandbox", async () => { + await withSandbox(async (sandboxDir) => { + await expectSandboxMediaRewrite({ + sandboxDir, + message: "Hello\nMEDIA: ./data/note.ogg", + expectedRelativePath: path.join("data", "note.ogg"), + }); + }); + }); + + it("allows media paths under preferred OpenClaw tmp root", async () => { + const tmpRoot = resolvePreferredOpenClawTmpDir(); + await fs.mkdir(tmpRoot, { recursive: true }); + const sandboxDir = await fs.mkdtemp(path.join(os.tmpdir(), "msg-sandbox-")); + try { + const tmpFile = path.join(tmpRoot, "test-media-image.png"); + const result = await 
runMessageAction({ + cfg: slackConfig, + action: "send", + params: { + channel: "slack", + target: "#C12345678", + media: tmpFile, + message: "", + }, + sandboxRoot: sandboxDir, + dryRun: true, + }); + + expect(result.kind).toBe("send"); + if (result.kind !== "send") { + throw new Error("expected send result"); + } + expect(result.sendResult?.mediaUrl).toBe(path.resolve(tmpFile)); + const hostTmpOutsideOpenClaw = path.join(os.tmpdir(), "outside-openclaw", "test-media.png"); + await expect( + runMessageAction({ + cfg: slackConfig, + action: "send", + params: { + channel: "slack", + target: "#C12345678", + media: hostTmpOutsideOpenClaw, + message: "", + }, + sandboxRoot: sandboxDir, + dryRun: true, + }), + ).rejects.toThrow(/sandbox/i); + } finally { + await fs.rm(sandboxDir, { recursive: true, force: true }); + } + }); + }); +}); diff --git a/src/infra/outbound/message-action-runner.plugin-dispatch.test.ts b/src/infra/outbound/message-action-runner.plugin-dispatch.test.ts new file mode 100644 index 00000000000..00c4bafef11 --- /dev/null +++ b/src/infra/outbound/message-action-runner.plugin-dispatch.test.ts @@ -0,0 +1,439 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { jsonResult } from "../../agents/tools/common.js"; +import type { ChannelPlugin } from "../../channels/plugins/types.js"; +import type { OpenClawConfig } from "../../config/config.js"; +import { setActivePluginRegistry } from "../../plugins/runtime.js"; +import { createOutboundTestPlugin, createTestRegistry } from "../../test-utils/channel-plugins.js"; +import { runMessageAction } from "./message-action-runner.js"; + +function createAlwaysConfiguredPluginConfig(account: Record = { enabled: true }) { + return { + listAccountIds: () => ["default"], + resolveAccount: () => account, + isConfigured: () => true, + }; +} + +describe("runMessageAction plugin dispatch", () => { + describe("media caption behavior", () => { + afterEach(() => { + 
setActivePluginRegistry(createTestRegistry([])); + }); + + it("promotes caption to message for media sends when message is empty", async () => { + const sendMedia = vi.fn().mockResolvedValue({ + channel: "testchat", + messageId: "m1", + chatId: "c1", + }); + setActivePluginRegistry( + createTestRegistry([ + { + pluginId: "testchat", + source: "test", + plugin: createOutboundTestPlugin({ + id: "testchat", + outbound: { + deliveryMode: "direct", + sendText: vi.fn().mockResolvedValue({ + channel: "testchat", + messageId: "t1", + chatId: "c1", + }), + sendMedia, + }, + }), + }, + ]), + ); + const cfg = { + channels: { + testchat: { + enabled: true, + }, + }, + } as OpenClawConfig; + + const result = await runMessageAction({ + cfg, + action: "send", + params: { + channel: "testchat", + target: "channel:abc", + media: "https://example.com/cat.png", + caption: "caption-only text", + }, + dryRun: false, + }); + + expect(result.kind).toBe("send"); + expect(sendMedia).toHaveBeenCalledWith( + expect.objectContaining({ + text: "caption-only text", + mediaUrl: "https://example.com/cat.png", + }), + ); + }); + }); + + describe("card-only send behavior", () => { + const handleAction = vi.fn(async ({ params }: { params: Record }) => + jsonResult({ + ok: true, + card: params.card ?? null, + message: params.message ?? 
null, + }), + ); + + const cardPlugin: ChannelPlugin = { + id: "cardchat", + meta: { + id: "cardchat", + label: "Card Chat", + selectionLabel: "Card Chat", + docsPath: "/channels/cardchat", + blurb: "Card-only send test plugin.", + }, + capabilities: { chatTypes: ["direct"] }, + config: createAlwaysConfiguredPluginConfig(), + actions: { + listActions: () => ["send"], + supportsAction: ({ action }) => action === "send", + handleAction, + }, + }; + + beforeEach(() => { + setActivePluginRegistry( + createTestRegistry([ + { + pluginId: "cardchat", + source: "test", + plugin: cardPlugin, + }, + ]), + ); + handleAction.mockClear(); + }); + + afterEach(() => { + setActivePluginRegistry(createTestRegistry([])); + vi.clearAllMocks(); + }); + + it("allows card-only sends without text or media", async () => { + const cfg = { + channels: { + cardchat: { + enabled: true, + }, + }, + } as OpenClawConfig; + + const card = { + type: "AdaptiveCard", + version: "1.4", + body: [{ type: "TextBlock", text: "Card-only payload" }], + }; + + const result = await runMessageAction({ + cfg, + action: "send", + params: { + channel: "cardchat", + target: "channel:test-card", + card, + }, + dryRun: false, + }); + + expect(result.kind).toBe("send"); + expect(result.handledBy).toBe("plugin"); + expect(handleAction).toHaveBeenCalled(); + expect(result.payload).toMatchObject({ + ok: true, + card, + }); + }); + }); + + describe("telegram plugin poll forwarding", () => { + const handleAction = vi.fn(async ({ params }: { params: Record }) => + jsonResult({ + ok: true, + forwarded: { + to: params.to ?? null, + pollQuestion: params.pollQuestion ?? null, + pollOption: params.pollOption ?? null, + pollDurationSeconds: params.pollDurationSeconds ?? null, + pollPublic: params.pollPublic ?? null, + threadId: params.threadId ?? 
null, + }, + }), + ); + + const telegramPollPlugin: ChannelPlugin = { + id: "telegram", + meta: { + id: "telegram", + label: "Telegram", + selectionLabel: "Telegram", + docsPath: "/channels/telegram", + blurb: "Telegram poll forwarding test plugin.", + }, + capabilities: { chatTypes: ["direct"] }, + config: createAlwaysConfiguredPluginConfig(), + messaging: { + targetResolver: { + looksLikeId: () => true, + }, + }, + actions: { + listActions: () => ["poll"], + supportsAction: ({ action }) => action === "poll", + handleAction, + }, + }; + + beforeEach(() => { + setActivePluginRegistry( + createTestRegistry([ + { + pluginId: "telegram", + source: "test", + plugin: telegramPollPlugin, + }, + ]), + ); + handleAction.mockClear(); + }); + + afterEach(() => { + setActivePluginRegistry(createTestRegistry([])); + vi.clearAllMocks(); + }); + + it("forwards telegram poll params through plugin dispatch", async () => { + const result = await runMessageAction({ + cfg: { + channels: { + telegram: { + botToken: "tok", + }, + }, + } as OpenClawConfig, + action: "poll", + params: { + channel: "telegram", + target: "telegram:123", + pollQuestion: "Lunch?", + pollOption: ["Pizza", "Sushi"], + pollDurationSeconds: 120, + pollPublic: true, + threadId: "42", + }, + dryRun: false, + }); + + expect(result.kind).toBe("poll"); + expect(result.handledBy).toBe("plugin"); + expect(handleAction).toHaveBeenCalledWith( + expect.objectContaining({ + action: "poll", + channel: "telegram", + params: expect.objectContaining({ + to: "telegram:123", + pollQuestion: "Lunch?", + pollOption: ["Pizza", "Sushi"], + pollDurationSeconds: 120, + pollPublic: true, + threadId: "42", + }), + }), + ); + expect(result.payload).toMatchObject({ + ok: true, + forwarded: { + to: "telegram:123", + pollQuestion: "Lunch?", + pollOption: ["Pizza", "Sushi"], + pollDurationSeconds: 120, + pollPublic: true, + threadId: "42", + }, + }); + }); + }); + + describe("components parsing", () => { + const handleAction = vi.fn(async ({ 
params }: { params: Record }) => + jsonResult({ + ok: true, + components: params.components ?? null, + }), + ); + + const componentsPlugin: ChannelPlugin = { + id: "discord", + meta: { + id: "discord", + label: "Discord", + selectionLabel: "Discord", + docsPath: "/channels/discord", + blurb: "Discord components send test plugin.", + }, + capabilities: { chatTypes: ["direct"] }, + config: createAlwaysConfiguredPluginConfig({}), + actions: { + listActions: () => ["send"], + supportsAction: ({ action }) => action === "send", + handleAction, + }, + }; + + beforeEach(() => { + setActivePluginRegistry( + createTestRegistry([ + { + pluginId: "discord", + source: "test", + plugin: componentsPlugin, + }, + ]), + ); + handleAction.mockClear(); + }); + + afterEach(() => { + setActivePluginRegistry(createTestRegistry([])); + vi.clearAllMocks(); + }); + + it("parses components JSON strings before plugin dispatch", async () => { + const components = { + text: "hello", + buttons: [{ label: "A", customId: "a" }], + }; + const result = await runMessageAction({ + cfg: {} as OpenClawConfig, + action: "send", + params: { + channel: "discord", + target: "channel:123", + message: "hi", + components: JSON.stringify(components), + }, + dryRun: false, + }); + + expect(result.kind).toBe("send"); + expect(handleAction).toHaveBeenCalled(); + expect(result.payload).toMatchObject({ ok: true, components }); + }); + + it("throws on invalid components JSON strings", async () => { + await expect( + runMessageAction({ + cfg: {} as OpenClawConfig, + action: "send", + params: { + channel: "discord", + target: "channel:123", + message: "hi", + components: "{not-json}", + }, + dryRun: false, + }), + ).rejects.toThrow(/--components must be valid JSON/); + + expect(handleAction).not.toHaveBeenCalled(); + }); + }); + + describe("accountId defaults", () => { + const handleAction = vi.fn(async () => jsonResult({ ok: true })); + const accountPlugin: ChannelPlugin = { + id: "discord", + meta: { + id: 
"discord", + label: "Discord", + selectionLabel: "Discord", + docsPath: "/channels/discord", + blurb: "Discord test plugin.", + }, + capabilities: { chatTypes: ["direct"] }, + config: { + listAccountIds: () => ["default"], + resolveAccount: () => ({}), + }, + actions: { + listActions: () => ["send"], + handleAction, + }, + }; + + beforeEach(() => { + setActivePluginRegistry( + createTestRegistry([ + { + pluginId: "discord", + source: "test", + plugin: accountPlugin, + }, + ]), + ); + handleAction.mockClear(); + }); + + afterEach(() => { + setActivePluginRegistry(createTestRegistry([])); + vi.clearAllMocks(); + }); + + it.each([ + { + name: "uses defaultAccountId override", + args: { + cfg: {} as OpenClawConfig, + defaultAccountId: "ops", + }, + expectedAccountId: "ops", + }, + { + name: "falls back to agent binding account", + args: { + cfg: { + bindings: [ + { agentId: "agent-b", match: { channel: "discord", accountId: "account-b" } }, + ], + } as OpenClawConfig, + agentId: "agent-b", + }, + expectedAccountId: "account-b", + }, + ])("$name", async ({ args, expectedAccountId }) => { + await runMessageAction({ + ...args, + action: "send", + params: { + channel: "discord", + target: "channel:123", + message: "hi", + }, + }); + + expect(handleAction).toHaveBeenCalled(); + const ctx = (handleAction.mock.calls as unknown as Array<[unknown]>)[0]?.[0] as + | { + accountId?: string | null; + params: Record; + } + | undefined; + if (!ctx) { + throw new Error("expected action context"); + } + expect(ctx.accountId).toBe(expectedAccountId); + expect(ctx.params.accountId).toBe(expectedAccountId); + }); + }); +}); diff --git a/src/infra/outbound/message-action-runner.poll.test.ts b/src/infra/outbound/message-action-runner.poll.test.ts new file mode 100644 index 00000000000..43c7489c6fd --- /dev/null +++ b/src/infra/outbound/message-action-runner.poll.test.ts @@ -0,0 +1,196 @@ +import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; +import { 
slackPlugin } from "../../../extensions/slack/src/channel.js"; +import { telegramPlugin } from "../../../extensions/telegram/src/channel.js"; +import type { OpenClawConfig } from "../../config/config.js"; +import { setActivePluginRegistry } from "../../plugins/runtime.js"; +import { createTestRegistry } from "../../test-utils/channel-plugins.js"; + +const mocks = vi.hoisted(() => ({ + executePollAction: vi.fn(), +})); + +vi.mock("./outbound-send-service.js", async () => { + const actual = await vi.importActual( + "./outbound-send-service.js", + ); + return { + ...actual, + executePollAction: mocks.executePollAction, + }; +}); + +import { runMessageAction } from "./message-action-runner.js"; + +const slackConfig = { + channels: { + slack: { + botToken: "xoxb-test", + appToken: "xapp-test", + }, + }, +} as OpenClawConfig; + +const telegramConfig = { + channels: { + telegram: { + botToken: "telegram-test", + }, + }, +} as OpenClawConfig; + +async function runPollAction(params: { + cfg: OpenClawConfig; + actionParams: Record; + toolContext?: Record; +}) { + await runMessageAction({ + cfg: params.cfg, + action: "poll", + params: params.actionParams as never, + toolContext: params.toolContext as never, + }); + return mocks.executePollAction.mock.calls[0]?.[0] as + | { + durationSeconds?: number; + maxSelections?: number; + threadId?: string; + isAnonymous?: boolean; + ctx?: { params?: Record }; + } + | undefined; +} + +let createPluginRuntime: typeof import("../../plugins/runtime/index.js").createPluginRuntime; +let setSlackRuntime: typeof import("../../../extensions/slack/src/runtime.js").setSlackRuntime; +let setTelegramRuntime: typeof import("../../../extensions/telegram/src/runtime.js").setTelegramRuntime; + +describe("runMessageAction poll handling", () => { + beforeAll(async () => { + ({ createPluginRuntime } = await import("../../plugins/runtime/index.js")); + ({ setSlackRuntime } = await import("../../../extensions/slack/src/runtime.js")); + ({ setTelegramRuntime 
} = await import("../../../extensions/telegram/src/runtime.js")); + }); + + beforeEach(() => { + const runtime = createPluginRuntime(); + setSlackRuntime(runtime); + setTelegramRuntime(runtime); + setActivePluginRegistry( + createTestRegistry([ + { + pluginId: "slack", + source: "test", + plugin: slackPlugin, + }, + { + pluginId: "telegram", + source: "test", + plugin: telegramPlugin, + }, + ]), + ); + mocks.executePollAction.mockResolvedValue({ + handledBy: "core", + payload: { ok: true }, + pollResult: { ok: true }, + }); + }); + + afterEach(() => { + setActivePluginRegistry(createTestRegistry([])); + mocks.executePollAction.mockReset(); + }); + + it.each([ + { + name: "requires at least two poll options", + cfg: telegramConfig, + actionParams: { + channel: "telegram", + target: "telegram:123", + pollQuestion: "Lunch?", + pollOption: ["Pizza"], + }, + message: /pollOption requires at least two values/i, + }, + { + name: "rejects durationSeconds outside telegram", + cfg: slackConfig, + actionParams: { + channel: "slack", + target: "#C12345678", + pollQuestion: "Lunch?", + pollOption: ["Pizza", "Sushi"], + pollDurationSeconds: 60, + }, + message: /pollDurationSeconds is only supported for Telegram polls/i, + }, + { + name: "rejects poll visibility outside telegram", + cfg: slackConfig, + actionParams: { + channel: "slack", + target: "#C12345678", + pollQuestion: "Lunch?", + pollOption: ["Pizza", "Sushi"], + pollPublic: true, + }, + message: /pollAnonymous\/pollPublic are only supported for Telegram polls/i, + }, + ])("$name", async ({ cfg, actionParams, message }) => { + await expect(runPollAction({ cfg, actionParams })).rejects.toThrow(message); + expect(mocks.executePollAction).not.toHaveBeenCalled(); + }); + + it("passes Telegram durationSeconds, visibility, and auto threadId to executePollAction", async () => { + const call = await runPollAction({ + cfg: telegramConfig, + actionParams: { + channel: "telegram", + target: "telegram:123", + pollQuestion: "Lunch?", 
+ pollOption: ["Pizza", "Sushi"], + pollDurationSeconds: 90, + pollPublic: true, + }, + toolContext: { + currentChannelId: "telegram:123", + currentThreadTs: "42", + }, + }); + + expect(call?.durationSeconds).toBe(90); + expect(call?.isAnonymous).toBe(false); + expect(call?.threadId).toBe("42"); + expect(call?.ctx?.params?.threadId).toBe("42"); + }); + + it("expands maxSelections when pollMulti is enabled", async () => { + const call = await runPollAction({ + cfg: telegramConfig, + actionParams: { + channel: "telegram", + target: "telegram:123", + pollQuestion: "Lunch?", + pollOption: ["Pizza", "Sushi", "Soup"], + pollMulti: true, + }, + }); + + expect(call?.maxSelections).toBe(3); + }); + + it("defaults maxSelections to one choice when pollMulti is omitted", async () => { + const call = await runPollAction({ + cfg: telegramConfig, + actionParams: { + channel: "telegram", + target: "telegram:123", + pollQuestion: "Lunch?", + pollOption: ["Pizza", "Sushi", "Soup"], + }, + }); + + expect(call?.maxSelections).toBe(1); + }); +}); diff --git a/src/infra/outbound/message-action-runner.test.ts b/src/infra/outbound/message-action-runner.test.ts deleted file mode 100644 index cc7d68df9d3..00000000000 --- a/src/infra/outbound/message-action-runner.test.ts +++ /dev/null @@ -1,1257 +0,0 @@ -import fs from "node:fs/promises"; -import os from "node:os"; -import path from "node:path"; -import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; -import { slackPlugin } from "../../../extensions/slack/src/channel.js"; -import { telegramPlugin } from "../../../extensions/telegram/src/channel.js"; -import { whatsappPlugin } from "../../../extensions/whatsapp/src/channel.js"; -import { jsonResult } from "../../agents/tools/common.js"; -import type { ChannelPlugin } from "../../channels/plugins/types.js"; -import type { OpenClawConfig } from "../../config/config.js"; -import { setActivePluginRegistry } from "../../plugins/runtime.js"; -import { 
createOutboundTestPlugin, createTestRegistry } from "../../test-utils/channel-plugins.js"; -import { createIMessageTestPlugin } from "../../test-utils/imessage-test-plugin.js"; -import { loadWebMedia } from "../../web/media.js"; -import { resolvePreferredOpenClawTmpDir } from "../tmp-openclaw-dir.js"; -import { runMessageAction } from "./message-action-runner.js"; - -vi.mock("../../web/media.js", async () => { - const actual = await vi.importActual("../../web/media.js"); - return { - ...actual, - loadWebMedia: vi.fn(actual.loadWebMedia), - }; -}); - -const slackConfig = { - channels: { - slack: { - botToken: "xoxb-test", - appToken: "xapp-test", - }, - }, -} as OpenClawConfig; - -const whatsappConfig = { - channels: { - whatsapp: { - allowFrom: ["*"], - }, - }, -} as OpenClawConfig; - -async function withSandbox(test: (sandboxDir: string) => Promise) { - const sandboxDir = await fs.mkdtemp(path.join(os.tmpdir(), "msg-sandbox-")); - try { - await test(sandboxDir); - } finally { - await fs.rm(sandboxDir, { recursive: true, force: true }); - } -} - -const runDryAction = (params: { - cfg: OpenClawConfig; - action: "send" | "thread-reply" | "broadcast"; - actionParams: Record; - toolContext?: Record; - abortSignal?: AbortSignal; - sandboxRoot?: string; -}) => - runMessageAction({ - cfg: params.cfg, - action: params.action, - params: params.actionParams as never, - toolContext: params.toolContext as never, - dryRun: true, - abortSignal: params.abortSignal, - sandboxRoot: params.sandboxRoot, - }); - -const runDrySend = (params: { - cfg: OpenClawConfig; - actionParams: Record; - toolContext?: Record; - abortSignal?: AbortSignal; - sandboxRoot?: string; -}) => - runDryAction({ - ...params, - action: "send", - }); - -async function expectSandboxMediaRewrite(params: { - sandboxDir: string; - media?: string; - message?: string; - expectedRelativePath: string; -}) { - const result = await runDrySend({ - cfg: slackConfig, - actionParams: { - channel: "slack", - target: 
"#C12345678", - ...(params.media ? { media: params.media } : {}), - ...(params.message ? { message: params.message } : {}), - }, - sandboxRoot: params.sandboxDir, - }); - - expect(result.kind).toBe("send"); - if (result.kind !== "send") { - throw new Error("expected send result"); - } - expect(result.sendResult?.mediaUrl).toBe( - path.join(params.sandboxDir, params.expectedRelativePath), - ); -} - -function createAlwaysConfiguredPluginConfig(account: Record = { enabled: true }) { - return { - listAccountIds: () => ["default"], - resolveAccount: () => account, - isConfigured: () => true, - }; -} - -let createPluginRuntime: typeof import("../../plugins/runtime/index.js").createPluginRuntime; -let setSlackRuntime: typeof import("../../../extensions/slack/src/runtime.js").setSlackRuntime; -let setTelegramRuntime: typeof import("../../../extensions/telegram/src/runtime.js").setTelegramRuntime; -let setWhatsAppRuntime: typeof import("../../../extensions/whatsapp/src/runtime.js").setWhatsAppRuntime; - -function installChannelRuntimes(params?: { includeTelegram?: boolean; includeWhatsApp?: boolean }) { - const runtime = createPluginRuntime(); - setSlackRuntime(runtime); - if (params?.includeTelegram !== false) { - setTelegramRuntime(runtime); - } - if (params?.includeWhatsApp !== false) { - setWhatsAppRuntime(runtime); - } -} - -describe("runMessageAction context isolation", () => { - beforeAll(async () => { - ({ createPluginRuntime } = await import("../../plugins/runtime/index.js")); - ({ setSlackRuntime } = await import("../../../extensions/slack/src/runtime.js")); - ({ setTelegramRuntime } = await import("../../../extensions/telegram/src/runtime.js")); - ({ setWhatsAppRuntime } = await import("../../../extensions/whatsapp/src/runtime.js")); - }); - - beforeEach(() => { - installChannelRuntimes(); - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "slack", - source: "test", - plugin: slackPlugin, - }, - { - pluginId: "whatsapp", - source: "test", - plugin: 
whatsappPlugin, - }, - { - pluginId: "telegram", - source: "test", - plugin: telegramPlugin, - }, - { - pluginId: "imessage", - source: "test", - plugin: createIMessageTestPlugin(), - }, - ]), - ); - }); - - afterEach(() => { - setActivePluginRegistry(createTestRegistry([])); - }); - - it("allows send when target matches current channel", async () => { - const result = await runDrySend({ - cfg: slackConfig, - actionParams: { - channel: "slack", - target: "#C12345678", - message: "hi", - }, - toolContext: { currentChannelId: "C12345678" }, - }); - - expect(result.kind).toBe("send"); - }); - - it("accepts legacy to parameter for send", async () => { - const result = await runDrySend({ - cfg: slackConfig, - actionParams: { - channel: "slack", - to: "#C12345678", - message: "hi", - }, - }); - - expect(result.kind).toBe("send"); - }); - - it("defaults to current channel when target is omitted", async () => { - const result = await runDrySend({ - cfg: slackConfig, - actionParams: { - channel: "slack", - message: "hi", - }, - toolContext: { currentChannelId: "C12345678" }, - }); - - expect(result.kind).toBe("send"); - }); - - it("allows media-only send when target matches current channel", async () => { - const result = await runDrySend({ - cfg: slackConfig, - actionParams: { - channel: "slack", - target: "#C12345678", - media: "https://example.com/note.ogg", - }, - toolContext: { currentChannelId: "C12345678" }, - }); - - expect(result.kind).toBe("send"); - }); - - it("requires message when no media hint is provided", async () => { - await expect( - runDrySend({ - cfg: slackConfig, - actionParams: { - channel: "slack", - target: "#C12345678", - }, - toolContext: { currentChannelId: "C12345678" }, - }), - ).rejects.toThrow(/message required/i); - }); - - it("rejects send actions that include poll creation params", async () => { - await expect( - runDrySend({ - cfg: slackConfig, - actionParams: { - channel: "slack", - target: "#C12345678", - message: "hi", - pollQuestion: 
"Ready?", - pollOption: ["Yes", "No"], - }, - toolContext: { currentChannelId: "C12345678" }, - }), - ).rejects.toThrow(/use action "poll" instead of "send"/i); - }); - - it("rejects send actions that include string-encoded poll params", async () => { - await expect( - runDrySend({ - cfg: slackConfig, - actionParams: { - channel: "slack", - target: "#C12345678", - message: "hi", - pollDurationSeconds: "60", - pollPublic: "true", - }, - toolContext: { currentChannelId: "C12345678" }, - }), - ).rejects.toThrow(/use action "poll" instead of "send"/i); - }); - - it("rejects send actions that include snake_case poll params", async () => { - await expect( - runDrySend({ - cfg: slackConfig, - actionParams: { - channel: "slack", - target: "#C12345678", - message: "hi", - poll_question: "Ready?", - poll_option: ["Yes", "No"], - poll_public: "true", - }, - toolContext: { currentChannelId: "C12345678" }, - }), - ).rejects.toThrow(/use action "poll" instead of "send"/i); - }); - - it("allows send when poll booleans are explicitly false", async () => { - const result = await runDrySend({ - cfg: slackConfig, - actionParams: { - channel: "slack", - target: "#C12345678", - message: "hi", - pollMulti: false, - pollAnonymous: false, - pollPublic: false, - }, - toolContext: { currentChannelId: "C12345678" }, - }); - - expect(result.kind).toBe("send"); - }); - - it("blocks send when target differs from current channel", async () => { - const result = await runDrySend({ - cfg: slackConfig, - actionParams: { - channel: "slack", - target: "channel:C99999999", - message: "hi", - }, - toolContext: { currentChannelId: "C12345678", currentChannelProvider: "slack" }, - }); - - expect(result.kind).toBe("send"); - }); - - it("blocks thread-reply when channelId differs from current channel", async () => { - const result = await runDryAction({ - cfg: slackConfig, - action: "thread-reply", - actionParams: { - channel: "slack", - target: "C99999999", - message: "hi", - }, - toolContext: { 
currentChannelId: "C12345678", currentChannelProvider: "slack" }, - }); - - expect(result.kind).toBe("action"); - }); - - it.each([ - { - name: "whatsapp", - channel: "whatsapp", - target: "123@g.us", - currentChannelId: "123@g.us", - }, - { - name: "imessage", - channel: "imessage", - target: "imessage:+15551234567", - currentChannelId: "imessage:+15551234567", - }, - ] as const)("allows $name send when target matches current context", async (testCase) => { - const result = await runDrySend({ - cfg: whatsappConfig, - actionParams: { - channel: testCase.channel, - target: testCase.target, - message: "hi", - }, - toolContext: { currentChannelId: testCase.currentChannelId }, - }); - - expect(result.kind).toBe("send"); - }); - - it.each([ - { - name: "whatsapp", - channel: "whatsapp", - target: "456@g.us", - currentChannelId: "123@g.us", - currentChannelProvider: "whatsapp", - }, - { - name: "imessage", - channel: "imessage", - target: "imessage:+15551230000", - currentChannelId: "imessage:+15551234567", - currentChannelProvider: "imessage", - }, - ] as const)("blocks $name send when target differs from current context", async (testCase) => { - const result = await runDrySend({ - cfg: whatsappConfig, - actionParams: { - channel: testCase.channel, - target: testCase.target, - message: "hi", - }, - toolContext: { - currentChannelId: testCase.currentChannelId, - currentChannelProvider: testCase.currentChannelProvider, - }, - }); - - expect(result.kind).toBe("send"); - }); - - it("infers channel + target from tool context when missing", async () => { - const multiConfig = { - channels: { - slack: { - botToken: "xoxb-test", - appToken: "xapp-test", - }, - telegram: { - token: "tg-test", - }, - }, - } as OpenClawConfig; - - const result = await runDrySend({ - cfg: multiConfig, - actionParams: { - message: "hi", - }, - toolContext: { currentChannelId: "C12345678", currentChannelProvider: "slack" }, - }); - - expect(result.kind).toBe("send"); - 
expect(result.channel).toBe("slack"); - }); - - it("falls back to tool-context provider when channel param is an id", async () => { - const result = await runDrySend({ - cfg: slackConfig, - actionParams: { - channel: "C12345678", - target: "#C12345678", - message: "hi", - }, - toolContext: { currentChannelId: "C12345678", currentChannelProvider: "slack" }, - }); - - expect(result.kind).toBe("send"); - expect(result.channel).toBe("slack"); - }); - - it("falls back to tool-context provider for broadcast channel ids", async () => { - const result = await runDryAction({ - cfg: slackConfig, - action: "broadcast", - actionParams: { - targets: ["channel:C12345678"], - channel: "C12345678", - message: "hi", - }, - toolContext: { currentChannelProvider: "slack" }, - }); - - expect(result.kind).toBe("broadcast"); - expect(result.channel).toBe("slack"); - }); - - it("blocks cross-provider sends by default", async () => { - await expect( - runDrySend({ - cfg: slackConfig, - actionParams: { - channel: "telegram", - target: "@opsbot", - message: "hi", - }, - toolContext: { currentChannelId: "C12345678", currentChannelProvider: "slack" }, - }), - ).rejects.toThrow(/Cross-context messaging denied/); - }); - - it("blocks same-provider cross-context when disabled", async () => { - const cfg = { - ...slackConfig, - tools: { - message: { - crossContext: { - allowWithinProvider: false, - }, - }, - }, - } as OpenClawConfig; - - await expect( - runDrySend({ - cfg, - actionParams: { - channel: "slack", - target: "channel:C99999999", - message: "hi", - }, - toolContext: { currentChannelId: "C12345678", currentChannelProvider: "slack" }, - }), - ).rejects.toThrow(/Cross-context messaging denied/); - }); - - it.each([ - { - name: "send", - run: (abortSignal: AbortSignal) => - runDrySend({ - cfg: slackConfig, - actionParams: { - channel: "slack", - target: "#C12345678", - message: "hi", - }, - abortSignal, - }), - }, - { - name: "broadcast", - run: (abortSignal: AbortSignal) => - 
runDryAction({ - cfg: slackConfig, - action: "broadcast", - actionParams: { - targets: ["channel:C12345678"], - channel: "slack", - message: "hi", - }, - abortSignal, - }), - }, - ])("aborts $name when abortSignal is already aborted", async ({ run }) => { - const controller = new AbortController(); - controller.abort(); - await expect(run(controller.signal)).rejects.toMatchObject({ name: "AbortError" }); - }); -}); - -describe("runMessageAction sendAttachment hydration", () => { - const cfg = { - channels: { - bluebubbles: { - enabled: true, - serverUrl: "http://localhost:1234", - password: "test-password", - }, - }, - } as OpenClawConfig; - const attachmentPlugin: ChannelPlugin = { - id: "bluebubbles", - meta: { - id: "bluebubbles", - label: "BlueBubbles", - selectionLabel: "BlueBubbles", - docsPath: "/channels/bluebubbles", - blurb: "BlueBubbles test plugin.", - }, - capabilities: { chatTypes: ["direct", "group"], media: true }, - config: { - listAccountIds: () => ["default"], - resolveAccount: () => ({ enabled: true }), - isConfigured: () => true, - }, - actions: { - listActions: () => ["sendAttachment", "setGroupIcon"], - supportsAction: ({ action }) => action === "sendAttachment" || action === "setGroupIcon", - handleAction: async ({ params }) => - jsonResult({ - ok: true, - buffer: params.buffer, - filename: params.filename, - caption: params.caption, - contentType: params.contentType, - }), - }, - }; - - beforeEach(() => { - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "bluebubbles", - source: "test", - plugin: attachmentPlugin, - }, - ]), - ); - vi.mocked(loadWebMedia).mockResolvedValue({ - buffer: Buffer.from("hello"), - contentType: "image/png", - kind: "image", - fileName: "pic.png", - }); - }); - - afterEach(() => { - setActivePluginRegistry(createTestRegistry([])); - vi.clearAllMocks(); - }); - - async function restoreRealMediaLoader() { - const actual = await vi.importActual("../../web/media.js"); - 
vi.mocked(loadWebMedia).mockImplementation(actual.loadWebMedia); - } - - async function expectRejectsLocalAbsolutePathWithoutSandbox(params: { - action: "sendAttachment" | "setGroupIcon"; - target: string; - message?: string; - tempPrefix: string; - }) { - await restoreRealMediaLoader(); - - const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), params.tempPrefix)); - try { - const outsidePath = path.join(tempDir, "secret.txt"); - await fs.writeFile(outsidePath, "secret", "utf8"); - - const actionParams: Record = { - channel: "bluebubbles", - target: params.target, - media: outsidePath, - }; - if (params.message) { - actionParams.message = params.message; - } - - await expect( - runMessageAction({ - cfg, - action: params.action, - params: actionParams, - }), - ).rejects.toThrow(/allowed directory|path-not-allowed/i); - } finally { - await fs.rm(tempDir, { recursive: true, force: true }); - } - } - - it("hydrates buffer and filename from media for sendAttachment", async () => { - const result = await runMessageAction({ - cfg, - action: "sendAttachment", - params: { - channel: "bluebubbles", - target: "+15551234567", - media: "https://example.com/pic.png", - message: "caption", - }, - }); - - expect(result.kind).toBe("action"); - expect(result.payload).toMatchObject({ - ok: true, - filename: "pic.png", - caption: "caption", - contentType: "image/png", - }); - expect((result.payload as { buffer?: string }).buffer).toBe( - Buffer.from("hello").toString("base64"), - ); - const call = vi.mocked(loadWebMedia).mock.calls[0]; - expect(call?.[1]).toEqual( - expect.objectContaining({ - localRoots: expect.any(Array), - }), - ); - expect((call?.[1] as { sandboxValidated?: boolean } | undefined)?.sandboxValidated).not.toBe( - true, - ); - }); - - it("rewrites sandboxed media paths for sendAttachment", async () => { - await withSandbox(async (sandboxDir) => { - await runMessageAction({ - cfg, - action: "sendAttachment", - params: { - channel: "bluebubbles", - target: 
"+15551234567", - media: "./data/pic.png", - message: "caption", - }, - sandboxRoot: sandboxDir, - }); - - const call = vi.mocked(loadWebMedia).mock.calls[0]; - expect(call?.[0]).toBe(path.join(sandboxDir, "data", "pic.png")); - expect(call?.[1]).toEqual( - expect.objectContaining({ - sandboxValidated: true, - }), - ); - }); - }); - - it("rejects local absolute path for sendAttachment when sandboxRoot is missing", async () => { - await expectRejectsLocalAbsolutePathWithoutSandbox({ - action: "sendAttachment", - target: "+15551234567", - message: "caption", - tempPrefix: "msg-attachment-", - }); - }); - - it("rejects local absolute path for setGroupIcon when sandboxRoot is missing", async () => { - await expectRejectsLocalAbsolutePathWithoutSandbox({ - action: "setGroupIcon", - target: "group:123", - tempPrefix: "msg-group-icon-", - }); - }); -}); - -describe("runMessageAction sandboxed media validation", () => { - beforeEach(() => { - installChannelRuntimes({ includeTelegram: false, includeWhatsApp: false }); - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "slack", - source: "test", - plugin: slackPlugin, - }, - ]), - ); - }); - - afterEach(() => { - setActivePluginRegistry(createTestRegistry([])); - }); - - it.each(["/etc/passwd", "file:///etc/passwd"])( - "rejects out-of-sandbox media reference: %s", - async (media) => { - await withSandbox(async (sandboxDir) => { - await expect( - runDrySend({ - cfg: slackConfig, - actionParams: { - channel: "slack", - target: "#C12345678", - media, - message: "", - }, - sandboxRoot: sandboxDir, - }), - ).rejects.toThrow(/sandbox/i); - }); - }, - ); - - it("rejects data URLs in media params", async () => { - await expect( - runDrySend({ - cfg: slackConfig, - actionParams: { - channel: "slack", - target: "#C12345678", - media: "data:image/png;base64,abcd", - message: "", - }, - }), - ).rejects.toThrow(/data:/i); - }); - - it("rewrites sandbox-relative media paths", async () => { - await withSandbox(async 
(sandboxDir) => { - await expectSandboxMediaRewrite({ - sandboxDir, - media: "./data/file.txt", - message: "", - expectedRelativePath: path.join("data", "file.txt"), - }); - }); - }); - - it("rewrites /workspace media paths to host sandbox root", async () => { - await withSandbox(async (sandboxDir) => { - await expectSandboxMediaRewrite({ - sandboxDir, - media: "/workspace/data/file.txt", - message: "", - expectedRelativePath: path.join("data", "file.txt"), - }); - }); - }); - - it("rewrites MEDIA directives under sandbox", async () => { - await withSandbox(async (sandboxDir) => { - await expectSandboxMediaRewrite({ - sandboxDir, - message: "Hello\nMEDIA: ./data/note.ogg", - expectedRelativePath: path.join("data", "note.ogg"), - }); - }); - }); - - it("allows media paths under preferred OpenClaw tmp root", async () => { - const tmpRoot = resolvePreferredOpenClawTmpDir(); - await fs.mkdir(tmpRoot, { recursive: true }); - const sandboxDir = await fs.mkdtemp(path.join(os.tmpdir(), "msg-sandbox-")); - try { - const tmpFile = path.join(tmpRoot, "test-media-image.png"); - const result = await runMessageAction({ - cfg: slackConfig, - action: "send", - params: { - channel: "slack", - target: "#C12345678", - media: tmpFile, - message: "", - }, - sandboxRoot: sandboxDir, - dryRun: true, - }); - - expect(result.kind).toBe("send"); - if (result.kind !== "send") { - throw new Error("expected send result"); - } - // runMessageAction normalizes media paths through platform resolution. 
- expect(result.sendResult?.mediaUrl).toBe(path.resolve(tmpFile)); - const hostTmpOutsideOpenClaw = path.join(os.tmpdir(), "outside-openclaw", "test-media.png"); - await expect( - runMessageAction({ - cfg: slackConfig, - action: "send", - params: { - channel: "slack", - target: "#C12345678", - media: hostTmpOutsideOpenClaw, - message: "", - }, - sandboxRoot: sandboxDir, - dryRun: true, - }), - ).rejects.toThrow(/sandbox/i); - } finally { - await fs.rm(sandboxDir, { recursive: true, force: true }); - } - }); -}); - -describe("runMessageAction media caption behavior", () => { - afterEach(() => { - setActivePluginRegistry(createTestRegistry([])); - }); - - it("promotes caption to message for media sends when message is empty", async () => { - const sendMedia = vi.fn().mockResolvedValue({ - channel: "testchat", - messageId: "m1", - chatId: "c1", - }); - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "testchat", - source: "test", - plugin: createOutboundTestPlugin({ - id: "testchat", - outbound: { - deliveryMode: "direct", - sendText: vi.fn().mockResolvedValue({ - channel: "testchat", - messageId: "t1", - chatId: "c1", - }), - sendMedia, - }, - }), - }, - ]), - ); - const cfg = { - channels: { - testchat: { - enabled: true, - }, - }, - } as OpenClawConfig; - - const result = await runMessageAction({ - cfg, - action: "send", - params: { - channel: "testchat", - target: "channel:abc", - media: "https://example.com/cat.png", - caption: "caption-only text", - }, - dryRun: false, - }); - - expect(result.kind).toBe("send"); - expect(sendMedia).toHaveBeenCalledWith( - expect.objectContaining({ - text: "caption-only text", - mediaUrl: "https://example.com/cat.png", - }), - ); - }); -}); - -describe("runMessageAction card-only send behavior", () => { - const handleAction = vi.fn(async ({ params }: { params: Record }) => - jsonResult({ - ok: true, - card: params.card ?? null, - message: params.message ?? 
null, - }), - ); - - const cardPlugin: ChannelPlugin = { - id: "cardchat", - meta: { - id: "cardchat", - label: "Card Chat", - selectionLabel: "Card Chat", - docsPath: "/channels/cardchat", - blurb: "Card-only send test plugin.", - }, - capabilities: { chatTypes: ["direct"] }, - config: createAlwaysConfiguredPluginConfig(), - actions: { - listActions: () => ["send"], - supportsAction: ({ action }) => action === "send", - handleAction, - }, - }; - - beforeEach(() => { - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "cardchat", - source: "test", - plugin: cardPlugin, - }, - ]), - ); - handleAction.mockClear(); - }); - - afterEach(() => { - setActivePluginRegistry(createTestRegistry([])); - vi.clearAllMocks(); - }); - - it("allows card-only sends without text or media", async () => { - const cfg = { - channels: { - cardchat: { - enabled: true, - }, - }, - } as OpenClawConfig; - - const card = { - type: "AdaptiveCard", - version: "1.4", - body: [{ type: "TextBlock", text: "Card-only payload" }], - }; - - const result = await runMessageAction({ - cfg, - action: "send", - params: { - channel: "cardchat", - target: "channel:test-card", - card, - }, - dryRun: false, - }); - - expect(result.kind).toBe("send"); - expect(result.handledBy).toBe("plugin"); - expect(handleAction).toHaveBeenCalled(); - expect(result.payload).toMatchObject({ - ok: true, - card, - }); - }); -}); - -describe("runMessageAction telegram plugin poll forwarding", () => { - const handleAction = vi.fn(async ({ params }: { params: Record }) => - jsonResult({ - ok: true, - forwarded: { - to: params.to ?? null, - pollQuestion: params.pollQuestion ?? null, - pollOption: params.pollOption ?? null, - pollDurationSeconds: params.pollDurationSeconds ?? null, - pollPublic: params.pollPublic ?? null, - threadId: params.threadId ?? 
null, - }, - }), - ); - - const telegramPollPlugin: ChannelPlugin = { - id: "telegram", - meta: { - id: "telegram", - label: "Telegram", - selectionLabel: "Telegram", - docsPath: "/channels/telegram", - blurb: "Telegram poll forwarding test plugin.", - }, - capabilities: { chatTypes: ["direct"] }, - config: createAlwaysConfiguredPluginConfig(), - messaging: { - targetResolver: { - looksLikeId: () => true, - }, - }, - actions: { - listActions: () => ["poll"], - supportsAction: ({ action }) => action === "poll", - handleAction, - }, - }; - - beforeEach(() => { - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "telegram", - source: "test", - plugin: telegramPollPlugin, - }, - ]), - ); - handleAction.mockClear(); - }); - - afterEach(() => { - setActivePluginRegistry(createTestRegistry([])); - vi.clearAllMocks(); - }); - - it("forwards telegram poll params through plugin dispatch", async () => { - const result = await runMessageAction({ - cfg: { - channels: { - telegram: { - botToken: "tok", - }, - }, - } as OpenClawConfig, - action: "poll", - params: { - channel: "telegram", - target: "telegram:123", - pollQuestion: "Lunch?", - pollOption: ["Pizza", "Sushi"], - pollDurationSeconds: 120, - pollPublic: true, - threadId: "42", - }, - dryRun: false, - }); - - expect(result.kind).toBe("poll"); - expect(result.handledBy).toBe("plugin"); - expect(handleAction).toHaveBeenCalledWith( - expect.objectContaining({ - action: "poll", - channel: "telegram", - params: expect.objectContaining({ - to: "telegram:123", - pollQuestion: "Lunch?", - pollOption: ["Pizza", "Sushi"], - pollDurationSeconds: 120, - pollPublic: true, - threadId: "42", - }), - }), - ); - expect(result.payload).toMatchObject({ - ok: true, - forwarded: { - to: "telegram:123", - pollQuestion: "Lunch?", - pollOption: ["Pizza", "Sushi"], - pollDurationSeconds: 120, - pollPublic: true, - threadId: "42", - }, - }); - }); -}); - -describe("runMessageAction components parsing", () => { - const handleAction = 
vi.fn(async ({ params }: { params: Record }) => - jsonResult({ - ok: true, - components: params.components ?? null, - }), - ); - - const componentsPlugin: ChannelPlugin = { - id: "discord", - meta: { - id: "discord", - label: "Discord", - selectionLabel: "Discord", - docsPath: "/channels/discord", - blurb: "Discord components send test plugin.", - }, - capabilities: { chatTypes: ["direct"] }, - config: createAlwaysConfiguredPluginConfig({}), - actions: { - listActions: () => ["send"], - supportsAction: ({ action }) => action === "send", - handleAction, - }, - }; - - beforeEach(() => { - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "discord", - source: "test", - plugin: componentsPlugin, - }, - ]), - ); - handleAction.mockClear(); - }); - - afterEach(() => { - setActivePluginRegistry(createTestRegistry([])); - vi.clearAllMocks(); - }); - - it("parses components JSON strings before plugin dispatch", async () => { - const components = { - text: "hello", - buttons: [{ label: "A", customId: "a" }], - }; - const result = await runMessageAction({ - cfg: {} as OpenClawConfig, - action: "send", - params: { - channel: "discord", - target: "channel:123", - message: "hi", - components: JSON.stringify(components), - }, - dryRun: false, - }); - - expect(result.kind).toBe("send"); - expect(handleAction).toHaveBeenCalled(); - expect(result.payload).toMatchObject({ ok: true, components }); - }); - - it("throws on invalid components JSON strings", async () => { - await expect( - runMessageAction({ - cfg: {} as OpenClawConfig, - action: "send", - params: { - channel: "discord", - target: "channel:123", - message: "hi", - components: "{not-json}", - }, - dryRun: false, - }), - ).rejects.toThrow(/--components must be valid JSON/); - - expect(handleAction).not.toHaveBeenCalled(); - }); -}); - -describe("runMessageAction accountId defaults", () => { - const handleAction = vi.fn(async () => jsonResult({ ok: true })); - const accountPlugin: ChannelPlugin = { - id: 
"discord", - meta: { - id: "discord", - label: "Discord", - selectionLabel: "Discord", - docsPath: "/channels/discord", - blurb: "Discord test plugin.", - }, - capabilities: { chatTypes: ["direct"] }, - config: { - listAccountIds: () => ["default"], - resolveAccount: () => ({}), - }, - actions: { - listActions: () => ["send"], - handleAction, - }, - }; - - beforeEach(() => { - setActivePluginRegistry( - createTestRegistry([ - { - pluginId: "discord", - source: "test", - plugin: accountPlugin, - }, - ]), - ); - handleAction.mockClear(); - }); - - afterEach(() => { - setActivePluginRegistry(createTestRegistry([])); - vi.clearAllMocks(); - }); - - it("propagates defaultAccountId into params", async () => { - await runMessageAction({ - cfg: {} as OpenClawConfig, - action: "send", - params: { - channel: "discord", - target: "channel:123", - message: "hi", - }, - defaultAccountId: "ops", - }); - - expect(handleAction).toHaveBeenCalled(); - const ctx = (handleAction.mock.calls as unknown as Array<[unknown]>)[0]?.[0] as - | { - accountId?: string | null; - params: Record; - } - | undefined; - if (!ctx) { - throw new Error("expected action context"); - } - expect(ctx.accountId).toBe("ops"); - expect(ctx.params.accountId).toBe("ops"); - }); - - it("falls back to the agent's bound account when accountId is omitted", async () => { - await runMessageAction({ - cfg: { - bindings: [{ agentId: "agent-b", match: { channel: "discord", accountId: "account-b" } }], - } as OpenClawConfig, - action: "send", - params: { - channel: "discord", - target: "channel:123", - message: "hi", - }, - agentId: "agent-b", - }); - - expect(handleAction).toHaveBeenCalled(); - const ctx = (handleAction.mock.calls as unknown as Array<[unknown]>)[0]?.[0] as - | { - accountId?: string | null; - params: Record; - } - | undefined; - if (!ctx) { - throw new Error("expected action context"); - } - expect(ctx.accountId).toBe("account-b"); - expect(ctx.params.accountId).toBe("account-b"); - }); -}); diff --git 
a/src/infra/outbound/message-action-spec.test.ts b/src/infra/outbound/message-action-spec.test.ts new file mode 100644 index 00000000000..138f61e08a0 --- /dev/null +++ b/src/infra/outbound/message-action-spec.test.ts @@ -0,0 +1,39 @@ +import { describe, expect, it } from "vitest"; +import { actionHasTarget, actionRequiresTarget } from "./message-action-spec.js"; + +describe("actionRequiresTarget", () => { + it.each([ + ["send", true], + ["channel-info", true], + ["broadcast", false], + ["search", false], + ])("returns %s for %s", (action, expected) => { + expect(actionRequiresTarget(action as never)).toBe(expected); + }); +}); + +describe("actionHasTarget", () => { + it("detects canonical target fields", () => { + expect(actionHasTarget("send", { to: " channel:C1 " })).toBe(true); + expect(actionHasTarget("channel-info", { channelId: " C123 " })).toBe(true); + expect(actionHasTarget("send", { to: " ", channelId: "" })).toBe(false); + }); + + it("detects alias targets for message and chat actions", () => { + expect(actionHasTarget("edit", { messageId: " msg_123 " })).toBe(true); + expect(actionHasTarget("react", { chatGuid: "chat-guid" })).toBe(true); + expect(actionHasTarget("react", { chatIdentifier: "chat-id" })).toBe(true); + expect(actionHasTarget("react", { chatId: 42 })).toBe(true); + }); + + it("rejects blank and non-finite alias targets", () => { + expect(actionHasTarget("edit", { messageId: " " })).toBe(false); + expect(actionHasTarget("react", { chatGuid: "" })).toBe(false); + expect(actionHasTarget("react", { chatId: Number.NaN })).toBe(false); + expect(actionHasTarget("react", { chatId: Number.POSITIVE_INFINITY })).toBe(false); + }); + + it("ignores alias fields for actions without alias target support", () => { + expect(actionHasTarget("send", { messageId: "msg_123", chatId: 42 })).toBe(false); + }); +}); diff --git a/src/infra/outbound/message.channels.test.ts b/src/infra/outbound/message.channels.test.ts index 0a21264b43e..257d2ec94d6 100644 --- 
a/src/infra/outbound/message.channels.test.ts +++ b/src/infra/outbound/message.channels.test.ts @@ -97,13 +97,10 @@ describe("sendMessage channel normalization", () => { expect(seen.to).toBe("+15551234567"); }); - it("normalizes Teams alias", async () => { - const sendMSTeams = vi.fn(async () => ({ - messageId: "m1", - conversationId: "c1", - })); - setRegistry( - createTestRegistry([ + it.each([ + { + name: "normalizes Teams aliases", + registry: createTestRegistry([ { pluginId: "msteams", source: "test", @@ -113,40 +110,57 @@ describe("sendMessage channel normalization", () => { }), }, ]), - ); - const result = await sendMessage({ - cfg: {}, - to: "conversation:19:abc@thread.tacv2", - content: "hi", - channel: "teams", - deps: { sendMSTeams }, - }); - - expect(sendMSTeams).toHaveBeenCalledWith("conversation:19:abc@thread.tacv2", "hi"); - expect(result.channel).toBe("msteams"); - }); - - it("normalizes iMessage alias", async () => { - const sendIMessage = vi.fn(async () => ({ messageId: "i1" })); - setRegistry( - createTestRegistry([ + params: { + to: "conversation:19:abc@thread.tacv2", + channel: "teams", + deps: { + sendMSTeams: vi.fn(async () => ({ + messageId: "m1", + conversationId: "c1", + })), + }, + }, + assertDeps: (deps: { sendMSTeams?: ReturnType }) => { + expect(deps.sendMSTeams).toHaveBeenCalledWith("conversation:19:abc@thread.tacv2", "hi"); + }, + expectedChannel: "msteams", + }, + { + name: "normalizes iMessage aliases", + registry: createTestRegistry([ { pluginId: "imessage", source: "test", plugin: createIMessageTestPlugin(), }, ]), - ); + params: { + to: "someone@example.com", + channel: "imsg", + deps: { + sendIMessage: vi.fn(async () => ({ messageId: "i1" })), + }, + }, + assertDeps: (deps: { sendIMessage?: ReturnType }) => { + expect(deps.sendIMessage).toHaveBeenCalledWith( + "someone@example.com", + "hi", + expect.any(Object), + ); + }, + expectedChannel: "imessage", + }, + ])("$name", async ({ registry, params, assertDeps, expectedChannel }) 
=> { + setRegistry(registry); + const result = await sendMessage({ cfg: {}, - to: "someone@example.com", content: "hi", - channel: "imsg", - deps: { sendIMessage }, + ...params, }); - expect(sendIMessage).toHaveBeenCalledWith("someone@example.com", "hi", expect.any(Object)); - expect(result.channel).toBe("imessage"); + assertDeps(params.deps); + expect(result.channel).toBe(expectedChannel); }); }); @@ -162,34 +176,31 @@ describe("sendMessage replyToId threading", () => { return capturedCtx; }; - it("passes replyToId through to the outbound adapter", async () => { + it.each([ + { + name: "passes replyToId through to the outbound adapter", + params: { content: "thread reply", replyToId: "post123" }, + field: "replyToId", + expected: "post123", + }, + { + name: "passes threadId through to the outbound adapter", + params: { content: "topic reply", threadId: "topic456" }, + field: "threadId", + expected: "topic456", + }, + ])("$name", async ({ params, field, expected }) => { const capturedCtx = setupMattermostCapture(); await sendMessage({ cfg: {}, to: "channel:town-square", - content: "thread reply", channel: "mattermost", - replyToId: "post123", + ...params, }); expect(capturedCtx).toHaveLength(1); - expect(capturedCtx[0]?.replyToId).toBe("post123"); - }); - - it("passes threadId through to the outbound adapter", async () => { - const capturedCtx = setupMattermostCapture(); - - await sendMessage({ - cfg: {}, - to: "channel:town-square", - content: "topic reply", - channel: "mattermost", - threadId: "topic456", - }); - - expect(capturedCtx).toHaveLength(1); - expect(capturedCtx[0]?.threadId).toBe("topic456"); + expect(capturedCtx[0]?.[field]).toBe(expected); }); }); diff --git a/src/infra/outbound/outbound-policy.test.ts b/src/infra/outbound/outbound-policy.test.ts new file mode 100644 index 00000000000..fd19649c345 --- /dev/null +++ b/src/infra/outbound/outbound-policy.test.ts @@ -0,0 +1,127 @@ +import { describe, expect, it } from "vitest"; +import type { 
OpenClawConfig } from "../../config/config.js"; +import { + applyCrossContextDecoration, + buildCrossContextDecoration, + enforceCrossContextPolicy, + shouldApplyCrossContextMarker, +} from "./outbound-policy.js"; + +const slackConfig = { + channels: { + slack: { + botToken: "xoxb-test", + appToken: "xapp-test", + }, + }, +} as OpenClawConfig; + +const discordConfig = { + channels: { + discord: {}, + }, +} as OpenClawConfig; + +describe("outbound policy helpers", () => { + it("allows cross-provider sends when enabled", () => { + const cfg = { + ...slackConfig, + tools: { + message: { crossContext: { allowAcrossProviders: true } }, + }, + } as OpenClawConfig; + + expect(() => + enforceCrossContextPolicy({ + cfg, + channel: "telegram", + action: "send", + args: { to: "telegram:@ops" }, + toolContext: { currentChannelId: "C12345678", currentChannelProvider: "slack" }, + }), + ).not.toThrow(); + }); + + it("blocks cross-provider sends when not allowed", () => { + expect(() => + enforceCrossContextPolicy({ + cfg: slackConfig, + channel: "telegram", + action: "send", + args: { to: "telegram:@ops" }, + toolContext: { currentChannelId: "C12345678", currentChannelProvider: "slack" }, + }), + ).toThrow(/target provider "telegram" while bound to "slack"/); + }); + + it("blocks same-provider cross-context sends when allowWithinProvider is false", () => { + const cfg = { + ...slackConfig, + tools: { + message: { crossContext: { allowWithinProvider: false } }, + }, + } as OpenClawConfig; + + expect(() => + enforceCrossContextPolicy({ + cfg, + channel: "slack", + action: "send", + args: { to: "C999" }, + toolContext: { currentChannelId: "C123", currentChannelProvider: "slack" }, + }), + ).toThrow(/target="C999" while bound to "C123"/); + }); + + it("uses components when available and preferred", async () => { + const decoration = await buildCrossContextDecoration({ + cfg: discordConfig, + channel: "discord", + target: "123", + toolContext: { currentChannelId: "C12345678", 
currentChannelProvider: "discord" }, + }); + + expect(decoration).not.toBeNull(); + const applied = applyCrossContextDecoration({ + message: "hello", + decoration: decoration!, + preferComponents: true, + }); + + expect(applied.usedComponents).toBe(true); + expect(applied.componentsBuilder).toBeDefined(); + expect(applied.componentsBuilder?.("hello").length).toBeGreaterThan(0); + expect(applied.message).toBe("hello"); + }); + + it("returns null when decoration is skipped and falls back to text markers", async () => { + await expect( + buildCrossContextDecoration({ + cfg: discordConfig, + channel: "discord", + target: "123", + toolContext: { + currentChannelId: "C12345678", + currentChannelProvider: "discord", + skipCrossContextDecoration: true, + }, + }), + ).resolves.toBeNull(); + + const applied = applyCrossContextDecoration({ + message: "hello", + decoration: { prefix: "[from ops] ", suffix: " [cc]" }, + preferComponents: true, + }); + expect(applied).toEqual({ + message: "[from ops] hello [cc]", + usedComponents: false, + }); + }); + + it("marks only supported cross-context actions", () => { + expect(shouldApplyCrossContextMarker("send")).toBe(true); + expect(shouldApplyCrossContextMarker("thread-reply")).toBe(true); + expect(shouldApplyCrossContextMarker("thread-create")).toBe(false); + }); +}); diff --git a/src/infra/outbound/outbound-send-service.test.ts b/src/infra/outbound/outbound-send-service.test.ts index 68c956d93fc..391abee8dda 100644 --- a/src/infra/outbound/outbound-send-service.test.ts +++ b/src/infra/outbound/outbound-send-service.test.ts @@ -156,6 +156,78 @@ describe("executeSendAction", () => { ); }); + it("falls back to message and media params for plugin-handled mirror writes", async () => { + mocks.dispatchChannelMessageAction.mockResolvedValue(pluginActionResult("msg-plugin")); + + await executeSendAction({ + ctx: { + cfg: {}, + channel: "discord", + params: { to: "channel:123", message: "hello" }, + dryRun: false, + mirror: { + sessionKey: 
"agent:main:discord:channel:123", + agentId: "agent-9", + }, + }, + to: "channel:123", + message: "hello", + mediaUrls: ["https://example.com/a.png", "https://example.com/b.png"], + }); + + expect(mocks.appendAssistantMessageToSessionTranscript).toHaveBeenCalledWith( + expect.objectContaining({ + agentId: "agent-9", + sessionKey: "agent:main:discord:channel:123", + text: "hello", + mediaUrls: ["https://example.com/a.png", "https://example.com/b.png"], + }), + ); + }); + + it("skips plugin dispatch during dry-run sends and forwards gateway + silent to sendMessage", async () => { + mocks.sendMessage.mockResolvedValue({ + channel: "discord", + to: "channel:123", + via: "gateway", + mediaUrl: null, + }); + + await executeSendAction({ + ctx: { + cfg: {}, + channel: "discord", + params: { to: "channel:123", message: "hello" }, + dryRun: true, + silent: true, + gateway: { + url: "http://127.0.0.1:18789", + token: "tok", + timeoutMs: 5000, + clientName: "gateway", + mode: "gateway", + }, + }, + to: "channel:123", + message: "hello", + }); + + expect(mocks.dispatchChannelMessageAction).not.toHaveBeenCalled(); + expect(mocks.sendMessage).toHaveBeenCalledWith( + expect.objectContaining({ + to: "channel:123", + content: "hello", + dryRun: true, + silent: true, + gateway: expect.objectContaining({ + url: "http://127.0.0.1:18789", + token: "tok", + timeoutMs: 5000, + }), + }), + ); + }); + it("forwards poll args to sendPoll on core outbound path", async () => { mocks.dispatchChannelMessageAction.mockResolvedValue(null); mocks.sendPoll.mockResolvedValue({ @@ -200,4 +272,55 @@ describe("executeSendAction", () => { }), ); }); + + it("skips plugin dispatch during dry-run polls and forwards durationHours + silent", async () => { + mocks.sendPoll.mockResolvedValue({ + channel: "discord", + to: "channel:123", + question: "Lunch?", + options: ["Pizza", "Sushi"], + maxSelections: 1, + durationSeconds: null, + durationHours: 6, + via: "gateway", + }); + + await executePollAction({ + ctx: 
{ + cfg: {}, + channel: "discord", + params: {}, + dryRun: true, + silent: true, + gateway: { + url: "http://127.0.0.1:18789", + token: "tok", + timeoutMs: 5000, + clientName: "gateway", + mode: "gateway", + }, + }, + to: "channel:123", + question: "Lunch?", + options: ["Pizza", "Sushi"], + maxSelections: 1, + durationHours: 6, + }); + + expect(mocks.dispatchChannelMessageAction).not.toHaveBeenCalled(); + expect(mocks.sendPoll).toHaveBeenCalledWith( + expect.objectContaining({ + to: "channel:123", + question: "Lunch?", + durationHours: 6, + dryRun: true, + silent: true, + gateway: expect.objectContaining({ + url: "http://127.0.0.1:18789", + token: "tok", + timeoutMs: 5000, + }), + }), + ); + }); }); diff --git a/src/infra/outbound/outbound-session.test.ts b/src/infra/outbound/outbound-session.test.ts new file mode 100644 index 00000000000..17367f4a128 --- /dev/null +++ b/src/infra/outbound/outbound-session.test.ts @@ -0,0 +1,259 @@ +import { describe, expect, it } from "vitest"; +import type { OpenClawConfig } from "../../config/config.js"; +import { resolveOutboundSessionRoute } from "./outbound-session.js"; + +describe("resolveOutboundSessionRoute", () => { + const baseConfig = {} as OpenClawConfig; + + it("resolves provider-specific session routes", async () => { + const perChannelPeerCfg = { session: { dmScope: "per-channel-peer" } } as OpenClawConfig; + const identityLinksCfg = { + session: { + dmScope: "per-peer", + identityLinks: { + alice: ["discord:123"], + }, + }, + } as OpenClawConfig; + const slackMpimCfg = { + channels: { + slack: { + dm: { + groupChannels: ["G123"], + }, + }, + }, + } as OpenClawConfig; + const cases: Array<{ + name: string; + cfg: OpenClawConfig; + channel: string; + target: string; + replyToId?: string; + threadId?: string; + expected: { + sessionKey: string; + from?: string; + to?: string; + threadId?: string | number; + chatType?: "direct" | "group"; + }; + }> = [ + { + name: "Slack thread", + cfg: baseConfig, + channel: "slack", 
+ target: "channel:C123", + replyToId: "456", + expected: { + sessionKey: "agent:main:slack:channel:c123:thread:456", + from: "slack:channel:C123", + to: "channel:C123", + threadId: "456", + }, + }, + { + name: "Telegram topic group", + cfg: baseConfig, + channel: "telegram", + target: "-100123456:topic:42", + expected: { + sessionKey: "agent:main:telegram:group:-100123456:topic:42", + from: "telegram:group:-100123456:topic:42", + to: "telegram:-100123456", + threadId: 42, + }, + }, + { + name: "Telegram DM with topic", + cfg: perChannelPeerCfg, + channel: "telegram", + target: "123456789:topic:99", + expected: { + sessionKey: "agent:main:telegram:direct:123456789:thread:99", + from: "telegram:123456789:topic:99", + to: "telegram:123456789", + threadId: 99, + chatType: "direct", + }, + }, + { + name: "Telegram unresolved username DM", + cfg: perChannelPeerCfg, + channel: "telegram", + target: "@alice", + expected: { + sessionKey: "agent:main:telegram:direct:@alice", + chatType: "direct", + }, + }, + { + name: "Telegram DM scoped threadId fallback", + cfg: perChannelPeerCfg, + channel: "telegram", + target: "12345", + threadId: "12345:99", + expected: { + sessionKey: "agent:main:telegram:direct:12345:thread:99", + from: "telegram:12345:topic:99", + to: "telegram:12345", + threadId: 99, + chatType: "direct", + }, + }, + { + name: "identity-links per-peer", + cfg: identityLinksCfg, + channel: "discord", + target: "user:123", + expected: { + sessionKey: "agent:main:direct:alice", + }, + }, + { + name: "BlueBubbles chat_* prefix stripping", + cfg: baseConfig, + channel: "bluebubbles", + target: "chat_guid:ABC123", + expected: { + sessionKey: "agent:main:bluebubbles:group:abc123", + from: "group:ABC123", + }, + }, + { + name: "Zalo Personal DM target", + cfg: perChannelPeerCfg, + channel: "zalouser", + target: "123456", + expected: { + sessionKey: "agent:main:zalouser:direct:123456", + chatType: "direct", + }, + }, + { + name: "Slack mpim allowlist -> group key", + cfg: 
slackMpimCfg, + channel: "slack", + target: "channel:G123", + expected: { + sessionKey: "agent:main:slack:group:g123", + from: "slack:group:G123", + }, + }, + { + name: "Feishu explicit group prefix keeps group routing", + cfg: baseConfig, + channel: "feishu", + target: "group:oc_group_chat", + expected: { + sessionKey: "agent:main:feishu:group:oc_group_chat", + from: "feishu:group:oc_group_chat", + to: "oc_group_chat", + chatType: "group", + }, + }, + { + name: "Feishu explicit dm prefix keeps direct routing", + cfg: perChannelPeerCfg, + channel: "feishu", + target: "dm:oc_dm_chat", + expected: { + sessionKey: "agent:main:feishu:direct:oc_dm_chat", + from: "feishu:oc_dm_chat", + to: "oc_dm_chat", + chatType: "direct", + }, + }, + { + name: "Feishu bare oc_ target defaults to direct routing", + cfg: perChannelPeerCfg, + channel: "feishu", + target: "oc_ambiguous_chat", + expected: { + sessionKey: "agent:main:feishu:direct:oc_ambiguous_chat", + from: "feishu:oc_ambiguous_chat", + to: "oc_ambiguous_chat", + chatType: "direct", + }, + }, + ]; + + for (const testCase of cases) { + const route = await resolveOutboundSessionRoute({ + cfg: testCase.cfg, + channel: testCase.channel, + agentId: "main", + target: testCase.target, + replyToId: testCase.replyToId, + threadId: testCase.threadId, + }); + expect(route?.sessionKey, testCase.name).toBe(testCase.expected.sessionKey); + if (testCase.expected.from !== undefined) { + expect(route?.from, testCase.name).toBe(testCase.expected.from); + } + if (testCase.expected.to !== undefined) { + expect(route?.to, testCase.name).toBe(testCase.expected.to); + } + if (testCase.expected.threadId !== undefined) { + expect(route?.threadId, testCase.name).toBe(testCase.expected.threadId); + } + if (testCase.expected.chatType !== undefined) { + expect(route?.chatType, testCase.name).toBe(testCase.expected.chatType); + } + } + }); + + it("uses resolved Discord user targets to route bare numeric ids as DMs", async () => { + const route = await 
resolveOutboundSessionRoute({ + cfg: { session: { dmScope: "per-channel-peer" } } as OpenClawConfig, + channel: "discord", + agentId: "main", + target: "123", + resolvedTarget: { + to: "user:123", + kind: "user", + source: "directory", + }, + }); + + expect(route).toMatchObject({ + sessionKey: "agent:main:discord:direct:123", + from: "discord:123", + to: "user:123", + chatType: "direct", + }); + }); + + it("uses resolved Mattermost user targets to route bare ids as DMs", async () => { + const userId = "dthcxgoxhifn3pwh65cut3ud3w"; + const route = await resolveOutboundSessionRoute({ + cfg: { session: { dmScope: "per-channel-peer" } } as OpenClawConfig, + channel: "mattermost", + agentId: "main", + target: userId, + resolvedTarget: { + to: `user:${userId}`, + kind: "user", + source: "directory", + }, + }); + + expect(route).toMatchObject({ + sessionKey: `agent:main:mattermost:direct:${userId}`, + from: `mattermost:${userId}`, + to: `user:${userId}`, + chatType: "direct", + }); + }); + + it("rejects bare numeric Discord targets when the caller has no kind hint", async () => { + await expect( + resolveOutboundSessionRoute({ + cfg: { session: { dmScope: "per-channel-peer" } } as OpenClawConfig, + channel: "discord", + agentId: "main", + target: "123", + }), + ).rejects.toThrow(/Ambiguous Discord recipient/); + }); +}); diff --git a/src/infra/outbound/outbound.test.ts b/src/infra/outbound/outbound.test.ts index 72ccf3e3c55..c20632099bd 100644 --- a/src/infra/outbound/outbound.test.ts +++ b/src/infra/outbound/outbound.test.ts @@ -1,1299 +1,3 @@ -import fs from "node:fs"; -import os from "node:os"; -import path from "node:path"; -import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest"; -import type { ReplyPayload } from "../../auto-reply/types.js"; -import type { OpenClawConfig } from "../../config/config.js"; -import { typedCases } from "../../test-utils/typed-cases.js"; -import { - ackDelivery, - computeBackoffMs, - type DeliverFn, - 
enqueueDelivery, - failDelivery, - isEntryEligibleForRecoveryRetry, - isPermanentDeliveryError, - loadPendingDeliveries, - MAX_RETRIES, - moveToFailed, - recoverPendingDeliveries, -} from "./delivery-queue.js"; -import { DirectoryCache } from "./directory-cache.js"; -import { buildOutboundResultEnvelope } from "./envelope.js"; -import type { OutboundDeliveryJson } from "./format.js"; -import { - buildOutboundDeliveryJson, - formatGatewaySummary, - formatOutboundDeliverySummary, -} from "./format.js"; -import { - applyCrossContextDecoration, - buildCrossContextDecoration, - enforceCrossContextPolicy, -} from "./outbound-policy.js"; -import { resolveOutboundSessionRoute } from "./outbound-session.js"; -import { - formatOutboundPayloadLog, - normalizeOutboundPayloads, - normalizeOutboundPayloadsForJson, -} from "./payloads.js"; import { runResolveOutboundTargetCoreTests } from "./targets.shared-test.js"; -describe("delivery-queue", () => { - let tmpDir: string; - let fixtureRoot = ""; - let fixtureCount = 0; - - beforeAll(() => { - fixtureRoot = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-dq-suite-")); - }); - - beforeEach(() => { - tmpDir = path.join(fixtureRoot, `case-${fixtureCount++}`); - fs.mkdirSync(tmpDir, { recursive: true }); - }); - - afterAll(() => { - if (!fixtureRoot) { - return; - } - fs.rmSync(fixtureRoot, { recursive: true, force: true }); - fixtureRoot = ""; - }); - - describe("enqueue + ack lifecycle", () => { - it("creates and removes a queue entry", async () => { - const id = await enqueueDelivery( - { - channel: "whatsapp", - to: "+1555", - payloads: [{ text: "hello" }], - bestEffort: true, - gifPlayback: true, - silent: true, - mirror: { - sessionKey: "agent:main:main", - text: "hello", - mediaUrls: ["https://example.com/file.png"], - }, - }, - tmpDir, - ); - - // Entry file exists after enqueue. 
- const queueDir = path.join(tmpDir, "delivery-queue"); - const files = fs.readdirSync(queueDir).filter((f) => f.endsWith(".json")); - expect(files).toHaveLength(1); - expect(files[0]).toBe(`${id}.json`); - - // Entry contents are correct. - const entry = JSON.parse(fs.readFileSync(path.join(queueDir, files[0]), "utf-8")); - expect(entry).toMatchObject({ - id, - channel: "whatsapp", - to: "+1555", - bestEffort: true, - gifPlayback: true, - silent: true, - mirror: { - sessionKey: "agent:main:main", - text: "hello", - mediaUrls: ["https://example.com/file.png"], - }, - retryCount: 0, - }); - expect(entry.payloads).toEqual([{ text: "hello" }]); - - // Ack removes the file. - await ackDelivery(id, tmpDir); - const remaining = fs.readdirSync(queueDir).filter((f) => f.endsWith(".json")); - expect(remaining).toHaveLength(0); - }); - - it("ack is idempotent (no error on missing file)", async () => { - await expect(ackDelivery("nonexistent-id", tmpDir)).resolves.toBeUndefined(); - }); - - it("ack cleans up leftover .delivered marker when .json is already gone", async () => { - const id = await enqueueDelivery( - { channel: "whatsapp", to: "+1", payloads: [{ text: "stale-marker" }] }, - tmpDir, - ); - const queueDir = path.join(tmpDir, "delivery-queue"); - - fs.renameSync(path.join(queueDir, `${id}.json`), path.join(queueDir, `${id}.delivered`)); - await expect(ackDelivery(id, tmpDir)).resolves.toBeUndefined(); - - expect(fs.existsSync(path.join(queueDir, `${id}.delivered`))).toBe(false); - }); - - it("ack removes .delivered marker so recovery does not replay", async () => { - const id = await enqueueDelivery( - { channel: "whatsapp", to: "+1", payloads: [{ text: "ack-test" }] }, - tmpDir, - ); - const queueDir = path.join(tmpDir, "delivery-queue"); - - await ackDelivery(id, tmpDir); - - // Neither .json nor .delivered should remain. 
- expect(fs.existsSync(path.join(queueDir, `${id}.json`))).toBe(false); - expect(fs.existsSync(path.join(queueDir, `${id}.delivered`))).toBe(false); - }); - - it("loadPendingDeliveries cleans up stale .delivered markers without replaying", async () => { - const id = await enqueueDelivery( - { channel: "telegram", to: "99", payloads: [{ text: "stale" }] }, - tmpDir, - ); - const queueDir = path.join(tmpDir, "delivery-queue"); - - // Simulate crash between ack phase 1 (rename) and phase 2 (unlink): - // rename .json → .delivered, then pretend the process died. - fs.renameSync(path.join(queueDir, `${id}.json`), path.join(queueDir, `${id}.delivered`)); - - const entries = await loadPendingDeliveries(tmpDir); - - // The .delivered entry must NOT appear as pending. - expect(entries).toHaveLength(0); - // And the marker file should have been cleaned up. - expect(fs.existsSync(path.join(queueDir, `${id}.delivered`))).toBe(false); - }); - }); - - describe("failDelivery", () => { - it("increments retryCount, records attempt time, and sets lastError", async () => { - const id = await enqueueDelivery( - { - channel: "telegram", - to: "123", - payloads: [{ text: "test" }], - }, - tmpDir, - ); - - await failDelivery(id, "connection refused", tmpDir); - - const queueDir = path.join(tmpDir, "delivery-queue"); - const entry = JSON.parse(fs.readFileSync(path.join(queueDir, `${id}.json`), "utf-8")); - expect(entry.retryCount).toBe(1); - expect(typeof entry.lastAttemptAt).toBe("number"); - expect(entry.lastAttemptAt).toBeGreaterThan(0); - expect(entry.lastError).toBe("connection refused"); - }); - }); - - describe("moveToFailed", () => { - it("moves entry to failed/ subdirectory", async () => { - const id = await enqueueDelivery( - { - channel: "slack", - to: "#general", - payloads: [{ text: "hi" }], - }, - tmpDir, - ); - - await moveToFailed(id, tmpDir); - - const queueDir = path.join(tmpDir, "delivery-queue"); - const failedDir = path.join(queueDir, "failed"); - 
expect(fs.existsSync(path.join(queueDir, `${id}.json`))).toBe(false); - expect(fs.existsSync(path.join(failedDir, `${id}.json`))).toBe(true); - }); - }); - - describe("isPermanentDeliveryError", () => { - it.each([ - "No conversation reference found for user:abc", - "Telegram send failed: chat not found (chat_id=user:123)", - "user not found", - "Bot was blocked by the user", - "Forbidden: bot was kicked from the group chat", - "chat_id is empty", - "Outbound not configured for channel: msteams", - ])("returns true for permanent error: %s", (msg) => { - expect(isPermanentDeliveryError(msg)).toBe(true); - }); - - it.each([ - "network down", - "ETIMEDOUT", - "socket hang up", - "rate limited", - "500 Internal Server Error", - ])("returns false for transient error: %s", (msg) => { - expect(isPermanentDeliveryError(msg)).toBe(false); - }); - }); - - describe("loadPendingDeliveries", () => { - it("returns empty array when queue directory does not exist", async () => { - const nonexistent = path.join(tmpDir, "no-such-dir"); - const entries = await loadPendingDeliveries(nonexistent); - expect(entries).toEqual([]); - }); - - it("loads multiple entries", async () => { - await enqueueDelivery({ channel: "whatsapp", to: "+1", payloads: [{ text: "a" }] }, tmpDir); - await enqueueDelivery({ channel: "telegram", to: "2", payloads: [{ text: "b" }] }, tmpDir); - - const entries = await loadPendingDeliveries(tmpDir); - expect(entries).toHaveLength(2); - }); - - it("backfills lastAttemptAt for legacy retry entries during load", async () => { - const id = await enqueueDelivery( - { channel: "whatsapp", to: "+1", payloads: [{ text: "legacy" }] }, - tmpDir, - ); - const filePath = path.join(tmpDir, "delivery-queue", `${id}.json`); - const legacyEntry = JSON.parse(fs.readFileSync(filePath, "utf-8")); - legacyEntry.retryCount = 2; - delete legacyEntry.lastAttemptAt; - fs.writeFileSync(filePath, JSON.stringify(legacyEntry), "utf-8"); - - const entries = await 
loadPendingDeliveries(tmpDir); - expect(entries).toHaveLength(1); - expect(entries[0]?.lastAttemptAt).toBe(entries[0]?.enqueuedAt); - - const persisted = JSON.parse(fs.readFileSync(filePath, "utf-8")); - expect(persisted.lastAttemptAt).toBe(persisted.enqueuedAt); - }); - }); - - describe("computeBackoffMs", () => { - it("returns scheduled backoff values and clamps at max retry", () => { - const cases = [ - { retryCount: 0, expected: 0 }, - { retryCount: 1, expected: 5_000 }, - { retryCount: 2, expected: 25_000 }, - { retryCount: 3, expected: 120_000 }, - { retryCount: 4, expected: 600_000 }, - // Beyond defined schedule -- clamps to last value. - { retryCount: 5, expected: 600_000 }, - ] as const; - - for (const testCase of cases) { - expect(computeBackoffMs(testCase.retryCount), String(testCase.retryCount)).toBe( - testCase.expected, - ); - } - }); - }); - - describe("isEntryEligibleForRecoveryRetry", () => { - it("allows first replay after crash for retryCount=0 without lastAttemptAt", () => { - const now = Date.now(); - const result = isEntryEligibleForRecoveryRetry( - { - id: "entry-1", - channel: "whatsapp", - to: "+1", - payloads: [{ text: "a" }], - enqueuedAt: now, - retryCount: 0, - }, - now, - ); - expect(result).toEqual({ eligible: true }); - }); - - it("defers retry entries until backoff window elapses", () => { - const now = Date.now(); - const result = isEntryEligibleForRecoveryRetry( - { - id: "entry-2", - channel: "whatsapp", - to: "+1", - payloads: [{ text: "a" }], - enqueuedAt: now - 30_000, - retryCount: 3, - lastAttemptAt: now, - }, - now, - ); - expect(result.eligible).toBe(false); - if (result.eligible) { - throw new Error("Expected ineligible retry entry"); - } - expect(result.remainingBackoffMs).toBeGreaterThan(0); - }); - }); - - describe("recoverPendingDeliveries", () => { - const baseCfg = {}; - const createLog = () => ({ info: vi.fn(), warn: vi.fn(), error: vi.fn() }); - const enqueueCrashRecoveryEntries = async () => { - await 
enqueueDelivery({ channel: "whatsapp", to: "+1", payloads: [{ text: "a" }] }, tmpDir); - await enqueueDelivery({ channel: "telegram", to: "2", payloads: [{ text: "b" }] }, tmpDir); - }; - const setEntryState = ( - id: string, - state: { retryCount: number; lastAttemptAt?: number; enqueuedAt?: number }, - ) => { - const filePath = path.join(tmpDir, "delivery-queue", `${id}.json`); - const entry = JSON.parse(fs.readFileSync(filePath, "utf-8")); - entry.retryCount = state.retryCount; - if (state.lastAttemptAt === undefined) { - delete entry.lastAttemptAt; - } else { - entry.lastAttemptAt = state.lastAttemptAt; - } - if (state.enqueuedAt !== undefined) { - entry.enqueuedAt = state.enqueuedAt; - } - fs.writeFileSync(filePath, JSON.stringify(entry), "utf-8"); - }; - const runRecovery = async ({ - deliver, - log = createLog(), - maxRecoveryMs, - }: { - deliver: ReturnType; - log?: ReturnType; - maxRecoveryMs?: number; - }) => { - const result = await recoverPendingDeliveries({ - deliver: deliver as DeliverFn, - log, - cfg: baseCfg, - stateDir: tmpDir, - ...(maxRecoveryMs === undefined ? {} : { maxRecoveryMs }), - }); - return { result, log }; - }; - - it("recovers entries from a simulated crash", async () => { - // Manually create queue entries as if gateway crashed before delivery. - await enqueueCrashRecoveryEntries(); - const deliver = vi.fn().mockResolvedValue([]); - const { result } = await runRecovery({ deliver }); - - expect(deliver).toHaveBeenCalledTimes(2); - expect(result.recovered).toBe(2); - expect(result.failed).toBe(0); - expect(result.skippedMaxRetries).toBe(0); - expect(result.deferredBackoff).toBe(0); - - // Queue should be empty after recovery. - const remaining = await loadPendingDeliveries(tmpDir); - expect(remaining).toHaveLength(0); - }); - - it("moves entries that exceeded max retries to failed/", async () => { - // Create an entry and manually set retryCount to MAX_RETRIES. 
- const id = await enqueueDelivery( - { channel: "whatsapp", to: "+1", payloads: [{ text: "a" }] }, - tmpDir, - ); - setEntryState(id, { retryCount: MAX_RETRIES }); - - const deliver = vi.fn(); - const { result } = await runRecovery({ deliver }); - - expect(deliver).not.toHaveBeenCalled(); - expect(result.skippedMaxRetries).toBe(1); - expect(result.deferredBackoff).toBe(0); - - // Entry should be in failed/ directory. - const failedDir = path.join(tmpDir, "delivery-queue", "failed"); - expect(fs.existsSync(path.join(failedDir, `${id}.json`))).toBe(true); - }); - - it("increments retryCount on failed recovery attempt", async () => { - await enqueueDelivery({ channel: "slack", to: "#ch", payloads: [{ text: "x" }] }, tmpDir); - - const deliver = vi.fn().mockRejectedValue(new Error("network down")); - const { result } = await runRecovery({ deliver }); - - expect(result.failed).toBe(1); - expect(result.recovered).toBe(0); - - // Entry should still be in queue with incremented retryCount. - const entries = await loadPendingDeliveries(tmpDir); - expect(entries).toHaveLength(1); - expect(entries[0].retryCount).toBe(1); - expect(entries[0].lastError).toBe("network down"); - }); - - it("moves entries to failed/ immediately on permanent delivery errors", async () => { - const id = await enqueueDelivery( - { channel: "msteams", to: "user:abc", payloads: [{ text: "hi" }] }, - tmpDir, - ); - const deliver = vi - .fn() - .mockRejectedValue(new Error("No conversation reference found for user:abc")); - const log = createLog(); - const { result } = await runRecovery({ deliver, log }); - - expect(result.failed).toBe(1); - expect(result.recovered).toBe(0); - const remaining = await loadPendingDeliveries(tmpDir); - expect(remaining).toHaveLength(0); - const failedDir = path.join(tmpDir, "delivery-queue", "failed"); - expect(fs.existsSync(path.join(failedDir, `${id}.json`))).toBe(true); - expect(log.warn).toHaveBeenCalledWith(expect.stringContaining("permanent error")); - }); - - 
it("passes skipQueue: true to prevent re-enqueueing during recovery", async () => { - await enqueueDelivery({ channel: "whatsapp", to: "+1", payloads: [{ text: "a" }] }, tmpDir); - - const deliver = vi.fn().mockResolvedValue([]); - await runRecovery({ deliver }); - - expect(deliver).toHaveBeenCalledWith(expect.objectContaining({ skipQueue: true })); - }); - - it("replays stored delivery options during recovery", async () => { - await enqueueDelivery( - { - channel: "whatsapp", - to: "+1", - payloads: [{ text: "a" }], - bestEffort: true, - gifPlayback: true, - silent: true, - mirror: { - sessionKey: "agent:main:main", - text: "a", - mediaUrls: ["https://example.com/a.png"], - }, - }, - tmpDir, - ); - - const deliver = vi.fn().mockResolvedValue([]); - await runRecovery({ deliver }); - - expect(deliver).toHaveBeenCalledWith( - expect.objectContaining({ - bestEffort: true, - gifPlayback: true, - silent: true, - mirror: { - sessionKey: "agent:main:main", - text: "a", - mediaUrls: ["https://example.com/a.png"], - }, - }), - ); - }); - - it("respects maxRecoveryMs time budget", async () => { - await enqueueCrashRecoveryEntries(); - await enqueueDelivery({ channel: "slack", to: "#c", payloads: [{ text: "c" }] }, tmpDir); - - const deliver = vi.fn().mockResolvedValue([]); - const { result, log } = await runRecovery({ - deliver, - maxRecoveryMs: 0, // Immediate timeout -- no entries should be processed. - }); - - expect(deliver).not.toHaveBeenCalled(); - expect(result.recovered).toBe(0); - expect(result.failed).toBe(0); - expect(result.skippedMaxRetries).toBe(0); - expect(result.deferredBackoff).toBe(0); - - // All entries should still be in the queue. - const remaining = await loadPendingDeliveries(tmpDir); - expect(remaining).toHaveLength(3); - - // Should have logged a warning about deferred entries. 
- expect(log.warn).toHaveBeenCalledWith(expect.stringContaining("deferred to next restart")); - }); - - it("defers entries until backoff becomes eligible", async () => { - const id = await enqueueDelivery( - { channel: "whatsapp", to: "+1", payloads: [{ text: "a" }] }, - tmpDir, - ); - setEntryState(id, { retryCount: 3, lastAttemptAt: Date.now() }); - - const deliver = vi.fn().mockResolvedValue([]); - const { result, log } = await runRecovery({ - deliver, - maxRecoveryMs: 60_000, - }); - - expect(deliver).not.toHaveBeenCalled(); - expect(result).toEqual({ - recovered: 0, - failed: 0, - skippedMaxRetries: 0, - deferredBackoff: 1, - }); - - const remaining = await loadPendingDeliveries(tmpDir); - expect(remaining).toHaveLength(1); - - expect(log.info).toHaveBeenCalledWith(expect.stringContaining("not ready for retry yet")); - }); - - it("continues past high-backoff entries and recovers ready entries behind them", async () => { - const now = Date.now(); - const blockedId = await enqueueDelivery( - { channel: "whatsapp", to: "+1", payloads: [{ text: "blocked" }] }, - tmpDir, - ); - const readyId = await enqueueDelivery( - { channel: "telegram", to: "2", payloads: [{ text: "ready" }] }, - tmpDir, - ); - - setEntryState(blockedId, { retryCount: 3, lastAttemptAt: now, enqueuedAt: now - 30_000 }); - setEntryState(readyId, { retryCount: 0, enqueuedAt: now - 10_000 }); - - const deliver = vi.fn().mockResolvedValue([]); - const { result } = await runRecovery({ deliver, maxRecoveryMs: 60_000 }); - - expect(result).toEqual({ - recovered: 1, - failed: 0, - skippedMaxRetries: 0, - deferredBackoff: 1, - }); - expect(deliver).toHaveBeenCalledTimes(1); - expect(deliver).toHaveBeenCalledWith( - expect.objectContaining({ channel: "telegram", to: "2", skipQueue: true }), - ); - - const remaining = await loadPendingDeliveries(tmpDir); - expect(remaining).toHaveLength(1); - expect(remaining[0]?.id).toBe(blockedId); - }); - - it("recovers deferred entries on a later restart once backoff 
elapsed", async () => { - vi.useFakeTimers(); - const start = new Date("2026-01-01T00:00:00.000Z"); - vi.setSystemTime(start); - - const id = await enqueueDelivery( - { channel: "whatsapp", to: "+1", payloads: [{ text: "later" }] }, - tmpDir, - ); - setEntryState(id, { retryCount: 3, lastAttemptAt: start.getTime() }); - - const firstDeliver = vi.fn().mockResolvedValue([]); - const firstRun = await runRecovery({ deliver: firstDeliver, maxRecoveryMs: 60_000 }); - expect(firstRun.result).toEqual({ - recovered: 0, - failed: 0, - skippedMaxRetries: 0, - deferredBackoff: 1, - }); - expect(firstDeliver).not.toHaveBeenCalled(); - - vi.setSystemTime(new Date(start.getTime() + 600_000 + 1)); - const secondDeliver = vi.fn().mockResolvedValue([]); - const secondRun = await runRecovery({ deliver: secondDeliver, maxRecoveryMs: 60_000 }); - expect(secondRun.result).toEqual({ - recovered: 1, - failed: 0, - skippedMaxRetries: 0, - deferredBackoff: 0, - }); - expect(secondDeliver).toHaveBeenCalledTimes(1); - - const remaining = await loadPendingDeliveries(tmpDir); - expect(remaining).toHaveLength(0); - - vi.useRealTimers(); - }); - - it("returns zeros when queue is empty", async () => { - const deliver = vi.fn(); - const { result } = await runRecovery({ deliver }); - - expect(result).toEqual({ - recovered: 0, - failed: 0, - skippedMaxRetries: 0, - deferredBackoff: 0, - }); - expect(deliver).not.toHaveBeenCalled(); - }); - }); -}); - -describe("DirectoryCache", () => { - const cfg = {} as OpenClawConfig; - - afterEach(() => { - vi.useRealTimers(); - }); - - it("expires entries after ttl", () => { - vi.useFakeTimers(); - vi.setSystemTime(new Date("2026-01-01T00:00:00.000Z")); - const cache = new DirectoryCache(1000, 10); - - cache.set("a", "value-a", cfg); - expect(cache.get("a", cfg)).toBe("value-a"); - - vi.setSystemTime(new Date("2026-01-01T00:00:02.000Z")); - expect(cache.get("a", cfg)).toBeUndefined(); - }); - - it("evicts least-recent entries when capacity is exceeded", () => { 
- const cases = [ - { - actions: [ - ["set", "a", "value-a"], - ["set", "b", "value-b"], - ["set", "c", "value-c"], - ] as const, - expected: { a: undefined, b: "value-b", c: "value-c" }, - }, - { - actions: [ - ["set", "a", "value-a"], - ["set", "b", "value-b"], - ["set", "a", "value-a2"], - ["set", "c", "value-c"], - ] as const, - expected: { a: "value-a2", b: undefined, c: "value-c" }, - }, - ]; - - for (const testCase of cases) { - const cache = new DirectoryCache(60_000, 2); - for (const action of testCase.actions) { - cache.set(action[1], action[2], cfg); - } - expect(cache.get("a", cfg)).toBe(testCase.expected.a); - expect(cache.get("b", cfg)).toBe(testCase.expected.b); - expect(cache.get("c", cfg)).toBe(testCase.expected.c); - } - }); -}); - -describe("buildOutboundResultEnvelope", () => { - it("formats envelope variants", () => { - const whatsappDelivery: OutboundDeliveryJson = { - channel: "whatsapp", - via: "gateway", - to: "+1", - messageId: "m1", - mediaUrl: null, - }; - const telegramDelivery: OutboundDeliveryJson = { - channel: "telegram", - via: "direct", - to: "123", - messageId: "m2", - mediaUrl: null, - chatId: "c1", - }; - const discordDelivery: OutboundDeliveryJson = { - channel: "discord", - via: "gateway", - to: "channel:C1", - messageId: "m3", - mediaUrl: null, - channelId: "C1", - }; - const cases = typedCases<{ - name: string; - input: Parameters[0]; - expected: unknown; - }>([ - { - name: "flatten delivery by default", - input: { delivery: whatsappDelivery }, - expected: whatsappDelivery, - }, - { - name: "keep payloads + meta", - input: { - payloads: [{ text: "hi", mediaUrl: null, mediaUrls: undefined }], - meta: { foo: "bar" }, - }, - expected: { - payloads: [{ text: "hi", mediaUrl: null, mediaUrls: undefined }], - meta: { foo: "bar" }, - }, - }, - { - name: "include delivery when payloads exist", - input: { payloads: [], delivery: telegramDelivery, meta: { ok: true } }, - expected: { - payloads: [], - meta: { ok: true }, - delivery: 
telegramDelivery, - }, - }, - { - name: "keep wrapped delivery when flatten disabled", - input: { delivery: discordDelivery, flattenDelivery: false }, - expected: { delivery: discordDelivery }, - }, - ]); - for (const testCase of cases) { - expect(buildOutboundResultEnvelope(testCase.input), testCase.name).toEqual(testCase.expected); - } - }); -}); - -describe("formatOutboundDeliverySummary", () => { - it("formats fallback and channel-specific detail variants", () => { - const cases = [ - { - name: "fallback telegram", - channel: "telegram" as const, - result: undefined, - expected: "✅ Sent via Telegram. Message ID: unknown", - }, - { - name: "fallback imessage", - channel: "imessage" as const, - result: undefined, - expected: "✅ Sent via iMessage. Message ID: unknown", - }, - { - name: "telegram with chat detail", - channel: "telegram" as const, - result: { - channel: "telegram" as const, - messageId: "m1", - chatId: "c1", - }, - expected: "✅ Sent via Telegram. Message ID: m1 (chat c1)", - }, - { - name: "discord with channel detail", - channel: "discord" as const, - result: { - channel: "discord" as const, - messageId: "d1", - channelId: "chan", - }, - expected: "✅ Sent via Discord. 
Message ID: d1 (channel chan)", - }, - ]; - - for (const testCase of cases) { - expect(formatOutboundDeliverySummary(testCase.channel, testCase.result), testCase.name).toBe( - testCase.expected, - ); - } - }); -}); - -describe("buildOutboundDeliveryJson", () => { - it("builds direct delivery payloads across provider-specific fields", () => { - const cases = [ - { - name: "telegram direct payload", - input: { - channel: "telegram" as const, - to: "123", - result: { channel: "telegram" as const, messageId: "m1", chatId: "c1" }, - mediaUrl: "https://example.com/a.png", - }, - expected: { - channel: "telegram", - via: "direct", - to: "123", - messageId: "m1", - mediaUrl: "https://example.com/a.png", - chatId: "c1", - }, - }, - { - name: "whatsapp metadata", - input: { - channel: "whatsapp" as const, - to: "+1", - result: { channel: "whatsapp" as const, messageId: "w1", toJid: "jid" }, - }, - expected: { - channel: "whatsapp", - via: "direct", - to: "+1", - messageId: "w1", - mediaUrl: null, - toJid: "jid", - }, - }, - { - name: "signal timestamp", - input: { - channel: "signal" as const, - to: "+1", - result: { channel: "signal" as const, messageId: "s1", timestamp: 123 }, - }, - expected: { - channel: "signal", - via: "direct", - to: "+1", - messageId: "s1", - mediaUrl: null, - timestamp: 123, - }, - }, - ]; - - for (const testCase of cases) { - expect(buildOutboundDeliveryJson(testCase.input), testCase.name).toEqual(testCase.expected); - } - }); -}); - -describe("formatGatewaySummary", () => { - it("formats default and custom gateway action summaries", () => { - const cases = [ - { - name: "default send action", - input: { channel: "whatsapp", messageId: "m1" }, - expected: "✅ Sent via gateway (whatsapp). Message ID: m1", - }, - { - name: "custom action", - input: { action: "Poll sent", channel: "discord", messageId: "p1" }, - expected: "✅ Poll sent via gateway (discord). 
Message ID: p1", - }, - ]; - - for (const testCase of cases) { - expect(formatGatewaySummary(testCase.input), testCase.name).toBe(testCase.expected); - } - }); -}); - -const slackConfig = { - channels: { - slack: { - botToken: "xoxb-test", - appToken: "xapp-test", - }, - }, -} as OpenClawConfig; - -const discordConfig = { - channels: { - discord: {}, - }, -} as OpenClawConfig; - -describe("outbound policy", () => { - it("allows cross-provider sends when enabled", () => { - const cfg = { - ...slackConfig, - tools: { - message: { crossContext: { allowAcrossProviders: true } }, - }, - } as OpenClawConfig; - - expect(() => - enforceCrossContextPolicy({ - cfg, - channel: "telegram", - action: "send", - args: { to: "telegram:@ops" }, - toolContext: { currentChannelId: "C12345678", currentChannelProvider: "slack" }, - }), - ).not.toThrow(); - }); - - it("uses components when available and preferred", async () => { - const decoration = await buildCrossContextDecoration({ - cfg: discordConfig, - channel: "discord", - target: "123", - toolContext: { currentChannelId: "C12345678", currentChannelProvider: "discord" }, - }); - - expect(decoration).not.toBeNull(); - const applied = applyCrossContextDecoration({ - message: "hello", - decoration: decoration!, - preferComponents: true, - }); - - expect(applied.usedComponents).toBe(true); - expect(applied.componentsBuilder).toBeDefined(); - expect(applied.componentsBuilder?.("hello").length).toBeGreaterThan(0); - expect(applied.message).toBe("hello"); - }); -}); - -describe("resolveOutboundSessionRoute", () => { - const baseConfig = {} as OpenClawConfig; - - it("resolves provider-specific session routes", async () => { - const perChannelPeerCfg = { session: { dmScope: "per-channel-peer" } } as OpenClawConfig; - const identityLinksCfg = { - session: { - dmScope: "per-peer", - identityLinks: { - alice: ["discord:123"], - }, - }, - } as OpenClawConfig; - const slackMpimCfg = { - channels: { - slack: { - dm: { - groupChannels: ["G123"], 
- }, - }, - }, - } as OpenClawConfig; - const cases: Array<{ - name: string; - cfg: OpenClawConfig; - channel: string; - target: string; - replyToId?: string; - threadId?: string; - expected: { - sessionKey: string; - from?: string; - to?: string; - threadId?: string | number; - chatType?: "direct" | "group"; - }; - }> = [ - { - name: "Slack thread", - cfg: baseConfig, - channel: "slack", - target: "channel:C123", - replyToId: "456", - expected: { - sessionKey: "agent:main:slack:channel:c123:thread:456", - from: "slack:channel:C123", - to: "channel:C123", - threadId: "456", - }, - }, - { - name: "Telegram topic group", - cfg: baseConfig, - channel: "telegram", - target: "-100123456:topic:42", - expected: { - sessionKey: "agent:main:telegram:group:-100123456:topic:42", - from: "telegram:group:-100123456:topic:42", - to: "telegram:-100123456", - threadId: 42, - }, - }, - { - name: "Telegram DM with topic", - cfg: perChannelPeerCfg, - channel: "telegram", - target: "123456789:topic:99", - expected: { - sessionKey: "agent:main:telegram:direct:123456789:thread:99", - from: "telegram:123456789:topic:99", - to: "telegram:123456789", - threadId: 99, - chatType: "direct", - }, - }, - { - name: "Telegram unresolved username DM", - cfg: perChannelPeerCfg, - channel: "telegram", - target: "@alice", - expected: { - sessionKey: "agent:main:telegram:direct:@alice", - chatType: "direct", - }, - }, - { - name: "Telegram DM scoped threadId fallback", - cfg: perChannelPeerCfg, - channel: "telegram", - target: "12345", - threadId: "12345:99", - expected: { - sessionKey: "agent:main:telegram:direct:12345:thread:99", - from: "telegram:12345:topic:99", - to: "telegram:12345", - threadId: 99, - chatType: "direct", - }, - }, - { - name: "identity-links per-peer", - cfg: identityLinksCfg, - channel: "discord", - target: "user:123", - expected: { - sessionKey: "agent:main:direct:alice", - }, - }, - { - name: "BlueBubbles chat_* prefix stripping", - cfg: baseConfig, - channel: "bluebubbles", 
- target: "chat_guid:ABC123", - expected: { - sessionKey: "agent:main:bluebubbles:group:abc123", - from: "group:ABC123", - }, - }, - { - name: "Zalo Personal DM target", - cfg: perChannelPeerCfg, - channel: "zalouser", - target: "123456", - expected: { - sessionKey: "agent:main:zalouser:direct:123456", - chatType: "direct", - }, - }, - { - name: "Slack mpim allowlist -> group key", - cfg: slackMpimCfg, - channel: "slack", - target: "channel:G123", - expected: { - sessionKey: "agent:main:slack:group:g123", - from: "slack:group:G123", - }, - }, - { - name: "Feishu explicit group prefix keeps group routing", - cfg: baseConfig, - channel: "feishu", - target: "group:oc_group_chat", - expected: { - sessionKey: "agent:main:feishu:group:oc_group_chat", - from: "feishu:group:oc_group_chat", - to: "oc_group_chat", - chatType: "group", - }, - }, - { - name: "Feishu explicit dm prefix keeps direct routing", - cfg: perChannelPeerCfg, - channel: "feishu", - target: "dm:oc_dm_chat", - expected: { - sessionKey: "agent:main:feishu:direct:oc_dm_chat", - from: "feishu:oc_dm_chat", - to: "oc_dm_chat", - chatType: "direct", - }, - }, - { - name: "Feishu bare oc_ target defaults to direct routing", - cfg: perChannelPeerCfg, - channel: "feishu", - target: "oc_ambiguous_chat", - expected: { - sessionKey: "agent:main:feishu:direct:oc_ambiguous_chat", - from: "feishu:oc_ambiguous_chat", - to: "oc_ambiguous_chat", - chatType: "direct", - }, - }, - ]; - - for (const testCase of cases) { - const route = await resolveOutboundSessionRoute({ - cfg: testCase.cfg, - channel: testCase.channel, - agentId: "main", - target: testCase.target, - replyToId: testCase.replyToId, - threadId: testCase.threadId, - }); - expect(route?.sessionKey, testCase.name).toBe(testCase.expected.sessionKey); - if (testCase.expected.from !== undefined) { - expect(route?.from, testCase.name).toBe(testCase.expected.from); - } - if (testCase.expected.to !== undefined) { - expect(route?.to, 
testCase.name).toBe(testCase.expected.to); - } - if (testCase.expected.threadId !== undefined) { - expect(route?.threadId, testCase.name).toBe(testCase.expected.threadId); - } - if (testCase.expected.chatType !== undefined) { - expect(route?.chatType, testCase.name).toBe(testCase.expected.chatType); - } - } - }); - - it("uses resolved Discord user targets to route bare numeric ids as DMs", async () => { - const route = await resolveOutboundSessionRoute({ - cfg: { session: { dmScope: "per-channel-peer" } } as OpenClawConfig, - channel: "discord", - agentId: "main", - target: "123", - resolvedTarget: { - to: "user:123", - kind: "user", - source: "directory", - }, - }); - - expect(route).toMatchObject({ - sessionKey: "agent:main:discord:direct:123", - from: "discord:123", - to: "user:123", - chatType: "direct", - }); - }); - - it("uses resolved Mattermost user targets to route bare ids as DMs", async () => { - const userId = "dthcxgoxhifn3pwh65cut3ud3w"; - const route = await resolveOutboundSessionRoute({ - cfg: { session: { dmScope: "per-channel-peer" } } as OpenClawConfig, - channel: "mattermost", - agentId: "main", - target: userId, - resolvedTarget: { - to: `user:${userId}`, - kind: "user", - source: "directory", - }, - }); - - expect(route).toMatchObject({ - sessionKey: `agent:main:mattermost:direct:${userId}`, - from: `mattermost:${userId}`, - to: `user:${userId}`, - chatType: "direct", - }); - }); - - it("rejects bare numeric Discord targets when the caller has no kind hint", async () => { - await expect( - resolveOutboundSessionRoute({ - cfg: { session: { dmScope: "per-channel-peer" } } as OpenClawConfig, - channel: "discord", - agentId: "main", - target: "123", - }), - ).rejects.toThrow(/Ambiguous Discord recipient/); - }); -}); - -describe("normalizeOutboundPayloadsForJson", () => { - it("normalizes payloads for JSON output", () => { - const cases = typedCases<{ - input: Parameters[0]; - expected: ReturnType; - }>([ - { - input: [ - { text: "hi" }, - { text: 
"photo", mediaUrl: "https://x.test/a.jpg" }, - { text: "multi", mediaUrls: ["https://x.test/1.png"] }, - ], - expected: [ - { text: "hi", mediaUrl: null, mediaUrls: undefined, channelData: undefined }, - { - text: "photo", - mediaUrl: "https://x.test/a.jpg", - mediaUrls: ["https://x.test/a.jpg"], - channelData: undefined, - }, - { - text: "multi", - mediaUrl: null, - mediaUrls: ["https://x.test/1.png"], - channelData: undefined, - }, - ], - }, - { - input: [ - { - text: "MEDIA:https://x.test/a.png\nMEDIA:https://x.test/b.png", - }, - ], - expected: [ - { - text: "", - mediaUrl: null, - mediaUrls: ["https://x.test/a.png", "https://x.test/b.png"], - channelData: undefined, - }, - ], - }, - ]); - - for (const testCase of cases) { - const input: ReplyPayload[] = testCase.input.map((payload) => - "mediaUrls" in payload - ? ({ - ...payload, - mediaUrls: payload.mediaUrls ? [...payload.mediaUrls] : undefined, - } as ReplyPayload) - : ({ ...payload } as ReplyPayload), - ); - expect(normalizeOutboundPayloadsForJson(input)).toEqual(testCase.expected); - } - }); - - it("suppresses reasoning payloads", () => { - const normalized = normalizeOutboundPayloadsForJson([ - { text: "Reasoning:\n_step_", isReasoning: true }, - { text: "final answer" }, - ]); - expect(normalized).toEqual([{ text: "final answer", mediaUrl: null, mediaUrls: undefined }]); - }); -}); - -describe("normalizeOutboundPayloads", () => { - it("keeps channelData-only payloads", () => { - const channelData = { line: { flexMessage: { altText: "Card", contents: {} } } }; - const normalized = normalizeOutboundPayloads([{ channelData }]); - expect(normalized).toEqual([{ text: "", mediaUrls: [], channelData }]); - }); - - it("suppresses reasoning payloads", () => { - const normalized = normalizeOutboundPayloads([ - { text: "Reasoning:\n_step_", isReasoning: true }, - { text: "final answer" }, - ]); - expect(normalized).toEqual([{ text: "final answer", mediaUrls: [] }]); - }); -}); - 
-describe("formatOutboundPayloadLog", () => { - it("formats text+media and media-only logs", () => { - const cases = typedCases<{ - name: string; - input: Parameters[0]; - expected: string; - }>([ - { - name: "text with media lines", - input: { - text: "hello ", - mediaUrls: ["https://x.test/a.png", "https://x.test/b.png"], - }, - expected: "hello\nMEDIA:https://x.test/a.png\nMEDIA:https://x.test/b.png", - }, - { - name: "media only", - input: { - text: "", - mediaUrls: ["https://x.test/a.png"], - }, - expected: "MEDIA:https://x.test/a.png", - }, - ]); - - for (const testCase of cases) { - expect( - formatOutboundPayloadLog({ - ...testCase.input, - mediaUrls: [...testCase.input.mediaUrls], - }), - testCase.name, - ).toBe(testCase.expected); - } - }); -}); - runResolveOutboundTargetCoreTests(); diff --git a/src/infra/outbound/payloads.test.ts b/src/infra/outbound/payloads.test.ts new file mode 100644 index 00000000000..ef5ccbced53 --- /dev/null +++ b/src/infra/outbound/payloads.test.ts @@ -0,0 +1,198 @@ +import { describe, expect, it } from "vitest"; +import type { ReplyPayload } from "../../auto-reply/types.js"; +import { typedCases } from "../../test-utils/typed-cases.js"; +import { + formatOutboundPayloadLog, + normalizeOutboundPayloads, + normalizeOutboundPayloadsForJson, + normalizeReplyPayloadsForDelivery, +} from "./payloads.js"; + +describe("normalizeReplyPayloadsForDelivery", () => { + it("parses directives, merges media, and preserves reply metadata", () => { + expect( + normalizeReplyPayloadsForDelivery([ + { + text: "[[reply_to: 123]] Hello [[audio_as_voice]]\nMEDIA:https://x.test/a.png", + mediaUrl: " https://x.test/a.png ", + mediaUrls: ["https://x.test/a.png", "https://x.test/b.png"], + replyToTag: false, + }, + ]), + ).toEqual([ + { + text: "Hello", + mediaUrl: undefined, + mediaUrls: ["https://x.test/a.png", "https://x.test/b.png"], + replyToId: "123", + replyToTag: true, + replyToCurrent: false, + audioAsVoice: true, + }, + ]); + }); + + it("drops 
silent payloads without media and suppresses reasoning payloads", () => { + expect( + normalizeReplyPayloadsForDelivery([ + { text: "NO_REPLY" }, + { text: "Reasoning:\n_step_", isReasoning: true }, + { text: "final answer" }, + ]), + ).toEqual([ + { + text: "final answer", + mediaUrls: undefined, + mediaUrl: undefined, + replyToId: undefined, + replyToCurrent: false, + replyToTag: false, + audioAsVoice: false, + }, + ]); + }); + + it("keeps renderable channel-data payloads and reply-to-current markers", () => { + expect( + normalizeReplyPayloadsForDelivery([ + { + text: "[[reply_to_current]]", + channelData: { line: { flexMessage: { altText: "Card", contents: {} } } }, + }, + ]), + ).toEqual([ + { + text: "", + mediaUrls: undefined, + mediaUrl: undefined, + replyToCurrent: true, + replyToTag: true, + audioAsVoice: false, + channelData: { line: { flexMessage: { altText: "Card", contents: {} } } }, + }, + ]); + }); +}); + +describe("normalizeOutboundPayloadsForJson", () => { + it("normalizes payloads for JSON output", () => { + const cases = typedCases<{ + input: Parameters[0]; + expected: ReturnType; + }>([ + { + input: [ + { text: "hi" }, + { text: "photo", mediaUrl: "https://x.test/a.jpg" }, + { text: "multi", mediaUrls: ["https://x.test/1.png"] }, + ], + expected: [ + { text: "hi", mediaUrl: null, mediaUrls: undefined, channelData: undefined }, + { + text: "photo", + mediaUrl: "https://x.test/a.jpg", + mediaUrls: ["https://x.test/a.jpg"], + channelData: undefined, + }, + { + text: "multi", + mediaUrl: null, + mediaUrls: ["https://x.test/1.png"], + channelData: undefined, + }, + ], + }, + { + input: [ + { + text: "MEDIA:https://x.test/a.png\nMEDIA:https://x.test/b.png", + }, + ], + expected: [ + { + text: "", + mediaUrl: null, + mediaUrls: ["https://x.test/a.png", "https://x.test/b.png"], + channelData: undefined, + }, + ], + }, + ]); + + for (const testCase of cases) { + const input: ReplyPayload[] = testCase.input.map((payload) => + "mediaUrls" in payload + ? 
({ + ...payload, + mediaUrls: payload.mediaUrls ? [...payload.mediaUrls] : undefined, + } as ReplyPayload) + : ({ ...payload } as ReplyPayload), + ); + expect(normalizeOutboundPayloadsForJson(input)).toEqual(testCase.expected); + } + }); + + it("suppresses reasoning payloads", () => { + expect( + normalizeOutboundPayloadsForJson([ + { text: "Reasoning:\n_step_", isReasoning: true }, + { text: "final answer" }, + ]), + ).toEqual([{ text: "final answer", mediaUrl: null, mediaUrls: undefined }]); + }); +}); + +describe("normalizeOutboundPayloads", () => { + it("keeps channelData-only payloads", () => { + const channelData = { line: { flexMessage: { altText: "Card", contents: {} } } }; + expect(normalizeOutboundPayloads([{ channelData }])).toEqual([ + { text: "", mediaUrls: [], channelData }, + ]); + }); + + it("suppresses reasoning payloads", () => { + expect( + normalizeOutboundPayloads([ + { text: "Reasoning:\n_step_", isReasoning: true }, + { text: "final answer" }, + ]), + ).toEqual([{ text: "final answer", mediaUrls: [] }]); + }); +}); + +describe("formatOutboundPayloadLog", () => { + it("formats text+media and media-only logs", () => { + const cases = typedCases<{ + name: string; + input: Parameters[0]; + expected: string; + }>([ + { + name: "text with media lines", + input: { + text: "hello ", + mediaUrls: ["https://x.test/a.png", "https://x.test/b.png"], + }, + expected: "hello\nMEDIA:https://x.test/a.png\nMEDIA:https://x.test/b.png", + }, + { + name: "media only", + input: { + text: "", + mediaUrls: ["https://x.test/a.png"], + }, + expected: "MEDIA:https://x.test/a.png", + }, + ]); + + for (const testCase of cases) { + expect( + formatOutboundPayloadLog({ + ...testCase.input, + mediaUrls: [...testCase.input.mediaUrls], + }), + testCase.name, + ).toBe(testCase.expected); + } + }); +}); diff --git a/src/infra/outbound/session-context.test.ts b/src/infra/outbound/session-context.test.ts new file mode 100644 index 00000000000..c24ede1f3e8 --- /dev/null +++ 
b/src/infra/outbound/session-context.test.ts @@ -0,0 +1,55 @@ +import { describe, expect, it, vi } from "vitest"; + +const resolveSessionAgentIdMock = vi.hoisted(() => vi.fn()); + +vi.mock("../../agents/agent-scope.js", () => ({ + resolveSessionAgentId: (...args: unknown[]) => resolveSessionAgentIdMock(...args), +})); + +import { buildOutboundSessionContext } from "./session-context.js"; + +describe("buildOutboundSessionContext", () => { + it("returns undefined when both session key and agent id are blank", () => { + expect( + buildOutboundSessionContext({ + cfg: {} as never, + sessionKey: " ", + agentId: null, + }), + ).toBeUndefined(); + expect(resolveSessionAgentIdMock).not.toHaveBeenCalled(); + }); + + it("derives the agent id from the trimmed session key when no explicit agent is given", () => { + resolveSessionAgentIdMock.mockReturnValueOnce("derived-agent"); + + expect( + buildOutboundSessionContext({ + cfg: { agents: {} } as never, + sessionKey: " session:main:123 ", + }), + ).toEqual({ + key: "session:main:123", + agentId: "derived-agent", + }); + expect(resolveSessionAgentIdMock).toHaveBeenCalledWith({ + sessionKey: "session:main:123", + config: { agents: {} }, + }); + }); + + it("prefers an explicit trimmed agent id over the derived one", () => { + resolveSessionAgentIdMock.mockReturnValueOnce("derived-agent"); + + expect( + buildOutboundSessionContext({ + cfg: {} as never, + sessionKey: "session:main:123", + agentId: " explicit-agent ", + }), + ).toEqual({ + key: "session:main:123", + agentId: "explicit-agent", + }); + }); +}); diff --git a/src/infra/outbound/target-errors.test.ts b/src/infra/outbound/target-errors.test.ts new file mode 100644 index 00000000000..fb43f5279bf --- /dev/null +++ b/src/infra/outbound/target-errors.test.ts @@ -0,0 +1,39 @@ +import { describe, expect, it } from "vitest"; +import { + ambiguousTargetError, + ambiguousTargetMessage, + missingTargetError, + missingTargetMessage, + unknownTargetError, + unknownTargetMessage, +} 
from "./target-errors.js"; + +describe("target error helpers", () => { + it("formats missing-target messages with and without hints", () => { + expect(missingTargetMessage("Slack")).toBe("Delivering to Slack requires target"); + expect(missingTargetMessage("Slack", "Use channel:C123")).toBe( + "Delivering to Slack requires target Use channel:C123", + ); + expect(missingTargetError("Slack", "Use channel:C123").message).toBe( + "Delivering to Slack requires target Use channel:C123", + ); + }); + + it("formats ambiguous and unknown target messages with labeled hints", () => { + expect(ambiguousTargetMessage("Discord", "general")).toBe( + 'Ambiguous target "general" for Discord. Provide a unique name or an explicit id.', + ); + expect(ambiguousTargetMessage("Discord", "general", "Use channel:123")).toBe( + 'Ambiguous target "general" for Discord. Provide a unique name or an explicit id. Hint: Use channel:123', + ); + expect(unknownTargetMessage("Discord", "general", "Use channel:123")).toBe( + 'Unknown target "general" for Discord. 
Hint: Use channel:123', + ); + expect(ambiguousTargetError("Discord", "general", "Use channel:123").message).toContain( + "Hint: Use channel:123", + ); + expect(unknownTargetError("Discord", "general").message).toBe( + 'Unknown target "general" for Discord.', + ); + }); +}); diff --git a/src/infra/outbound/target-normalization.test.ts b/src/infra/outbound/target-normalization.test.ts new file mode 100644 index 00000000000..c8e6ea7e124 --- /dev/null +++ b/src/infra/outbound/target-normalization.test.ts @@ -0,0 +1,142 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const normalizeChannelIdMock = vi.hoisted(() => vi.fn()); +const getChannelPluginMock = vi.hoisted(() => vi.fn()); +const getActivePluginRegistryVersionMock = vi.hoisted(() => vi.fn()); + +vi.mock("../../channels/plugins/index.js", () => ({ + normalizeChannelId: (...args: unknown[]) => normalizeChannelIdMock(...args), + getChannelPlugin: (...args: unknown[]) => getChannelPluginMock(...args), +})); + +vi.mock("../../plugins/runtime.js", () => ({ + getActivePluginRegistryVersion: (...args: unknown[]) => + getActivePluginRegistryVersionMock(...args), +})); + +import { + buildTargetResolverSignature, + normalizeChannelTargetInput, + normalizeTargetForProvider, +} from "./target-normalization.js"; + +describe("normalizeChannelTargetInput", () => { + it("trims raw target input", () => { + expect(normalizeChannelTargetInput(" channel:C1 ")).toBe("channel:C1"); + }); +}); + +describe("normalizeTargetForProvider", () => { + beforeEach(() => { + normalizeChannelIdMock.mockReset(); + getChannelPluginMock.mockReset(); + getActivePluginRegistryVersionMock.mockReset(); + }); + + it("returns undefined for missing or blank raw input", () => { + expect(normalizeTargetForProvider("telegram")).toBeUndefined(); + expect(normalizeTargetForProvider("telegram", " ")).toBeUndefined(); + }); + + it("falls back to trimmed input when the provider is unknown or has no normalizer", () => { + 
normalizeChannelIdMock.mockReturnValueOnce(null); + expect(normalizeTargetForProvider("unknown", " raw-id ")).toBe("raw-id"); + + normalizeChannelIdMock.mockReturnValueOnce("telegram"); + getActivePluginRegistryVersionMock.mockReturnValueOnce(1); + getChannelPluginMock.mockReturnValueOnce(undefined); + expect(normalizeTargetForProvider("telegram", " raw-id ")).toBe("raw-id"); + }); + + it("uses the cached target normalizer until the plugin registry version changes", () => { + const firstNormalizer = vi.fn((raw: string) => raw.trim().toUpperCase()); + const secondNormalizer = vi.fn((raw: string) => `next:${raw.trim()}`); + normalizeChannelIdMock.mockReturnValue("telegram"); + getActivePluginRegistryVersionMock + .mockReturnValueOnce(10) + .mockReturnValueOnce(10) + .mockReturnValueOnce(11); + getChannelPluginMock + .mockReturnValueOnce({ + messaging: { normalizeTarget: firstNormalizer }, + }) + .mockReturnValueOnce({ + messaging: { normalizeTarget: secondNormalizer }, + }); + + expect(normalizeTargetForProvider("telegram", " abc ")).toBe("ABC"); + expect(normalizeTargetForProvider("telegram", " def ")).toBe("DEF"); + expect(normalizeTargetForProvider("telegram", " ghi ")).toBe("next:ghi"); + + expect(getChannelPluginMock).toHaveBeenCalledTimes(2); + expect(firstNormalizer).toHaveBeenCalledTimes(2); + expect(secondNormalizer).toHaveBeenCalledTimes(1); + }); + + it("returns undefined when the provider normalizer resolves to an empty value", () => { + normalizeChannelIdMock.mockReturnValueOnce("telegram"); + getActivePluginRegistryVersionMock.mockReturnValueOnce(20); + getChannelPluginMock.mockReturnValueOnce({ + messaging: { + normalizeTarget: () => "", + }, + }); + + expect(normalizeTargetForProvider("telegram", " raw-id ")).toBeUndefined(); + }); +}); + +describe("buildTargetResolverSignature", () => { + beforeEach(() => { + getChannelPluginMock.mockReset(); + }); + + it("builds stable signatures from resolver hint and looksLikeId source", () => { + const 
looksLikeId = (value: string) => value.startsWith("C"); + getChannelPluginMock.mockReturnValueOnce({ + messaging: { + targetResolver: { + hint: "Use channel id", + looksLikeId, + }, + }, + }); + + const first = buildTargetResolverSignature("slack"); + getChannelPluginMock.mockReturnValueOnce({ + messaging: { + targetResolver: { + hint: "Use channel id", + looksLikeId, + }, + }, + }); + const second = buildTargetResolverSignature("slack"); + + expect(first).toBe(second); + }); + + it("changes when resolver metadata changes", () => { + getChannelPluginMock.mockReturnValueOnce({ + messaging: { + targetResolver: { + hint: "Use channel id", + looksLikeId: (value: string) => value.startsWith("C"), + }, + }, + }); + const first = buildTargetResolverSignature("slack"); + + getChannelPluginMock.mockReturnValueOnce({ + messaging: { + targetResolver: { + hint: "Use user id", + looksLikeId: (value: string) => value.startsWith("U"), + }, + }, + }); + const second = buildTargetResolverSignature("slack"); + + expect(first).not.toBe(second); + }); +}); diff --git a/src/infra/outbound/targets.test.ts b/src/infra/outbound/targets.test.ts index 6a8b50403b5..b9c795f532e 100644 --- a/src/infra/outbound/targets.test.ts +++ b/src/infra/outbound/targets.test.ts @@ -339,35 +339,145 @@ describe("resolveSessionDeliveryTarget", () => { }, }); - it("allows heartbeat delivery to Slack DMs and avoids inherited threadId by default", () => { - const resolved = resolveHeartbeatTarget({ - sessionId: "sess-heartbeat-outbound", - updatedAt: 1, - lastChannel: "slack", - lastTo: "user:U123", - lastThreadId: "1739142736.000100", - }); + const expectHeartbeatTarget = (params: { + name: string; + entry: Parameters[0]["entry"]; + directPolicy?: "allow" | "block"; + expectedChannel: string; + expectedTo?: string; + expectedReason?: string; + expectedThreadId?: string | number; + }) => { + const resolved = resolveHeartbeatTarget(params.entry, params.directPolicy); + expect(resolved.channel, 
params.name).toBe(params.expectedChannel); + expect(resolved.to, params.name).toBe(params.expectedTo); + expect(resolved.reason, params.name).toBe(params.expectedReason); + expect(resolved.threadId, params.name).toBe(params.expectedThreadId); + }; - expect(resolved.channel).toBe("slack"); - expect(resolved.to).toBe("user:U123"); - expect(resolved.threadId).toBeUndefined(); - }); - - it("blocks heartbeat delivery to Slack DMs when directPolicy is block", () => { - const resolved = resolveHeartbeatTarget( - { - sessionId: "sess-heartbeat-outbound", + it.each([ + { + name: "allows heartbeat delivery to Slack DMs by default and drops inherited thread ids", + entry: { + sessionId: "sess-heartbeat-slack-direct", updatedAt: 1, lastChannel: "slack", lastTo: "user:U123", lastThreadId: "1739142736.000100", }, - "block", - ); - - expect(resolved.channel).toBe("none"); - expect(resolved.reason).toBe("dm-blocked"); - expect(resolved.threadId).toBeUndefined(); + expectedChannel: "slack", + expectedTo: "user:U123", + }, + { + name: "blocks heartbeat delivery to Slack DMs when directPolicy is block", + entry: { + sessionId: "sess-heartbeat-slack-direct-blocked", + updatedAt: 1, + lastChannel: "slack", + lastTo: "user:U123", + lastThreadId: "1739142736.000100", + }, + directPolicy: "block" as const, + expectedChannel: "none", + expectedReason: "dm-blocked", + }, + { + name: "allows heartbeat delivery to Telegram direct chats by default", + entry: { + sessionId: "sess-heartbeat-telegram-direct", + updatedAt: 1, + lastChannel: "telegram", + lastTo: "5232990709", + }, + expectedChannel: "telegram", + expectedTo: "5232990709", + }, + { + name: "blocks heartbeat delivery to Telegram direct chats when directPolicy is block", + entry: { + sessionId: "sess-heartbeat-telegram-direct-blocked", + updatedAt: 1, + lastChannel: "telegram", + lastTo: "5232990709", + }, + directPolicy: "block" as const, + expectedChannel: "none", + expectedReason: "dm-blocked", + }, + { + name: "keeps heartbeat 
delivery to Telegram groups", + entry: { + sessionId: "sess-heartbeat-telegram-group", + updatedAt: 1, + lastChannel: "telegram", + lastTo: "-1001234567890", + }, + expectedChannel: "telegram", + expectedTo: "-1001234567890", + }, + { + name: "allows heartbeat delivery to WhatsApp direct chats by default", + entry: { + sessionId: "sess-heartbeat-whatsapp-direct", + updatedAt: 1, + lastChannel: "whatsapp", + lastTo: "+15551234567", + }, + expectedChannel: "whatsapp", + expectedTo: "+15551234567", + }, + { + name: "keeps heartbeat delivery to WhatsApp groups", + entry: { + sessionId: "sess-heartbeat-whatsapp-group", + updatedAt: 1, + lastChannel: "whatsapp", + lastTo: "120363140186826074@g.us", + }, + expectedChannel: "whatsapp", + expectedTo: "120363140186826074@g.us", + }, + { + name: "uses session chatType hints when target parsing cannot classify a direct chat", + entry: { + sessionId: "sess-heartbeat-imessage-direct", + updatedAt: 1, + lastChannel: "imessage", + lastTo: "chat-guid-unknown-shape", + chatType: "direct", + }, + expectedChannel: "imessage", + expectedTo: "chat-guid-unknown-shape", + }, + { + name: "blocks session chatType direct hints when directPolicy is block", + entry: { + sessionId: "sess-heartbeat-imessage-direct-blocked", + updatedAt: 1, + lastChannel: "imessage", + lastTo: "chat-guid-unknown-shape", + chatType: "direct", + }, + directPolicy: "block" as const, + expectedChannel: "none", + expectedReason: "dm-blocked", + }, + ] satisfies Array<{ + name: string; + entry: NonNullable[0]["entry"]>; + directPolicy?: "allow" | "block"; + expectedChannel: string; + expectedTo?: string; + expectedReason?: string; + }>)("$name", ({ name, entry, directPolicy, expectedChannel, expectedTo, expectedReason }) => { + expectHeartbeatTarget({ + name, + entry, + directPolicy, + expectedChannel, + expectedTo, + expectedReason, + }); }); it("allows heartbeat delivery to Discord DMs by default", () => { @@ -389,119 +499,6 @@ 
describe("resolveSessionDeliveryTarget", () => { expect(resolved.to).toBe("user:12345"); }); - it("allows heartbeat delivery to Telegram direct chats by default", () => { - const resolved = resolveHeartbeatTarget({ - sessionId: "sess-heartbeat-telegram-direct", - updatedAt: 1, - lastChannel: "telegram", - lastTo: "5232990709", - }); - - expect(resolved.channel).toBe("telegram"); - expect(resolved.to).toBe("5232990709"); - }); - - it("blocks heartbeat delivery to Telegram direct chats when directPolicy is block", () => { - const resolved = resolveHeartbeatTarget( - { - sessionId: "sess-heartbeat-telegram-direct", - updatedAt: 1, - lastChannel: "telegram", - lastTo: "5232990709", - }, - "block", - ); - - expect(resolved.channel).toBe("none"); - expect(resolved.reason).toBe("dm-blocked"); - }); - - it("keeps heartbeat delivery to Telegram groups", () => { - const cfg: OpenClawConfig = {}; - const resolved = resolveHeartbeatDeliveryTarget({ - cfg, - entry: { - sessionId: "sess-heartbeat-telegram-group", - updatedAt: 1, - lastChannel: "telegram", - lastTo: "-1001234567890", - }, - heartbeat: { - target: "last", - }, - }); - - expect(resolved.channel).toBe("telegram"); - expect(resolved.to).toBe("-1001234567890"); - }); - - it("allows heartbeat delivery to WhatsApp direct chats by default", () => { - const cfg: OpenClawConfig = {}; - const resolved = resolveHeartbeatDeliveryTarget({ - cfg, - entry: { - sessionId: "sess-heartbeat-whatsapp-direct", - updatedAt: 1, - lastChannel: "whatsapp", - lastTo: "+15551234567", - }, - heartbeat: { - target: "last", - }, - }); - - expect(resolved.channel).toBe("whatsapp"); - expect(resolved.to).toBe("+15551234567"); - }); - - it("keeps heartbeat delivery to WhatsApp groups", () => { - const cfg: OpenClawConfig = {}; - const resolved = resolveHeartbeatDeliveryTarget({ - cfg, - entry: { - sessionId: "sess-heartbeat-whatsapp-group", - updatedAt: 1, - lastChannel: "whatsapp", - lastTo: "120363140186826074@g.us", - }, - heartbeat: { - 
target: "last", - }, - }); - - expect(resolved.channel).toBe("whatsapp"); - expect(resolved.to).toBe("120363140186826074@g.us"); - }); - - it("uses session chatType hint when target parser cannot classify and allows direct by default", () => { - const resolved = resolveHeartbeatTarget({ - sessionId: "sess-heartbeat-imessage-direct", - updatedAt: 1, - lastChannel: "imessage", - lastTo: "chat-guid-unknown-shape", - chatType: "direct", - }); - - expect(resolved.channel).toBe("imessage"); - expect(resolved.to).toBe("chat-guid-unknown-shape"); - }); - - it("blocks session chatType direct hints when directPolicy is block", () => { - const resolved = resolveHeartbeatTarget( - { - sessionId: "sess-heartbeat-imessage-direct", - updatedAt: 1, - lastChannel: "imessage", - lastTo: "chat-guid-unknown-shape", - chatType: "direct", - }, - "block", - ); - - expect(resolved.channel).toBe("none"); - expect(resolved.reason).toBe("dm-blocked"); - }); - it("keeps heartbeat delivery to Discord channels", () => { const cfg: OpenClawConfig = {}; const resolved = resolveHeartbeatDeliveryTarget({ diff --git a/src/infra/outbound/tool-payload.test.ts b/src/infra/outbound/tool-payload.test.ts new file mode 100644 index 00000000000..08629089618 --- /dev/null +++ b/src/infra/outbound/tool-payload.test.ts @@ -0,0 +1,42 @@ +import { describe, expect, it } from "vitest"; +import { extractToolPayload } from "./tool-payload.js"; + +describe("extractToolPayload", () => { + it("prefers explicit details payloads", () => { + expect( + extractToolPayload({ + details: { ok: true }, + content: [{ type: "text", text: '{"ignored":true}' }], + } as never), + ).toEqual({ ok: true }); + }); + + it("parses JSON text blocks from tool content", () => { + expect( + extractToolPayload({ + content: [ + { type: "image", url: "https://example.com/a.png" }, + { type: "text", text: '{"ok":true,"count":2}' }, + ], + } as never), + ).toEqual({ ok: true, count: 2 }); + }); + + it("falls back to raw text, then content, then 
the whole result", () => { + expect( + extractToolPayload({ + content: [{ type: "text", text: "not json" }], + } as never), + ).toBe("not json"); + + const content = [{ type: "image", url: "https://example.com/a.png" }]; + expect( + extractToolPayload({ + content, + } as never), + ).toBe(content); + + const result = { status: "ok" }; + expect(extractToolPayload(result as never)).toBe(result); + }); +}); diff --git a/src/infra/package-json.test.ts b/src/infra/package-json.test.ts new file mode 100644 index 00000000000..664fcaa4f14 --- /dev/null +++ b/src/infra/package-json.test.ts @@ -0,0 +1,39 @@ +import fs from "node:fs/promises"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { withTempDir } from "../test-helpers/temp-dir.js"; +import { readPackageName, readPackageVersion } from "./package-json.js"; + +describe("package-json helpers", () => { + it("reads package version and trims package name", async () => { + await withTempDir({ prefix: "openclaw-package-json-" }, async (root) => { + await fs.writeFile( + path.join(root, "package.json"), + JSON.stringify({ version: "1.2.3", name: " @openclaw/demo " }), + "utf8", + ); + + await expect(readPackageVersion(root)).resolves.toBe("1.2.3"); + await expect(readPackageName(root)).resolves.toBe("@openclaw/demo"); + }); + }); + + it("returns null for missing or invalid package.json data", async () => { + await withTempDir({ prefix: "openclaw-package-json-" }, async (root) => { + await expect(readPackageVersion(root)).resolves.toBeNull(); + await expect(readPackageName(root)).resolves.toBeNull(); + + await fs.writeFile(path.join(root, "package.json"), "{", "utf8"); + await expect(readPackageVersion(root)).resolves.toBeNull(); + await expect(readPackageName(root)).resolves.toBeNull(); + + await fs.writeFile( + path.join(root, "package.json"), + JSON.stringify({ version: 123, name: " " }), + "utf8", + ); + await expect(readPackageVersion(root)).resolves.toBeNull(); + await 
expect(readPackageName(root)).resolves.toBeNull(); + }); + }); +}); diff --git a/src/infra/package-tag.test.ts b/src/infra/package-tag.test.ts new file mode 100644 index 00000000000..794acf63093 --- /dev/null +++ b/src/infra/package-tag.test.ts @@ -0,0 +1,21 @@ +import { describe, expect, it } from "vitest"; +import { normalizePackageTagInput } from "./package-tag.js"; + +describe("normalizePackageTagInput", () => { + const packageNames = ["openclaw", "@openclaw/plugin"] as const; + + it("returns null for blank inputs", () => { + expect(normalizePackageTagInput(undefined, packageNames)).toBeNull(); + expect(normalizePackageTagInput(" ", packageNames)).toBeNull(); + }); + + it("strips known package-name prefixes before returning the tag", () => { + expect(normalizePackageTagInput("openclaw@beta", packageNames)).toBe("beta"); + expect(normalizePackageTagInput("@openclaw/plugin@2026.2.24", packageNames)).toBe("2026.2.24"); + }); + + it("returns trimmed raw values when no package prefix matches", () => { + expect(normalizePackageTagInput(" latest ", packageNames)).toBe("latest"); + expect(normalizePackageTagInput("@other/plugin@beta", packageNames)).toBe("@other/plugin@beta"); + }); +}); diff --git a/src/infra/pairing-files.test.ts b/src/infra/pairing-files.test.ts new file mode 100644 index 00000000000..8f891036956 --- /dev/null +++ b/src/infra/pairing-files.test.ts @@ -0,0 +1,85 @@ +import { describe, expect, it, vi } from "vitest"; +import { + pruneExpiredPending, + resolvePairingPaths, + upsertPendingPairingRequest, +} from "./pairing-files.js"; + +describe("pairing file helpers", () => { + it("resolves pairing file paths from explicit base dirs", () => { + expect(resolvePairingPaths("/tmp/openclaw-state", "devices")).toEqual({ + dir: "/tmp/openclaw-state/devices", + pendingPath: "/tmp/openclaw-state/devices/pending.json", + pairedPath: "/tmp/openclaw-state/devices/paired.json", + }); + }); + + it("prunes only entries older than the ttl", () => { + const 
pendingById = { + stale: { ts: 10, requestId: "stale" }, + edge: { ts: 50, requestId: "edge" }, + fresh: { ts: 70, requestId: "fresh" }, + }; + + pruneExpiredPending(pendingById, 100, 50); + + expect(pendingById).toEqual({ + edge: { ts: 50, requestId: "edge" }, + fresh: { ts: 70, requestId: "fresh" }, + }); + }); + + it("reuses existing pending requests without persisting again", async () => { + const persist = vi.fn(async () => undefined); + const existing = { requestId: "req-1", deviceId: "device-1", ts: 1 }; + const pendingById = { "req-1": existing }; + + await expect( + upsertPendingPairingRequest({ + pendingById, + isExisting: (pending) => pending.deviceId === "device-1", + createRequest: vi.fn(() => ({ requestId: "req-2", deviceId: "device-1", ts: 2 })), + isRepair: false, + persist, + }), + ).resolves.toEqual({ + status: "pending", + request: existing, + created: false, + }); + expect(persist).not.toHaveBeenCalled(); + }); + + it("creates and persists new pending requests with the repair flag", async () => { + const persist = vi.fn(async () => undefined); + const createRequest = vi.fn((isRepair: boolean) => ({ + requestId: "req-2", + deviceId: "device-2", + ts: 2, + isRepair, + })); + const pendingById: Record< + string, + { requestId: string; deviceId: string; ts: number; isRepair: boolean } + > = {}; + + await expect( + upsertPendingPairingRequest({ + pendingById, + isExisting: (pending) => pending.deviceId === "device-2", + createRequest, + isRepair: true, + persist, + }), + ).resolves.toEqual({ + status: "pending", + request: { requestId: "req-2", deviceId: "device-2", ts: 2, isRepair: true }, + created: true, + }); + expect(createRequest).toHaveBeenCalledWith(true); + expect(persist).toHaveBeenCalledOnce(); + expect(pendingById).toEqual({ + "req-2": { requestId: "req-2", deviceId: "device-2", ts: 2, isRepair: true }, + }); + }); +}); diff --git a/src/infra/pairing-pending.test.ts b/src/infra/pairing-pending.test.ts new file mode 100644 index 
00000000000..90c27ac0130 --- /dev/null +++ b/src/infra/pairing-pending.test.ts @@ -0,0 +1,48 @@ +import { describe, expect, it, vi } from "vitest"; +import { rejectPendingPairingRequest } from "./pairing-pending.js"; + +describe("rejectPendingPairingRequest", () => { + it("returns null and skips persistence when the request is missing", async () => { + const persistState = vi.fn(); + + await expect( + rejectPendingPairingRequest({ + requestId: "missing", + idKey: "deviceId", + loadState: async () => ({ pendingById: {} }), + persistState, + getId: (pending: { id: string }) => pending.id, + }), + ).resolves.toBeNull(); + + expect(persistState).not.toHaveBeenCalled(); + }); + + it("removes the request, persists, and returns the dynamic id key", async () => { + const state: { pendingById: Record } = { + pendingById: { + keep: { accountId: "keep-me" }, + reject: { accountId: "acct-42" }, + }, + }; + const persistState = vi.fn(async () => undefined); + + await expect( + rejectPendingPairingRequest({ + requestId: "reject", + idKey: "accountId", + loadState: async () => state, + persistState, + getId: (pending: { accountId: string }) => pending.accountId, + }), + ).resolves.toEqual({ + requestId: "reject", + accountId: "acct-42", + }); + + expect(state.pendingById).toEqual({ + keep: { accountId: "keep-me" }, + }); + expect(persistState).toHaveBeenCalledWith(state); + }); +}); diff --git a/src/infra/pairing-token.test.ts b/src/infra/pairing-token.test.ts new file mode 100644 index 00000000000..2d6a5964396 --- /dev/null +++ b/src/infra/pairing-token.test.ts @@ -0,0 +1,30 @@ +import { Buffer } from "node:buffer"; +import { describe, expect, it, vi } from "vitest"; + +const randomBytesMock = vi.hoisted(() => vi.fn()); + +vi.mock("node:crypto", async () => { + const actual = await vi.importActual("node:crypto"); + return { + ...actual, + randomBytes: (...args: unknown[]) => randomBytesMock(...args), + }; +}); + +import { generatePairingToken, PAIRING_TOKEN_BYTES, 
verifyPairingToken } from "./pairing-token.js"; + +describe("generatePairingToken", () => { + it("uses the configured byte count and returns a base64url token", () => { + randomBytesMock.mockReturnValueOnce(Buffer.from([0xfb, 0xff, 0x00])); + + expect(generatePairingToken()).toBe("-_8A"); + expect(randomBytesMock).toHaveBeenCalledWith(PAIRING_TOKEN_BYTES); + }); +}); + +describe("verifyPairingToken", () => { + it("uses constant-time comparison semantics", () => { + expect(verifyPairingToken("secret-token", "secret-token")).toBe(true); + expect(verifyPairingToken("secret-token", "secret-tokEn")).toBe(false); + }); +}); diff --git a/src/infra/parse-finite-number.test.ts b/src/infra/parse-finite-number.test.ts index 99b093dfe3b..46329f3001b 100644 --- a/src/infra/parse-finite-number.test.ts +++ b/src/infra/parse-finite-number.test.ts @@ -7,47 +7,58 @@ import { } from "./parse-finite-number.js"; describe("parseFiniteNumber", () => { - it("returns finite numbers", () => { - expect(parseFiniteNumber(42)).toBe(42); + it.each([ + { value: 42, expected: 42 }, + { value: "3.14", expected: 3.14 }, + { value: " 3.14ms", expected: 3.14 }, + { value: "+7", expected: 7 }, + { value: "1e3", expected: 1000 }, + ])("parses %j", ({ value, expected }) => { + expect(parseFiniteNumber(value)).toBe(expected); }); - it("parses numeric strings", () => { - expect(parseFiniteNumber("3.14")).toBe(3.14); - }); - - it("returns undefined for non-finite or non-numeric values", () => { - expect(parseFiniteNumber(Number.NaN)).toBeUndefined(); - expect(parseFiniteNumber(Number.POSITIVE_INFINITY)).toBeUndefined(); - expect(parseFiniteNumber("not-a-number")).toBeUndefined(); - expect(parseFiniteNumber(null)).toBeUndefined(); - }); + it.each([Number.NaN, Number.POSITIVE_INFINITY, "not-a-number", " ", "", null])( + "returns undefined for %j", + (value) => { + expect(parseFiniteNumber(value)).toBeUndefined(); + }, + ); }); describe("parseStrictInteger", () => { - it("parses exact integers", () => { - 
expect(parseStrictInteger("42")).toBe(42); - expect(parseStrictInteger(" -7 ")).toBe(-7); + it.each([ + { value: "42", expected: 42 }, + { value: " -7 ", expected: -7 }, + { value: 12, expected: 12 }, + { value: "+9", expected: 9 }, + ])("parses %j", ({ value, expected }) => { + expect(parseStrictInteger(value)).toBe(expected); }); - it("rejects junk prefixes and suffixes", () => { - expect(parseStrictInteger("42ms")).toBeUndefined(); - expect(parseStrictInteger("0abc")).toBeUndefined(); - expect(parseStrictInteger("1.5")).toBeUndefined(); - }); + it.each(["42ms", "0abc", "1.5", "1e3", " ", Number.MAX_SAFE_INTEGER + 1])( + "rejects %j", + (value) => { + expect(parseStrictInteger(value)).toBeUndefined(); + }, + ); }); describe("parseStrictPositiveInteger", () => { - it("accepts only positive integers", () => { - expect(parseStrictPositiveInteger("9")).toBe(9); - expect(parseStrictPositiveInteger("0")).toBeUndefined(); - expect(parseStrictPositiveInteger("-1")).toBeUndefined(); + it.each([ + { value: "9", expected: 9 }, + { value: "0", expected: undefined }, + { value: "-1", expected: undefined }, + ])("parses %j", ({ value, expected }) => { + expect(parseStrictPositiveInteger(value)).toBe(expected); }); }); describe("parseStrictNonNegativeInteger", () => { - it("accepts zero and positive integers only", () => { - expect(parseStrictNonNegativeInteger("0")).toBe(0); - expect(parseStrictNonNegativeInteger("9")).toBe(9); - expect(parseStrictNonNegativeInteger("-1")).toBeUndefined(); + it.each([ + { value: "0", expected: 0 }, + { value: "9", expected: 9 }, + { value: "-1", expected: undefined }, + ])("parses %j", ({ value, expected }) => { + expect(parseStrictNonNegativeInteger(value)).toBe(expected); }); }); diff --git a/src/infra/path-alias-guards.test.ts b/src/infra/path-alias-guards.test.ts index abc16c48847..7d70b79805a 100644 --- a/src/infra/path-alias-guards.test.ts +++ b/src/infra/path-alias-guards.test.ts @@ -1,76 +1,75 @@ import fs from "node:fs/promises"; 
import path from "node:path"; import { describe, expect, it } from "vitest"; +import { withTempDir } from "../test-helpers/temp-dir.js"; import { assertNoPathAliasEscape } from "./path-alias-guards.js"; -async function withTempRoot(run: (root: string) => Promise): Promise { - const base = await fs.mkdtemp(path.join(process.cwd(), "openclaw-path-alias-")); - const root = path.join(base, "root"); - await fs.mkdir(root, { recursive: true }); - try { - return await run(root); - } finally { - await fs.rm(base, { recursive: true, force: true }); - } -} - describe("assertNoPathAliasEscape", () => { it.runIf(process.platform !== "win32")( "rejects broken final symlink targets outside root", async () => { - await withTempRoot(async (root) => { - const outside = path.join(path.dirname(root), "outside"); - await fs.mkdir(outside, { recursive: true }); - const linkPath = path.join(root, "jump"); - await fs.symlink(path.join(outside, "owned.txt"), linkPath); + await withTempDir( + { prefix: "openclaw-path-alias-", parentDir: process.cwd(), subdir: "root" }, + async (root) => { + const outside = path.join(path.dirname(root), "outside"); + await fs.mkdir(outside, { recursive: true }); + const linkPath = path.join(root, "jump"); + await fs.symlink(path.join(outside, "owned.txt"), linkPath); - await expect( - assertNoPathAliasEscape({ - absolutePath: linkPath, - rootPath: root, - boundaryLabel: "sandbox root", - }), - ).rejects.toThrow(/Symlink escapes sandbox root/); - }); + await expect( + assertNoPathAliasEscape({ + absolutePath: linkPath, + rootPath: root, + boundaryLabel: "sandbox root", + }), + ).rejects.toThrow(/Symlink escapes sandbox root/); + }, + ); }, ); it.runIf(process.platform !== "win32")( "allows broken final symlink targets that remain inside root", async () => { - await withTempRoot(async (root) => { - const linkPath = path.join(root, "jump"); - await fs.symlink(path.join(root, "missing", "owned.txt"), linkPath); + await withTempDir( + { prefix: 
"openclaw-path-alias-", parentDir: process.cwd(), subdir: "root" }, + async (root) => { + const linkPath = path.join(root, "jump"); + await fs.symlink(path.join(root, "missing", "owned.txt"), linkPath); - await expect( - assertNoPathAliasEscape({ - absolutePath: linkPath, - rootPath: root, - boundaryLabel: "sandbox root", - }), - ).resolves.toBeUndefined(); - }); + await expect( + assertNoPathAliasEscape({ + absolutePath: linkPath, + rootPath: root, + boundaryLabel: "sandbox root", + }), + ).resolves.toBeUndefined(); + }, + ); }, ); it.runIf(process.platform !== "win32")( "rejects broken targets that traverse via an in-root symlink alias", async () => { - await withTempRoot(async (root) => { - const outside = path.join(path.dirname(root), "outside"); - await fs.mkdir(outside, { recursive: true }); - await fs.symlink(outside, path.join(root, "hop")); - const linkPath = path.join(root, "jump"); - await fs.symlink(path.join("hop", "missing", "owned.txt"), linkPath); + await withTempDir( + { prefix: "openclaw-path-alias-", parentDir: process.cwd(), subdir: "root" }, + async (root) => { + const outside = path.join(path.dirname(root), "outside"); + await fs.mkdir(outside, { recursive: true }); + await fs.symlink(outside, path.join(root, "hop")); + const linkPath = path.join(root, "jump"); + await fs.symlink(path.join("hop", "missing", "owned.txt"), linkPath); - await expect( - assertNoPathAliasEscape({ - absolutePath: linkPath, - rootPath: root, - boundaryLabel: "sandbox root", - }), - ).rejects.toThrow(/Symlink escapes sandbox root/); - }); + await expect( + assertNoPathAliasEscape({ + absolutePath: linkPath, + rootPath: root, + boundaryLabel: "sandbox root", + }), + ).rejects.toThrow(/Symlink escapes sandbox root/); + }, + ); }, ); }); diff --git a/src/infra/path-env.test.ts b/src/infra/path-env.test.ts index a439602d653..75c63f11d17 100644 --- a/src/infra/path-env.test.ts +++ b/src/infra/path-env.test.ts @@ -72,26 +72,39 @@ describe("ensureOpenClawCliOnPath", () => { 
} }); - it("prepends the bundled app bin dir when a sibling openclaw exists", () => { - const tmp = abs("/tmp/openclaw-path/case-bundled"); + function setupAppCliRoot(name: string) { + const tmp = abs(`/tmp/openclaw-path/${name}`); const appBinDir = path.join(tmp, "AppBin"); - const cliPath = path.join(appBinDir, "openclaw"); + const appCli = path.join(appBinDir, "openclaw"); setDir(tmp); setDir(appBinDir); - setExe(cliPath); + setExe(appCli); + return { tmp, appBinDir, appCli }; + } + function bootstrapPath(params: { + execPath: string; + cwd: string; + homeDir: string; + platform: NodeJS.Platform; + allowProjectLocalBin?: boolean; + }) { + ensureOpenClawCliOnPath(params); + return (process.env.PATH ?? "").split(path.delimiter); + } + + it("prepends the bundled app bin dir when a sibling openclaw exists", () => { + const { tmp, appBinDir, appCli } = setupAppCliRoot("case-bundled"); process.env.PATH = "/usr/bin"; delete process.env.OPENCLAW_PATH_BOOTSTRAPPED; - ensureOpenClawCliOnPath({ - execPath: cliPath, + const updated = bootstrapPath({ + execPath: appCli, cwd: tmp, homeDir: tmp, platform: "darwin", }); - - const updated = process.env.PATH ?? 
""; - expect(updated.split(path.delimiter)[0]).toBe(appBinDir); + expect(updated[0]).toBe(appBinDir); }); it("is idempotent", () => { @@ -107,13 +120,7 @@ describe("ensureOpenClawCliOnPath", () => { }); it("prepends mise shims when available", () => { - const tmp = abs("/tmp/openclaw-path/case-mise"); - const appBinDir = path.join(tmp, "AppBin"); - const appCli = path.join(appBinDir, "openclaw"); - setDir(tmp); - setDir(appBinDir); - setExe(appCli); - + const { tmp, appBinDir, appCli } = setupAppCliRoot("case-mise"); const miseDataDir = path.join(tmp, "mise"); const shimsDir = path.join(miseDataDir, "shims"); setDir(miseDataDir); @@ -123,62 +130,92 @@ describe("ensureOpenClawCliOnPath", () => { process.env.PATH = "/usr/bin"; delete process.env.OPENCLAW_PATH_BOOTSTRAPPED; - ensureOpenClawCliOnPath({ + const updated = bootstrapPath({ execPath: appCli, cwd: tmp, homeDir: tmp, platform: "darwin", }); - - const updated = process.env.PATH ?? ""; - const parts = updated.split(path.delimiter); - const appBinIndex = parts.indexOf(appBinDir); - const shimsIndex = parts.indexOf(shimsDir); + const appBinIndex = updated.indexOf(appBinDir); + const shimsIndex = updated.indexOf(shimsDir); expect(appBinIndex).toBeGreaterThanOrEqual(0); expect(shimsIndex).toBeGreaterThan(appBinIndex); }); - it("only appends project-local node_modules/.bin when explicitly enabled", () => { - const tmp = abs("/tmp/openclaw-path/case-project-local"); - const appBinDir = path.join(tmp, "AppBin"); - const appCli = path.join(appBinDir, "openclaw"); - setDir(tmp); - setDir(appBinDir); - setExe(appCli); - - const localBinDir = path.join(tmp, "node_modules", ".bin"); - const localCli = path.join(localBinDir, "openclaw"); - setDir(path.join(tmp, "node_modules")); - setDir(localBinDir); - setExe(localCli); - - process.env.PATH = "/usr/bin"; - delete process.env.OPENCLAW_PATH_BOOTSTRAPPED; - - ensureOpenClawCliOnPath({ - execPath: appCli, - cwd: tmp, - homeDir: tmp, - platform: "darwin", - }); - const 
withoutOptIn = (process.env.PATH ?? "").split(path.delimiter); - expect(withoutOptIn.includes(localBinDir)).toBe(false); - - process.env.PATH = "/usr/bin"; - delete process.env.OPENCLAW_PATH_BOOTSTRAPPED; - - ensureOpenClawCliOnPath({ - execPath: appCli, - cwd: tmp, - homeDir: tmp, - platform: "darwin", + it.each([ + { + name: "explicit option", + envValue: undefined, allowProjectLocalBin: true, + }, + { + name: "truthy env", + envValue: "1", + allowProjectLocalBin: undefined, + }, + ])( + "only appends project-local node_modules/.bin when enabled via $name", + ({ envValue, allowProjectLocalBin }) => { + const { tmp, appCli } = setupAppCliRoot("case-project-local"); + const localBinDir = path.join(tmp, "node_modules", ".bin"); + const localCli = path.join(localBinDir, "openclaw"); + setDir(path.join(tmp, "node_modules")); + setDir(localBinDir); + setExe(localCli); + + process.env.PATH = "/usr/bin"; + delete process.env.OPENCLAW_PATH_BOOTSTRAPPED; + delete process.env.OPENCLAW_ALLOW_PROJECT_LOCAL_BIN; + + const withoutOptIn = bootstrapPath({ + execPath: appCli, + cwd: tmp, + homeDir: tmp, + platform: "darwin", + }); + expect(withoutOptIn.includes(localBinDir)).toBe(false); + + process.env.PATH = "/usr/bin"; + delete process.env.OPENCLAW_PATH_BOOTSTRAPPED; + if (envValue === undefined) { + delete process.env.OPENCLAW_ALLOW_PROJECT_LOCAL_BIN; + } else { + process.env.OPENCLAW_ALLOW_PROJECT_LOCAL_BIN = envValue; + } + + const withOptIn = bootstrapPath({ + execPath: appCli, + cwd: tmp, + homeDir: tmp, + platform: "darwin", + ...(allowProjectLocalBin === undefined ? 
{} : { allowProjectLocalBin }), + }); + const usrBinIndex = withOptIn.indexOf("/usr/bin"); + const localIndex = withOptIn.indexOf(localBinDir); + expect(usrBinIndex).toBeGreaterThanOrEqual(0); + expect(localIndex).toBeGreaterThan(usrBinIndex); + }, + ); + + it("prepends XDG_BIN_HOME ahead of other user bin fallbacks", () => { + const { tmp, appCli } = setupAppCliRoot("case-xdg-bin-home"); + const xdgBinHome = path.join(tmp, "xdg-bin"); + const localBin = path.join(tmp, ".local", "bin"); + setDir(xdgBinHome); + setDir(path.join(tmp, ".local")); + setDir(localBin); + + process.env.PATH = "/usr/bin"; + process.env.XDG_BIN_HOME = xdgBinHome; + delete process.env.OPENCLAW_PATH_BOOTSTRAPPED; + + const updated = bootstrapPath({ + execPath: appCli, + cwd: tmp, + homeDir: tmp, + platform: "linux", }); - const withOptIn = (process.env.PATH ?? "").split(path.delimiter); - const usrBinIndex = withOptIn.indexOf("/usr/bin"); - const localIndex = withOptIn.indexOf(localBinDir); - expect(usrBinIndex).toBeGreaterThanOrEqual(0); - expect(localIndex).toBeGreaterThan(usrBinIndex); + expect(updated.indexOf(xdgBinHome)).toBeLessThan(updated.indexOf(localBin)); }); it("prepends Linuxbrew dirs when present", () => { @@ -200,15 +237,12 @@ describe("ensureOpenClawCliOnPath", () => { delete process.env.HOMEBREW_BREW_FILE; delete process.env.XDG_BIN_HOME; - ensureOpenClawCliOnPath({ + const parts = bootstrapPath({ execPath: path.join(execDir, "node"), cwd: tmp, homeDir: tmp, platform: "linux", }); - - const updated = process.env.PATH ?? 
""; - const parts = updated.split(path.delimiter); expect(parts[0]).toBe(linuxbrewBin); expect(parts[1]).toBe(linuxbrewSbin); }); diff --git a/src/infra/path-guards.test.ts b/src/infra/path-guards.test.ts new file mode 100644 index 00000000000..28bf3d7c3b8 --- /dev/null +++ b/src/infra/path-guards.test.ts @@ -0,0 +1,50 @@ +import { describe, expect, it } from "vitest"; +import { + hasNodeErrorCode, + isNodeError, + isNotFoundPathError, + isPathInside, + isSymlinkOpenError, + normalizeWindowsPathForComparison, +} from "./path-guards.js"; + +describe("normalizeWindowsPathForComparison", () => { + it("normalizes extended-length and UNC windows paths", () => { + expect(normalizeWindowsPathForComparison("\\\\?\\C:\\Users\\Peter/Repo")).toBe( + "c:\\users\\peter\\repo", + ); + expect(normalizeWindowsPathForComparison("\\\\?\\UNC\\Server\\Share\\Folder")).toBe( + "\\\\server\\share\\folder", + ); + }); +}); + +describe("node path error helpers", () => { + it("recognizes node-style error objects and exact codes", () => { + const enoent = { code: "ENOENT" }; + + expect(isNodeError(enoent)).toBe(true); + expect(isNodeError({ message: "nope" })).toBe(false); + expect(hasNodeErrorCode(enoent, "ENOENT")).toBe(true); + expect(hasNodeErrorCode(enoent, "EACCES")).toBe(false); + }); + + it("classifies not-found and symlink-open error codes", () => { + expect(isNotFoundPathError({ code: "ENOENT" })).toBe(true); + expect(isNotFoundPathError({ code: "ENOTDIR" })).toBe(true); + expect(isNotFoundPathError({ code: "EACCES" })).toBe(false); + + expect(isSymlinkOpenError({ code: "ELOOP" })).toBe(true); + expect(isSymlinkOpenError({ code: "EINVAL" })).toBe(true); + expect(isSymlinkOpenError({ code: "ENOTSUP" })).toBe(true); + expect(isSymlinkOpenError({ code: "ENOENT" })).toBe(false); + }); +}); + +describe("isPathInside", () => { + it("accepts identical and nested paths but rejects escapes", () => { + expect(isPathInside("/workspace/root", "/workspace/root")).toBe(true); + 
expect(isPathInside("/workspace/root", "/workspace/root/nested/file.txt")).toBe(true); + expect(isPathInside("/workspace/root", "/workspace/root/../escape.txt")).toBe(false); + }); +}); diff --git a/src/infra/path-prepend.test.ts b/src/infra/path-prepend.test.ts new file mode 100644 index 00000000000..29dfb504cfb --- /dev/null +++ b/src/infra/path-prepend.test.ts @@ -0,0 +1,33 @@ +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { mergePathPrepend, normalizePathPrepend } from "./path-prepend.js"; + +describe("path prepend helpers", () => { + it("normalizes prepend lists by trimming, skipping blanks, and deduping", () => { + expect( + normalizePathPrepend([ + " /custom/bin ", + "", + " /custom/bin ", + "/opt/bin", + // oxlint-disable-next-line typescript/no-explicit-any + 42 as any, + ]), + ).toEqual(["/custom/bin", "/opt/bin"]); + expect(normalizePathPrepend()).toEqual([]); + }); + + it("merges prepended paths ahead of existing values without duplicates", () => { + expect(mergePathPrepend(`/usr/bin${path.delimiter}/opt/bin`, ["/custom/bin", "/usr/bin"])).toBe( + ["/custom/bin", "/usr/bin", "/opt/bin"].join(path.delimiter), + ); + expect(mergePathPrepend(undefined, ["/custom/bin"])).toBe("/custom/bin"); + expect(mergePathPrepend("/usr/bin", [])).toBe("/usr/bin"); + }); + + it("trims existing path entries while preserving order", () => { + expect( + mergePathPrepend(` /usr/bin ${path.delimiter} ${path.delimiter} /opt/bin `, ["/custom/bin"]), + ).toBe(["/custom/bin", "/usr/bin", "/opt/bin"].join(path.delimiter)); + }); +}); diff --git a/src/infra/path-safety.test.ts b/src/infra/path-safety.test.ts index b05eeced172..4e89c1c35fb 100644 --- a/src/infra/path-safety.test.ts +++ b/src/infra/path-safety.test.ts @@ -3,14 +3,24 @@ import { describe, expect, it } from "vitest"; import { isWithinDir, resolveSafeBaseDir } from "./path-safety.js"; describe("path-safety", () => { - it("resolves safe base dir with trailing separator", () => { - 
const base = resolveSafeBaseDir("/tmp/demo"); - expect(base.endsWith(path.sep)).toBe(true); + it.each([ + { rootDir: "/tmp/demo", expected: `${path.resolve("/tmp/demo")}${path.sep}` }, + { rootDir: `/tmp/demo${path.sep}`, expected: `${path.resolve("/tmp/demo")}${path.sep}` }, + { rootDir: "/tmp/demo/..", expected: `${path.resolve("/tmp")}${path.sep}` }, + ])("resolves safe base dir for %j", ({ rootDir, expected }) => { + expect(resolveSafeBaseDir(rootDir)).toBe(expected); }); - it("checks directory containment", () => { - expect(isWithinDir("/tmp/demo", "/tmp/demo")).toBe(true); - expect(isWithinDir("/tmp/demo", "/tmp/demo/sub/file.txt")).toBe(true); - expect(isWithinDir("/tmp/demo", "/tmp/demo/../escape.txt")).toBe(false); + it.each([ + { rootDir: "/tmp/demo", targetPath: "/tmp/demo", expected: true }, + { rootDir: "/tmp/demo", targetPath: "/tmp/demo/sub/file.txt", expected: true }, + { rootDir: "/tmp/demo", targetPath: "/tmp/demo/./nested/../file.txt", expected: true }, + { rootDir: "/tmp/demo", targetPath: "/tmp/demo-two/../demo/file.txt", expected: true }, + { rootDir: "/tmp/demo", targetPath: "/tmp/demo/../escape.txt", expected: false }, + { rootDir: "/tmp/demo", targetPath: "/tmp/demo-sibling/file.txt", expected: false }, + { rootDir: "/tmp/demo", targetPath: "/tmp/demo/../../escape.txt", expected: false }, + { rootDir: "/tmp/demo", targetPath: "sub/file.txt", expected: false }, + ])("checks containment for %j", ({ rootDir, targetPath, expected }) => { + expect(isWithinDir(rootDir, targetPath)).toBe(expected); }); }); diff --git a/src/infra/plain-object.test.ts b/src/infra/plain-object.test.ts index b87e555b21a..272c7c94f9d 100644 --- a/src/infra/plain-object.test.ts +++ b/src/infra/plain-object.test.ts @@ -2,17 +2,28 @@ import { describe, expect, it } from "vitest"; import { isPlainObject } from "./plain-object.js"; describe("isPlainObject", () => { - it("accepts plain objects", () => { - expect(isPlainObject({})).toBe(true); - expect(isPlainObject({ a: 1 
})).toBe(true); + it.each([{}, { a: 1 }, Object.create(null), new (class X {})()])( + "accepts object-tag values: %j", + (value) => { + expect(isPlainObject(value)).toBe(true); + }, + ); + + it.each([ + null, + [], + new Date(), + /re/, + "x", + 42, + () => null, + new Map(), + { [Symbol.toStringTag]: "Array" }, + ])("rejects non-plain values: %j", (value) => { + expect(isPlainObject(value)).toBe(false); }); - it("rejects non-plain values", () => { - expect(isPlainObject(null)).toBe(false); - expect(isPlainObject([])).toBe(false); - expect(isPlainObject(new Date())).toBe(false); - expect(isPlainObject(/re/)).toBe(false); - expect(isPlainObject("x")).toBe(false); - expect(isPlainObject(42)).toBe(false); + it("accepts object-tag values with an explicit Object toStringTag", () => { + expect(isPlainObject({ [Symbol.toStringTag]: "Object" })).toBe(true); }); }); diff --git a/src/infra/plugin-install-path-warnings.test.ts b/src/infra/plugin-install-path-warnings.test.ts index 6c24e57623f..eef3348fb06 100644 --- a/src/infra/plugin-install-path-warnings.test.ts +++ b/src/infra/plugin-install-path-warnings.test.ts @@ -8,6 +8,25 @@ import { } from "./plugin-install-path-warnings.js"; describe("plugin install path warnings", () => { + it("ignores non-path installs and blank path candidates", async () => { + expect( + await detectPluginInstallPathIssue({ + pluginId: "matrix", + install: null, + }), + ).toBeNull(); + expect( + await detectPluginInstallPathIssue({ + pluginId: "matrix", + install: { + source: "npm", + sourcePath: " ", + installPath: " ", + }, + }), + ).toBeNull(); + }); + it("detects stale custom plugin install paths", async () => { const issue = await detectPluginInstallPathIssue({ pluginId: "matrix", @@ -37,6 +56,28 @@ describe("plugin install path warnings", () => { ]); }); + it("uses the second candidate path when the first one is stale", async () => { + await withTempHome(async (home) => { + const pluginPath = path.join(home, "matrix-plugin"); + await 
fs.mkdir(pluginPath, { recursive: true });
+
+      const issue = await detectPluginInstallPathIssue({
+        pluginId: "matrix",
+        install: {
+          source: "path",
+          sourcePath: "/tmp/openclaw-matrix-missing",
+          installPath: pluginPath,
+        },
+      });
+
+      expect(issue).toEqual({
+        kind: "custom-path",
+        pluginId: "matrix",
+        path: pluginPath,
+      });
+    });
+  });
+
   it("detects active custom plugin install paths", async () => {
     await withTempHome(async (home) => {
       const pluginPath = path.join(home, "matrix-plugin");
@@ -58,4 +99,25 @@ describe("plugin install path warnings", () => {
       });
     });
   });
+
+  it("applies custom command formatting in warning messages", () => {
+    expect(
+      formatPluginInstallPathIssue({
+        issue: {
+          kind: "custom-path",
+          pluginId: "matrix",
+          path: "/tmp/matrix-plugin",
+        },
+        pluginLabel: "Matrix",
+        defaultInstallCommand: "openclaw plugins install @openclaw/matrix",
+        repoInstallCommand: "openclaw plugins install ./extensions/matrix",
+        formatCommand: (command) => `<${command}>`,
+      }),
+    ).toEqual([
+      "Matrix is installed from a custom path: /tmp/matrix-plugin",
+      "Main updates will not automatically replace that plugin with the repo's default Matrix package.",
+      'Reinstall with "<openclaw plugins install @openclaw/matrix>" when you want to return to the standard Matrix plugin.',
+      'If you are intentionally running from a repo checkout, reinstall that checkout explicitly with "<openclaw plugins install ./extensions/matrix>" after updates.',
+    ]);
+  });
 });
diff --git a/src/infra/ports-format.test.ts b/src/infra/ports-format.test.ts
new file mode 100644
index 00000000000..c532de63970
--- /dev/null
+++ b/src/infra/ports-format.test.ts
@@ -0,0 +1,87 @@
+import { describe, expect, it } from "vitest";
+import {
+  buildPortHints,
+  classifyPortListener,
+  formatPortDiagnostics,
+  formatPortListener,
+} from "./ports-format.js";
+
+describe("ports-format", () => {
+  it("classifies listeners across gateway, ssh, and unknown command lines", () => {
+    const cases = [
+      {
+        listener: { commandLine: "ssh -N -L 18789:127.0.0.1:18789 user@host" },
+        expected:
"ssh", + }, + { + listener: { command: "ssh" }, + expected: "ssh", + }, + { + listener: { commandLine: "node /Users/me/Projects/openclaw/dist/entry.js gateway" }, + expected: "gateway", + }, + { + listener: { commandLine: "python -m http.server 18789" }, + expected: "unknown", + }, + ] as const; + + for (const testCase of cases) { + expect( + classifyPortListener(testCase.listener, 18789), + JSON.stringify(testCase.listener), + ).toBe(testCase.expected); + } + }); + + it("builds ordered hints for mixed listener kinds and multiplicity", () => { + expect( + buildPortHints( + [ + { commandLine: "node dist/index.js openclaw gateway" }, + { commandLine: "ssh -N -L 18789:127.0.0.1:18789" }, + { commandLine: "python -m http.server 18789" }, + ], + 18789, + ), + ).toEqual([ + expect.stringContaining("Gateway already running locally."), + "SSH tunnel already bound to this port. Close the tunnel or use a different local port in -L.", + "Another process is listening on this port.", + expect.stringContaining("Multiple listeners detected"), + ]); + expect(buildPortHints([], 18789)).toEqual([]); + }); + + it("formats listeners with pid, user, command, and address fallbacks", () => { + expect( + formatPortListener({ pid: 123, user: "alice", commandLine: "ssh -N", address: "::1" }), + ).toBe("pid 123 alice: ssh -N (::1)"); + expect(formatPortListener({ command: "ssh", address: "127.0.0.1:18789" })).toBe( + "pid ?: ssh (127.0.0.1:18789)", + ); + expect(formatPortListener({})).toBe("pid ?: unknown"); + }); + + it("formats free and busy port diagnostics", () => { + expect( + formatPortDiagnostics({ + port: 18789, + status: "free", + listeners: [], + hints: [], + }), + ).toEqual(["Port 18789 is free."]); + + const lines = formatPortDiagnostics({ + port: 18789, + status: "busy", + listeners: [{ pid: 123, user: "alice", commandLine: "ssh -N -L 18789:127.0.0.1:18789" }], + hints: buildPortHints([{ pid: 123, commandLine: "ssh -N -L 18789:127.0.0.1:18789" }], 18789), + }); + 
expect(lines[0]).toContain("Port 18789 is already in use"); + expect(lines).toContain("- pid 123 alice: ssh -N -L 18789:127.0.0.1:18789"); + expect(lines.some((line) => line.includes("SSH tunnel"))).toBe(true); + }); +}); diff --git a/src/infra/ports-lsof.test.ts b/src/infra/ports-lsof.test.ts new file mode 100644 index 00000000000..eb599112a5a --- /dev/null +++ b/src/infra/ports-lsof.test.ts @@ -0,0 +1,67 @@ +import fs from "node:fs"; +import fsPromises from "node:fs/promises"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import { resolveLsofCommand, resolveLsofCommandSync } from "./ports-lsof.js"; + +const LSOF_CANDIDATES = + process.platform === "darwin" + ? ["/usr/sbin/lsof", "/usr/bin/lsof"] + : ["/usr/bin/lsof", "/usr/sbin/lsof"]; + +describe("lsof command resolution", () => { + afterEach(() => { + vi.restoreAllMocks(); + }); + + it("prefers the first executable async candidate", async () => { + const accessSpy = vi.spyOn(fsPromises, "access").mockImplementation(async (target) => { + if (target === LSOF_CANDIDATES[0]) { + return; + } + throw new Error("unexpected"); + }); + + await expect(resolveLsofCommand()).resolves.toBe(LSOF_CANDIDATES[0]); + expect(accessSpy).toHaveBeenCalledTimes(1); + }); + + it("falls through async candidates before using the shell fallback", async () => { + const accessSpy = vi.spyOn(fsPromises, "access").mockImplementation(async (target) => { + if (target === LSOF_CANDIDATES[0]) { + throw new Error("missing"); + } + if (target === LSOF_CANDIDATES[1]) { + return; + } + throw new Error("unexpected"); + }); + + await expect(resolveLsofCommand()).resolves.toBe(LSOF_CANDIDATES[1]); + expect(accessSpy).toHaveBeenCalledTimes(2); + + accessSpy.mockImplementation(async () => { + throw new Error("missing"); + }); + await expect(resolveLsofCommand()).resolves.toBe("lsof"); + }); + + it("mirrors candidate resolution for the sync helper", () => { + const accessSpy = vi.spyOn(fs, "accessSync").mockImplementation((target) => { + 
if (target === LSOF_CANDIDATES[0]) { + throw new Error("missing"); + } + if (target === LSOF_CANDIDATES[1]) { + return undefined; + } + throw new Error("unexpected"); + }); + + expect(resolveLsofCommandSync()).toBe(LSOF_CANDIDATES[1]); + expect(accessSpy).toHaveBeenCalledTimes(2); + + accessSpy.mockImplementation(() => { + throw new Error("missing"); + }); + expect(resolveLsofCommandSync()).toBe("lsof"); + }); +}); diff --git a/src/infra/ports-probe.test.ts b/src/infra/ports-probe.test.ts new file mode 100644 index 00000000000..ce127970cce --- /dev/null +++ b/src/infra/ports-probe.test.ts @@ -0,0 +1,30 @@ +import net from "node:net"; +import { describe, expect, it } from "vitest"; +import { tryListenOnPort } from "./ports-probe.js"; + +describe("tryListenOnPort", () => { + it("can bind and release an ephemeral loopback port", async () => { + await expect(tryListenOnPort({ port: 0, host: "127.0.0.1", exclusive: true })).resolves.toBe( + undefined, + ); + }); + + it("rejects when the port is already in use", async () => { + const server = net.createServer(); + await new Promise((resolve) => server.listen(0, "127.0.0.1", () => resolve())); + const address = server.address(); + if (!address || typeof address === "string") { + throw new Error("expected tcp address"); + } + + try { + await expect( + tryListenOnPort({ port: address.port, host: "127.0.0.1" }), + ).rejects.toMatchObject({ + code: "EADDRINUSE", + }); + } finally { + await new Promise((resolve) => server.close(() => resolve())); + } + }); +}); diff --git a/src/infra/ports.test.ts b/src/infra/ports.test.ts index f809662f1ac..090ccb128b9 100644 --- a/src/infra/ports.test.ts +++ b/src/infra/ports.test.ts @@ -8,14 +8,7 @@ vi.mock("../process/exec.js", () => ({ runCommandWithTimeout: (...args: unknown[]) => runCommandWithTimeoutMock(...args), })); import { inspectPortUsage } from "./ports-inspect.js"; -import { - buildPortHints, - classifyPortListener, - ensurePortAvailable, - formatPortDiagnostics, - 
handlePortError, - PortInUseError, -} from "./ports.js"; +import { ensurePortAvailable, handlePortError, PortInUseError } from "./ports.js"; const describeUnix = process.platform === "win32" ? describe.skip : describe; @@ -61,32 +54,6 @@ describe("ports helpers", () => { const messages = runtime.error.mock.calls.map((call) => stripAnsi(String(call[0] ?? ""))); expect(messages.join("\n")).toContain("another OpenClaw instance is already running"); }); - - it("classifies ssh and gateway listeners", () => { - expect( - classifyPortListener({ commandLine: "ssh -N -L 18789:127.0.0.1:18789 user@host" }, 18789), - ).toBe("ssh"); - expect( - classifyPortListener( - { - commandLine: "node /Users/me/Projects/openclaw/dist/entry.js gateway", - }, - 18789, - ), - ).toBe("gateway"); - }); - - it("formats port diagnostics with hints", () => { - const diagnostics = { - port: 18789, - status: "busy" as const, - listeners: [{ pid: 123, commandLine: "ssh -N -L 18789:127.0.0.1:18789" }], - hints: buildPortHints([{ pid: 123, commandLine: "ssh -N -L 18789:127.0.0.1:18789" }], 18789), - }; - const lines = formatPortDiagnostics(diagnostics); - expect(lines[0]).toContain("Port 18789 is already in use"); - expect(lines.some((line) => line.includes("SSH tunnel"))).toBe(true); - }); }); describeUnix("inspectPortUsage", () => { diff --git a/src/infra/prototype-keys.test.ts b/src/infra/prototype-keys.test.ts new file mode 100644 index 00000000000..f2bd8287226 --- /dev/null +++ b/src/infra/prototype-keys.test.ts @@ -0,0 +1,14 @@ +import { describe, expect, it } from "vitest"; +import { isBlockedObjectKey } from "./prototype-keys.js"; + +describe("isBlockedObjectKey", () => { + it("blocks prototype-pollution keys and allows ordinary keys", () => { + for (const key of ["__proto__", "prototype", "constructor"]) { + expect(isBlockedObjectKey(key)).toBe(true); + } + + for (const key of ["toString", "value", "constructorName"]) { + expect(isBlockedObjectKey(key)).toBe(false); + } + }); +}); diff --git 
a/src/infra/provider-usage.auth.normalizes-keys.test.ts b/src/infra/provider-usage.auth.normalizes-keys.test.ts
index 87d3f1ffbed..baf96781c27 100644
--- a/src/infra/provider-usage.auth.normalizes-keys.test.ts
+++ b/src/infra/provider-usage.auth.normalizes-keys.test.ts
@@ -3,11 +3,18 @@ import os from "node:os";
 import path from "node:path";
 import { afterAll, beforeAll, describe, expect, it } from "vitest";
 import { NON_ENV_SECRETREF_MARKER } from "../agents/model-auth-markers.js";
-import { resolveProviderAuths } from "./provider-usage.auth.js";
+import { resolveProviderAuths, type ProviderAuth } from "./provider-usage.auth.js";
 
 describe("resolveProviderAuths key normalization", () => {
   let suiteRoot = "";
   let suiteCase = 0;
+  const EMPTY_PROVIDER_ENV = {
+    ZAI_API_KEY: undefined,
+    Z_AI_API_KEY: undefined,
+    MINIMAX_API_KEY: undefined,
+    MINIMAX_CODE_PLAN_KEY: undefined,
+    XIAOMI_API_KEY: undefined,
+  } satisfies Record<string, string | undefined>;
 
   beforeAll(async () => {
     suiteRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-provider-auth-suite-"));
@@ -140,54 +147,96 @@ describe("resolveProviderAuths key normalization", () => {
       });
     },
     {
-      MINIMAX_API_KEY: undefined,
-      MINIMAX_CODE_PLAN_KEY: undefined,
+      ...EMPTY_PROVIDER_ENV,
     },
   );
 }
 
-  it("strips embedded CR/LF from env keys", async () => {
+  async function expectResolvedAuthsFromSuiteHome(params: {
+    providers: Parameters<typeof resolveProviderAuths>[0]["providers"];
+    expected: Awaited<ReturnType<typeof resolveProviderAuths>>;
+    env?: Record<string, string | undefined>;
+    setup?: (home: string) => Promise<void>;
+  }) {
     await withSuiteHome(
-      async () => {
+      async (home) => {
+        await params.setup?.(home);
         const auths = await resolveProviderAuths({
-          providers: ["zai", "minimax", "xiaomi"],
+          providers: params.providers,
         });
-        expect(auths).toEqual([
-          { provider: "zai", token: "zai-key" },
-          { provider: "minimax", token: "minimax-key" },
-          { provider: "xiaomi", token: "xiaomi-key" },
-        ]);
+        expect(auths).toEqual(params.expected);
       },
       {
+        ...EMPTY_PROVIDER_ENV,
+        ...params.env,
+      },
+    );
+  }
+
+  it.each([
+    {
+      name: "strips
embedded CR/LF from env keys", + providers: ["zai", "minimax", "xiaomi"] as const, + env: { ZAI_API_KEY: "zai-\r\nkey", MINIMAX_API_KEY: "minimax-\r\nkey", XIAOMI_API_KEY: "xiaomi-\r\nkey", }, - ); + expected: [ + { provider: "zai", token: "zai-key" }, + { provider: "minimax", token: "minimax-key" }, + { provider: "xiaomi", token: "xiaomi-key" }, + ], + }, + { + name: "accepts z-ai env alias and normalizes embedded CR/LF", + providers: ["zai"] as const, + env: { + Z_AI_API_KEY: "zai-\r\nkey", + }, + expected: [{ provider: "zai", token: "zai-key" }], + }, + { + name: "prefers ZAI_API_KEY over the z-ai alias when both are set", + providers: ["zai"] as const, + env: { + ZAI_API_KEY: "direct-zai-key", + Z_AI_API_KEY: "alias-zai-key", + }, + expected: [{ provider: "zai", token: "direct-zai-key" }], + }, + { + name: "prefers MINIMAX_CODE_PLAN_KEY over MINIMAX_API_KEY", + providers: ["minimax"] as const, + env: { + MINIMAX_CODE_PLAN_KEY: "code-plan-key", + MINIMAX_API_KEY: "api-key", + }, + expected: [{ provider: "minimax", token: "code-plan-key" }], + }, + ] satisfies Array<{ + name: string; + providers: readonly Parameters[0]["providers"][number][]; + env: Record; + expected: ProviderAuth[]; + }>)("$name", async ({ providers, env, expected }) => { + await expectResolvedAuthsFromSuiteHome({ providers: [...providers], env, expected }); }); it("strips embedded CR/LF from stored auth profiles (token + api_key)", async () => { - await withSuiteHome( - async (home) => { + await expectResolvedAuthsFromSuiteHome({ + providers: ["minimax", "xiaomi"], + setup: async (home) => { await writeAuthProfiles(home, { "minimax:default": { type: "token", provider: "minimax", token: "mini-\r\nmax" }, "xiaomi:default": { type: "api_key", provider: "xiaomi", key: "xiao-\r\nmi" }, }); - - const auths = await resolveProviderAuths({ - providers: ["minimax", "xiaomi"], - }); - expect(auths).toEqual([ - { provider: "minimax", token: "mini-max" }, - { provider: "xiaomi", token: "xiao-mi" }, - ]); 
}, - { - MINIMAX_API_KEY: undefined, - MINIMAX_CODE_PLAN_KEY: undefined, - XIAOMI_API_KEY: undefined, - }, - ); + expected: [ + { provider: "minimax", token: "mini-max" }, + { provider: "xiaomi", token: "xiao-mi" }, + ], + }); }); it("returns injected auth values unchanged", async () => { @@ -198,80 +247,52 @@ describe("resolveProviderAuths key normalization", () => { expect(auths).toEqual([{ provider: "anthropic", token: "token-1", accountId: "acc-1" }]); }); - it("accepts z-ai env alias and normalizes embedded CR/LF", async () => { - await withSuiteHome( - async () => { - const auths = await resolveProviderAuths({ - providers: ["zai"], - }); - expect(auths).toEqual([{ provider: "zai", token: "zai-key" }]); - }, - { - ZAI_API_KEY: undefined, - Z_AI_API_KEY: "zai-\r\nkey", - }, - ); - }); - it("falls back to legacy .pi auth file for zai keys even after os.homedir() is primed", async () => { // Prime os.homedir() to simulate long-lived workers that may have touched it before HOME changes. 
os.homedir(); - await withSuiteHome( - async (home) => { + await expectResolvedAuthsFromSuiteHome({ + providers: ["zai"], + setup: async (home) => { await writeLegacyPiAuth( home, `${JSON.stringify({ "z-ai": { access: "legacy-zai-key" } }, null, 2)}\n`, ); + }, + expected: [{ provider: "zai", token: "legacy-zai-key" }], + }); + }); - const auths = await resolveProviderAuths({ - providers: ["zai"], + it.each([ + { + name: "extracts google oauth token from JSON payload in token profiles", + token: '{"token":"google-oauth-token"}', + expectedToken: "google-oauth-token", + }, + { + name: "keeps raw google token when token payload is not JSON", + token: "plain-google-token", + expectedToken: "plain-google-token", + }, + ])("$name", async ({ token, expectedToken }) => { + await expectResolvedAuthsFromSuiteHome({ + providers: ["google-gemini-cli"], + setup: async (home) => { + await writeAuthProfiles(home, { + "google-gemini-cli:default": { + type: "token", + provider: "google-gemini-cli", + token, + }, }); - expect(auths).toEqual([{ provider: "zai", token: "legacy-zai-key" }]); }, - { - ZAI_API_KEY: undefined, - Z_AI_API_KEY: undefined, - }, - ); - }); - - it("extracts google oauth token from JSON payload in token profiles", async () => { - await withSuiteHome(async (home) => { - await writeAuthProfiles(home, { - "google-gemini-cli:default": { - type: "token", - provider: "google-gemini-cli", - token: '{"token":"google-oauth-token"}', - }, - }); - - const auths = await resolveProviderAuths({ - providers: ["google-gemini-cli"], - }); - expect(auths).toEqual([{ provider: "google-gemini-cli", token: "google-oauth-token" }]); - }, {}); - }); - - it("keeps raw google token when token payload is not JSON", async () => { - await withSuiteHome(async (home) => { - await writeAuthProfiles(home, { - "google-gemini-cli:default": { - type: "token", - provider: "google-gemini-cli", - token: "plain-google-token", - }, - }); - - const auths = await resolveProviderAuths({ - providers: 
["google-gemini-cli"], - }); - expect(auths).toEqual([{ provider: "google-gemini-cli", token: "plain-google-token" }]); - }, {}); + expected: [{ provider: "google-gemini-cli", token: expectedToken }], + }); }); it("uses config api keys when env and profiles are missing", async () => { - await withSuiteHome( - async (home) => { + await expectResolvedAuthsFromSuiteHome({ + providers: ["zai", "minimax", "xiaomi"], + setup: async (home) => { const modelDef = { id: "test-model", name: "Test Model", @@ -302,77 +323,42 @@ describe("resolveProviderAuths key normalization", () => { }, }, }); - - const auths = await resolveProviderAuths({ - providers: ["zai", "minimax", "xiaomi"], - }); - expect(auths).toEqual([ - { provider: "zai", token: "cfg-zai-key" }, - { provider: "minimax", token: "cfg-minimax-key" }, - { provider: "xiaomi", token: "cfg-xiaomi-key" }, - ]); }, - { - ZAI_API_KEY: undefined, - Z_AI_API_KEY: undefined, - MINIMAX_API_KEY: undefined, - MINIMAX_CODE_PLAN_KEY: undefined, - XIAOMI_API_KEY: undefined, - }, - ); + expected: [ + { provider: "zai", token: "cfg-zai-key" }, + { provider: "minimax", token: "cfg-minimax-key" }, + { provider: "xiaomi", token: "cfg-xiaomi-key" }, + ], + }); }); it("returns no auth when providers have no configured credentials", async () => { - await withSuiteHome( - async () => { - const auths = await resolveProviderAuths({ - providers: ["zai", "minimax", "xiaomi"], - }); - expect(auths).toEqual([]); - }, - { - ZAI_API_KEY: undefined, - Z_AI_API_KEY: undefined, - MINIMAX_API_KEY: undefined, - MINIMAX_CODE_PLAN_KEY: undefined, - XIAOMI_API_KEY: undefined, - }, - ); + await expectResolvedAuthsFromSuiteHome({ + providers: ["zai", "minimax", "xiaomi"], + expected: [], + }); }); it("uses zai api_key auth profiles when env and config are missing", async () => { - await withSuiteHome( - async (home) => { + await expectResolvedAuthsFromSuiteHome({ + providers: ["zai"], + setup: async (home) => { await writeAuthProfiles(home, { "zai:default": { 
type: "api_key", provider: "zai", key: "profile-zai-key" }, }); - - const auths = await resolveProviderAuths({ - providers: ["zai"], - }); - expect(auths).toEqual([{ provider: "zai", token: "profile-zai-key" }]); }, - { - ZAI_API_KEY: undefined, - Z_AI_API_KEY: undefined, - }, - ); + expected: [{ provider: "zai", token: "profile-zai-key" }], + }); }); it("ignores invalid legacy z-ai auth files", async () => { - await withSuiteHome( - async (home) => { + await expectResolvedAuthsFromSuiteHome({ + providers: ["zai"], + setup: async (home) => { await writeLegacyPiAuth(home, "{not-json"); - const auths = await resolveProviderAuths({ - providers: ["zai"], - }); - expect(auths).toEqual([]); }, - { - ZAI_API_KEY: undefined, - Z_AI_API_KEY: undefined, - }, - ); + expected: [], + }); }); it("discovers oauth provider from config but skips mismatched profile providers", async () => { diff --git a/src/infra/provider-usage.fetch.claude.test.ts b/src/infra/provider-usage.fetch.claude.test.ts index 59b8542558a..e9b82c9ad4f 100644 --- a/src/infra/provider-usage.fetch.claude.test.ts +++ b/src/infra/provider-usage.fetch.claude.test.ts @@ -77,6 +77,25 @@ describe("fetchClaudeUsage", () => { ]); }); + it("clamps oauth usage windows and prefers sonnet over opus when both exist", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + five_hour: { utilization: -5 }, + seven_day: { utilization: 140 }, + seven_day_sonnet: { utilization: 40 }, + seven_day_opus: { utilization: 90 }, + }), + ); + + const result = await fetchClaudeUsage("token", 5000, mockFetch); + + expect(result.windows).toEqual([ + { label: "5h", usedPercent: 0, resetAt: undefined }, + { label: "Week", usedPercent: 100, resetAt: undefined }, + { label: "Sonnet", usedPercent: 40 }, + ]); + }); + it("returns HTTP errors with provider message suffix", async () => { const mockFetch = createProviderUsageFetch(async () => makeResponse(403, { @@ -89,6 +108,26 @@ describe("fetchClaudeUsage", 
() => { expect(result.windows).toHaveLength(0); }); + it("omits blank error message suffixes on oauth failures", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(403, { + error: { message: " " }, + }), + ); + + const result = await fetchClaudeUsage("token", 5000, mockFetch); + expect(result.error).toBe("HTTP 403"); + expect(result.windows).toHaveLength(0); + }); + + it("keeps HTTP status errors when oauth error bodies are not JSON", async () => { + const mockFetch = createProviderUsageFetch(async () => makeResponse(502, "bad gateway")); + + const result = await fetchClaudeUsage("token", 5000, mockFetch); + expect(result.error).toBe("HTTP 502"); + expect(result.windows).toHaveLength(0); + }); + it("falls back to claude web usage when oauth scope is missing", async () => { vi.stubEnv("CLAUDE_AI_SESSION_KEY", "sk-ant-session-key"); @@ -119,6 +158,25 @@ describe("fetchClaudeUsage", () => { expect(result.windows).toEqual([{ label: "5h", usedPercent: 12, resetAt: undefined }]); }); + it("parses sessionKey from Cookie-prefixed CLAUDE_WEB_COOKIE headers", async () => { + vi.stubEnv("CLAUDE_WEB_COOKIE", "Cookie: foo=bar; sessionKey=sk-ant-cookie-header"); + + const mockFetch = createScopeFallbackFetch(async (url) => { + if (url.endsWith("/api/organizations")) { + return makeResponse(200, [{ uuid: "org-header" }]); + } + if (url.endsWith("/api/organizations/org-header/usage")) { + return makeResponse(200, { five_hour: { utilization: 9 } }); + } + return makeResponse(404, "not found"); + }); + + const result = await fetchClaudeUsage("token", 5000, mockFetch); + expect(result.error).toBeUndefined(); + expect(result.windows).toEqual([{ label: "5h", usedPercent: 9, resetAt: undefined }]); + expect(mockFetch).toHaveBeenCalledTimes(3); + }); + it("parses sessionKey from CLAUDE_WEB_COOKIE for web fallback", async () => { vi.stubEnv("CLAUDE_WEB_COOKIE", "sessionKey=sk-ant-cookie-session"); diff --git a/src/infra/provider-usage.fetch.codex.test.ts 
b/src/infra/provider-usage.fetch.codex.test.ts index e74d0f25f65..428199c40fe 100644 --- a/src/infra/provider-usage.fetch.codex.test.ts +++ b/src/infra/provider-usage.fetch.codex.test.ts @@ -107,4 +107,44 @@ describe("fetchCodexUsage", () => { { label: "Week", usedPercent: 20, resetAt: weeklyLikeSecondaryReset * 1000 }, ]); }); + + it("labels short secondary windows in hours", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + rate_limit: { + secondary_window: { + limit_window_seconds: 21_600, + used_percent: 11, + }, + }, + }), + ); + + const result = await fetchCodexUsage("token", undefined, 5000, mockFetch); + expect(result.windows).toEqual([{ label: "6h", usedPercent: 11, resetAt: undefined }]); + }); + + it("builds a balance-only plan when credits exist without a plan type", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + credits: { balance: "7.5" }, + }), + ); + + const result = await fetchCodexUsage("token", undefined, 5000, mockFetch); + expect(result.plan).toBe("$7.50"); + expect(result.windows).toEqual([]); + }); + + it("falls back invalid credit strings to a zero balance", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + plan_type: "Plus", + credits: { balance: "not-a-number" }, + }), + ); + + const result = await fetchCodexUsage("token", undefined, 5000, mockFetch); + expect(result.plan).toBe("Plus ($0.00)"); + }); }); diff --git a/src/infra/provider-usage.fetch.copilot.test.ts b/src/infra/provider-usage.fetch.copilot.test.ts index 7df17118159..0abfd5f782f 100644 --- a/src/infra/provider-usage.fetch.copilot.test.ts +++ b/src/infra/provider-usage.fetch.copilot.test.ts @@ -34,4 +34,40 @@ describe("fetchCopilotUsage", () => { { label: "Chat", usedPercent: 25 }, ]); }); + + it("defaults missing snapshot values and clamps invalid remaining percentages", async () => { + const mockFetch = createProviderUsageFetch(async () => 
+ makeResponse(200, { + quota_snapshots: { + premium_interactions: { percent_remaining: null }, + chat: { percent_remaining: 140 }, + }, + }), + ); + + const result = await fetchCopilotUsage("token", 5000, mockFetch); + + expect(result.windows).toEqual([ + { label: "Premium", usedPercent: 100 }, + { label: "Chat", usedPercent: 0 }, + ]); + expect(result.plan).toBeUndefined(); + }); + + it("returns an empty window list when quota snapshots are missing", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + copilot_plan: "free", + }), + ); + + const result = await fetchCopilotUsage("token", 5000, mockFetch); + + expect(result).toEqual({ + provider: "github-copilot", + displayName: "Copilot", + windows: [], + plan: "free", + }); + }); }); diff --git a/src/infra/provider-usage.fetch.gemini.test.ts b/src/infra/provider-usage.fetch.gemini.test.ts index ea713478011..c21292ebf97 100644 --- a/src/infra/provider-usage.fetch.gemini.test.ts +++ b/src/infra/provider-usage.fetch.gemini.test.ts @@ -36,4 +36,39 @@ describe("fetchGeminiUsage", () => { expect(result.windows[1]?.label).toBe("Flash"); expect(result.windows[1]?.usedPercent).toBeCloseTo(30, 6); }); + + it("returns no windows when the response has no recognized model families", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + buckets: [{ modelId: "gemini-unknown", remainingFraction: 0.5 }], + }), + ); + + const result = await fetchGeminiUsage("token", 5000, mockFetch, "google-gemini-cli"); + + expect(result).toEqual({ + provider: "google-gemini-cli", + displayName: "Gemini", + windows: [], + }); + }); + + it("defaults missing fractions to fully available and clamps invalid fractions", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + buckets: [ + { modelId: "gemini-pro" }, + { modelId: "gemini-pro-latest", remainingFraction: -0.5 }, + { modelId: "gemini-flash", remainingFraction: 1.2 }, + ], 
+ }), + ); + + const result = await fetchGeminiUsage("token", 5000, mockFetch, "google-gemini-cli"); + + expect(result.windows).toEqual([ + { label: "Pro", usedPercent: 100 }, + { label: "Flash", usedPercent: 0 }, + ]); + }); }); diff --git a/src/infra/provider-usage.fetch.minimax.test.ts b/src/infra/provider-usage.fetch.minimax.test.ts index 1c13619b8db..ceb1c5439b0 100644 --- a/src/infra/provider-usage.fetch.minimax.test.ts +++ b/src/infra/provider-usage.fetch.minimax.test.ts @@ -2,45 +2,70 @@ import { describe, expect, it } from "vitest"; import { createProviderUsageFetch, makeResponse } from "../test-utils/provider-usage-fetch.js"; import { fetchMinimaxUsage } from "./provider-usage.fetch.minimax.js"; +async function expectMinimaxUsageResult(params: { + payload: unknown; + expected: { + plan?: string; + windows: Array<{ label: string; usedPercent: number; resetAt?: number }>; + }; +}) { + const mockFetch = createProviderUsageFetch(async (_url, init) => { + const headers = (init?.headers as Record | undefined) ?? 
{}; + expect(headers.Authorization).toBe("Bearer key"); + expect(headers["MM-API-Source"]).toBe("OpenClaw"); + return makeResponse(200, params.payload); + }); + + const result = await fetchMinimaxUsage("key", 5000, mockFetch); + expect(result.plan).toBe(params.expected.plan); + expect(result.windows).toEqual(params.expected.windows); +} + describe("fetchMinimaxUsage", () => { - it("returns HTTP errors for failed requests", async () => { - const mockFetch = createProviderUsageFetch(async () => makeResponse(502, "bad gateway")); + it.each([ + { + name: "returns HTTP errors for failed requests", + response: () => makeResponse(502, "bad gateway"), + expectedError: "HTTP 502", + }, + { + name: "returns invalid JSON when payload cannot be parsed", + response: () => makeResponse(200, "{not-json"), + expectedError: "Invalid JSON", + }, + { + name: "returns trimmed API errors from base_resp", + response: () => + makeResponse(200, { + base_resp: { + status_code: 1007, + status_msg: " auth denied ", + }, + }), + expectedError: "auth denied", + }, + { + name: "falls back to a generic API error when base_resp message is blank", + response: () => + makeResponse(200, { + base_resp: { + status_code: 1007, + status_msg: " ", + }, + }), + expectedError: "API error", + }, + ])("$name", async ({ response, expectedError }) => { + const mockFetch = createProviderUsageFetch(async () => response()); const result = await fetchMinimaxUsage("key", 5000, mockFetch); - - expect(result.error).toBe("HTTP 502"); + expect(result.error).toBe(expectedError); expect(result.windows).toHaveLength(0); }); - it("returns invalid JSON when payload cannot be parsed", async () => { - const mockFetch = createProviderUsageFetch(async () => makeResponse(200, "{not-json")); - const result = await fetchMinimaxUsage("key", 5000, mockFetch); - - expect(result.error).toBe("Invalid JSON"); - expect(result.windows).toHaveLength(0); - }); - - it("returns API errors from base_resp", async () => { - const mockFetch = 
createProviderUsageFetch(async () => - makeResponse(200, { - base_resp: { - status_code: 1007, - status_msg: " auth denied ", - }, - }), - ); - const result = await fetchMinimaxUsage("key", 5000, mockFetch); - - expect(result.error).toBe("auth denied"); - expect(result.windows).toHaveLength(0); - }); - - it("derives usage from used/total fields and includes reset + plan", async () => { - const mockFetch = createProviderUsageFetch(async (_url, init) => { - const headers = (init?.headers as Record | undefined) ?? {}; - expect(headers.Authorization).toBe("Bearer key"); - expect(headers["MM-API-Source"]).toBe("OpenClaw"); - - return makeResponse(200, { + it.each([ + { + name: "derives usage from used/total fields and includes reset + plan", + payload: { data: { used: 35, total: 100, @@ -48,52 +73,36 @@ describe("fetchMinimaxUsage", () => { reset_at: 1_700_000_000, plan_name: "Pro Max", }, - }); - }); - - const result = await fetchMinimaxUsage("key", 5000, mockFetch); - - expect(result.plan).toBe("Pro Max"); - expect(result.windows).toEqual([ - { - label: "3h", - usedPercent: 35, - resetAt: 1_700_000_000_000, }, - ]); - }); - - it("supports usage ratio strings with minute windows and ISO reset strings", async () => { - const resetIso = "2026-01-08T00:00:00Z"; - const mockFetch = createProviderUsageFetch(async () => - makeResponse(200, { + expected: { + plan: "Pro Max", + windows: [{ label: "3h", usedPercent: 35, resetAt: 1_700_000_000_000 }], + }, + }, + { + name: "supports usage ratio strings with minute windows and ISO reset strings", + payload: { data: { nested: [ { usage_ratio: "0.25", window_minutes: "30", - reset_time: resetIso, + reset_time: "2026-01-08T00:00:00Z", plan: "Starter", }, ], }, - }), - ); - - const result = await fetchMinimaxUsage("key", 5000, mockFetch); - expect(result.plan).toBe("Starter"); - expect(result.windows).toEqual([ - { - label: "30m", - usedPercent: 25, - resetAt: new Date(resetIso).getTime(), }, - ]); - }); - - it("derives used from 
total and remaining counts", async () => { - const mockFetch = createProviderUsageFetch(async () => - makeResponse(200, { + expected: { + plan: "Starter", + windows: [ + { label: "30m", usedPercent: 25, resetAt: new Date("2026-01-08T00:00:00Z").getTime() }, + ], + }, + }, + { + name: "derives used from total and remaining counts", + payload: { data: { total: "200", remaining: "50", @@ -101,18 +110,28 @@ describe("fetchMinimaxUsage", () => { reset_at: 1_700_000_000_000, plan_name: "Team", }, - }), - ); - - const result = await fetchMinimaxUsage("key", 5000, mockFetch); - expect(result.plan).toBe("Team"); - expect(result.windows).toEqual([ - { - label: "5h", - usedPercent: 75, - resetAt: 1_700_000_000_000, }, - ]); + expected: { + plan: "Team", + windows: [{ label: "5h", usedPercent: 75, resetAt: 1_700_000_000_000 }], + }, + }, + { + name: "falls back to payload-level reset and plan when nested usage records omit them", + payload: { + data: { + plan_name: "Payload Plan", + reset_at: 1_700_000_100, + nested: [{ usage_ratio: 0.4, window_hours: 2 }], + }, + }, + expected: { + plan: "Payload Plan", + windows: [{ label: "2h", usedPercent: 40, resetAt: 1_700_000_100_000 }], + }, + }, + ])("$name", async ({ payload, expected }) => { + await expectMinimaxUsageResult({ payload, expected }); }); it("returns unsupported response shape when no usage fields are present", async () => { diff --git a/src/infra/provider-usage.fetch.shared.test.ts b/src/infra/provider-usage.fetch.shared.test.ts index d41c7e079a1..692a57705db 100644 --- a/src/infra/provider-usage.fetch.shared.test.ts +++ b/src/infra/provider-usage.fetch.shared.test.ts @@ -1,10 +1,17 @@ -import { describe, expect, it } from "vitest"; +import { afterEach, describe, expect, it, vi } from "vitest"; import { buildUsageErrorSnapshot, buildUsageHttpErrorSnapshot, + fetchJson, + parseFiniteNumber, } from "./provider-usage.fetch.shared.js"; describe("provider usage fetch shared helpers", () => { + afterEach(() => { + 
vi.useRealTimers(); + vi.restoreAllMocks(); + }); + it("builds a provider error snapshot", () => { expect(buildUsageErrorSnapshot("zai", "API error")).toEqual({ provider: "zai", @@ -14,6 +21,66 @@ describe("provider usage fetch shared helpers", () => { }); }); + it.each([ + { value: 12, expected: 12 }, + { value: "12.5", expected: 12.5 }, + { value: "not-a-number", expected: undefined }, + ])("parses finite numbers for %j", ({ value, expected }) => { + expect(parseFiniteNumber(value)).toBe(expected); + }); + + it("forwards request init and clears the timeout on success", async () => { + vi.useFakeTimers(); + const clearTimeoutSpy = vi.spyOn(globalThis, "clearTimeout"); + const fetchFnMock = vi.fn( + async (_input: URL | RequestInfo, init?: RequestInit) => + new Response(JSON.stringify({ aborted: init?.signal?.aborted ?? false }), { status: 200 }), + ); + const fetchFn = fetchFnMock as typeof fetch; + + const response = await fetchJson( + "https://example.com/usage", + { + method: "POST", + headers: { authorization: "Bearer test" }, + }, + 1_000, + fetchFn, + ); + + expect(fetchFnMock).toHaveBeenCalledWith( + "https://example.com/usage", + expect.objectContaining({ + method: "POST", + headers: { authorization: "Bearer test" }, + signal: expect.any(AbortSignal), + }), + ); + await expect(response.json()).resolves.toEqual({ aborted: false }); + expect(clearTimeoutSpy).toHaveBeenCalledTimes(1); + }); + + it("aborts timed out requests and clears the timer on rejection", async () => { + vi.useFakeTimers(); + const clearTimeoutSpy = vi.spyOn(globalThis, "clearTimeout"); + const fetchFnMock = vi.fn( + (_input: URL | RequestInfo, init?: RequestInit) => + new Promise((_, reject) => { + init?.signal?.addEventListener("abort", () => reject(new Error("aborted by timeout")), { + once: true, + }); + }), + ); + const fetchFn = fetchFnMock as typeof fetch; + + const request = fetchJson("https://example.com/usage", {}, 50, fetchFn); + const rejection = 
expect(request).rejects.toThrow("aborted by timeout"); + await vi.advanceTimersByTimeAsync(50); + + await rejection; + expect(clearTimeoutSpy).toHaveBeenCalledTimes(1); + }); + it("maps configured status codes to token expired", () => { const snapshot = buildUsageHttpErrorSnapshot({ provider: "openai-codex", @@ -35,4 +102,14 @@ describe("provider usage fetch shared helpers", () => { expect(snapshot.error).toBe("HTTP 403: missing scope"); }); + + it("omits empty HTTP error message suffixes", () => { + const snapshot = buildUsageHttpErrorSnapshot({ + provider: "anthropic", + status: 429, + message: " ", + }); + + expect(snapshot.error).toBe("HTTP 429"); + }); }); diff --git a/src/infra/provider-usage.fetch.zai.test.ts b/src/infra/provider-usage.fetch.zai.test.ts index 2dafaccca9f..d952495e90f 100644 --- a/src/infra/provider-usage.fetch.zai.test.ts +++ b/src/infra/provider-usage.fetch.zai.test.ts @@ -25,6 +25,20 @@ describe("fetchZaiUsage", () => { expect(result.windows).toHaveLength(0); }); + it("falls back to a generic API error for blank unsuccessful messages", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + success: false, + code: 500, + msg: " ", + }), + ); + + const result = await fetchZaiUsage("key", 5000, mockFetch); + expect(result.error).toBe("API error"); + expect(result.windows).toHaveLength(0); + }); + it("parses token and monthly windows with reset times", async () => { const tokenReset = "2026-01-08T00:00:00Z"; const minuteReset = "2026-01-08T00:30:00Z"; @@ -83,4 +97,47 @@ describe("fetchZaiUsage", () => { }, ]); }); + + it("clamps invalid percentages and falls back to alternate plan fields", async () => { + const mockFetch = createProviderUsageFetch(async () => + makeResponse(200, { + success: true, + code: 200, + data: { + plan: "Pro", + limits: [ + { + type: "TOKENS_LIMIT", + percentage: -5, + unit: 99, + }, + { + type: "TIME_LIMIT", + percentage: 140, + }, + { + type: "OTHER_LIMIT", + percentage: 50, + 
}, + ], + }, + }), + ); + + const result = await fetchZaiUsage("key", 5000, mockFetch); + + expect(result.plan).toBe("Pro"); + expect(result.windows).toEqual([ + { + label: "Tokens (Limit)", + usedPercent: 0, + resetAt: undefined, + }, + { + label: "Monthly", + usedPercent: 100, + resetAt: undefined, + }, + ]); + }); }); diff --git a/src/infra/provider-usage.fetch.zai.ts b/src/infra/provider-usage.fetch.zai.ts index 1ab1fd14764..d6f4970f0b7 100644 --- a/src/infra/provider-usage.fetch.zai.ts +++ b/src/infra/provider-usage.fetch.zai.ts @@ -46,11 +46,12 @@ export async function fetchZaiUsage( const data = (await res.json()) as ZaiUsageResponse; if (!data.success || data.code !== 200) { + const errorMessage = typeof data.msg === "string" ? data.msg.trim() : ""; return { provider: "zai", displayName: PROVIDER_LABELS.zai, windows: [], - error: data.msg || "API error", + error: errorMessage || "API error", }; } diff --git a/src/infra/provider-usage.format.test.ts b/src/infra/provider-usage.format.test.ts index 3063a571a24..d87d6a73c17 100644 --- a/src/infra/provider-usage.format.test.ts +++ b/src/infra/provider-usage.format.test.ts @@ -54,6 +54,18 @@ describe("provider-usage.format", () => { expect(summary).toBe("A 90% left · B 80% left"); }); + it("treats non-positive max windows as all windows and clamps overused percentages", () => { + const summary = formatUsageWindowSummary( + makeSnapshot([ + { label: "Over", usedPercent: 120, resetAt: now + 60_000 }, + { label: "Under", usedPercent: -10 }, + ]), + { now, maxWindows: 0, includeResets: true }, + ); + + expect(summary).toBe("Over 0% left ⏱1m · Under 100% left"); + }); + it("formats summary line from highest-usage window and provider cap", () => { const summary: UsageSummary = { updatedAt: now, @@ -79,6 +91,27 @@ describe("provider-usage.format", () => { ); }); + it("returns null summary line when providers are errored or have no windows", () => { + expect( + formatUsageSummaryLine({ + updatedAt: now, + providers: [ + 
{ + provider: "anthropic", + displayName: "Claude", + windows: [], + error: "HTTP 401", + }, + { + provider: "zai", + displayName: "z.ai", + windows: [], + }, + ], + }), + ).toBeNull(); + }); + it("formats report output for empty, error, no-data, and plan entries", () => { expect(formatUsageReportLines({ updatedAt: now, providers: [] })).toEqual([ "Usage: no provider usage available.", @@ -107,4 +140,24 @@ describe("provider-usage.format", () => { " Xiaomi: no data", ]); }); + + it("formats detailed report lines with reset windows", () => { + const summary: UsageSummary = { + updatedAt: now, + providers: [ + { + provider: "anthropic", + displayName: "Claude", + plan: "Pro", + windows: [{ label: "Daily", usedPercent: 25, resetAt: now + 2 * 60 * 60_000 }], + }, + ], + }; + + expect(formatUsageReportLines(summary, { now })).toEqual([ + "Usage:", + " Claude (Pro)", + " Daily: 75% left · resets 2h", + ]); + }); }); diff --git a/src/infra/provider-usage.shared.test.ts b/src/infra/provider-usage.shared.test.ts index 3de021235be..048352a183d 100644 --- a/src/infra/provider-usage.shared.test.ts +++ b/src/infra/provider-usage.shared.test.ts @@ -1,27 +1,55 @@ -import { describe, expect, it } from "vitest"; +import { afterEach, describe, expect, it, vi } from "vitest"; import { clampPercent, resolveUsageProviderId, withTimeout } from "./provider-usage.shared.js"; describe("provider-usage.shared", () => { - it("normalizes supported usage provider ids", () => { - expect(resolveUsageProviderId("z-ai")).toBe("zai"); - expect(resolveUsageProviderId(" GOOGLE-GEMINI-CLI ")).toBe("google-gemini-cli"); - expect(resolveUsageProviderId("unknown-provider")).toBeUndefined(); - expect(resolveUsageProviderId()).toBeUndefined(); + afterEach(() => { + vi.useRealTimers(); + vi.restoreAllMocks(); }); - it("clamps usage percents and handles non-finite values", () => { - expect(clampPercent(-5)).toBe(0); - expect(clampPercent(120)).toBe(100); - expect(clampPercent(Number.NaN)).toBe(0); - 
expect(clampPercent(Number.POSITIVE_INFINITY)).toBe(0); + it.each([ + { value: "z-ai", expected: "zai" }, + { value: " GOOGLE-GEMINI-CLI ", expected: "google-gemini-cli" }, + { value: "unknown-provider", expected: undefined }, + { value: undefined, expected: undefined }, + { value: null, expected: undefined }, + ])("normalizes provider ids for %j", ({ value, expected }) => { + expect(resolveUsageProviderId(value)).toBe(expected); + }); + + it.each([ + { value: -5, expected: 0 }, + { value: 42, expected: 42 }, + { value: 120, expected: 100 }, + { value: Number.NaN, expected: 0 }, + { value: Number.POSITIVE_INFINITY, expected: 0 }, + ])("clamps usage percents for %j", ({ value, expected }) => { + expect(clampPercent(value)).toBe(expected); }); it("returns work result when it resolves before timeout", async () => { await expect(withTimeout(Promise.resolve("ok"), 100, "fallback")).resolves.toBe("ok"); }); + it("propagates work errors before timeout", async () => { + await expect(withTimeout(Promise.reject(new Error("boom")), 100, "fallback")).rejects.toThrow( + "boom", + ); + }); + it("returns fallback when timeout wins", async () => { + vi.useFakeTimers(); const late = new Promise((resolve) => setTimeout(() => resolve("late"), 50)); - await expect(withTimeout(late, 1, "fallback")).resolves.toBe("fallback"); + const result = withTimeout(late, 1, "fallback"); + await vi.advanceTimersByTimeAsync(1); + await expect(result).resolves.toBe("fallback"); + }); + + it("clears the timeout after successful work", async () => { + const clearTimeoutSpy = vi.spyOn(globalThis, "clearTimeout"); + + await expect(withTimeout(Promise.resolve("ok"), 100, "fallback")).resolves.toBe("ok"); + + expect(clearTimeoutSpy).toHaveBeenCalledTimes(1); }); }); diff --git a/src/infra/provider-usage.test.ts b/src/infra/provider-usage.test.ts index f84a4bb25d0..d8f94d04646 100644 --- a/src/infra/provider-usage.test.ts +++ b/src/infra/provider-usage.test.ts @@ -48,17 +48,21 @@ function 
createMinimaxOnlyFetch(payload: unknown) { async function expectMinimaxUsage( payload: unknown, - expectedUsedPercent: number, - expectedPlan?: string, + expected: { + usedPercent: number; + plan?: string; + label?: string; + }, ) { const mockFetch = createMinimaxOnlyFetch(payload); const summary = await loadUsageWithAuth([{ provider: "minimax", token: "token-1b" }], mockFetch); const minimax = summary.providers.find((p) => p.provider === "minimax"); - expect(minimax?.windows[0]?.usedPercent).toBe(expectedUsedPercent); - if (expectedPlan !== undefined) { - expect(minimax?.plan).toBe(expectedPlan); + expect(minimax?.windows[0]?.usedPercent).toBe(expected.usedPercent); + expect(minimax?.windows[0]?.label).toBe(expected.label ?? "5h"); + if (expected.plan !== undefined) { + expect(minimax?.plan).toBe(expected.plan); } expect(mockFetch).toHaveBeenCalled(); } @@ -181,9 +185,10 @@ describe("provider usage loading", () => { expect(mockFetch).toHaveBeenCalled(); }); - it("handles nested MiniMax usage payloads", async () => { - await expectMinimaxUsage( - { + it.each([ + { + name: "handles nested MiniMax usage payloads", + payload: { base_resp: { status_code: 0, status_msg: "ok" }, data: { plan_name: "Coding Plan", @@ -194,14 +199,11 @@ describe("provider usage loading", () => { }, }, }, - 75, - "Coding Plan", - ); - }); - - it("prefers MiniMax count-based usage when percent looks inverted", async () => { - await expectMinimaxUsage( - { + expected: { usedPercent: 75, plan: "Coding Plan" }, + }, + { + name: "prefers MiniMax count-based usage when percent looks inverted", + payload: { base_resp: { status_code: 0, status_msg: "ok" }, data: { prompt_limit: 200, @@ -210,13 +212,11 @@ describe("provider usage loading", () => { next_reset_time: "2026-01-07T05:00:00Z", }, }, - 25, - ); - }); - - it("handles MiniMax model_remains usage payloads", async () => { - await expectMinimaxUsage( - { + expected: { usedPercent: 25 }, + }, + { + name: "handles MiniMax model_remains usage 
payloads", + payload: { base_resp: { status_code: 0, status_msg: "ok" }, model_remains: [ { @@ -229,8 +229,25 @@ describe("provider usage loading", () => { }, ], }, - 25, - ); + expected: { usedPercent: 25 }, + }, + { + name: "keeps payload-level MiniMax plan metadata when the usage candidate is nested", + payload: { + base_resp: { status_code: 0, status_msg: "ok" }, + data: { + plan_name: "Payload Plan", + nested: { + usage_ratio: "0.4", + window_hours: 2, + next_reset_time: "2026-01-07T05:00:00Z", + }, + }, + }, + expected: { usedPercent: 40, plan: "Payload Plan", label: "2h" }, + }, + ])("$name", async ({ payload, expected }) => { + await expectMinimaxUsage(payload, expected); }); it("discovers Claude usage from token auth profiles", async () => { diff --git a/src/infra/restart-sentinel.test.ts b/src/infra/restart-sentinel.test.ts index 76b9e53b59e..c28504685bb 100644 --- a/src/infra/restart-sentinel.test.ts +++ b/src/infra/restart-sentinel.test.ts @@ -5,9 +5,11 @@ import { afterEach, beforeEach, describe, expect, it } from "vitest"; import { captureEnv } from "../test-utils/env.js"; import { consumeRestartSentinel, + formatDoctorNonInteractiveHint, formatRestartSentinelMessage, readRestartSentinel, resolveRestartSentinelPath, + summarizeRestartSentinel, trimLogTail, writeRestartSentinel, } from "./restart-sentinel.js"; @@ -59,6 +61,15 @@ describe("restart sentinel", () => { await expect(fs.stat(filePath)).rejects.toThrow(); }); + it("drops structurally invalid sentinel payloads", async () => { + const filePath = resolveRestartSentinelPath(); + await fs.mkdir(path.dirname(filePath), { recursive: true }); + await fs.writeFile(filePath, JSON.stringify({ version: 2, payload: null }), "utf-8"); + + await expect(readRestartSentinel()).resolves.toBeNull(); + await expect(fs.stat(filePath)).rejects.toThrow(); + }); + it("formatRestartSentinelMessage uses custom message when present", () => { const payload = { kind: "config-apply" as const, @@ -93,6 +104,26 @@ 
describe("restart sentinel", () => { expect(result).toContain("Gateway restart"); }); + it("formats summary, distinct reason, and doctor hint together", () => { + const payload = { + kind: "config-patch" as const, + status: "error" as const, + ts: Date.now(), + message: "Patch failed", + doctorHint: "Run openclaw doctor", + stats: { mode: "patch", reason: "validation failed" }, + }; + + expect(formatRestartSentinelMessage(payload)).toBe( + [ + "Gateway restart config-patch error (patch)", + "Patch failed", + "Reason: validation failed", + "Run openclaw doctor", + ].join("\n"), + ); + }); + it("trims log tails", () => { const text = "a".repeat(9000); const trimmed = trimLogTail(text, 8000); @@ -115,6 +146,18 @@ describe("restart sentinel", () => { expect(textA).toContain("Gateway restart restart ok"); expect(textA).not.toContain('"ts"'); }); + + it("summarizes restart payloads and trims log tails without trailing whitespace", () => { + expect( + summarizeRestartSentinel({ + kind: "update", + status: "skipped", + ts: 1, + }), + ).toBe("Gateway restart update skipped"); + expect(trimLogTail("hello\n")).toBe("hello"); + expect(trimLogTail(undefined)).toBeNull(); + }); }); describe("restart sentinel message dedup", () => { @@ -145,4 +188,10 @@ describe("restart sentinel message dedup", () => { expect(result).toContain("Restart requested by /restart"); expect(result).toContain("Reason: /restart"); }); + + it("formats the non-interactive doctor command", () => { + expect(formatDoctorNonInteractiveHint({ PATH: "/usr/bin:/bin" })).toContain( + "openclaw doctor --non-interactive", + ); + }); }); diff --git a/src/infra/restart-stale-pids.test.ts b/src/infra/restart-stale-pids.test.ts index f7bf0709d9f..b7589d26e15 100644 --- a/src/infra/restart-stale-pids.test.ts +++ b/src/infra/restart-stale-pids.test.ts @@ -42,6 +42,51 @@ function lsofOutput(entries: Array<{ pid: number; cmd: string }>): string { return entries.map(({ pid, cmd }) => `p${pid}\nc${cmd}`).join("\n") + "\n"; } 
+type MockLsofResult = { + error: Error | null; + status: number | null; + stdout: string; + stderr: string; +}; + +function createLsofResult(overrides: Partial = {}): MockLsofResult { + return { + error: null, + status: 0, + stdout: "", + stderr: "", + ...overrides, + }; +} + +function createOpenClawBusyResult(pid: number, overrides: Partial = {}) { + return createLsofResult({ + stdout: lsofOutput([{ pid, cmd: "openclaw-gateway" }]), + ...overrides, + }); +} + +function createErrnoResult(code: string, message: string) { + const error = new Error(message) as NodeJS.ErrnoException; + error.code = code; + return createLsofResult({ error, status: null }); +} + +function installInitialBusyPoll( + stalePid: number, + resolvePoll: (call: number) => MockLsofResult, +): () => number { + let call = 0; + mockSpawnSync.mockImplementation(() => { + call += 1; + if (call === 1) { + return createOpenClawBusyResult(stalePid); + } + return resolvePoll(call); + }); + return () => call; +} + describe.skipIf(isWindows)("restart-stale-pids", () => { beforeEach(() => { mockSpawnSync.mockReset(); @@ -201,20 +246,7 @@ describe.skipIf(isWindows)("restart-stale-pids", () => { // lsof exits with status 1 when no matching processes are found — this is // the canonical "port is free" signal, not an error. 
const stalePid = process.pid + 500; - let call = 0; - mockSpawnSync.mockImplementation(() => { - call++; - if (call === 1) { - return { - error: null, - status: 0, - stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]), - stderr: "", - }; - } - // Poll returns status 1 — no listeners - return { error: null, status: 1, stdout: "", stderr: "" }; - }); + installInitialBusyPoll(stalePid, () => createLsofResult({ status: 1 })); vi.spyOn(process, "kill").mockReturnValue(true); // Should complete cleanly (port reported free on status 1) expect(() => cleanStaleGatewayProcessesSync()).not.toThrow(); @@ -225,27 +257,17 @@ describe.skipIf(isWindows)("restart-stale-pids", () => { // bad flag, runtime error) must not be mapped to free:true. They are // inconclusive and should keep the polling loop running until budget expires. const stalePid = process.pid + 501; - let call = 0; const events: string[] = []; - mockSpawnSync.mockImplementation(() => { - call++; - if (call === 1) { - events.push("initial-find"); - return { - error: null, - status: 0, - stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]), - stderr: "", - }; - } + events.push("initial-find"); + installInitialBusyPoll(stalePid, (call) => { if (call === 2) { // Permission/runtime error — status 2, should NOT be treated as free events.push("error-poll"); - return { error: null, status: 2, stdout: "", stderr: "lsof: permission denied" }; + return createLsofResult({ status: 2, stderr: "lsof: permission denied" }); } // Eventually port is free events.push("free-poll"); - return { error: null, status: 1, stdout: "", stderr: "" }; + return createLsofResult({ status: 1 }); }); vi.spyOn(process, "kill").mockReturnValue(true); cleanStaleGatewayProcessesSync(); @@ -263,29 +285,13 @@ describe.skipIf(isWindows)("restart-stale-pids", () => { // The fix: pollPortOnce now parses res.stdout directly from the first // spawnSync call. Exactly ONE lsof invocation per poll cycle. 
const stalePid = process.pid + 400; - let spawnCount = 0; - mockSpawnSync.mockImplementation(() => { - spawnCount++; - if (spawnCount === 1) { - // Initial findGatewayPidsOnPortSync — returns stale pid - return { - error: null, - status: 0, - stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]), - stderr: "", - }; - } - if (spawnCount === 2) { + const getCallCount = installInitialBusyPoll(stalePid, (call) => { + if (call === 2) { // First waitForPortFreeSync poll — status 0, port busy (should parse inline, not spawn again) - return { - error: null, - status: 0, - stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]), - stderr: "", - }; + return createOpenClawBusyResult(stalePid); } // Port free on third call - return { error: null, status: 0, stdout: "", stderr: "" }; + return createLsofResult(); }); vi.spyOn(process, "kill").mockReturnValue(true); @@ -294,7 +300,7 @@ describe.skipIf(isWindows)("restart-stale-pids", () => { // If pollPortOnce made a second lsof call internally, spawnCount would // be at least 4 (initial + 2 polls each doubled). With the fix, each poll // is exactly one spawn: initial(1) + busy-poll(1) + free-poll(1) = 3. - expect(spawnCount).toBe(3); + expect(getCallCount()).toBe(3); }); it("lsof status 1 with non-empty openclaw stdout is treated as busy, not free (Linux container edge case)", () => { @@ -302,34 +308,21 @@ describe.skipIf(isWindows)("restart-stale-pids", () => { // lsof can exit 1 AND still emit output for processes it could read. // status 1 + non-empty openclaw stdout must not be treated as port-free. 
const stalePid = process.pid + 601; - let call = 0; - mockSpawnSync.mockImplementation(() => { - call++; - if (call === 1) { - // Initial scan: finds stale pid - return { - error: null, - status: 0, - stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]), - stderr: "", - }; - } + const getCallCount = installInitialBusyPoll(stalePid, (call) => { if (call === 2) { // status 1 + openclaw pid in stdout — container-restricted lsof reports partial results - return { - error: null, + return createOpenClawBusyResult(stalePid, { status: 1, - stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]), stderr: "lsof: WARNING: can't stat() fuse", - }; + }); } // Third poll: port is genuinely free - return { error: null, status: 1, stdout: "", stderr: "" }; + return createLsofResult({ status: 1 }); }); vi.spyOn(process, "kill").mockReturnValue(true); cleanStaleGatewayProcessesSync(); // Poll 2 returned busy (not free), so we must have polled at least 3 times - expect(call).toBeGreaterThanOrEqual(3); + expect(getCallCount()).toBeGreaterThanOrEqual(3); }); it("pollPortOnce outer catch returns { free: null, permanent: false } when resolveLsofCommandSync throws", () => { @@ -382,20 +375,7 @@ describe.skipIf(isWindows)("restart-stale-pids", () => { it("sends SIGTERM to stale pids and returns them", () => { const stalePid = process.pid + 100; - let call = 0; - mockSpawnSync.mockImplementation(() => { - call++; - if (call === 1) { - return { - error: null, - status: 0, - stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]), - stderr: "", - }; - } - // waitForPortFreeSync polls: port free immediately - return { error: null, status: 0, stdout: "", stderr: "" }; - }); + installInitialBusyPoll(stalePid, () => createLsofResult()); const killSpy = vi.spyOn(process, "kill").mockReturnValue(true); const result = cleanStaleGatewayProcessesSync(); @@ -474,24 +454,11 @@ describe.skipIf(isWindows)("restart-stale-pids", () => { // immediately on ENOENT rather than 
spinning the full 2-second budget. const stalePid = process.pid + 300; const events: string[] = []; - let call = 0; - - mockSpawnSync.mockImplementation(() => { - call++; - if (call === 1) { - events.push("initial-find"); - return { - error: null, - status: 0, - stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]), - stderr: "", - }; - } + events.push("initial-find"); + installInitialBusyPoll(stalePid, (call) => { // Permanent ENOENT — lsof is not installed events.push(`enoent-poll-${call}`); - const err = new Error("lsof not found") as NodeJS.ErrnoException; - err.code = "ENOENT"; - return { error: err, status: null, stdout: "", stderr: "" }; + return createErrnoResult("ENOENT", "lsof not found"); }); vi.spyOn(process, "kill").mockReturnValue(true); @@ -506,50 +473,26 @@ describe.skipIf(isWindows)("restart-stale-pids", () => { // EPERM occurs when lsof exists but a MAC policy (SELinux/AppArmor) blocks // execution. Like ENOENT/EACCES, this is permanent — retrying is pointless. 
const stalePid = process.pid + 305; - let call = 0; - mockSpawnSync.mockImplementation(() => { - call++; - if (call === 1) { - return { - error: null, - status: 0, - stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]), - stderr: "", - }; - } - const err = new Error("lsof eperm") as NodeJS.ErrnoException; - err.code = "EPERM"; - return { error: err, status: null, stdout: "", stderr: "" }; - }); + const getCallCount = installInitialBusyPoll(stalePid, () => + createErrnoResult("EPERM", "lsof eperm"), + ); vi.spyOn(process, "kill").mockReturnValue(true); expect(() => cleanStaleGatewayProcessesSync()).not.toThrow(); // Must bail after exactly 1 EPERM poll — same as ENOENT/EACCES - expect(call).toBe(2); // 1 initial find + 1 EPERM poll + expect(getCallCount()).toBe(2); // 1 initial find + 1 EPERM poll }); it("bails immediately when lsof is permanently unavailable (EACCES) — same as ENOENT", () => { // EACCES and EPERM are also permanent conditions — lsof exists but the // process has no permission to run it. No point retrying. 
const stalePid = process.pid + 302; - let call = 0; - mockSpawnSync.mockImplementation(() => { - call++; - if (call === 1) { - return { - error: null, - status: 0, - stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]), - stderr: "", - }; - } - const err = new Error("lsof permission denied") as NodeJS.ErrnoException; - err.code = "EACCES"; - return { error: err, status: null, stdout: "", stderr: "" }; - }); + const getCallCount = installInitialBusyPoll(stalePid, () => + createErrnoResult("EACCES", "lsof permission denied"), + ); vi.spyOn(process, "kill").mockReturnValue(true); expect(() => cleanStaleGatewayProcessesSync()).not.toThrow(); // Should have bailed after exactly 1 poll call (the EACCES one) - expect(call).toBe(2); // 1 initial find + 1 EACCES poll + expect(getCallCount()).toBe(2); // 1 initial find + 1 EACCES poll }); it("proceeds with warning when polling budget is exhausted — fake clock, no real 2s wait", () => { @@ -561,15 +504,10 @@ describe.skipIf(isWindows)("restart-stale-pids", () => { let fakeNow = 0; __testing.setDateNowOverride(() => fakeNow); - mockSpawnSync.mockImplementation(() => { + installInitialBusyPoll(stalePid, () => { // Advance clock by PORT_FREE_TIMEOUT_MS + 1ms on first poll to trip the deadline. fakeNow += 2001; - return { - error: null, - status: 0, - stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]), - stderr: "", - }; + return createOpenClawBusyResult(stalePid); }); vi.spyOn(process, "kill").mockReturnValue(true); @@ -585,24 +523,13 @@ describe.skipIf(isWindows)("restart-stale-pids", () => { // leaving its socket in TIME_WAIT / FIN_WAIT. Skipping the poll would // silently recreate the EADDRINUSE race we are fixing. 
const stalePid = process.pid + 304; - let call = 0; const events: string[] = []; - mockSpawnSync.mockImplementation(() => { - call++; - if (call === 1) { - // Initial scan: finds stale pid - events.push("initial-find"); - return { - error: null, - status: 0, - stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]), - stderr: "", - }; - } + events.push("initial-find"); + installInitialBusyPoll(stalePid, () => { // Port is already free on first poll — pid was dead before SIGTERM events.push("poll-free"); - return { error: null, status: 1, stdout: "", stderr: "" }; + return createLsofResult({ status: 1 }); }); // All SIGTERMs throw ESRCH — pid already gone @@ -623,27 +550,16 @@ describe.skipIf(isWindows)("restart-stale-pids", () => { // would recreate the EADDRINUSE race this PR is designed to prevent. const stalePid = process.pid + 301; const events: string[] = []; - let call = 0; - - mockSpawnSync.mockImplementation(() => { - call++; - if (call === 1) { - events.push("initial-find"); - return { - error: null, - status: 0, - stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]), - stderr: "", - }; - } + events.push("initial-find"); + installInitialBusyPoll(stalePid, (call) => { if (call === 2) { // Transient: spawnSync timeout (no ENOENT code) events.push("transient-error"); - return { error: new Error("timeout"), status: null, stdout: "", stderr: "" }; + return createLsofResult({ error: new Error("timeout"), status: null }); } // Port free on the next poll events.push("port-free"); - return { error: null, status: 1, stdout: "", stderr: "" }; + return createLsofResult({ status: 1 }); }); vi.spyOn(process, "kill").mockReturnValue(true); @@ -739,30 +655,18 @@ describe.skipIf(isWindows)("restart-stale-pids", () => { // the port may be held by an unrelated process. From our perspective // (we only kill openclaw pids) it is effectively free. 
const stalePid = process.pid + 800; - let call = 0; - mockSpawnSync.mockImplementation(() => { - call++; - if (call === 1) { - return { - error: null, - status: 0, - stdout: lsofOutput([{ pid: stalePid, cmd: "openclaw-gateway" }]), - stderr: "", - }; - } + const getCallCount = installInitialBusyPoll(stalePid, () => { // status 1 + non-openclaw output — should be treated as free:true for our purposes - return { - error: null, + return createLsofResult({ status: 1, stdout: lsofOutput([{ pid: process.pid + 801, cmd: "caddy" }]), - stderr: "", - }; + }); }); vi.spyOn(process, "kill").mockReturnValue(true); // Should complete cleanly — no openclaw pids in status-1 output → free expect(() => cleanStaleGatewayProcessesSync()).not.toThrow(); // Completed in exactly 2 calls (initial find + 1 free poll) - expect(call).toBe(2); + expect(getCallCount()).toBe(2); }); }); diff --git a/src/infra/retry-policy.test.ts b/src/infra/retry-policy.test.ts index 76a4415deee..be0e4d91de3 100644 --- a/src/infra/retry-policy.test.ts +++ b/src/infra/retry-policy.test.ts @@ -1,48 +1,154 @@ -import { describe, expect, it, vi } from "vitest"; +import { afterEach, describe, expect, it, vi } from "vitest"; import { createTelegramRetryRunner } from "./retry-policy.js"; +const ZERO_DELAY_RETRY = { attempts: 3, minDelayMs: 0, maxDelayMs: 0, jitter: 0 }; + describe("createTelegramRetryRunner", () => { + afterEach(() => { + vi.useRealTimers(); + vi.restoreAllMocks(); + }); + describe("strictShouldRetry", () => { - it("without strictShouldRetry: ECONNRESET is retried via regex fallback even when predicate returns false", async () => { - const fn = vi - .fn() - .mockRejectedValue(Object.assign(new Error("read ECONNRESET"), { code: "ECONNRESET" })); - const runner = createTelegramRetryRunner({ - retry: { attempts: 2, minDelayMs: 0, maxDelayMs: 0, jitter: 0 }, - shouldRetry: () => false, // predicate says no - // strictShouldRetry not set — regex fallback still applies - }); - await expect(runner(fn, 
"test")).rejects.toThrow("ECONNRESET"); - // Regex matches "reset" so it retried despite shouldRetry returning false - expect(fn).toHaveBeenCalledTimes(2); - }); + it.each([ + { + name: "falls back to regex matching when strictShouldRetry is disabled", + runnerOptions: { + retry: { ...ZERO_DELAY_RETRY, attempts: 2 }, + shouldRetry: () => false, + }, + fnSteps: [ + { + type: "reject" as const, + value: Object.assign(new Error("read ECONNRESET"), { + code: "ECONNRESET", + }), + }, + ], + expectedCalls: 2, + expectedError: "ECONNRESET", + }, + { + name: "suppresses regex fallback when strictShouldRetry is enabled", + runnerOptions: { + retry: { ...ZERO_DELAY_RETRY, attempts: 2 }, + shouldRetry: () => false, + strictShouldRetry: true, + }, + fnSteps: [ + { + type: "reject" as const, + value: Object.assign(new Error("read ECONNRESET"), { + code: "ECONNRESET", + }), + }, + ], + expectedCalls: 1, + expectedError: "ECONNRESET", + }, + { + name: "still retries when the strict predicate returns true", + runnerOptions: { + retry: { ...ZERO_DELAY_RETRY, attempts: 2 }, + shouldRetry: (err: unknown) => (err as { code?: string }).code === "ECONNREFUSED", + strictShouldRetry: true, + }, + fnSteps: [ + { + type: "reject" as const, + value: Object.assign(new Error("ECONNREFUSED"), { + code: "ECONNREFUSED", + }), + }, + { type: "resolve" as const, value: "ok" }, + ], + expectedCalls: 2, + expectedValue: "ok", + }, + { + name: "does not retry unrelated errors when neither predicate nor regex match", + runnerOptions: { + retry: { ...ZERO_DELAY_RETRY, attempts: 2 }, + }, + fnSteps: [ + { + type: "reject" as const, + value: Object.assign(new Error("permission denied"), { + code: "EACCES", + }), + }, + ], + expectedCalls: 1, + expectedError: "permission denied", + }, + { + name: "keeps retrying retriable errors until attempts are exhausted", + runnerOptions: { + retry: ZERO_DELAY_RETRY, + }, + fnSteps: [ + { + type: "reject" as const, + value: Object.assign(new Error("connection 
timeout"), { + code: "ETIMEDOUT", + }), + }, + ], + expectedCalls: 3, + expectedError: "connection timeout", + }, + ])("$name", async ({ runnerOptions, fnSteps, expectedCalls, expectedValue, expectedError }) => { + vi.useFakeTimers(); + const runner = createTelegramRetryRunner(runnerOptions); + const fn = vi.fn(); + const allRejects = fnSteps.length > 0 && fnSteps.every((step) => step.type === "reject"); + if (allRejects) { + fn.mockRejectedValue(fnSteps[0]?.value); + } + for (const [index, step] of fnSteps.entries()) { + if (allRejects && index > 0) { + break; + } + if (step.type === "reject") { + fn.mockRejectedValueOnce(step.value); + } else { + fn.mockResolvedValueOnce(step.value); + } + } - it("with strictShouldRetry=true: ECONNRESET is NOT retried when predicate returns false", async () => { - const fn = vi - .fn() - .mockRejectedValue(Object.assign(new Error("read ECONNRESET"), { code: "ECONNRESET" })); - const runner = createTelegramRetryRunner({ - retry: { attempts: 2, minDelayMs: 0, maxDelayMs: 0, jitter: 0 }, - shouldRetry: () => false, - strictShouldRetry: true, // predicate is authoritative - }); - await expect(runner(fn, "test")).rejects.toThrow("ECONNRESET"); - // No retry — predicate returned false and regex fallback was suppressed - expect(fn).toHaveBeenCalledTimes(1); - }); + const promise = runner(fn, "test"); + const assertion = expectedError + ? 
expect(promise).rejects.toThrow(expectedError) + : expect(promise).resolves.toBe(expectedValue); - it("with strictShouldRetry=true: ECONNREFUSED is still retried when predicate returns true", async () => { - const fn = vi - .fn() - .mockRejectedValueOnce(Object.assign(new Error("ECONNREFUSED"), { code: "ECONNREFUSED" })) - .mockResolvedValue("ok"); - const runner = createTelegramRetryRunner({ - retry: { attempts: 2, minDelayMs: 0, maxDelayMs: 0, jitter: 0 }, - shouldRetry: (err) => (err as { code?: string }).code === "ECONNREFUSED", - strictShouldRetry: true, - }); - await expect(runner(fn, "test")).resolves.toBe("ok"); - expect(fn).toHaveBeenCalledTimes(2); + await vi.runAllTimersAsync(); + await assertion; + expect(fn).toHaveBeenCalledTimes(expectedCalls); }); }); + + it("honors nested retry_after hints before retrying", async () => { + vi.useFakeTimers(); + + const runner = createTelegramRetryRunner({ + retry: { attempts: 2, minDelayMs: 0, maxDelayMs: 1_000, jitter: 0 }, + }); + const fn = vi + .fn() + .mockRejectedValueOnce({ + message: "429 Too Many Requests", + response: { parameters: { retry_after: 1 } }, + }) + .mockResolvedValue("ok"); + + const promise = runner(fn, "test"); + + expect(fn).toHaveBeenCalledTimes(1); + await vi.advanceTimersByTimeAsync(999); + expect(fn).toHaveBeenCalledTimes(1); + + await vi.advanceTimersByTimeAsync(1); + await expect(promise).resolves.toBe("ok"); + expect(fn).toHaveBeenCalledTimes(2); + }); }); diff --git a/src/infra/retry.test.ts b/src/infra/retry.test.ts index dfba7cabd6b..0eafafa6536 100644 --- a/src/infra/retry.test.ts +++ b/src/infra/retry.test.ts @@ -1,5 +1,5 @@ import { describe, expect, it, vi } from "vitest"; -import { retryAsync } from "./retry.js"; +import { resolveRetryConfig, retryAsync } from "./retry.js"; async function runRetryAfterCase(params: { minDelayMs: number; @@ -48,22 +48,34 @@ describe("retryAsync", () => { }); it("stops when shouldRetry returns false", async () => { - const fn = 
vi.fn().mockRejectedValue(new Error("boom")); - await expect(retryAsync(fn, { attempts: 3, shouldRetry: () => false })).rejects.toThrow("boom"); + const err = new Error("boom"); + const fn = vi.fn().mockRejectedValue(err); + const shouldRetry = vi.fn(() => false); + await expect(retryAsync(fn, { attempts: 3, shouldRetry })).rejects.toThrow("boom"); expect(fn).toHaveBeenCalledTimes(1); + expect(shouldRetry).toHaveBeenCalledWith(err, 1); }); - it("calls onRetry before retrying", async () => { - const fn = vi.fn().mockRejectedValueOnce(new Error("boom")).mockResolvedValueOnce("ok"); + it("calls onRetry with retry metadata before retrying", async () => { + const err = new Error("boom"); + const fn = vi.fn().mockRejectedValueOnce(err).mockResolvedValueOnce("ok"); const onRetry = vi.fn(); const res = await retryAsync(fn, { attempts: 2, minDelayMs: 0, maxDelayMs: 0, + label: "telegram", onRetry, }); expect(res).toBe("ok"); - expect(onRetry).toHaveBeenCalledWith(expect.objectContaining({ attempt: 1, maxAttempts: 2 })); + expect(onRetry).toHaveBeenCalledWith( + expect.objectContaining({ + attempt: 1, + maxAttempts: 2, + err, + label: "telegram", + }), + ); }); it("clamps attempts to at least 1", async () => { @@ -89,3 +101,30 @@ describe("retryAsync", () => { expect(delays[0]).toBe(250); }); }); + +describe("resolveRetryConfig", () => { + it.each([ + { + name: "rounds attempts and delays", + overrides: { attempts: 2.6, minDelayMs: 10.4, maxDelayMs: 99.8, jitter: 0.4 }, + expected: { attempts: 3, minDelayMs: 10, maxDelayMs: 100, jitter: 0.4 }, + }, + { + name: "clamps attempts to at least one and maxDelayMs to minDelayMs", + overrides: { attempts: 0, minDelayMs: 250, maxDelayMs: 100, jitter: -1 }, + expected: { attempts: 1, minDelayMs: 250, maxDelayMs: 250, jitter: 0 }, + }, + { + name: "falls back for non-finite overrides and caps jitter at one", + overrides: { + attempts: Number.NaN, + minDelayMs: Number.POSITIVE_INFINITY, + maxDelayMs: Number.NaN, + jitter: 2, + }, + 
expected: { attempts: 3, minDelayMs: 300, maxDelayMs: 30000, jitter: 1 }, + }, + ])("$name", ({ overrides, expected }) => { + expect(resolveRetryConfig(undefined, overrides)).toEqual(expected); + }); +}); diff --git a/src/infra/run-node.test.ts b/src/infra/run-node.test.ts index fab1d7e771a..1007b2c6141 100644 --- a/src/infra/run-node.test.ts +++ b/src/infra/run-node.test.ts @@ -13,6 +13,17 @@ async function withTempDir(run: (dir: string) => Promise): Promise { } } +function createExitedProcess(code: number | null, signal: string | null = null) { + return { + on: (event: string, cb: (code: number | null, signal: string | null) => void) => { + if (event === "exit") { + queueMicrotask(() => cb(code, signal)); + } + return undefined; + }, + }; +} + describe("run-node script", () => { it.runIf(process.platform !== "win32")( "preserves control-ui assets by building with tsdown --no-clean", @@ -66,4 +77,88 @@ describe("run-node script", () => { }); }, ); + + it("skips rebuilding when dist is current and the source tree is clean", async () => { + await withTempDir(async (tmp) => { + const srcPath = path.join(tmp, "src", "index.ts"); + const distEntryPath = path.join(tmp, "dist", "entry.js"); + const buildStampPath = path.join(tmp, "dist", ".buildstamp"); + const tsconfigPath = path.join(tmp, "tsconfig.json"); + const packageJsonPath = path.join(tmp, "package.json"); + await fs.mkdir(path.dirname(srcPath), { recursive: true }); + await fs.mkdir(path.dirname(distEntryPath), { recursive: true }); + await fs.writeFile(srcPath, "export const value = 1;\n", "utf-8"); + await fs.writeFile(tsconfigPath, "{}\n", "utf-8"); + await fs.writeFile(packageJsonPath, '{"name":"openclaw-test"}\n', "utf-8"); + await fs.writeFile(distEntryPath, "console.log('built');\n", "utf-8"); + await fs.writeFile(buildStampPath, '{"head":"abc123"}\n', "utf-8"); + + const oldTime = new Date("2026-03-13T10:00:00.000Z"); + const stampTime = new Date("2026-03-13T12:00:00.000Z"); + await fs.utimes(srcPath, 
oldTime, oldTime); + await fs.utimes(tsconfigPath, oldTime, oldTime); + await fs.utimes(packageJsonPath, oldTime, oldTime); + await fs.utimes(distEntryPath, stampTime, stampTime); + await fs.utimes(buildStampPath, stampTime, stampTime); + + const spawnCalls: string[][] = []; + const spawn = (cmd: string, args: string[]) => { + spawnCalls.push([cmd, ...args]); + return createExitedProcess(0); + }; + const spawnSync = (cmd: string, args: string[]) => { + if (cmd === "git" && args[0] === "rev-parse") { + return { status: 0, stdout: "abc123\n" }; + } + if (cmd === "git" && args[0] === "status") { + return { status: 0, stdout: "" }; + } + return { status: 1, stdout: "" }; + }; + + const { runNodeMain } = await import("../../scripts/run-node.mjs"); + const exitCode = await runNodeMain({ + cwd: tmp, + args: ["status"], + env: { + ...process.env, + OPENCLAW_RUNNER_LOG: "0", + }, + spawn, + spawnSync, + execPath: process.execPath, + platform: process.platform, + }); + + expect(exitCode).toBe(0); + expect(spawnCalls).toEqual([[process.execPath, "openclaw.mjs", "status"]]); + }); + }); + + it("returns the build exit code when the compiler step fails", async () => { + await withTempDir(async (tmp) => { + const spawn = (cmd: string) => { + if (cmd === "pnpm") { + return createExitedProcess(23); + } + return createExitedProcess(0); + }; + + const { runNodeMain } = await import("../../scripts/run-node.mjs"); + const exitCode = await runNodeMain({ + cwd: tmp, + args: ["status"], + env: { + ...process.env, + OPENCLAW_FORCE_BUILD: "1", + OPENCLAW_RUNNER_LOG: "0", + }, + spawn, + execPath: process.execPath, + platform: process.platform, + }); + + expect(exitCode).toBe(23); + }); + }); }); diff --git a/src/infra/runtime-guard.test.ts b/src/infra/runtime-guard.test.ts index 410fe5d4a2d..ca1080b84bc 100644 --- a/src/infra/runtime-guard.test.ts +++ b/src/infra/runtime-guard.test.ts @@ -4,6 +4,7 @@ import { detectRuntime, isAtLeast, parseSemver, + isSupportedNodeVersion, type 
RuntimeDetails, runtimeSatisfies, } from "./runtime-guard.js"; @@ -12,6 +13,7 @@ describe("runtime-guard", () => { it("parses semver with or without leading v", () => { expect(parseSemver("v22.1.3")).toEqual({ major: 22, minor: 1, patch: 3 }); expect(parseSemver("1.3.0")).toEqual({ major: 1, minor: 3, patch: 0 }); + expect(parseSemver("22.16.0-beta.1")).toEqual({ major: 22, minor: 16, patch: 0 }); expect(parseSemver("invalid")).toBeNull(); }); @@ -49,6 +51,9 @@ describe("runtime-guard", () => { expect(runtimeSatisfies(nodeOld)).toBe(false); expect(runtimeSatisfies(nodeTooOld)).toBe(false); expect(runtimeSatisfies(unknown)).toBe(false); + expect(isSupportedNodeVersion("22.16.0")).toBe(true); + expect(isSupportedNodeVersion("22.15.9")).toBe(false); + expect(isSupportedNodeVersion(null)).toBe(false); }); it("throws via exit when runtime is too old", () => { @@ -67,6 +72,7 @@ describe("runtime-guard", () => { }; expect(() => assertSupportedRuntime(runtime, details)).toThrow("exit"); expect(runtime.error).toHaveBeenCalledWith(expect.stringContaining("requires Node")); + expect(runtime.error).toHaveBeenCalledWith(expect.stringContaining("Detected: node 20.0.0")); }); it("returns silently when runtime meets requirements", () => { @@ -84,4 +90,25 @@ describe("runtime-guard", () => { expect(() => assertSupportedRuntime(runtime, details)).not.toThrow(); expect(runtime.exit).not.toHaveBeenCalled(); }); + + it("reports unknown runtimes with fallback labels", () => { + const runtime = { + log: vi.fn(), + error: vi.fn(), + exit: vi.fn(() => { + throw new Error("exit"); + }), + }; + const details: RuntimeDetails = { + kind: "unknown", + version: null, + execPath: null, + pathEnv: "(not set)", + }; + + expect(() => assertSupportedRuntime(runtime, details)).toThrow("exit"); + expect(runtime.error).toHaveBeenCalledWith( + expect.stringContaining("Detected: unknown runtime (exec: unknown)."), + ); + }); }); diff --git a/src/infra/runtime-status.test.ts 
b/src/infra/runtime-status.test.ts new file mode 100644 index 00000000000..fc79afe5bee --- /dev/null +++ b/src/infra/runtime-status.test.ts @@ -0,0 +1,30 @@ +import { describe, expect, it } from "vitest"; +import { formatRuntimeStatusWithDetails } from "./runtime-status.js"; + +describe("formatRuntimeStatusWithDetails", () => { + it("falls back to unknown when status is missing", () => { + expect(formatRuntimeStatusWithDetails({})).toBe("unknown"); + }); + + it("includes pid, distinct state, and non-empty details", () => { + expect( + formatRuntimeStatusWithDetails({ + status: "running", + pid: 1234, + state: "sleeping", + details: ["healthy", "", "port 18789"], + }), + ).toBe("running (pid 1234, state sleeping, healthy, port 18789)"); + }); + + it("omits duplicate state text and falsy pid values", () => { + expect( + formatRuntimeStatusWithDetails({ + status: "running", + pid: 0, + state: "RUNNING", + details: [], + }), + ).toBe("running"); + }); +}); diff --git a/src/infra/safe-open-sync.test.ts b/src/infra/safe-open-sync.test.ts index 3208a089786..726aa9195f1 100644 --- a/src/infra/safe-open-sync.test.ts +++ b/src/infra/safe-open-sync.test.ts @@ -5,6 +5,11 @@ import path from "node:path"; import { describe, expect, it } from "vitest"; import { openVerifiedFileSync } from "./safe-open-sync.js"; +type SafeOpenSyncFs = NonNullable[0]["ioFs"]>; +type SafeOpenSyncLstatSync = SafeOpenSyncFs["lstatSync"]; +type SafeOpenSyncRealpathSync = SafeOpenSyncFs["realpathSync"]; +type SafeOpenSyncFstatSync = SafeOpenSyncFs["fstatSync"]; + async function withTempDir(prefix: string, run: (dir: string) => Promise): Promise { const dir = await fsp.mkdtemp(path.join(os.tmpdir(), prefix)); try { @@ -14,7 +19,50 @@ async function withTempDir(prefix: string, run: (dir: string) => Promise): } } +function mockStat(params: { + isFile?: boolean; + isDirectory?: boolean; + nlink?: number; + size?: number; + dev?: number; + ino?: number; +}): fs.Stats { + return { + isFile: () => 
params.isFile ?? false, + isDirectory: () => params.isDirectory ?? false, + isSymbolicLink: () => false, + nlink: params.nlink ?? 1, + size: params.size ?? 0, + dev: params.dev ?? 1, + ino: params.ino ?? 1, + } as unknown as fs.Stats; +} + +function mockRealpathSync(result: string): SafeOpenSyncRealpathSync { + const resolvePath = ((_: fs.PathLike) => result) as SafeOpenSyncRealpathSync; + resolvePath.native = ((_: fs.PathLike) => result) as typeof resolvePath.native; + return resolvePath; +} + +function mockLstatSync(read: (filePath: fs.PathLike) => fs.Stats): SafeOpenSyncLstatSync { + return ((filePath: fs.PathLike) => read(filePath)) as unknown as SafeOpenSyncLstatSync; +} + +function mockFstatSync(stat: fs.Stats): SafeOpenSyncFstatSync { + return ((_: number) => stat) as unknown as SafeOpenSyncFstatSync; +} + describe("openVerifiedFileSync", () => { + it("returns a path error for missing files", async () => { + await withTempDir("openclaw-safe-open-", async (root) => { + const opened = openVerifiedFileSync({ filePath: path.join(root, "missing.txt") }); + expect(opened.ok).toBe(false); + if (!opened.ok) { + expect(opened.reason).toBe("path"); + } + }); + }); + it("rejects directories by default", async () => { await withTempDir("openclaw-safe-open-", async (root) => { const targetDir = path.join(root, "nested"); @@ -46,4 +94,92 @@ describe("openVerifiedFileSync", () => { fs.closeSync(opened.fd); }); }); + + it("rejects symlink paths when rejectPathSymlink is enabled", async () => { + await withTempDir("openclaw-safe-open-", async (root) => { + const targetFile = path.join(root, "target.txt"); + const linkFile = path.join(root, "link.txt"); + await fsp.writeFile(targetFile, "hello"); + await fsp.symlink(targetFile, linkFile); + + const opened = openVerifiedFileSync({ + filePath: linkFile, + rejectPathSymlink: true, + }); + expect(opened.ok).toBe(false); + if (!opened.ok) { + expect(opened.reason).toBe("validation"); + } + }); + }); + + it("rejects files larger 
than maxBytes", async () => { + await withTempDir("openclaw-safe-open-", async (root) => { + const filePath = path.join(root, "payload.txt"); + await fsp.writeFile(filePath, "hello"); + + const opened = openVerifiedFileSync({ + filePath, + maxBytes: 4, + }); + expect(opened.ok).toBe(false); + if (!opened.ok) { + expect(opened.reason).toBe("validation"); + } + }); + }); + + it("rejects post-open validation mismatches and closes the fd", () => { + const closeSync = (fd: number) => { + closed.push(fd); + }; + const closed: number[] = []; + const ioFs: SafeOpenSyncFs = { + constants: fs.constants, + lstatSync: mockLstatSync((filePath) => + String(filePath) === "/real/file.txt" + ? mockStat({ isFile: true, size: 1, dev: 1, ino: 1 }) + : mockStat({ isFile: false }), + ), + realpathSync: mockRealpathSync("/real/file.txt"), + openSync: () => 42, + fstatSync: mockFstatSync(mockStat({ isFile: true, size: 1, dev: 2, ino: 1 })), + closeSync, + }; + + const opened = openVerifiedFileSync({ + filePath: "/input/file.txt", + ioFs, + }); + expect(opened.ok).toBe(false); + if (!opened.ok) { + expect(opened.reason).toBe("validation"); + } + expect(closed).toEqual([42]); + }); + + it("reports non-path filesystem failures as io errors", () => { + const ioFs: SafeOpenSyncFs = { + constants: fs.constants, + lstatSync: () => { + const err = new Error("permission denied") as NodeJS.ErrnoException; + err.code = "EACCES"; + throw err; + }, + realpathSync: mockRealpathSync("/real/file.txt"), + openSync: () => 42, + fstatSync: mockFstatSync(mockStat({ isFile: true })), + closeSync: () => {}, + }; + + const opened = openVerifiedFileSync({ + filePath: "/input/file.txt", + rejectPathSymlink: true, + ioFs, + }); + expect(opened.ok).toBe(false); + if (!opened.ok) { + expect(opened.reason).toBe("io"); + } + }); }); diff --git a/src/infra/scp-host.test.ts b/src/infra/scp-host.test.ts index 178c738adfb..78498b997ce 100644 --- a/src/infra/scp-host.test.ts +++ b/src/infra/scp-host.test.ts @@ -2,18 +2,34 
@@ import { describe, expect, it } from "vitest"; import { isSafeScpRemoteHost, normalizeScpRemoteHost } from "./scp-host.js"; describe("scp remote host", () => { - it("accepts host and user@host forms", () => { - expect(normalizeScpRemoteHost("gateway-host")).toBe("gateway-host"); - expect(normalizeScpRemoteHost("bot@gateway-host")).toBe("bot@gateway-host"); - expect(normalizeScpRemoteHost("bot@192.168.64.3")).toBe("bot@192.168.64.3"); - expect(normalizeScpRemoteHost("bot@[fe80::1]")).toBe("bot@[fe80::1]"); + it.each([ + { value: "gateway-host", expected: "gateway-host" }, + { value: " bot@gateway-host ", expected: "bot@gateway-host" }, + { value: "bot@192.168.64.3", expected: "bot@192.168.64.3" }, + { value: "bot@[fe80::1]", expected: "bot@[fe80::1]" }, + ])("normalizes safe hosts for %j", ({ value, expected }) => { + expect(normalizeScpRemoteHost(value)).toBe(expected); }); - it("rejects unsafe host tokens", () => { - expect(isSafeScpRemoteHost("-oProxyCommand=whoami")).toBe(false); - expect(isSafeScpRemoteHost("bot@gateway-host -oStrictHostKeyChecking=no")).toBe(false); - expect(isSafeScpRemoteHost("bot@host:22")).toBe(false); - expect(isSafeScpRemoteHost("bot@/tmp/host")).toBe(false); - expect(isSafeScpRemoteHost("bot@@host")).toBe(false); + it.each([ + null, + undefined, + "", + " ", + "-oProxyCommand=whoami", + "bot@gateway-host -oStrictHostKeyChecking=no", + "bot@host:22", + "bot@/tmp/host", + "bot@@host", + "@host", + "bot@", + "bot@host\\name", + "bot@-gateway-host", + "bot@fe80::1", + "bot@[fe80::1%en0]", + "bot name@gateway-host", + ])("rejects unsafe host tokens: %j", (value) => { + expect(normalizeScpRemoteHost(value)).toBeUndefined(); + expect(isSafeScpRemoteHost(value)).toBe(false); }); }); diff --git a/src/infra/secret-file.test.ts b/src/infra/secret-file.test.ts index 788b4c75e23..ca7841891e5 100644 --- a/src/infra/secret-file.test.ts +++ b/src/infra/secret-file.test.ts @@ -4,6 +4,7 @@ import { afterEach, describe, expect, it } from "vitest"; 
import { createTrackedTempDirs } from "../test-utils/tracked-temp-dirs.js"; import { DEFAULT_SECRET_FILE_MAX_BYTES, + loadSecretFileSync, readSecretFileSync, tryReadSecretFileSync, } from "./secret-file.js"; @@ -16,6 +17,12 @@ afterEach(async () => { }); describe("readSecretFileSync", () => { + it("rejects blank file paths", () => { + expect(() => readSecretFileSync(" ", "Gateway password")).toThrow( + "Gateway password file path is empty.", + ); + }); + it("reads and trims a regular secret file", async () => { const dir = await createTempDir(); const file = path.join(dir, "secret.txt"); @@ -56,6 +63,28 @@ describe("readSecretFileSync", () => { ); }); + it("rejects empty secret files after trimming", async () => { + const dir = await createTempDir(); + const file = path.join(dir, "secret.txt"); + await writeFile(file, " \n\t ", "utf8"); + + expect(() => readSecretFileSync(file, "Gateway password")).toThrow( + `Gateway password file at ${file} is empty.`, + ); + }); + + it("exposes resolvedPath on non-throwing read failures", async () => { + const dir = await createTempDir(); + const file = path.join(dir, "secret.txt"); + await writeFile(file, " \n\t ", "utf8"); + + expect(loadSecretFileSync(file, "Gateway password")).toMatchObject({ + ok: false, + resolvedPath: file, + message: `Gateway password file at ${file} is empty.`, + }); + }); + it("returns undefined from the non-throwing helper for rejected files", async () => { const dir = await createTempDir(); const target = path.join(dir, "target.txt"); @@ -67,4 +96,9 @@ describe("readSecretFileSync", () => { undefined, ); }); + + it("returns undefined from the non-throwing helper for blank file paths", () => { + expect(tryReadSecretFileSync(" ", "Telegram bot token")).toBeUndefined(); + expect(tryReadSecretFileSync(undefined, "Telegram bot token")).toBeUndefined(); + }); }); diff --git a/src/infra/secure-random.test.ts b/src/infra/secure-random.test.ts index 96f08252de4..2a595900c7b 100644 --- 
a/src/infra/secure-random.test.ts +++ b/src/infra/secure-random.test.ts @@ -1,20 +1,52 @@ -import { describe, expect, it } from "vitest"; +import { Buffer } from "node:buffer"; +import { describe, expect, it, vi } from "vitest"; + +const cryptoMocks = vi.hoisted(() => ({ + randomBytes: vi.fn((bytes: number) => Buffer.alloc(bytes, 0xab)), + randomUUID: vi.fn(), +})); + +vi.mock("node:crypto", () => ({ + randomBytes: cryptoMocks.randomBytes, + randomUUID: cryptoMocks.randomUUID, +})); + import { generateSecureToken, generateSecureUuid } from "./secure-random.js"; describe("secure-random", () => { - it("generates UUIDs", () => { - const first = generateSecureUuid(); - const second = generateSecureUuid(); - expect(first).not.toBe(second); - expect(first).toMatch( - /^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i, - ); + it("delegates UUID generation to crypto.randomUUID", () => { + cryptoMocks.randomUUID.mockReturnValueOnce("uuid-1").mockReturnValueOnce("uuid-2"); + + expect(generateSecureUuid()).toBe("uuid-1"); + expect(generateSecureUuid()).toBe("uuid-2"); + expect(cryptoMocks.randomUUID).toHaveBeenCalledTimes(2); }); - it("generates url-safe tokens", () => { + it("generates url-safe tokens with the default byte count", () => { + cryptoMocks.randomBytes.mockClear(); + const defaultToken = generateSecureToken(); - const token18 = generateSecureToken(18); + + expect(cryptoMocks.randomBytes).toHaveBeenCalledWith(16); expect(defaultToken).toMatch(/^[A-Za-z0-9_-]+$/); - expect(token18).toMatch(/^[A-Za-z0-9_-]{24}$/); + expect(defaultToken).toHaveLength(Buffer.alloc(16, 0xab).toString("base64url").length); + }); + + it("passes custom byte counts through to crypto.randomBytes", () => { + cryptoMocks.randomBytes.mockClear(); + + const token18 = generateSecureToken(18); + + expect(cryptoMocks.randomBytes).toHaveBeenCalledWith(18); + expect(token18).toBe(Buffer.alloc(18, 0xab).toString("base64url")); + }); + + it("supports zero-byte tokens without 
rewriting the requested size", () => { + cryptoMocks.randomBytes.mockClear(); + + const token = generateSecureToken(0); + + expect(cryptoMocks.randomBytes).toHaveBeenCalledWith(0); + expect(token).toBe(""); }); }); diff --git a/src/infra/session-maintenance-warning.test.ts b/src/infra/session-maintenance-warning.test.ts index f0e9590c572..f4c2e0757a1 100644 --- a/src/infra/session-maintenance-warning.test.ts +++ b/src/infra/session-maintenance-warning.test.ts @@ -1,3 +1,4 @@ +import { randomUUID } from "node:crypto"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; const mocks = vi.hoisted(() => ({ @@ -37,6 +38,26 @@ vi.mock("./system-events.js", () => ({ const { deliverSessionMaintenanceWarning } = await import("./session-maintenance-warning.js"); +function createParams( + overrides: Partial[0]> = {}, +): Parameters[0] { + const sessionKey = overrides.sessionKey ?? `agent:${randomUUID()}:main`; + return { + cfg: {}, + sessionKey, + entry: {} as never, + warning: { + activeSessionKey: sessionKey, + pruneAfterMs: 1_000, + maxEntries: 100, + wouldPrune: true, + wouldCap: false, + ...(overrides.warning as object), + } as never, + ...overrides, + }; +} + describe("deliverSessionMaintenanceWarning", () => { let prevVitest: string | undefined; let prevNodeEnv: string | undefined; @@ -68,18 +89,9 @@ describe("deliverSessionMaintenanceWarning", () => { }); it("forwards session context to outbound delivery", async () => { - await deliverSessionMaintenanceWarning({ - cfg: {}, - sessionKey: "agent:main:main", - entry: {} as never, - warning: { - activeSessionKey: "agent:main:main", - pruneAfterMs: 1_000, - maxEntries: 100, - wouldPrune: true, - wouldCap: false, - } as never, - }); + const params = createParams({ sessionKey: "agent:main:main" }); + + await deliverSessionMaintenanceWarning(params); expect(mocks.deliverOutboundPayloads).toHaveBeenCalledWith( expect.objectContaining({ @@ -90,4 +102,61 @@ describe("deliverSessionMaintenanceWarning", () => 
{ ); expect(mocks.enqueueSystemEvent).not.toHaveBeenCalled(); }); + + it("suppresses duplicate warning contexts for the same session", async () => { + const params = createParams(); + + await deliverSessionMaintenanceWarning(params); + await deliverSessionMaintenanceWarning(params); + + expect(mocks.deliverOutboundPayloads).toHaveBeenCalledTimes(1); + }); + + it("falls back to a system event when the last target is not deliverable", async () => { + mocks.resolveSessionDeliveryTarget.mockReturnValueOnce({ + channel: "debug", + to: "+15550001", + accountId: "acct-1", + threadId: "thread-1", + }); + mocks.isDeliverableMessageChannel.mockReturnValueOnce(false); + + await deliverSessionMaintenanceWarning( + createParams({ + warning: { + pruneAfterMs: 3_600_000, + maxEntries: 10, + wouldPrune: false, + wouldCap: true, + } as never, + }), + ); + + expect(mocks.deliverOutboundPayloads).not.toHaveBeenCalled(); + expect(mocks.enqueueSystemEvent).toHaveBeenCalledWith( + expect.stringContaining("most recent 10 sessions"), + expect.objectContaining({ sessionKey: expect.stringContaining("agent:") }), + ); + }); + + it("skips warning delivery in test mode", async () => { + process.env.NODE_ENV = "test"; + + await deliverSessionMaintenanceWarning(createParams()); + + expect(mocks.resolveSessionDeliveryTarget).not.toHaveBeenCalled(); + expect(mocks.deliverOutboundPayloads).not.toHaveBeenCalled(); + expect(mocks.enqueueSystemEvent).not.toHaveBeenCalled(); + }); + + it("enqueues a system event when outbound delivery fails", async () => { + mocks.deliverOutboundPayloads.mockRejectedValueOnce(new Error("boom")); + + await deliverSessionMaintenanceWarning(createParams()); + + expect(mocks.enqueueSystemEvent).toHaveBeenCalledWith( + expect.stringContaining("older than 1 second"), + expect.objectContaining({ sessionKey: expect.stringContaining("agent:") }), + ); + }); }); diff --git a/src/infra/shell-inline-command.test.ts b/src/infra/shell-inline-command.test.ts new file mode 100644 
index 00000000000..1c5892eff59 --- /dev/null +++ b/src/infra/shell-inline-command.test.ts @@ -0,0 +1,80 @@ +import { describe, expect, it } from "vitest"; +import { + POSIX_INLINE_COMMAND_FLAGS, + POWERSHELL_INLINE_COMMAND_FLAGS, + resolveInlineCommandMatch, +} from "./shell-inline-command.js"; + +describe("resolveInlineCommandMatch", () => { + it("extracts the next token for exact inline-command flags", () => { + expect( + resolveInlineCommandMatch(["bash", "-lc", "echo hi"], POSIX_INLINE_COMMAND_FLAGS), + ).toEqual({ + command: "echo hi", + valueTokenIndex: 2, + }); + expect( + resolveInlineCommandMatch( + ["pwsh", "-Command", "Get-ChildItem"], + POWERSHELL_INLINE_COMMAND_FLAGS, + ), + ).toEqual({ + command: "Get-ChildItem", + valueTokenIndex: 2, + }); + expect( + resolveInlineCommandMatch(["pwsh", "-File", "script.ps1"], POWERSHELL_INLINE_COMMAND_FLAGS), + ).toEqual({ + command: "script.ps1", + valueTokenIndex: 2, + }); + expect( + resolveInlineCommandMatch( + ["powershell", "-f", "script.ps1"], + POWERSHELL_INLINE_COMMAND_FLAGS, + ), + ).toEqual({ + command: "script.ps1", + valueTokenIndex: 2, + }); + }); + + it("supports combined -c forms only when enabled", () => { + expect( + resolveInlineCommandMatch(["sh", "-cecho hi"], POSIX_INLINE_COMMAND_FLAGS, { + allowCombinedC: true, + }), + ).toEqual({ + command: "echo hi", + valueTokenIndex: 1, + }); + expect( + resolveInlineCommandMatch(["sh", "-cecho hi"], POSIX_INLINE_COMMAND_FLAGS, { + allowCombinedC: false, + }), + ).toEqual({ + command: null, + valueTokenIndex: null, + }); + }); + + it("returns a value index even when the flag is present without a usable command", () => { + expect(resolveInlineCommandMatch(["bash", "-lc", " "], POSIX_INLINE_COMMAND_FLAGS)).toEqual({ + command: null, + valueTokenIndex: 2, + }); + expect(resolveInlineCommandMatch(["bash", "-lc"], POSIX_INLINE_COMMAND_FLAGS)).toEqual({ + command: null, + valueTokenIndex: null, + }); + }); + + it("stops parsing after --", () => { + expect( + 
resolveInlineCommandMatch(["bash", "--", "-lc", "echo hi"], POSIX_INLINE_COMMAND_FLAGS), + ).toEqual({ + command: null, + valueTokenIndex: null, + }); + }); +}); diff --git a/src/infra/shell-inline-command.ts b/src/infra/shell-inline-command.ts index 9e0f33627ab..e5b46640c13 100644 --- a/src/infra/shell-inline-command.ts +++ b/src/infra/shell-inline-command.ts @@ -3,6 +3,8 @@ export const POWERSHELL_INLINE_COMMAND_FLAGS = new Set([ "-c", "-command", "--command", + "-f", + "-file", "-encodedcommand", "-enc", "-e", diff --git a/src/infra/skills-remote.test.ts b/src/infra/skills-remote.test.ts index 5aecf39a3b3..24c41dddbd5 100644 --- a/src/infra/skills-remote.test.ts +++ b/src/infra/skills-remote.test.ts @@ -33,4 +33,65 @@ describe("skills-remote", () => { removeRemoteNodeInfo(nodeId); }).not.toThrow(); }); + + it("ignores non-mac and non-system.run nodes for eligibility", () => { + const linuxNodeId = `node-${randomUUID()}`; + const noRunNodeId = `node-${randomUUID()}`; + const bin = `bin-${randomUUID()}`; + try { + recordRemoteNodeInfo({ + nodeId: linuxNodeId, + displayName: "Linux Box", + platform: "linux", + commands: ["system.run"], + }); + recordRemoteNodeBins(linuxNodeId, [bin]); + + recordRemoteNodeInfo({ + nodeId: noRunNodeId, + displayName: "Remote Mac", + platform: "darwin", + commands: ["system.which"], + }); + recordRemoteNodeBins(noRunNodeId, [bin]); + + expect(getRemoteSkillEligibility()).toBeUndefined(); + } finally { + removeRemoteNodeInfo(linuxNodeId); + removeRemoteNodeInfo(noRunNodeId); + } + }); + + it("aggregates bins and note labels across eligible mac nodes", () => { + const nodeA = `node-${randomUUID()}`; + const nodeB = `node-${randomUUID()}`; + const binA = `bin-${randomUUID()}`; + const binB = `bin-${randomUUID()}`; + try { + recordRemoteNodeInfo({ + nodeId: nodeA, + displayName: "Mac Studio", + platform: "darwin", + commands: ["system.run"], + }); + recordRemoteNodeBins(nodeA, [binA]); + + recordRemoteNodeInfo({ + nodeId: nodeB, + 
platform: "macOS", + commands: ["system.run"], + }); + recordRemoteNodeBins(nodeB, [binB]); + + const eligibility = getRemoteSkillEligibility(); + expect(eligibility?.platforms).toEqual(["darwin"]); + expect(eligibility?.hasBin(binA)).toBe(true); + expect(eligibility?.hasAnyBin([`missing-${randomUUID()}`, binB])).toBe(true); + expect(eligibility?.note).toContain("Mac Studio"); + expect(eligibility?.note).toContain(nodeB); + } finally { + removeRemoteNodeInfo(nodeA); + removeRemoteNodeInfo(nodeB); + } + }); }); diff --git a/src/infra/ssh-config.test.ts b/src/infra/ssh-config.test.ts index 318f2dab973..cd722f51203 100644 --- a/src/infra/ssh-config.test.ts +++ b/src/infra/ssh-config.test.ts @@ -58,6 +58,17 @@ describe("ssh-config", () => { expect(parsed.identityFiles).toEqual(["/tmp/id"]); }); + it("ignores invalid ports and blank lines in ssh -G output", () => { + const parsed = parseSshConfigOutput( + "user bob\nhostname example.com\nport not-a-number\nidentityfile none\nidentityfile \n", + ); + + expect(parsed.user).toBe("bob"); + expect(parsed.host).toBe("example.com"); + expect(parsed.port).toBeUndefined(); + expect(parsed.identityFiles).toEqual([]); + }); + it("resolves ssh config via ssh -G", async () => { const config = await resolveSshConfig({ user: "me", host: "alias", port: 22 }); expect(config?.user).toBe("steipete"); @@ -68,6 +79,16 @@ describe("ssh-config", () => { expect(args?.slice(-2)).toEqual(["--", "me@alias"]); }); + it("adds non-default port and trimmed identity arguments", async () => { + await resolveSshConfig( + { user: "me", host: "alias", port: 2022 }, + { identity: " /tmp/custom_id " }, + ); + + const args = spawnMock.mock.calls.at(-1)?.[1] as string[] | undefined; + expect(args).toEqual(["-G", "-p", "2022", "-i", "/tmp/custom_id", "--", "me@alias"]); + }); + it("returns null when ssh -G fails", async () => { spawnMock.mockImplementationOnce( (_command: string, _args: readonly string[], _options: SpawnOptions): ChildProcess => { @@ -82,4 
+103,18 @@ describe("ssh-config", () => { const config = await resolveSshConfig({ user: "me", host: "bad-host", port: 22 }); expect(config).toBeNull(); }); + + it("returns null when the ssh process emits an error", async () => { + spawnMock.mockImplementationOnce( + (_command: string, _args: readonly string[], _options: SpawnOptions): ChildProcess => { + const { child } = createMockSpawnChild(); + process.nextTick(() => { + child.emit("error", new Error("spawn boom")); + }); + return child as unknown as ChildProcess; + }, + ); + + await expect(resolveSshConfig({ user: "me", host: "bad-host", port: 22 })).resolves.toBeNull(); + }); }); diff --git a/src/infra/ssh-tunnel.test.ts b/src/infra/ssh-tunnel.test.ts new file mode 100644 index 00000000000..da450d1c029 --- /dev/null +++ b/src/infra/ssh-tunnel.test.ts @@ -0,0 +1,29 @@ +import { describe, expect, it } from "vitest"; +import { parseSshTarget } from "./ssh-tunnel.js"; + +describe("parseSshTarget", () => { + it("parses user@host:port targets", () => { + expect(parseSshTarget("me@example.com:2222")).toEqual({ + user: "me", + host: "example.com", + port: 2222, + }); + }); + + it("strips an ssh prefix and keeps the default port when missing", () => { + expect(parseSshTarget(" ssh alice@example.com ")).toEqual({ + user: "alice", + host: "example.com", + port: 22, + }); + }); + + it("rejects invalid hosts and ports", () => { + expect(parseSshTarget("")).toBeNull(); + expect(parseSshTarget("me@example.com:0")).toBeNull(); + expect(parseSshTarget("me@example.com:not-a-port")).toBeNull(); + expect(parseSshTarget("-V")).toBeNull(); + expect(parseSshTarget("me@-badhost")).toBeNull(); + expect(parseSshTarget("-oProxyCommand=echo")).toBeNull(); + }); +}); diff --git a/src/infra/stable-node-path.test.ts b/src/infra/stable-node-path.test.ts new file mode 100644 index 00000000000..75121ba91b2 --- /dev/null +++ b/src/infra/stable-node-path.test.ts @@ -0,0 +1,40 @@ +import fs from "node:fs/promises"; +import os from "node:os"; 
+import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { resolveStableNodePath } from "./stable-node-path.js"; + +describe("resolveStableNodePath", () => { + it("returns non-cellar paths unchanged", async () => { + await expect(resolveStableNodePath("/usr/local/bin/node")).resolves.toBe("/usr/local/bin/node"); + }); + + it("prefers the Homebrew opt symlink for default and versioned formulas", async () => { + const prefix = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-stable-node-")); + const defaultNode = path.join(prefix, "Cellar", "node", "25.7.0", "bin", "node"); + const versionedNode = path.join(prefix, "Cellar", "node@22", "22.17.0", "bin", "node"); + const optDefault = path.join(prefix, "opt", "node", "bin", "node"); + const optVersioned = path.join(prefix, "opt", "node@22", "bin", "node"); + + await fs.mkdir(path.dirname(optDefault), { recursive: true }); + await fs.mkdir(path.dirname(optVersioned), { recursive: true }); + await fs.writeFile(optDefault, "", "utf8"); + await fs.writeFile(optVersioned, "", "utf8"); + + await expect(resolveStableNodePath(defaultNode)).resolves.toBe(optDefault); + await expect(resolveStableNodePath(versionedNode)).resolves.toBe(optVersioned); + }); + + it("falls back to the bin symlink for the default formula, otherwise original path", async () => { + const prefix = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-stable-node-")); + const defaultNode = path.join(prefix, "Cellar", "node", "25.7.0", "bin", "node"); + const versionedNode = path.join(prefix, "Cellar", "node@22", "22.17.0", "bin", "node"); + const binNode = path.join(prefix, "bin", "node"); + + await fs.mkdir(path.dirname(binNode), { recursive: true }); + await fs.writeFile(binNode, "", "utf8"); + + await expect(resolveStableNodePath(defaultNode)).resolves.toBe(binNode); + await expect(resolveStableNodePath(versionedNode)).resolves.toBe(versionedNode); + }); +}); diff --git a/src/infra/state-migrations.fs.test.ts 
b/src/infra/state-migrations.fs.test.ts new file mode 100644 index 00000000000..143572ca303 --- /dev/null +++ b/src/infra/state-migrations.fs.test.ts @@ -0,0 +1,71 @@ +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { + ensureDir, + existsDir, + fileExists, + isLegacyWhatsAppAuthFile, + readSessionStoreJson5, + safeReadDir, +} from "./state-migrations.fs.js"; + +describe("state migration fs helpers", () => { + it("reads directories safely and creates missing directories", () => { + const base = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-state-migrations-fs-")); + const nested = path.join(base, "nested"); + + expect(safeReadDir(nested)).toEqual([]); + ensureDir(nested); + fs.writeFileSync(path.join(nested, "file.txt"), "ok", "utf8"); + + expect(safeReadDir(nested).map((entry) => entry.name)).toEqual(["file.txt"]); + expect(existsDir(nested)).toBe(true); + expect(existsDir(path.join(nested, "file.txt"))).toBe(false); + }); + + it("distinguishes files from directories", () => { + const base = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-state-migrations-fs-")); + const filePath = path.join(base, "store.json"); + const dirPath = path.join(base, "dir"); + fs.writeFileSync(filePath, "{}", "utf8"); + fs.mkdirSync(dirPath); + + expect(fileExists(filePath)).toBe(true); + expect(fileExists(dirPath)).toBe(false); + expect(fileExists(path.join(base, "missing.json"))).toBe(false); + }); + + it("recognizes legacy whatsapp auth file names", () => { + expect(isLegacyWhatsAppAuthFile("creds.json")).toBe(true); + expect(isLegacyWhatsAppAuthFile("creds.json.bak")).toBe(true); + expect(isLegacyWhatsAppAuthFile("session-123.json")).toBe(true); + expect(isLegacyWhatsAppAuthFile("pre-key-1.json")).toBe(true); + expect(isLegacyWhatsAppAuthFile("sender-key-1.txt")).toBe(false); + expect(isLegacyWhatsAppAuthFile("other.json")).toBe(false); + }); + + it("parses json5 session stores and rejects 
invalid shapes", () => { + const base = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-state-migrations-fs-")); + const okPath = path.join(base, "store.json"); + const badPath = path.join(base, "bad.json"); + const listPath = path.join(base, "list.json"); + + fs.writeFileSync(okPath, "{session: {sessionId: 'abc', updatedAt: 1}}", "utf8"); + fs.writeFileSync(badPath, "{not valid", "utf8"); + fs.writeFileSync(listPath, "[]", "utf8"); + + expect(readSessionStoreJson5(okPath)).toEqual({ + ok: true, + store: { + session: { + sessionId: "abc", + updatedAt: 1, + }, + }, + }); + expect(readSessionStoreJson5(badPath)).toEqual({ ok: false, store: {} }); + expect(readSessionStoreJson5(listPath)).toEqual({ ok: false, store: {} }); + }); +}); diff --git a/src/infra/state-migrations.state-dir.test.ts b/src/infra/state-migrations.state-dir.test.ts index 8c46fe398e0..c270e30475f 100644 --- a/src/infra/state-migrations.state-dir.test.ts +++ b/src/infra/state-migrations.state-dir.test.ts @@ -49,4 +49,47 @@ describe("legacy state dir auto-migration", () => { expect(fs.readFileSync(path.join(root, ".moltbot", "marker.txt"), "utf-8")).toBe("ok"); expect(fs.readFileSync(path.join(root, ".clawdbot", "marker.txt"), "utf-8")).toBe("ok"); }); + + it("skips state-dir migration when OPENCLAW_STATE_DIR is explicitly set", async () => { + const root = await makeTempRoot(); + const legacyDir = path.join(root, ".clawdbot"); + fs.mkdirSync(legacyDir, { recursive: true }); + + const result = await autoMigrateLegacyStateDir({ + env: { OPENCLAW_STATE_DIR: path.join(root, "custom-state") } as NodeJS.ProcessEnv, + homedir: () => root, + }); + + expect(result).toEqual({ + migrated: false, + skipped: true, + changes: [], + warnings: [], + }); + expect(fs.existsSync(legacyDir)).toBe(true); + }); + + it("only runs once per process until reset", async () => { + const root = await makeTempRoot(); + const legacyDir = path.join(root, ".clawdbot"); + fs.mkdirSync(legacyDir, { recursive: true }); + 
fs.writeFileSync(path.join(legacyDir, "marker.txt"), "ok", "utf-8"); + + const first = await autoMigrateLegacyStateDir({ + env: {} as NodeJS.ProcessEnv, + homedir: () => root, + }); + const second = await autoMigrateLegacyStateDir({ + env: {} as NodeJS.ProcessEnv, + homedir: () => root, + }); + + expect(first.migrated).toBe(true); + expect(second).toEqual({ + migrated: false, + skipped: true, + changes: [], + warnings: [], + }); + }); }); diff --git a/src/infra/supervisor-markers.test.ts b/src/infra/supervisor-markers.test.ts new file mode 100644 index 00000000000..fb49ec6eaf5 --- /dev/null +++ b/src/infra/supervisor-markers.test.ts @@ -0,0 +1,67 @@ +import { describe, expect, it } from "vitest"; +import { detectRespawnSupervisor, SUPERVISOR_HINT_ENV_VARS } from "./supervisor-markers.js"; + +describe("SUPERVISOR_HINT_ENV_VARS", () => { + it("includes the cross-platform supervisor hint env vars", () => { + expect(SUPERVISOR_HINT_ENV_VARS).toEqual( + expect.arrayContaining([ + "LAUNCH_JOB_LABEL", + "INVOCATION_ID", + "OPENCLAW_WINDOWS_TASK_NAME", + "OPENCLAW_SERVICE_MARKER", + "OPENCLAW_SERVICE_KIND", + ]), + ); + }); +}); + +describe("detectRespawnSupervisor", () => { + it("detects launchd and systemd only from non-blank platform-specific hints", () => { + expect(detectRespawnSupervisor({ LAUNCH_JOB_LABEL: " ai.openclaw.gateway " }, "darwin")).toBe( + "launchd", + ); + expect(detectRespawnSupervisor({ LAUNCH_JOB_LABEL: " " }, "darwin")).toBeNull(); + + expect(detectRespawnSupervisor({ INVOCATION_ID: "abc123" }, "linux")).toBe("systemd"); + expect(detectRespawnSupervisor({ JOURNAL_STREAM: "" }, "linux")).toBeNull(); + }); + + it("detects scheduled-task supervision on Windows from either hint family", () => { + expect( + detectRespawnSupervisor({ OPENCLAW_WINDOWS_TASK_NAME: "OpenClaw Gateway" }, "win32"), + ).toBe("schtasks"); + expect( + detectRespawnSupervisor( + { + OPENCLAW_SERVICE_MARKER: "openclaw", + OPENCLAW_SERVICE_KIND: "gateway", + }, + "win32", + ), + 
).toBe("schtasks"); + expect( + detectRespawnSupervisor( + { + OPENCLAW_SERVICE_MARKER: "openclaw", + OPENCLAW_SERVICE_KIND: "worker", + }, + "win32", + ), + ).toBeNull(); + }); + + it("ignores service markers on non-Windows platforms and unknown platforms", () => { + expect( + detectRespawnSupervisor( + { + OPENCLAW_SERVICE_MARKER: "openclaw", + OPENCLAW_SERVICE_KIND: "gateway", + }, + "linux", + ), + ).toBeNull(); + expect( + detectRespawnSupervisor({ LAUNCH_JOB_LABEL: "ai.openclaw.gateway" }, "freebsd"), + ).toBeNull(); + }); +}); diff --git a/src/infra/system-events.test.ts b/src/infra/system-events.test.ts index 0b92aa36568..cf16416e210 100644 --- a/src/infra/system-events.test.ts +++ b/src/infra/system-events.test.ts @@ -3,7 +3,15 @@ import { drainFormattedSystemEvents } from "../auto-reply/reply/session-updates. import type { OpenClawConfig } from "../config/config.js"; import { resolveMainSessionKey } from "../config/sessions.js"; import { isCronSystemEvent } from "./heartbeat-runner.js"; -import { enqueueSystemEvent, peekSystemEvents, resetSystemEventsForTest } from "./system-events.js"; +import { + drainSystemEventEntries, + enqueueSystemEvent, + hasSystemEvents, + isSystemEventContextChanged, + peekSystemEventEntries, + peekSystemEvents, + resetSystemEventsForTest, +} from "./system-events.js"; const cfg = {} as unknown as OpenClawConfig; const mainKey = resolveMainSessionKey(cfg); @@ -56,6 +64,50 @@ describe("system events (session routing)", () => { expect(second).toBe(false); }); + it("normalizes context keys when checking for context changes", () => { + const key = "agent:main:test-context"; + expect(isSystemEventContextChanged(key, " build:123 ")).toBe(true); + + enqueueSystemEvent("Node connected", { + sessionKey: key, + contextKey: " BUILD:123 ", + }); + + expect(isSystemEventContextChanged(key, "build:123")).toBe(false); + expect(isSystemEventContextChanged(key, "build:456")).toBe(true); + expect(isSystemEventContextChanged(key)).toBe(true); + 
}); + + it("returns cloned event entries and resets duplicate suppression after drain", () => { + const key = "agent:main:test-entry-clone"; + enqueueSystemEvent("Node connected", { + sessionKey: key, + contextKey: "build:123", + }); + + const peeked = peekSystemEventEntries(key); + expect(hasSystemEvents(key)).toBe(true); + expect(peeked).toHaveLength(1); + peeked[0].text = "mutated"; + expect(peekSystemEvents(key)).toEqual(["Node connected"]); + + expect(drainSystemEventEntries(key).map((entry) => entry.text)).toEqual(["Node connected"]); + expect(hasSystemEvents(key)).toBe(false); + + expect(enqueueSystemEvent("Node connected", { sessionKey: key })).toBe(true); + }); + + it("keeps only the newest 20 queued events", () => { + const key = "agent:main:test-max-events"; + for (let index = 1; index <= 22; index += 1) { + enqueueSystemEvent(`event ${index}`, { sessionKey: key }); + } + + expect(peekSystemEvents(key)).toEqual( + Array.from({ length: 20 }, (_, index) => `event ${index + 3}`), + ); + }); + it("filters heartbeat/noise lines, returning undefined", async () => { const key = "agent:main:test-heartbeat-filter"; enqueueSystemEvent("Read HEARTBEAT.md before continuing", { sessionKey: key }); diff --git a/src/infra/system-message.test.ts b/src/infra/system-message.test.ts index b0c32f31c35..980c852eeb4 100644 --- a/src/infra/system-message.test.ts +++ b/src/infra/system-message.test.ts @@ -2,8 +2,22 @@ import { describe, expect, it } from "vitest"; import { SYSTEM_MARK, hasSystemMark, prefixSystemMessage } from "./system-message.js"; describe("system-message", () => { - it("prepends the system mark once", () => { - expect(prefixSystemMessage("thread notice")).toBe(`${SYSTEM_MARK} thread notice`); + it.each([ + { input: "thread notice", expected: `${SYSTEM_MARK} thread notice` }, + { input: ` thread notice `, expected: `${SYSTEM_MARK} thread notice` }, + { input: " ", expected: "" }, + ])("prefixes %j", ({ input, expected }) => { + 
expect(prefixSystemMessage(input)).toBe(expected); + }); + + it.each([ + { input: `${SYSTEM_MARK} already prefixed`, expected: true }, + { input: ` ${SYSTEM_MARK} hello`, expected: true }, + { input: SYSTEM_MARK, expected: true }, + { input: "", expected: false }, + { input: "hello", expected: false }, + ])("detects marks for %j", ({ input, expected }) => { + expect(hasSystemMark(input)).toBe(expected); }); it("does not double-prefix messages that already have the mark", () => { @@ -12,8 +26,7 @@ describe("system-message", () => { ); }); - it("detects marked system text after trim normalization", () => { - expect(hasSystemMark(` ${SYSTEM_MARK} hello`)).toBe(true); - expect(hasSystemMark("hello")).toBe(false); + it("preserves mark-only messages after trimming", () => { + expect(prefixSystemMessage(` ${SYSTEM_MARK} `)).toBe(SYSTEM_MARK); }); }); diff --git a/src/infra/system-presence.test.ts b/src/infra/system-presence.test.ts index 10929115605..02369a18355 100644 --- a/src/infra/system-presence.test.ts +++ b/src/infra/system-presence.test.ts @@ -61,6 +61,42 @@ describe("system-presence", () => { expect(entry?.scopes).toEqual(expect.arrayContaining(["operator.admin", "system.run"])); }); + it("parses node presence text and normalizes the update key", () => { + const update = updateSystemPresence({ + text: "Node: Relay-Host (10.0.0.9) · app 2.1.0 · last input 7s ago · mode ui · reason beacon", + instanceId: " Mixed-Case-Node ", + }); + + expect(update.key).toBe("mixed-case-node"); + expect(update.changedKeys).toEqual(["host", "ip", "version", "mode", "reason"]); + expect(update.next).toMatchObject({ + host: "Relay-Host", + ip: "10.0.0.9", + version: "2.1.0", + lastInputSeconds: 7, + mode: "ui", + reason: "beacon", + text: "Node: Relay-Host (10.0.0.9) · app 2.1.0 · last input 7s ago · mode ui · reason beacon", + }); + }); + + it("drops blank role and scope entries while keeping fallback text", () => { + const deviceId = randomUUID(); + + upsertPresence(deviceId, { + 
deviceId, + host: "relay-host", + mode: "operator", + roles: [" operator ", "", " "], + scopes: ["operator.admin", "", " "], + }); + + const entry = listSystemPresence().find((candidate) => candidate.deviceId === deviceId); + expect(entry?.roles).toEqual(["operator"]); + expect(entry?.scopes).toEqual(["operator.admin"]); + expect(entry?.text).toBe("Node: relay-host · mode operator"); + }); + it("prunes stale non-self entries after TTL", () => { vi.useFakeTimers(); vi.setSystemTime(Date.now()); diff --git a/src/infra/system-presence.version.test.ts b/src/infra/system-presence.version.test.ts index 8465466ef9c..867ce379392 100644 --- a/src/infra/system-presence.version.test.ts +++ b/src/infra/system-presence.version.test.ts @@ -13,46 +13,69 @@ async function withPresenceModule( } describe("system-presence version fallback", () => { + async function expectSelfVersion( + env: Record, + expectedVersion: string | (() => Promise), + ) { + await withPresenceModule(env, async ({ listSystemPresence }) => { + const selfEntry = listSystemPresence().find((entry) => entry.reason === "self"); + const resolvedExpected = + typeof expectedVersion === "function" ? 
await expectedVersion() : expectedVersion; + expect(selfEntry?.version).toBe(resolvedExpected); + }); + } + it("uses runtime VERSION when OPENCLAW_VERSION is not set", async () => { - await withPresenceModule( + await expectSelfVersion( { OPENCLAW_SERVICE_VERSION: "2.4.6-service", npm_package_version: "1.0.0-package", }, - async ({ listSystemPresence }) => { - const { VERSION } = await import("../version.js"); - const selfEntry = listSystemPresence().find((entry) => entry.reason === "self"); - expect(selfEntry?.version).toBe(VERSION); - }, + async () => (await import("../version.js")).VERSION, ); }); it("prefers OPENCLAW_VERSION over runtime VERSION", async () => { - await withPresenceModule( + await expectSelfVersion( { OPENCLAW_VERSION: "9.9.9-cli", OPENCLAW_SERVICE_VERSION: "2.4.6-service", npm_package_version: "1.0.0-package", }, - ({ listSystemPresence }) => { - const selfEntry = listSystemPresence().find((entry) => entry.reason === "self"); - expect(selfEntry?.version).toBe("9.9.9-cli"); - }, + "9.9.9-cli", ); }); - it("uses runtime VERSION when OPENCLAW_VERSION and OPENCLAW_SERVICE_VERSION are blank", async () => { - await withPresenceModule( + it("still prefers runtime VERSION over OPENCLAW_SERVICE_VERSION when OPENCLAW_VERSION is blank", async () => { + await expectSelfVersion( + { + OPENCLAW_VERSION: " ", + OPENCLAW_SERVICE_VERSION: "2.4.6-service", + npm_package_version: "1.0.0-package", + }, + async () => (await import("../version.js")).VERSION, + ); + }); + + it("still prefers runtime VERSION over npm_package_version when service markers are blank", async () => { + await expectSelfVersion( { OPENCLAW_VERSION: " ", OPENCLAW_SERVICE_VERSION: "\t", npm_package_version: "1.0.0-package", }, - async ({ listSystemPresence }) => { - const { VERSION } = await import("../version.js"); - const selfEntry = listSystemPresence().find((entry) => entry.reason === "self"); - expect(selfEntry?.version).toBe(VERSION); + async () => (await 
import("../version.js")).VERSION, + ); + }); + + it("uses runtime VERSION when OPENCLAW_VERSION and OPENCLAW_SERVICE_VERSION are blank", async () => { + await expectSelfVersion( + { + OPENCLAW_VERSION: " ", + OPENCLAW_SERVICE_VERSION: "\t", + npm_package_version: "1.0.0-package", }, + async () => (await import("../version.js")).VERSION, ); }); }); diff --git a/src/infra/system-run-approval-binding.test.ts b/src/infra/system-run-approval-binding.test.ts new file mode 100644 index 00000000000..468956dba7b --- /dev/null +++ b/src/infra/system-run-approval-binding.test.ts @@ -0,0 +1,245 @@ +import { describe, expect, it } from "vitest"; +import { + buildSystemRunApprovalBinding, + buildSystemRunApprovalEnvBinding, + matchSystemRunApprovalBinding, + matchSystemRunApprovalEnvHash, + missingSystemRunApprovalBinding, + normalizeSystemRunApprovalPlan, +} from "./system-run-approval-binding.js"; + +describe("normalizeSystemRunApprovalPlan", () => { + it("accepts commandText and normalized mutable file operands", () => { + expect( + normalizeSystemRunApprovalPlan({ + argv: ["bash", "-lc", "echo hi"], + commandText: 'bash -lc "echo hi"', + commandPreview: "echo hi", + cwd: " /tmp ", + agentId: " main ", + sessionKey: " agent:main:main ", + mutableFileOperand: { + argvIndex: 2, + path: " /tmp/payload.txt ", + sha256: " abc123 ", + }, + }), + ).toEqual({ + argv: ["bash", "-lc", "echo hi"], + commandText: 'bash -lc "echo hi"', + commandPreview: "echo hi", + cwd: "/tmp", + agentId: "main", + sessionKey: "agent:main:main", + mutableFileOperand: { + argvIndex: 2, + path: "/tmp/payload.txt", + sha256: "abc123", + }, + }); + }); + + it("falls back to rawCommand and rejects invalid file operands", () => { + expect( + normalizeSystemRunApprovalPlan({ + argv: ["bash", "-lc", "echo hi"], + rawCommand: 'bash -lc "echo hi"', + }), + ).toEqual({ + argv: ["bash", "-lc", "echo hi"], + commandText: 'bash -lc "echo hi"', + commandPreview: null, + cwd: null, + agentId: null, + sessionKey: null, + 
mutableFileOperand: undefined, + }); + + expect( + normalizeSystemRunApprovalPlan({ + argv: ["bash", "-lc", "echo hi"], + commandText: 'bash -lc "echo hi"', + mutableFileOperand: { + argvIndex: -1, + path: "/tmp/payload.txt", + sha256: "abc123", + }, + }), + ).toBeNull(); + }); +}); + +describe("buildSystemRunApprovalEnvBinding", () => { + it("normalizes, filters, and sorts env keys before hashing", () => { + const normalized = buildSystemRunApprovalEnvBinding({ + z_key: "b", + " bad key ": "ignored", + alpha: "a", + EMPTY: 1, + }); + const reordered = buildSystemRunApprovalEnvBinding({ + alpha: "a", + z_key: "b", + }); + + expect(normalized).toEqual({ + envHash: reordered.envHash, + envKeys: ["alpha", "z_key"], + }); + expect(normalized.envHash).toBeTypeOf("string"); + expect(normalized.envHash).toHaveLength(64); + }); + + it("returns a null hash when no usable env entries remain", () => { + expect(buildSystemRunApprovalEnvBinding(null)).toEqual({ + envHash: null, + envKeys: [], + }); + expect( + buildSystemRunApprovalEnvBinding({ + bad: 1, + }), + ).toEqual({ + envHash: null, + envKeys: [], + }); + }); +}); + +describe("buildSystemRunApprovalBinding", () => { + it("normalizes argv and metadata into a binding", () => { + const envBinding = buildSystemRunApprovalEnvBinding({ + beta: "2", + alpha: "1", + }); + + expect( + buildSystemRunApprovalBinding({ + argv: ["bash", "-lc", 12], + cwd: " /tmp ", + agentId: " main ", + sessionKey: " agent:main:main ", + env: { + beta: "2", + alpha: "1", + }, + }), + ).toEqual({ + binding: { + argv: ["bash", "-lc", "12"], + cwd: "/tmp", + agentId: "main", + sessionKey: "agent:main:main", + envHash: envBinding.envHash, + }, + envKeys: ["alpha", "beta"], + }); + }); +}); + +describe("matchSystemRunApprovalEnvHash", () => { + it("handles matching, missing, and mismatched env bindings", () => { + expect( + matchSystemRunApprovalEnvHash({ + expectedEnvHash: null, + actualEnvHash: null, + actualEnvKeys: [], + }), + ).toEqual({ ok: true 
}); + + expect( + matchSystemRunApprovalEnvHash({ + expectedEnvHash: null, + actualEnvHash: "abc", + actualEnvKeys: ["ALPHA"], + }), + ).toEqual({ + ok: false, + code: "APPROVAL_ENV_BINDING_MISSING", + message: "approval id missing env binding for requested env overrides", + details: { envKeys: ["ALPHA"] }, + }); + + expect( + matchSystemRunApprovalEnvHash({ + expectedEnvHash: "abc", + actualEnvHash: "def", + actualEnvKeys: ["ALPHA"], + }), + ).toEqual({ + ok: false, + code: "APPROVAL_ENV_MISMATCH", + message: "approval id env binding mismatch", + details: { + envKeys: ["ALPHA"], + expectedEnvHash: "abc", + actualEnvHash: "def", + }, + }); + }); +}); + +describe("matchSystemRunApprovalBinding", () => { + const expected = { + argv: ["bash", "-lc", "echo hi"], + cwd: "/tmp", + agentId: "main", + sessionKey: "agent:main:main", + envHash: "abc", + }; + + it("accepts exact matches", () => { + expect( + matchSystemRunApprovalBinding({ + expected, + actual: { ...expected }, + actualEnvKeys: ["ALPHA"], + }), + ).toEqual({ ok: true }); + }); + + it.each([ + { + name: "argv mismatch", + actual: { ...expected, argv: ["bash", "-lc", "echo bye"] }, + }, + { + name: "cwd mismatch", + actual: { ...expected, cwd: "/var/tmp" }, + }, + { + name: "agent mismatch", + actual: { ...expected, agentId: "other" }, + }, + { + name: "session mismatch", + actual: { ...expected, sessionKey: "agent:main:other" }, + }, + ])("rejects $name", ({ actual }) => { + expect( + matchSystemRunApprovalBinding({ + expected, + actual, + actualEnvKeys: ["ALPHA"], + }), + ).toEqual({ + ok: false, + code: "APPROVAL_REQUEST_MISMATCH", + message: "approval id does not match request", + details: undefined, + }); + }); +}); + +describe("missingSystemRunApprovalBinding", () => { + it("reports env keys with request mismatches", () => { + expect(missingSystemRunApprovalBinding({ actualEnvKeys: ["ALPHA", "BETA"] })).toEqual({ + ok: false, + code: "APPROVAL_REQUEST_MISMATCH", + message: "approval id does not match 
request", + details: { + envKeys: ["ALPHA", "BETA"], + }, + }); + }); +}); diff --git a/src/infra/system-run-approval-context.test.ts b/src/infra/system-run-approval-context.test.ts index fbd0e805a22..1dc98eea200 100644 --- a/src/infra/system-run-approval-context.test.ts +++ b/src/infra/system-run-approval-context.test.ts @@ -1,5 +1,9 @@ import { describe, expect, test } from "vitest"; -import { resolveSystemRunApprovalRequestContext } from "./system-run-approval-context.js"; +import { + parsePreparedSystemRunPayload, + resolveSystemRunApprovalRequestContext, + resolveSystemRunApprovalRuntimeContext, +} from "./system-run-approval-context.js"; describe("resolveSystemRunApprovalRequestContext", () => { test("uses full approval text and separate preview for node system.run plans", () => { @@ -37,4 +41,127 @@ describe("resolveSystemRunApprovalRequestContext", () => { expect(context.commandText).toBe('./env sh -c "jq --version"'); expect(context.commandPreview).toBe("jq --version"); }); + + test("falls back to explicit request params for non-node hosts", () => { + const context = resolveSystemRunApprovalRequestContext({ + host: "gateway", + command: "jq --version", + commandArgv: ["jq", "--version"], + cwd: "/tmp", + agentId: "main", + sessionKey: "agent:main:main", + systemRunPlan: { + argv: ["ignored"], + commandText: "ignored", + }, + }); + + expect(context.plan).toBeNull(); + expect(context.commandArgv).toEqual(["jq", "--version"]); + expect(context.commandText).toBe("jq --version"); + expect(context.commandPreview).toBeNull(); + expect(context.cwd).toBe("/tmp"); + expect(context.agentId).toBe("main"); + expect(context.sessionKey).toBe("agent:main:main"); + }); +}); + +describe("parsePreparedSystemRunPayload", () => { + test("parses legacy prepared payloads via top-level fallback command text", () => { + expect( + parsePreparedSystemRunPayload({ + plan: { + argv: ["bash", "-lc", "jq --version"], + cwd: "/tmp", + agentId: "main", + sessionKey: "agent:main:main", + 
}, + commandText: 'bash -lc "jq --version"', + }), + ).toEqual({ + plan: { + argv: ["bash", "-lc", "jq --version"], + cwd: "/tmp", + commandText: 'bash -lc "jq --version"', + commandPreview: null, + agentId: "main", + sessionKey: "agent:main:main", + }, + }); + }); + + test("rejects legacy payloads missing argv or command text", () => { + expect(parsePreparedSystemRunPayload({ plan: { argv: [] }, commandText: "jq --version" })).toBe( + null, + ); + expect( + parsePreparedSystemRunPayload({ + plan: { argv: ["jq", "--version"] }, + }), + ).toBeNull(); + }); +}); + +describe("resolveSystemRunApprovalRuntimeContext", () => { + test("uses normalized plan runtime metadata when available", () => { + expect( + resolveSystemRunApprovalRuntimeContext({ + plan: { + argv: ["jq", "--version"], + cwd: "/tmp", + commandText: "jq --version", + commandPreview: "jq --version", + agentId: "main", + sessionKey: "agent:main:main", + }, + }), + ).toEqual({ + ok: true, + plan: { + argv: ["jq", "--version"], + cwd: "/tmp", + commandText: "jq --version", + commandPreview: "jq --version", + agentId: "main", + sessionKey: "agent:main:main", + }, + argv: ["jq", "--version"], + cwd: "/tmp", + agentId: "main", + sessionKey: "agent:main:main", + commandText: "jq --version", + }); + }); + + test("falls back to command/rawCommand validation without a plan", () => { + expect( + resolveSystemRunApprovalRuntimeContext({ + command: ["bash", "-lc", "jq --version"], + rawCommand: 'bash -lc "jq --version"', + cwd: "/tmp", + agentId: "main", + sessionKey: "agent:main:main", + }), + ).toEqual({ + ok: true, + plan: null, + argv: ["bash", "-lc", "jq --version"], + cwd: "/tmp", + agentId: "main", + sessionKey: "agent:main:main", + commandText: 'bash -lc "jq --version"', + }); + }); + + test("returns request validation errors from command fallback", () => { + expect( + resolveSystemRunApprovalRuntimeContext({ + rawCommand: "jq --version", + }), + ).toEqual({ + ok: false, + message: "rawCommand requires 
params.command", + details: { code: "MISSING_COMMAND" }, + }); + }); }); diff --git a/src/infra/system-run-command.test.ts b/src/infra/system-run-command.test.ts index e9e5d17da2e..ea3ec9ffdf5 100644 --- a/src/infra/system-run-command.test.ts +++ b/src/infra/system-run-command.test.ts @@ -55,6 +55,8 @@ describe("system run command helpers", () => { test("extractShellCommandFromArgv supports fish and pwsh wrappers", () => { expect(extractShellCommandFromArgv(["fish", "-c", "echo hi"])).toBe("echo hi"); expect(extractShellCommandFromArgv(["pwsh", "-Command", "Get-Date"])).toBe("Get-Date"); + expect(extractShellCommandFromArgv(["pwsh", "-File", "script.ps1"])).toBe("script.ps1"); + expect(extractShellCommandFromArgv(["powershell", "-f", "script.ps1"])).toBe("script.ps1"); expect(extractShellCommandFromArgv(["pwsh", "-EncodedCommand", "ZQBjAGgAbwA="])).toBe( "ZQBjAGgAbwA=", ); diff --git a/src/infra/system-run-command.ts b/src/infra/system-run-command.ts index 12a5d32485d..3051a607683 100644 --- a/src/infra/system-run-command.ts +++ b/src/infra/system-run-command.ts @@ -167,52 +167,23 @@ export function resolveSystemRunCommand(params: { command?: unknown; rawCommand?: unknown; }): ResolvedSystemRunCommand { - const raw = normalizeRawCommandText(params.rawCommand); - const command = Array.isArray(params.command) ? params.command : []; - if (command.length === 0) { - if (raw) { - return { - ok: false, - message: "rawCommand requires params.command", - details: { code: "MISSING_COMMAND" }, - }; - } - return { - ok: true, - argv: [], - commandText: "", - shellPayload: null, - previewText: null, - }; - } - - const argv = command.map((v) => String(v)); - const validation = validateSystemRunCommandConsistency({ - argv, - rawCommand: raw, - allowLegacyShellText: false, - }); - if (!validation.ok) { - return { - ok: false, - message: validation.message, - details: validation.details ?? 
{ code: "RAW_COMMAND_MISMATCH" }, - }; - } - - return { - ok: true, - argv, - commandText: validation.commandText, - shellPayload: validation.shellPayload, - previewText: validation.previewText, - }; + return resolveSystemRunCommandWithMode(params, false); } export function resolveSystemRunCommandRequest(params: { command?: unknown; rawCommand?: unknown; }): ResolvedSystemRunCommand { + return resolveSystemRunCommandWithMode(params, true); +} + +function resolveSystemRunCommandWithMode( + params: { + command?: unknown; + rawCommand?: unknown; + }, + allowLegacyShellText: boolean, +): ResolvedSystemRunCommand { const raw = normalizeRawCommandText(params.rawCommand); const command = Array.isArray(params.command) ? params.command : []; if (command.length === 0) { @@ -236,7 +207,7 @@ export function resolveSystemRunCommandRequest(params: { const validation = validateSystemRunCommandConsistency({ argv, rawCommand: raw, - allowLegacyShellText: true, + allowLegacyShellText, }); if (!validation.ok) { return { diff --git a/src/infra/system-run-normalize.test.ts b/src/infra/system-run-normalize.test.ts new file mode 100644 index 00000000000..6bf2f56d4e9 --- /dev/null +++ b/src/infra/system-run-normalize.test.ts @@ -0,0 +1,17 @@ +import { describe, expect, it } from "vitest"; +import { normalizeNonEmptyString, normalizeStringArray } from "./system-run-normalize.js"; + +describe("system run normalization helpers", () => { + it("normalizes only non-empty trimmed strings", () => { + expect(normalizeNonEmptyString(" hello ")).toBe("hello"); + expect(normalizeNonEmptyString(" \n\t ")).toBeNull(); + expect(normalizeNonEmptyString(42)).toBeNull(); + expect(normalizeNonEmptyString(null)).toBeNull(); + }); + + it("normalizes array entries and rejects non-arrays", () => { + expect(normalizeStringArray([" alpha ", 42, false])).toEqual([" alpha ", "42", "false"]); + expect(normalizeStringArray(undefined)).toEqual([]); + expect(normalizeStringArray("alpha")).toEqual([]); + }); +}); diff 
--git a/src/infra/tailnet.test.ts b/src/infra/tailnet.test.ts new file mode 100644 index 00000000000..eeb259cbeb4 --- /dev/null +++ b/src/infra/tailnet.test.ts @@ -0,0 +1,54 @@ +import os from "node:os"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import { + isTailnetIPv4, + listTailnetAddresses, + pickPrimaryTailnetIPv4, + pickPrimaryTailnetIPv6, +} from "./tailnet.js"; + +describe("tailnet helpers", () => { + afterEach(() => { + vi.restoreAllMocks(); + }); + + it("detects tailscale ipv4 ranges", () => { + expect(isTailnetIPv4("100.64.0.1")).toBe(true); + expect(isTailnetIPv4("100.127.255.254")).toBe(true); + expect(isTailnetIPv4("100.63.255.255")).toBe(false); + expect(isTailnetIPv4("192.168.1.10")).toBe(false); + }); + + it("lists unique non-internal tailnet addresses only", () => { + vi.spyOn(os, "networkInterfaces").mockReturnValue({ + lo0: [{ address: "127.0.0.1", family: "IPv4", internal: true, netmask: "" }], + en0: [ + { address: " 100.88.1.5 ", family: "IPv4", internal: false, netmask: "" }, + { address: "100.88.1.5", family: "IPv4", internal: false, netmask: "" }, + { address: "fd7a:115c:a1e0::1", family: "IPv6", internal: false, netmask: "" }, + { address: " ", family: "IPv6", internal: false, netmask: "" }, + { address: "fe80::1", family: "IPv6", internal: false, netmask: "" }, + ], + // oxlint-disable-next-line typescript/no-explicit-any + } as any); + + expect(listTailnetAddresses()).toEqual({ + ipv4: ["100.88.1.5"], + ipv6: ["fd7a:115c:a1e0::1"], + }); + }); + + it("picks the first available tailnet addresses", () => { + vi.spyOn(os, "networkInterfaces").mockReturnValue({ + utun1: [ + { address: "100.99.1.1", family: "IPv4", internal: false, netmask: "" }, + { address: "100.99.1.2", family: "IPv4", internal: false, netmask: "" }, + { address: "fd7a:115c:a1e0::9", family: "IPv6", internal: false, netmask: "" }, + ], + // oxlint-disable-next-line typescript/no-explicit-any + } as any); + + 
expect(pickPrimaryTailnetIPv4()).toBe("100.99.1.1"); + expect(pickPrimaryTailnetIPv6()).toBe("fd7a:115c:a1e0::9"); + }); +}); diff --git a/src/infra/tailscale.test.ts b/src/infra/tailscale.test.ts index db402e51521..37658c2b287 100644 --- a/src/infra/tailscale.test.ts +++ b/src/infra/tailscale.test.ts @@ -22,6 +22,13 @@ function createRuntimeWithExitError() { }; } +function expectServeFallbackCommand(params: { callArgs: string[]; sudoArgs: string[] }) { + return [ + [tailscaleBin, expect.arrayContaining(params.callArgs)], + ["sudo", expect.arrayContaining(["-n", tailscaleBin, ...params.sudoArgs])], + ]; +} + describe("tailscale helpers", () => { let envSnapshot: ReturnType; @@ -53,53 +60,62 @@ describe("tailscale helpers", () => { expect(host).toBe("100.2.2.2"); }); - it("ensureGoInstalled installs when missing and user agrees", async () => { - const exec = vi.fn().mockRejectedValueOnce(new Error("no go")).mockResolvedValue({}); // brew install go - const prompt = vi.fn().mockResolvedValue(true); - const runtime = createRuntimeWithExitError(); - await ensureGoInstalled(exec as never, prompt, runtime); - expect(exec).toHaveBeenCalledWith("brew", ["install", "go"]); + it("parses noisy JSON output from tailscale status", async () => { + const exec = vi.fn().mockResolvedValue({ + stdout: + 'warning: stale state\n{"Self":{"DNSName":"noisy.tailnet.ts.net.","TailscaleIPs":["100.9.9.9"]}}\n', + }); + const host = await getTailnetHostname(exec); + expect(host).toBe("noisy.tailnet.ts.net"); }); - it("ensureGoInstalled exits when missing and user declines install", async () => { - const exec = vi.fn().mockRejectedValueOnce(new Error("no go")); + it.each([ + { + name: "ensureGoInstalled installs when missing and user agrees", + fn: ensureGoInstalled, + missingError: new Error("no go"), + installCommand: ["brew", ["install", "go"]] as const, + promptResult: true, + }, + { + name: "ensureTailscaledInstalled installs when missing and user agrees", + fn: ensureTailscaledInstalled, 
+ missingError: new Error("missing"), + installCommand: ["brew", ["install", "tailscale"]] as const, + promptResult: true, + }, + ])("$name", async ({ fn, missingError, installCommand, promptResult }) => { + const exec = vi.fn().mockRejectedValueOnce(missingError).mockResolvedValue({}); + const prompt = vi.fn().mockResolvedValue(promptResult); + const runtime = createRuntimeWithExitError(); + await fn(exec as never, prompt, runtime); + expect(exec).toHaveBeenCalledWith(installCommand[0], installCommand[1]); + }); + + it.each([ + { + name: "ensureGoInstalled exits when missing and user declines install", + fn: ensureGoInstalled, + missingError: new Error("no go"), + errorMessage: "Go is required to build tailscaled from source. Aborting.", + }, + { + name: "ensureTailscaledInstalled exits when missing and user declines install", + fn: ensureTailscaledInstalled, + missingError: new Error("missing"), + errorMessage: "tailscaled is required for user-space funnel. Aborting.", + }, + ])("$name", async ({ fn, missingError, errorMessage }) => { + const exec = vi.fn().mockRejectedValueOnce(missingError); const prompt = vi.fn().mockResolvedValue(false); const runtime = createRuntimeWithExitError(); - await expect(ensureGoInstalled(exec as never, prompt, runtime)).rejects.toThrow("exit 1"); - - expect(runtime.error).toHaveBeenCalledWith( - "Go is required to build tailscaled from source. 
Aborting.", - ); - expect(exec).toHaveBeenCalledTimes(1); - }); - - it("ensureTailscaledInstalled installs when missing and user agrees", async () => { - const exec = vi.fn().mockRejectedValueOnce(new Error("missing")).mockResolvedValue({}); - const prompt = vi.fn().mockResolvedValue(true); - const runtime = createRuntimeWithExitError(); - await ensureTailscaledInstalled(exec as never, prompt, runtime); - expect(exec).toHaveBeenCalledWith("brew", ["install", "tailscale"]); - }); - - it("ensureTailscaledInstalled exits when missing and user declines install", async () => { - const exec = vi.fn().mockRejectedValueOnce(new Error("missing")); - const prompt = vi.fn().mockResolvedValue(false); - const runtime = createRuntimeWithExitError(); - - await expect(ensureTailscaledInstalled(exec as never, prompt, runtime)).rejects.toThrow( - "exit 1", - ); - - expect(runtime.error).toHaveBeenCalledWith( - "tailscaled is required for user-space funnel. Aborting.", - ); + await expect(fn(exec as never, prompt, runtime)).rejects.toThrow("exit 1"); + expect(runtime.error).toHaveBeenCalledWith(errorMessage); expect(exec).toHaveBeenCalledTimes(1); }); it("enableTailscaleServe attempts normal first, then sudo", async () => { - // 1. First attempt fails - // 2. 
Second attempt (sudo) succeeds const exec = vi .fn() .mockRejectedValueOnce(new Error("permission denied")) @@ -107,19 +123,12 @@ describe("tailscale helpers", () => { await enableTailscaleServe(3000, exec as never); - expect(exec).toHaveBeenNthCalledWith( - 1, - tailscaleBin, - expect.arrayContaining(["serve", "--bg", "--yes", "3000"]), - expect.any(Object), - ); - - expect(exec).toHaveBeenNthCalledWith( - 2, - "sudo", - expect.arrayContaining(["-n", tailscaleBin, "serve", "--bg", "--yes", "3000"]), - expect.any(Object), - ); + const [firstCall, secondCall] = expectServeFallbackCommand({ + callArgs: ["serve", "--bg", "--yes", "3000"], + sudoArgs: ["serve", "--bg", "--yes", "3000"], + }); + expect(exec).toHaveBeenNthCalledWith(1, firstCall[0], firstCall[1], expect.any(Object)); + expect(exec).toHaveBeenNthCalledWith(2, secondCall[0], secondCall[1], expect.any(Object)); }); it("enableTailscaleServe does NOT use sudo if first attempt succeeds", async () => { @@ -153,10 +162,6 @@ describe("tailscale helpers", () => { }); it("ensureFunnel uses fallback for enabling", async () => { - // Mock exec: - // 1. status (success) - // 2. enable (fails) - // 3. enable sudo (success) const exec = vi .fn() .mockResolvedValueOnce({ stdout: JSON.stringify({ BackendState: "Running" }) }) // status @@ -172,22 +177,17 @@ describe("tailscale helpers", () => { await ensureFunnel(8080, exec as never, runtime, prompt); - // 1. status expect(exec).toHaveBeenNthCalledWith( 1, tailscaleBin, expect.arrayContaining(["funnel", "status", "--json"]), ); - - // 2. enable normal expect(exec).toHaveBeenNthCalledWith( 2, tailscaleBin, expect.arrayContaining(["funnel", "--yes", "--bg", "8080"]), expect.any(Object), ); - - // 3. 
enable sudo expect(exec).toHaveBeenNthCalledWith( 3, "sudo", diff --git a/src/infra/update-channels.test.ts b/src/infra/update-channels.test.ts index b17133bb7fa..2738cc0ddad 100644 --- a/src/infra/update-channels.test.ts +++ b/src/infra/update-channels.test.ts @@ -1,19 +1,194 @@ import { describe, expect, it } from "vitest"; -import { isBetaTag, isStableTag } from "./update-channels.js"; +import { + channelToNpmTag, + formatUpdateChannelLabel, + isBetaTag, + isStableTag, + normalizeUpdateChannel, + resolveEffectiveUpdateChannel, + resolveUpdateChannelDisplay, + type UpdateChannel, + type UpdateChannelSource, +} from "./update-channels.js"; describe("update-channels tag detection", () => { - it("recognizes both -beta and .beta formats", () => { - expect(isBetaTag("v2026.2.24-beta.1")).toBe(true); - expect(isBetaTag("v2026.2.24.beta.1")).toBe(true); - }); - - it("keeps legacy -x tags stable", () => { - expect(isBetaTag("v2026.2.24-1")).toBe(false); - expect(isStableTag("v2026.2.24-1")).toBe(true); - }); - - it("does not false-positive on non-beta words", () => { - expect(isBetaTag("v2026.2.24-alphabeta.1")).toBe(false); - expect(isStableTag("v2026.2.24")).toBe(true); + it.each([ + { tag: "v2026.2.24-beta.1", beta: true }, + { tag: "v2026.2.24.beta.1", beta: true }, + { tag: "v2026.2.24-BETA-1", beta: true }, + { tag: "v2026.2.24-1", beta: false }, + { tag: "v2026.2.24-alphabeta.1", beta: false }, + { tag: "v2026.2.24", beta: false }, + ])("classifies $tag", ({ tag, beta }) => { + expect(isBetaTag(tag)).toBe(beta); + expect(isStableTag(tag)).toBe(!beta); + }); +}); + +describe("normalizeUpdateChannel", () => { + it.each([ + { value: "stable", expected: "stable" }, + { value: " BETA ", expected: "beta" }, + { value: "Dev", expected: "dev" }, + { value: "", expected: null }, + { value: " nightly ", expected: null }, + { value: null, expected: null }, + { value: undefined, expected: null }, + ] satisfies Array<{ value: string | null | undefined; expected: UpdateChannel 
| null }>)( + "normalizes %j", + ({ value, expected }) => { + expect(normalizeUpdateChannel(value)).toBe(expected); + }, + ); +}); + +describe("channelToNpmTag", () => { + it.each([ + { channel: "stable", expected: "latest" }, + { channel: "beta", expected: "beta" }, + { channel: "dev", expected: "dev" }, + ] satisfies Array<{ channel: UpdateChannel; expected: string }>)( + "maps $channel to $expected", + ({ channel, expected }) => { + expect(channelToNpmTag(channel)).toBe(expected); + }, + ); +}); + +describe("resolveEffectiveUpdateChannel", () => { + it.each([ + { + name: "prefers config over git metadata", + params: { + configChannel: "beta", + installKind: "git" as const, + git: { tag: "v2026.2.24", branch: "feature/test" }, + }, + expected: { channel: "beta", source: "config" }, + }, + { + name: "uses beta git tag", + params: { + installKind: "git" as const, + git: { tag: "v2026.2.24-beta.1" }, + }, + expected: { channel: "beta", source: "git-tag" }, + }, + { + name: "treats non-beta git tag as stable", + params: { + installKind: "git" as const, + git: { tag: "v2026.2.24-1" }, + }, + expected: { channel: "stable", source: "git-tag" }, + }, + { + name: "uses non-HEAD git branch as dev", + params: { + installKind: "git" as const, + git: { branch: "feature/test" }, + }, + expected: { channel: "dev", source: "git-branch" }, + }, + { + name: "falls back for detached HEAD git installs", + params: { + installKind: "git" as const, + git: { branch: "HEAD" }, + }, + expected: { channel: "dev", source: "default" }, + }, + { + name: "defaults package installs to stable", + params: { installKind: "package" as const }, + expected: { channel: "stable", source: "default" }, + }, + { + name: "defaults unknown installs to stable", + params: { installKind: "unknown" as const }, + expected: { channel: "stable", source: "default" }, + }, + ] satisfies Array<{ + name: string; + params: Parameters[0]; + expected: { channel: UpdateChannel; source: UpdateChannelSource }; + 
}>)("$name", ({ params, expected }) => { + expect(resolveEffectiveUpdateChannel(params)).toEqual(expected); + }); +}); + +describe("formatUpdateChannelLabel", () => { + it.each([ + { + name: "formats config labels", + params: { channel: "beta", source: "config" as const }, + expected: "beta (config)", + }, + { + name: "formats git tag labels with tag", + params: { + channel: "stable", + source: "git-tag" as const, + gitTag: "v2026.2.24", + }, + expected: "stable (v2026.2.24)", + }, + { + name: "formats git tag labels without tag", + params: { channel: "stable", source: "git-tag" as const }, + expected: "stable (tag)", + }, + { + name: "formats git branch labels with branch", + params: { + channel: "dev", + source: "git-branch" as const, + gitBranch: "feature/test", + }, + expected: "dev (feature/test)", + }, + { + name: "formats git branch labels without branch", + params: { channel: "dev", source: "git-branch" as const }, + expected: "dev (branch)", + }, + { + name: "formats default labels", + params: { channel: "stable", source: "default" as const }, + expected: "stable (default)", + }, + ] satisfies Array<{ + name: string; + params: Parameters[0]; + expected: string; + }>)("$name", ({ params, expected }) => { + expect(formatUpdateChannelLabel(params)).toBe(expected); + }); +}); + +describe("resolveUpdateChannelDisplay", () => { + it("includes the derived label for git branches", () => { + expect( + resolveUpdateChannelDisplay({ + installKind: "git", + gitBranch: "feature/test", + }), + ).toEqual({ + channel: "dev", + source: "git-branch", + label: "dev (feature/test)", + }); + }); + + it("does not synthesize git metadata when both tag and branch are missing", () => { + expect( + resolveUpdateChannelDisplay({ + installKind: "package", + }), + ).toEqual({ + channel: "stable", + source: "default", + label: "stable (default)", + }); }); }); diff --git a/src/infra/update-check.test.ts b/src/infra/update-check.test.ts index 560902aee83..610ca1957ec 100644 --- 
a/src/infra/update-check.test.ts +++ b/src/infra/update-check.test.ts @@ -1,5 +1,16 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; -import { compareSemverStrings, resolveNpmChannelTag } from "./update-check.js"; +import { + checkDepsStatus, + checkUpdateStatus, + compareSemverStrings, + fetchNpmLatestVersion, + fetchNpmTagVersion, + formatGitInstallLabel, + resolveNpmChannelTag, +} from "./update-check.js"; describe("compareSemverStrings", () => { it("handles stable and prerelease precedence for both legacy and beta formats", () => { @@ -72,4 +83,160 @@ describe("resolveNpmChannelTag", () => { expect(resolved).toEqual({ tag: "latest", version: "1.0.1" }); }); + + it("keeps non-beta channels unchanged", async () => { + versionByTag.latest = "1.0.3"; + + await expect(resolveNpmChannelTag({ channel: "stable", timeoutMs: 1000 })).resolves.toEqual({ + tag: "latest", + version: "1.0.3", + }); + }); + + it("exposes tag fetch helpers for success and http failures", async () => { + versionByTag.latest = "1.0.4"; + + await expect(fetchNpmTagVersion({ tag: "latest", timeoutMs: 1000 })).resolves.toEqual({ + tag: "latest", + version: "1.0.4", + }); + await expect(fetchNpmLatestVersion({ timeoutMs: 1000 })).resolves.toEqual({ + latestVersion: "1.0.4", + error: undefined, + }); + await expect(fetchNpmTagVersion({ tag: "beta", timeoutMs: 1000 })).resolves.toEqual({ + tag: "beta", + version: null, + error: "HTTP 404", + }); + }); +}); + +describe("formatGitInstallLabel", () => { + it("formats branch, detached tag, and non-git installs", () => { + expect( + formatGitInstallLabel({ + root: "/repo", + installKind: "git", + packageManager: "pnpm", + git: { + root: "/repo", + sha: "1234567890abcdef", + tag: null, + branch: "main", + upstream: "origin/main", + dirty: false, + ahead: 0, + behind: 0, + fetchOk: true, + }, + }), + ).toBe("main · @ 12345678"); + + 
expect( + formatGitInstallLabel({ + root: "/repo", + installKind: "git", + packageManager: "pnpm", + git: { + root: "/repo", + sha: "abcdef1234567890", + tag: "v1.2.3", + branch: "HEAD", + upstream: null, + dirty: false, + ahead: 0, + behind: 0, + fetchOk: null, + }, + }), + ).toBe("detached · tag v1.2.3 · @ abcdef12"); + + expect( + formatGitInstallLabel({ + root: null, + installKind: "package", + packageManager: "pnpm", + }), + ).toBeNull(); + }); +}); + +describe("checkDepsStatus", () => { + it("reports unknown, missing, stale, and ok states from lockfile markers", async () => { + const base = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-update-check-")); + + await expect(checkDepsStatus({ root: base, manager: "unknown" })).resolves.toEqual({ + manager: "unknown", + status: "unknown", + lockfilePath: null, + markerPath: null, + reason: "unknown package manager", + }); + + await fs.writeFile(path.join(base, "pnpm-lock.yaml"), "lock", "utf8"); + await expect(checkDepsStatus({ root: base, manager: "pnpm" })).resolves.toMatchObject({ + manager: "pnpm", + status: "missing", + reason: "node_modules marker missing", + }); + + const markerPath = path.join(base, "node_modules", ".modules.yaml"); + await fs.mkdir(path.dirname(markerPath), { recursive: true }); + await fs.writeFile(markerPath, "marker", "utf8"); + const staleDate = new Date(Date.now() - 10_000); + const freshDate = new Date(); + await fs.utimes(markerPath, staleDate, staleDate); + await fs.utimes(path.join(base, "pnpm-lock.yaml"), freshDate, freshDate); + + await expect(checkDepsStatus({ root: base, manager: "pnpm" })).resolves.toMatchObject({ + manager: "pnpm", + status: "stale", + reason: "lockfile newer than install marker", + }); + + const newerMarker = new Date(Date.now() + 2_000); + await fs.utimes(markerPath, newerMarker, newerMarker); + await expect(checkDepsStatus({ root: base, manager: "pnpm" })).resolves.toMatchObject({ + manager: "pnpm", + status: "ok", + }); + }); +}); + 
+describe("checkUpdateStatus", () => { + it("returns unknown install status when root is missing", async () => { + await expect( + checkUpdateStatus({ root: null, includeRegistry: false, timeoutMs: 1000 }), + ).resolves.toEqual({ + root: null, + installKind: "unknown", + packageManager: "unknown", + registry: undefined, + }); + }); + + it("detects package installs for non-git roots", async () => { + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-update-check-")); + await fs.writeFile( + path.join(root, "package.json"), + JSON.stringify({ packageManager: "npm@10.0.0" }), + "utf8", + ); + await fs.writeFile(path.join(root, "package-lock.json"), "lock", "utf8"); + await fs.mkdir(path.join(root, "node_modules"), { recursive: true }); + + await expect( + checkUpdateStatus({ root, includeRegistry: false, fetchGit: false, timeoutMs: 1000 }), + ).resolves.toMatchObject({ + root, + installKind: "package", + packageManager: "npm", + git: undefined, + registry: undefined, + deps: { + manager: "npm", + }, + }); + }); }); diff --git a/src/infra/update-global.test.ts b/src/infra/update-global.test.ts new file mode 100644 index 00000000000..b95727febbf --- /dev/null +++ b/src/infra/update-global.test.ts @@ -0,0 +1,150 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { afterEach, describe, expect, it } from "vitest"; +import { captureEnv } from "../test-utils/env.js"; +import { + cleanupGlobalRenameDirs, + detectGlobalInstallManagerByPresence, + detectGlobalInstallManagerForRoot, + globalInstallArgs, + globalInstallFallbackArgs, + resolveGlobalPackageRoot, + resolveGlobalInstallSpec, + resolveGlobalRoot, + type CommandRunner, +} from "./update-global.js"; + +describe("update global helpers", () => { + let envSnapshot: ReturnType | undefined; + + afterEach(() => { + envSnapshot?.restore(); + envSnapshot = undefined; + }); + + it("prefers explicit package spec overrides", () => { + envSnapshot = 
captureEnv(["OPENCLAW_UPDATE_PACKAGE_SPEC"]); + process.env.OPENCLAW_UPDATE_PACKAGE_SPEC = "file:/tmp/openclaw.tgz"; + + expect(resolveGlobalInstallSpec({ packageName: "openclaw", tag: "latest" })).toBe( + "file:/tmp/openclaw.tgz", + ); + expect( + resolveGlobalInstallSpec({ + packageName: "openclaw", + tag: "beta", + env: { OPENCLAW_UPDATE_PACKAGE_SPEC: "openclaw@next" }, + }), + ).toBe("openclaw@next"); + }); + + it("resolves global roots and package roots from runner output", async () => { + const runCommand: CommandRunner = async (argv) => { + if (argv[0] === "npm") { + return { stdout: "/tmp/npm-root\n", stderr: "", code: 0 }; + } + if (argv[0] === "pnpm") { + return { stdout: "", stderr: "", code: 1 }; + } + throw new Error(`unexpected command: ${argv.join(" ")}`); + }; + + await expect(resolveGlobalRoot("npm", runCommand, 1000)).resolves.toBe("/tmp/npm-root"); + await expect(resolveGlobalRoot("pnpm", runCommand, 1000)).resolves.toBeNull(); + await expect(resolveGlobalRoot("bun", runCommand, 1000)).resolves.toContain( + path.join(".bun", "install", "global", "node_modules"), + ); + await expect(resolveGlobalPackageRoot("npm", runCommand, 1000)).resolves.toBe( + "/tmp/npm-root/openclaw", + ); + }); + + it("detects install managers from resolved roots and on-disk presence", async () => { + const base = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-update-global-")); + const npmRoot = path.join(base, "npm-root"); + const pnpmRoot = path.join(base, "pnpm-root"); + const bunRoot = path.join(base, ".bun", "install", "global", "node_modules"); + const pkgRoot = path.join(pnpmRoot, "openclaw"); + await fs.mkdir(pkgRoot, { recursive: true }); + await fs.mkdir(path.join(npmRoot, "openclaw"), { recursive: true }); + await fs.mkdir(path.join(bunRoot, "openclaw"), { recursive: true }); + + envSnapshot = captureEnv(["BUN_INSTALL"]); + process.env.BUN_INSTALL = path.join(base, ".bun"); + + const runCommand: CommandRunner = async (argv) => { + if (argv[0] === "npm") { + 
return { stdout: `${npmRoot}\n`, stderr: "", code: 0 }; + } + if (argv[0] === "pnpm") { + return { stdout: `${pnpmRoot}\n`, stderr: "", code: 0 }; + } + throw new Error(`unexpected command: ${argv.join(" ")}`); + }; + + await expect(detectGlobalInstallManagerForRoot(runCommand, pkgRoot, 1000)).resolves.toBe( + "pnpm", + ); + await expect(detectGlobalInstallManagerByPresence(runCommand, 1000)).resolves.toBe("npm"); + + await fs.rm(path.join(npmRoot, "openclaw"), { recursive: true, force: true }); + await fs.rm(path.join(pnpmRoot, "openclaw"), { recursive: true, force: true }); + await expect(detectGlobalInstallManagerByPresence(runCommand, 1000)).resolves.toBe("bun"); + }); + + it("builds install argv and npm fallback argv", () => { + expect(globalInstallArgs("npm", "openclaw@latest")).toEqual([ + "npm", + "i", + "-g", + "openclaw@latest", + "--no-fund", + "--no-audit", + "--loglevel=error", + ]); + expect(globalInstallArgs("pnpm", "openclaw@latest")).toEqual([ + "pnpm", + "add", + "-g", + "openclaw@latest", + ]); + expect(globalInstallArgs("bun", "openclaw@latest")).toEqual([ + "bun", + "add", + "-g", + "openclaw@latest", + ]); + + expect(globalInstallFallbackArgs("npm", "openclaw@latest")).toEqual([ + "npm", + "i", + "-g", + "openclaw@latest", + "--omit=optional", + "--no-fund", + "--no-audit", + "--loglevel=error", + ]); + expect(globalInstallFallbackArgs("pnpm", "openclaw@latest")).toBeNull(); + }); + + it("cleans only renamed package directories", async () => { + const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-update-cleanup-")); + await fs.mkdir(path.join(root, ".openclaw-123"), { recursive: true }); + await fs.mkdir(path.join(root, ".openclaw-456"), { recursive: true }); + await fs.writeFile(path.join(root, ".openclaw-file"), "nope", "utf8"); + await fs.mkdir(path.join(root, "openclaw"), { recursive: true }); + + await expect( + cleanupGlobalRenameDirs({ + globalRoot: root, + packageName: "openclaw", + }), + ).resolves.toEqual({ + removed: 
[".openclaw-123", ".openclaw-456"], + }); + await expect(fs.stat(path.join(root, "openclaw"))).resolves.toBeDefined(); + await expect(fs.stat(path.join(root, ".openclaw-file"))).resolves.toBeDefined(); + }); +}); diff --git a/src/infra/voicewake.test.ts b/src/infra/voicewake.test.ts new file mode 100644 index 00000000000..d719a496e81 --- /dev/null +++ b/src/infra/voicewake.test.ts @@ -0,0 +1,55 @@ +import fs from "node:fs/promises"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { withTempDir } from "../test-utils/temp-dir.js"; +import { + defaultVoiceWakeTriggers, + loadVoiceWakeConfig, + setVoiceWakeTriggers, +} from "./voicewake.js"; + +describe("voicewake config", () => { + it("returns defaults when missing", async () => { + await withTempDir("openclaw-voicewake-", async (baseDir) => { + await expect(loadVoiceWakeConfig(baseDir)).resolves.toEqual({ + triggers: defaultVoiceWakeTriggers(), + updatedAtMs: 0, + }); + }); + }); + + it("sanitizes and persists triggers", async () => { + await withTempDir("openclaw-voicewake-", async (baseDir) => { + const saved = await setVoiceWakeTriggers([" hi ", "", " there "], baseDir); + expect(saved.triggers).toEqual(["hi", "there"]); + expect(saved.updatedAtMs).toBeGreaterThan(0); + + await expect(loadVoiceWakeConfig(baseDir)).resolves.toEqual({ + triggers: ["hi", "there"], + updatedAtMs: saved.updatedAtMs, + }); + }); + }); + + it("falls back to defaults for empty or malformed persisted values", async () => { + await withTempDir("openclaw-voicewake-", async (baseDir) => { + const emptySaved = await setVoiceWakeTriggers(["", " "], baseDir); + expect(emptySaved.triggers).toEqual(defaultVoiceWakeTriggers()); + + await fs.mkdir(path.join(baseDir, "settings"), { recursive: true }); + await fs.writeFile( + path.join(baseDir, "settings", "voicewake.json"), + JSON.stringify({ + triggers: [" wake ", "", 42, null], + updatedAtMs: -1, + }), + "utf8", + ); + + await 
expect(loadVoiceWakeConfig(baseDir)).resolves.toEqual({ + triggers: ["wake"], + updatedAtMs: 0, + }); + }); + }); +}); diff --git a/src/infra/warning-filter.test.ts b/src/infra/warning-filter.test.ts index 9333d23da0c..1eb3b1372b5 100644 --- a/src/infra/warning-filter.test.ts +++ b/src/infra/warning-filter.test.ts @@ -12,6 +12,10 @@ function resetWarningFilterInstallState(): void { process.emitWarning = baseEmitWarning; } +async function flushWarnings(): Promise { + await new Promise((resolve) => setImmediate(resolve)); +} + describe("warning filter", () => { beforeEach(() => { resetWarningFilterInstallState(); @@ -23,36 +27,49 @@ describe("warning filter", () => { }); it("suppresses known deprecation and experimental warning signatures", () => { - expect( - shouldIgnoreWarning({ + const ignoredWarnings = [ + { name: "DeprecationWarning", code: "DEP0040", message: "The punycode module is deprecated.", - }), - ).toBe(true); - expect( - shouldIgnoreWarning({ + }, + { name: "DeprecationWarning", code: "DEP0060", message: "The `util._extend` API is deprecated.", - }), - ).toBe(true); - expect( - shouldIgnoreWarning({ + }, + { name: "ExperimentalWarning", message: "SQLite is an experimental feature and might change at any time", - }), - ).toBe(true); + }, + ]; + + for (const warning of ignoredWarnings) { + expect(shouldIgnoreWarning(warning)).toBe(true); + } }); it("keeps unknown warnings visible", () => { - expect( - shouldIgnoreWarning({ + const visibleWarnings = [ + { name: "DeprecationWarning", code: "DEP9999", message: "Totally new warning", - }), - ).toBe(false); + }, + { + name: "ExperimentalWarning", + message: "Different experimental warning", + }, + { + name: "DeprecationWarning", + code: "DEP0040", + message: "Different deprecated module", + }, + ]; + + for (const warning of visibleWarnings) { + expect(shouldIgnoreWarning(warning)).toBe(false); + } }); it("installs once and suppresses known warnings at emit time", async () => { @@ -82,11 +99,18 @@ 
describe("warning filter", () => { type: "DeprecationWarning", code: "DEP0060", }); - await new Promise((resolve) => setImmediate(resolve)); + emitWarning( + Object.assign(new Error("The punycode module is deprecated."), { + name: "DeprecationWarning", + code: "DEP0040", + }), + ); + await flushWarnings(); expect(seenWarnings.find((warning) => warning.code === "DEP0060")).toBeUndefined(); + expect(seenWarnings.find((warning) => warning.code === "DEP0040")).toBeUndefined(); emitWarning("Visible warning", { type: "Warning", code: "OPENCLAW_TEST_WARNING" }); - await new Promise((resolve) => setImmediate(resolve)); + await flushWarnings(); expect( seenWarnings.find((warning) => warning.code === "OPENCLAW_TEST_WARNING"), ).toBeDefined(); diff --git a/src/infra/widearea-dns.test.ts b/src/infra/widearea-dns.test.ts index 409c5cc4283..f2ab0c0f54f 100644 --- a/src/infra/widearea-dns.test.ts +++ b/src/infra/widearea-dns.test.ts @@ -1,5 +1,81 @@ -import { describe, expect, it } from "vitest"; -import { renderWideAreaGatewayZoneText } from "./widearea-dns.js"; +import fs from "node:fs"; +import path from "node:path"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import * as utils from "../utils.js"; +import { + getWideAreaZonePath, + normalizeWideAreaDomain, + renderWideAreaGatewayZoneText, + resolveWideAreaDiscoveryDomain, + type WideAreaGatewayZoneOpts, + writeWideAreaGatewayZone, +} from "./widearea-dns.js"; + +const baseZoneOpts: WideAreaGatewayZoneOpts = { + domain: "openclaw.internal.", + gatewayPort: 18789, + displayName: "Mac Studio (OpenClaw)", + tailnetIPv4: "100.123.224.76", + hostLabel: "studio-london", + instanceLabel: "studio-london", +}; + +function makeZoneOpts(overrides: Partial = {}): WideAreaGatewayZoneOpts { + return { ...baseZoneOpts, ...overrides }; +} + +afterEach(() => { + vi.useRealTimers(); + vi.restoreAllMocks(); +}); + +describe("wide-area DNS discovery domain helpers", () => { + it.each([ + { value: "openclaw.internal", expected: 
"openclaw.internal." }, + { value: "openclaw.internal.", expected: "openclaw.internal." }, + { value: " openclaw.internal ", expected: "openclaw.internal." }, + { value: "", expected: null }, + { value: " ", expected: null }, + { value: null, expected: null }, + { value: undefined, expected: null }, + ])("normalizes domains for %j", ({ value, expected }) => { + expect(normalizeWideAreaDomain(value)).toBe(expected); + }); + + it.each([ + { + name: "prefers config domain over env", + params: { + env: { OPENCLAW_WIDE_AREA_DOMAIN: "env.internal" } as NodeJS.ProcessEnv, + configDomain: "config.internal", + }, + expected: "config.internal.", + }, + { + name: "falls back to env domain", + params: { + env: { OPENCLAW_WIDE_AREA_DOMAIN: "env.internal" } as NodeJS.ProcessEnv, + }, + expected: "env.internal.", + }, + { + name: "returns null when both sources are blank", + params: { + env: { OPENCLAW_WIDE_AREA_DOMAIN: " " } as NodeJS.ProcessEnv, + configDomain: " ", + }, + expected: null, + }, + ])("$name", ({ params, expected }) => { + expect(resolveWideAreaDiscoveryDomain(params)).toBe(expected); + }); + + it("builds the default zone path from the normalized domain", () => { + expect(getWideAreaZonePath("openclaw.internal.")).toBe( + path.join(utils.CONFIG_DIR, "dns", "openclaw.internal.db"), + ); + }); +}); describe("wide-area DNS-SD zone rendering", () => { it("renders a zone with gateway PTR/SRV/TXT records", () => { @@ -41,4 +117,76 @@ describe("wide-area DNS-SD zone rendering", () => { expect(txt).toContain(`tailnetDns=peters-mac-studio-1.sheep-coho.ts.net`); }); + + it("includes gateway TLS TXT fields and trims display metadata", () => { + const txt = renderWideAreaGatewayZoneText({ + domain: "openclaw.internal", + serial: 2025121701, + gatewayPort: 18789, + displayName: " Mac Studio (OpenClaw) ", + tailnetIPv4: "100.123.224.76", + hostLabel: " Studio London ", + instanceLabel: " Studio London ", + gatewayTlsEnabled: true, + gatewayTlsFingerprintSha256: "abc123", + 
tailnetDns: " tailnet.ts.net ", + cliPath: " /opt/homebrew/bin/openclaw ", + }); + + expect(txt).toContain(`$ORIGIN openclaw.internal.`); + expect(txt).toContain(`studio-london IN A 100.123.224.76`); + expect(txt).toContain(`studio-london._openclaw-gw._tcp IN TXT`); + expect(txt).toContain(`displayName=Mac Studio (OpenClaw)`); + expect(txt).toContain(`gatewayTls=1`); + expect(txt).toContain(`gatewayTlsSha256=abc123`); + expect(txt).toContain(`tailnetDns=tailnet.ts.net`); + expect(txt).toContain(`cliPath=/opt/homebrew/bin/openclaw`); + }); +}); + +describe("wide-area DNS zone writes", () => { + it("rejects blank domains", async () => { + await expect(writeWideAreaGatewayZone(makeZoneOpts({ domain: " " }))).rejects.toThrow( + "wide-area discovery domain is required", + ); + }); + + it("skips rewriting unchanged content", async () => { + vi.spyOn(utils, "ensureDir").mockResolvedValue(undefined); + const existing = renderWideAreaGatewayZoneText({ ...makeZoneOpts(), serial: 2026031301 }); + vi.spyOn(fs, "readFileSync").mockReturnValue(existing); + const writeSpy = vi.spyOn(fs, "writeFileSync").mockImplementation(() => undefined); + + const result = await writeWideAreaGatewayZone(makeZoneOpts()); + + expect(result).toEqual({ + zonePath: getWideAreaZonePath("openclaw.internal."), + changed: false, + }); + expect(writeSpy).not.toHaveBeenCalled(); + }); + + it("increments same-day serials when content changes", async () => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-03-13T12:00:00.000Z")); + vi.spyOn(utils, "ensureDir").mockResolvedValue(undefined); + vi.spyOn(fs, "readFileSync").mockReturnValue( + renderWideAreaGatewayZoneText({ ...makeZoneOpts(), serial: 2026031304 }), + ); + const writeSpy = vi.spyOn(fs, "writeFileSync").mockImplementation(() => undefined); + + const result = await writeWideAreaGatewayZone( + makeZoneOpts({ gatewayTlsEnabled: true, gatewayTlsFingerprintSha256: "abc123" }), + ); + + expect(result).toEqual({ + zonePath: 
getWideAreaZonePath("openclaw.internal."), + changed: true, + }); + expect(writeSpy).toHaveBeenCalledWith( + getWideAreaZonePath("openclaw.internal."), + expect.stringContaining("@ IN SOA ns1 hostmaster 2026031305 7200 3600 1209600 60"), + "utf-8", + ); + }); }); diff --git a/src/infra/ws.test.ts b/src/infra/ws.test.ts new file mode 100644 index 00000000000..53b70aca614 --- /dev/null +++ b/src/infra/ws.test.ts @@ -0,0 +1,21 @@ +import { Buffer } from "node:buffer"; +import { describe, expect, it } from "vitest"; +import { rawDataToString } from "./ws.js"; + +describe("rawDataToString", () => { + it("returns string input unchanged", () => { + expect(rawDataToString("hello" as unknown as Parameters[0])).toBe( + "hello", + ); + }); + + it("decodes Buffer, Buffer[] and ArrayBuffer inputs", () => { + expect(rawDataToString(Buffer.from("hello"))).toBe("hello"); + expect(rawDataToString([Buffer.from("he"), Buffer.from("llo")])).toBe("hello"); + expect(rawDataToString(Uint8Array.from([104, 101, 108, 108, 111]).buffer)).toBe("hello"); + }); + + it("falls back to string coercion for other raw data shapes", () => { + expect(rawDataToString(Uint8Array.from([1, 2, 3]) as never)).toBe("1,2,3"); + }); +}); diff --git a/src/line/accounts.test.ts b/src/line/accounts.test.ts index 06433f6f8e7..9a15afc6cd9 100644 --- a/src/line/accounts.test.ts +++ b/src/line/accounts.test.ts @@ -12,6 +12,15 @@ import { describe("LINE accounts", () => { const originalEnv = { ...process.env }; + const tempDirs: string[] = []; + + const createSecretFile = (fileName: string, contents: string) => { + const dir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-line-account-")); + tempDirs.push(dir); + const filePath = path.join(dir, fileName); + fs.writeFileSync(filePath, contents, "utf8"); + return filePath; + }; beforeEach(() => { process.env = { ...originalEnv }; @@ -21,6 +30,9 @@ describe("LINE accounts", () => { afterEach(() => { process.env = originalEnv; + for (const dir of tempDirs.splice(0)) { + 
fs.rmSync(dir, { recursive: true, force: true }); + } }); describe("resolveLineAccount", () => { @@ -101,8 +113,47 @@ describe("LINE accounts", () => { expect(account.tokenSource).toBe("none"); }); + it("resolves default account credentials from files", () => { + const cfg: OpenClawConfig = { + channels: { + line: { + tokenFile: createSecretFile("token.txt", "file-token\n"), + secretFile: createSecretFile("secret.txt", "file-secret\n"), + }, + }, + }; + + const account = resolveLineAccount({ cfg }); + + expect(account.channelAccessToken).toBe("file-token"); + expect(account.channelSecret).toBe("file-secret"); + expect(account.tokenSource).toBe("file"); + }); + + it("resolves named account credentials from account-level files", () => { + const cfg: OpenClawConfig = { + channels: { + line: { + accounts: { + business: { + tokenFile: createSecretFile("business-token.txt", "business-file-token\n"), + secretFile: createSecretFile("business-secret.txt", "business-file-secret\n"), + }, + }, + }, + }, + }; + + const account = resolveLineAccount({ cfg, accountId: "business" }); + + expect(account.channelAccessToken).toBe("business-file-token"); + expect(account.channelSecret).toBe("business-file-secret"); + expect(account.tokenSource).toBe("file"); + }); + it.runIf(process.platform !== "win32")("rejects symlinked token and secret files", () => { const dir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-line-account-")); + tempDirs.push(dir); const tokenFile = path.join(dir, "token.txt"); const tokenLink = path.join(dir, "token-link.txt"); const secretFile = path.join(dir, "secret.txt"); @@ -125,74 +176,83 @@ describe("LINE accounts", () => { expect(account.channelAccessToken).toBe(""); expect(account.channelSecret).toBe(""); expect(account.tokenSource).toBe("none"); - fs.rmSync(dir, { recursive: true, force: true }); }); }); describe("resolveDefaultLineAccountId", () => { - it("prefers channels.line.defaultAccount when configured", () => { - const cfg: OpenClawConfig = { - 
channels: { - line: { - defaultAccount: "business", - accounts: { - business: { enabled: true }, - support: { enabled: true }, + it.each([ + { + name: "prefers channels.line.defaultAccount when configured", + cfg: { + channels: { + line: { + defaultAccount: "business", + accounts: { + business: { enabled: true }, + support: { enabled: true }, + }, }, }, - }, - }; - - const id = resolveDefaultLineAccountId(cfg); - expect(id).toBe("business"); - }); - - it("normalizes channels.line.defaultAccount before lookup", () => { - const cfg: OpenClawConfig = { - channels: { - line: { - defaultAccount: "Business Ops", - accounts: { - "business-ops": { enabled: true }, + } satisfies OpenClawConfig, + expected: "business", + }, + { + name: "normalizes channels.line.defaultAccount before lookup", + cfg: { + channels: { + line: { + defaultAccount: "Business Ops", + accounts: { + "business-ops": { enabled: true }, + }, }, }, - }, - }; - - const id = resolveDefaultLineAccountId(cfg); - expect(id).toBe("business-ops"); - }); - - it("returns first named account when default not configured", () => { - const cfg: OpenClawConfig = { - channels: { - line: { - accounts: { - business: { enabled: true }, + } satisfies OpenClawConfig, + expected: "business-ops", + }, + { + name: "returns first named account when default not configured", + cfg: { + channels: { + line: { + accounts: { + business: { enabled: true }, + }, }, }, - }, - }; - - const id = resolveDefaultLineAccountId(cfg); - - expect(id).toBe("business"); - }); - - it("falls back when channels.line.defaultAccount is missing", () => { - const cfg: OpenClawConfig = { - channels: { - line: { - defaultAccount: "missing", - accounts: { - business: { enabled: true }, + } satisfies OpenClawConfig, + expected: "business", + }, + { + name: "falls back when channels.line.defaultAccount is missing", + cfg: { + channels: { + line: { + defaultAccount: "missing", + accounts: { + business: { enabled: true }, + }, }, }, - }, - }; - - const id = 
resolveDefaultLineAccountId(cfg); - expect(id).toBe("business"); + } satisfies OpenClawConfig, + expected: "business", + }, + { + name: "prefers the default account when base credentials are configured", + cfg: { + channels: { + line: { + channelAccessToken: "base-token", + accounts: { + business: { enabled: true }, + }, + }, + }, + } satisfies OpenClawConfig, + expected: DEFAULT_ACCOUNT_ID, + }, + ])("$name", ({ cfg, expected }) => { + expect(resolveDefaultLineAccountId(cfg)).toBe(expected); }); }); diff --git a/src/line/bot-handlers.test.ts b/src/line/bot-handlers.test.ts index 4f2ca707c8b..7b3638f072b 100644 --- a/src/line/bot-handlers.test.ts +++ b/src/line/bot-handlers.test.ts @@ -89,27 +89,73 @@ function createReplayMessageEvent(params: { } as MessageEvent; } -function createOpenGroupReplayContext( - processMessage: LineWebhookContext["processMessage"], - replayCache: ReturnType, -): Parameters[1] { +function createTestMessageEvent(params: { + message: MessageEvent["message"]; + source: MessageEvent["source"]; + webhookEventId: string; + timestamp?: number; + replyToken?: string; + isRedelivery?: boolean; +}) { return { - cfg: { channels: { line: { groupPolicy: "open" } } }, + type: "message", + message: params.message, + replyToken: params.replyToken ?? "reply-token", + timestamp: params.timestamp ?? Date.now(), + source: params.source, + mode: "active", + webhookEventId: params.webhookEventId, + deliveryContext: { isRedelivery: params.isRedelivery ?? false }, + } as MessageEvent; +} + +function createLineWebhookTestContext(params: { + processMessage: LineWebhookContext["processMessage"]; + groupPolicy?: "open"; + dmPolicy?: "open"; + requireMention?: boolean; + groupHistories?: Map; + replayCache?: ReturnType; +}): Parameters[1] { + const lineConfig = { + ...(params.groupPolicy ? { groupPolicy: params.groupPolicy } : {}), + ...(params.dmPolicy ? 
{ dmPolicy: params.dmPolicy } : {}), + }; + return { + cfg: { channels: { line: lineConfig } }, account: { accountId: "default", enabled: true, channelAccessToken: "token", channelSecret: "secret", tokenSource: "config", - config: { groupPolicy: "open", groups: { "*": { requireMention: false } } }, + config: { + ...lineConfig, + ...(params.requireMention === undefined + ? {} + : { groups: { "*": { requireMention: params.requireMention } } }), + }, }, runtime: createRuntime(), mediaMaxBytes: 1, - processMessage, - replayCache, + processMessage: params.processMessage, + ...(params.groupHistories ? { groupHistories: params.groupHistories } : {}), + ...(params.replayCache ? { replayCache: params.replayCache } : {}), }; } +function createOpenGroupReplayContext( + processMessage: LineWebhookContext["processMessage"], + replayCache: ReturnType, +): Parameters[1] { + return createLineWebhookTestContext({ + processMessage, + groupPolicy: "open", + requireMention: false, + replayCache, + }); +} + vi.mock("../pairing/pairing-store.js", () => ({ readChannelAllowFromStore: readAllowFromStoreMock, upsertChannelPairingRequest: upsertPairingRequestMock, @@ -631,32 +677,20 @@ describe("handleLineWebhookEvents", () => { it("skips group messages by default when requireMention is not configured", async () => { const processMessage = vi.fn(); - const event = { - type: "message", - message: { id: "m-default-skip", type: "text", text: "hi there" }, - replyToken: "reply-token", - timestamp: Date.now(), + const event = createTestMessageEvent({ + message: { id: "m-default-skip", type: "text", text: "hi there", quoteToken: "q-default" }, source: { type: "group", groupId: "group-default", userId: "user-default" }, - mode: "active", webhookEventId: "evt-default-skip", - deliveryContext: { isRedelivery: false }, - } as MessageEvent; - - await handleLineWebhookEvents([event], { - cfg: { channels: { line: { groupPolicy: "open" } } }, - account: { - accountId: "default", - enabled: true, - 
channelAccessToken: "token", - channelSecret: "secret", - tokenSource: "config", - config: { groupPolicy: "open" }, - }, - runtime: createRuntime(), - mediaMaxBytes: 1, - processMessage, }); + await handleLineWebhookEvents( + [event], + createLineWebhookTestContext({ + processMessage, + groupPolicy: "open", + }), + ); + expect(processMessage).not.toHaveBeenCalled(); expect(buildLineMessageContextMock).not.toHaveBeenCalled(); }); @@ -667,33 +701,22 @@ describe("handleLineWebhookEvents", () => { string, import("../auto-reply/reply/history.js").HistoryEntry[] >(); - const event = { - type: "message", - message: { id: "m-hist-1", type: "text", text: "hello history" }, - replyToken: "reply-token", + const event = createTestMessageEvent({ + message: { id: "m-hist-1", type: "text", text: "hello history", quoteToken: "q-hist-1" }, timestamp: 1700000000000, source: { type: "group", groupId: "group-hist-1", userId: "user-hist" }, - mode: "active", webhookEventId: "evt-hist-1", - deliveryContext: { isRedelivery: false }, - } as MessageEvent; - - await handleLineWebhookEvents([event], { - cfg: { channels: { line: { groupPolicy: "open" } } }, - account: { - accountId: "default", - enabled: true, - channelAccessToken: "token", - channelSecret: "secret", - tokenSource: "config", - config: { groupPolicy: "open" }, - }, - runtime: createRuntime(), - mediaMaxBytes: 1, - processMessage, - groupHistories, }); + await handleLineWebhookEvents( + [event], + createLineWebhookTestContext({ + processMessage, + groupPolicy: "open", + groupHistories, + }), + ); + expect(processMessage).not.toHaveBeenCalled(); const entries = groupHistories.get("group-hist-1"); expect(entries).toHaveLength(1); @@ -706,35 +729,21 @@ describe("handleLineWebhookEvents", () => { it("skips group messages without mention when requireMention is set", async () => { const processMessage = vi.fn(); - const event = { - type: "message", - message: { id: "m-mention-1", type: "text", text: "hi there" }, - replyToken: 
"reply-token", - timestamp: Date.now(), + const event = createTestMessageEvent({ + message: { id: "m-mention-1", type: "text", text: "hi there", quoteToken: "q-mention-1" }, source: { type: "group", groupId: "group-mention", userId: "user-mention" }, - mode: "active", webhookEventId: "evt-mention-1", - deliveryContext: { isRedelivery: false }, - } as MessageEvent; - - await handleLineWebhookEvents([event], { - cfg: { channels: { line: { groupPolicy: "open" } } }, - account: { - accountId: "default", - enabled: true, - channelAccessToken: "token", - channelSecret: "secret", - tokenSource: "config", - config: { - groupPolicy: "open", - groups: { "*": { requireMention: true } }, - }, - }, - runtime: createRuntime(), - mediaMaxBytes: 1, - processMessage, }); + await handleLineWebhookEvents( + [event], + createLineWebhookTestContext({ + processMessage, + groupPolicy: "open", + requireMention: true, + }), + ); + expect(processMessage).not.toHaveBeenCalled(); expect(buildLineMessageContextMock).not.toHaveBeenCalled(); }); @@ -742,8 +751,7 @@ describe("handleLineWebhookEvents", () => { it("processes group messages with bot mention when requireMention is set", async () => { const processMessage = vi.fn(); // Simulate a LINE text message with mention.mentionees containing isSelf=true - const event = { - type: "message", + const event = createTestMessageEvent({ message: { id: "m-mention-2", type: "text", @@ -751,41 +759,27 @@ describe("handleLineWebhookEvents", () => { mention: { mentionees: [{ index: 0, length: 4, type: "user", isSelf: true }], }, - }, - replyToken: "reply-token", - timestamp: Date.now(), + } as unknown as MessageEvent["message"], source: { type: "group", groupId: "group-mention", userId: "user-mention" }, - mode: "active", webhookEventId: "evt-mention-2", - deliveryContext: { isRedelivery: false }, - } as unknown as MessageEvent; - - await handleLineWebhookEvents([event], { - cfg: { channels: { line: { groupPolicy: "open" } } }, - account: { - accountId: 
"default", - enabled: true, - channelAccessToken: "token", - channelSecret: "secret", - tokenSource: "config", - config: { - groupPolicy: "open", - groups: { "*": { requireMention: true } }, - }, - }, - runtime: createRuntime(), - mediaMaxBytes: 1, - processMessage, }); + await handleLineWebhookEvents( + [event], + createLineWebhookTestContext({ + processMessage, + groupPolicy: "open", + requireMention: true, + }), + ); + expect(buildLineMessageContextMock).toHaveBeenCalledTimes(1); expect(processMessage).toHaveBeenCalledTimes(1); }); it("processes group messages with @all mention when requireMention is set", async () => { const processMessage = vi.fn(); - const event = { - type: "message", + const event = createTestMessageEvent({ message: { id: "m-mention-3", type: "text", @@ -793,68 +787,41 @@ describe("handleLineWebhookEvents", () => { mention: { mentionees: [{ index: 0, length: 4, type: "all" }], }, - }, - replyToken: "reply-token", - timestamp: Date.now(), + } as MessageEvent["message"], source: { type: "group", groupId: "group-mention", userId: "user-mention" }, - mode: "active", webhookEventId: "evt-mention-3", - deliveryContext: { isRedelivery: false }, - } as MessageEvent; - - await handleLineWebhookEvents([event], { - cfg: { channels: { line: { groupPolicy: "open" } } }, - account: { - accountId: "default", - enabled: true, - channelAccessToken: "token", - channelSecret: "secret", - tokenSource: "config", - config: { - groupPolicy: "open", - groups: { "*": { requireMention: true } }, - }, - }, - runtime: createRuntime(), - mediaMaxBytes: 1, - processMessage, }); + await handleLineWebhookEvents( + [event], + createLineWebhookTestContext({ + processMessage, + groupPolicy: "open", + requireMention: true, + }), + ); + expect(buildLineMessageContextMock).toHaveBeenCalledTimes(1); expect(processMessage).toHaveBeenCalledTimes(1); }); it("does not apply requireMention gating to DM messages", async () => { const processMessage = vi.fn(); - const event = { - type: 
"message", - message: { id: "m-mention-dm", type: "text", text: "hi" }, - replyToken: "reply-token", - timestamp: Date.now(), + const event = createTestMessageEvent({ + message: { id: "m-mention-dm", type: "text", text: "hi", quoteToken: "q-mention-dm" }, source: { type: "user", userId: "user-dm" }, - mode: "active", webhookEventId: "evt-mention-dm", - deliveryContext: { isRedelivery: false }, - } as MessageEvent; - - await handleLineWebhookEvents([event], { - cfg: { channels: { line: { dmPolicy: "open" } } }, - account: { - accountId: "default", - enabled: true, - channelAccessToken: "token", - channelSecret: "secret", - tokenSource: "config", - config: { - dmPolicy: "open", - groups: { "*": { requireMention: true } }, - }, - }, - runtime: createRuntime(), - mediaMaxBytes: 1, - processMessage, }); + await handleLineWebhookEvents( + [event], + createLineWebhookTestContext({ + processMessage, + dmPolicy: "open", + requireMention: true, + }), + ); + expect(buildLineMessageContextMock).toHaveBeenCalledTimes(1); expect(processMessage).toHaveBeenCalledTimes(1); }); @@ -862,35 +829,26 @@ describe("handleLineWebhookEvents", () => { it("allows non-text group messages through when requireMention is set (cannot detect mention)", async () => { const processMessage = vi.fn(); // Image message -- LINE only carries mention metadata on text messages. 
- const event = { - type: "message", - message: { id: "m-mention-img", type: "image", contentProvider: { type: "line" } }, - replyToken: "reply-token", - timestamp: Date.now(), - source: { type: "group", groupId: "group-1", userId: "user-img" }, - mode: "active", - webhookEventId: "evt-mention-img", - deliveryContext: { isRedelivery: false }, - } as MessageEvent; - - await handleLineWebhookEvents([event], { - cfg: { channels: { line: { groupPolicy: "open" } } }, - account: { - accountId: "default", - enabled: true, - channelAccessToken: "token", - channelSecret: "secret", - tokenSource: "config", - config: { - groupPolicy: "open", - groups: { "*": { requireMention: true } }, - }, + const event = createTestMessageEvent({ + message: { + id: "m-mention-img", + type: "image", + contentProvider: { type: "line" }, + quoteToken: "q-mention-img", }, - runtime: createRuntime(), - mediaMaxBytes: 1, - processMessage, + source: { type: "group", groupId: "group-1", userId: "user-img" }, + webhookEventId: "evt-mention-img", }); + await handleLineWebhookEvents( + [event], + createLineWebhookTestContext({ + processMessage, + groupPolicy: "open", + requireMention: true, + }), + ); + expect(buildLineMessageContextMock).toHaveBeenCalledTimes(1); expect(processMessage).toHaveBeenCalledTimes(1); }); @@ -898,40 +856,26 @@ describe("handleLineWebhookEvents", () => { it("does not bypass mention gating when non-bot mention is present with control command", async () => { const processMessage = vi.fn(); // Text message mentions another user (not bot) together with a control command. 
- const event = { - type: "message", + const event = createTestMessageEvent({ message: { id: "m-mention-other", type: "text", text: "@other !status", mention: { mentionees: [{ index: 0, length: 6, type: "user", isSelf: false }] }, - }, - replyToken: "reply-token", - timestamp: Date.now(), + } as unknown as MessageEvent["message"], source: { type: "group", groupId: "group-1", userId: "user-other" }, - mode: "active", webhookEventId: "evt-mention-other", - deliveryContext: { isRedelivery: false }, - } as unknown as MessageEvent; - - await handleLineWebhookEvents([event], { - cfg: { channels: { line: { groupPolicy: "open" } } }, - account: { - accountId: "default", - enabled: true, - channelAccessToken: "token", - channelSecret: "secret", - tokenSource: "config", - config: { - groupPolicy: "open", - groups: { "*": { requireMention: true } }, - }, - }, - runtime: createRuntime(), - mediaMaxBytes: 1, - processMessage, }); + await handleLineWebhookEvents( + [event], + createLineWebhookTestContext({ + processMessage, + groupPolicy: "open", + requireMention: true, + }), + ); + // Should be skipped because there is a non-bot mention and the bot was not mentioned. expect(processMessage).not.toHaveBeenCalled(); }); diff --git a/src/logging/subsystem.test.ts b/src/logging/subsystem.test.ts index 06f504f47de..15c5dbb9754 100644 --- a/src/logging/subsystem.test.ts +++ b/src/logging/subsystem.test.ts @@ -4,6 +4,17 @@ import { resetLogger, setLoggerOverride } from "./logger.js"; import { loggingState } from "./state.js"; import { createSubsystemLogger } from "./subsystem.js"; +function installConsoleMethodSpy(method: "warn" | "error") { + const spy = vi.fn(); + loggingState.rawConsole = { + log: vi.fn(), + info: vi.fn(), + warn: method === "warn" ? spy : vi.fn(), + error: method === "error" ? 
spy : vi.fn(), + }; + return spy; +} + afterEach(() => { setConsoleSubsystemFilter(null); setLoggerOverride(null); @@ -58,13 +69,7 @@ describe("createSubsystemLogger().isEnabled", () => { it("suppresses probe warnings for embedded subsystems based on structured run metadata", () => { setLoggerOverride({ level: "silent", consoleLevel: "warn" }); - const warn = vi.fn(); - loggingState.rawConsole = { - log: vi.fn(), - info: vi.fn(), - warn, - error: vi.fn(), - }; + const warn = installConsoleMethodSpy("warn"); const log = createSubsystemLogger("agent/embedded").child("failover"); log.warn("embedded run failover decision", { @@ -77,13 +82,7 @@ describe("createSubsystemLogger().isEnabled", () => { it("does not suppress probe errors for embedded subsystems", () => { setLoggerOverride({ level: "silent", consoleLevel: "error" }); - const error = vi.fn(); - loggingState.rawConsole = { - log: vi.fn(), - info: vi.fn(), - warn: vi.fn(), - error, - }; + const error = installConsoleMethodSpy("error"); const log = createSubsystemLogger("agent/embedded").child("failover"); log.error("embedded run failover decision", { @@ -96,13 +95,7 @@ describe("createSubsystemLogger().isEnabled", () => { it("suppresses probe warnings for model-fallback child subsystems based on structured run metadata", () => { setLoggerOverride({ level: "silent", consoleLevel: "warn" }); - const warn = vi.fn(); - loggingState.rawConsole = { - log: vi.fn(), - info: vi.fn(), - warn, - error: vi.fn(), - }; + const warn = installConsoleMethodSpy("warn"); const log = createSubsystemLogger("model-fallback").child("decision"); log.warn("model fallback decision", { @@ -115,13 +108,7 @@ describe("createSubsystemLogger().isEnabled", () => { it("does not suppress probe errors for model-fallback child subsystems", () => { setLoggerOverride({ level: "silent", consoleLevel: "error" }); - const error = vi.fn(); - loggingState.rawConsole = { - log: vi.fn(), - info: vi.fn(), - warn: vi.fn(), - error, - }; + const error = 
installConsoleMethodSpy("error"); const log = createSubsystemLogger("model-fallback").child("decision"); log.error("model fallback decision", { @@ -134,13 +121,7 @@ describe("createSubsystemLogger().isEnabled", () => { it("still emits non-probe warnings for embedded subsystems", () => { setLoggerOverride({ level: "silent", consoleLevel: "warn" }); - const warn = vi.fn(); - loggingState.rawConsole = { - log: vi.fn(), - info: vi.fn(), - warn, - error: vi.fn(), - }; + const warn = installConsoleMethodSpy("warn"); const log = createSubsystemLogger("agent/embedded").child("auth-profiles"); log.warn("auth profile failure state updated", { @@ -153,13 +134,7 @@ describe("createSubsystemLogger().isEnabled", () => { it("still emits non-probe model-fallback child warnings", () => { setLoggerOverride({ level: "silent", consoleLevel: "warn" }); - const warn = vi.fn(); - loggingState.rawConsole = { - log: vi.fn(), - info: vi.fn(), - warn, - error: vi.fn(), - }; + const warn = installConsoleMethodSpy("warn"); const log = createSubsystemLogger("model-fallback").child("decision"); log.warn("model fallback decision", { diff --git a/src/media/fetch.telegram-network.test.ts b/src/media/fetch.telegram-network.test.ts index c9989867f0b..cb4cb1ab5b1 100644 --- a/src/media/fetch.telegram-network.test.ts +++ b/src/media/fetch.telegram-network.test.ts @@ -2,47 +2,35 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import { resolveTelegramTransport } from "../telegram/fetch.js"; import { fetchRemoteMedia } from "./fetch.js"; -const undiciFetch = vi.hoisted(() => vi.fn()); -const AgentCtor = vi.hoisted(() => - vi.fn(function MockAgent( - this: { options?: Record }, - options?: Record, - ) { - this.options = options; - }), -); -const EnvHttpProxyAgentCtor = vi.hoisted(() => - vi.fn(function MockEnvHttpProxyAgent( - this: { options?: Record }, - options?: Record, - ) { - this.options = options; - }), -); -const ProxyAgentCtor = vi.hoisted(() => - vi.fn(function MockProxyAgent( - 
this: { options?: Record | string }, - options?: Record | string, - ) { - this.options = options; - }), -); +const undiciMocks = vi.hoisted(() => { + const createDispatcherCtor = | string>() => + vi.fn(function MockDispatcher(this: { options?: T }, options?: T) { + this.options = options; + }); + + return { + fetch: vi.fn(), + agentCtor: createDispatcherCtor>(), + envHttpProxyAgentCtor: createDispatcherCtor>(), + proxyAgentCtor: createDispatcherCtor | string>(), + }; +}); vi.mock("undici", () => ({ - Agent: AgentCtor, - EnvHttpProxyAgent: EnvHttpProxyAgentCtor, - ProxyAgent: ProxyAgentCtor, - fetch: undiciFetch, + Agent: undiciMocks.agentCtor, + EnvHttpProxyAgent: undiciMocks.envHttpProxyAgentCtor, + ProxyAgent: undiciMocks.proxyAgentCtor, + fetch: undiciMocks.fetch, })); describe("fetchRemoteMedia telegram network policy", () => { type LookupFn = NonNullable[0]["lookupFn"]>; afterEach(() => { - undiciFetch.mockReset(); - AgentCtor.mockClear(); - EnvHttpProxyAgentCtor.mockClear(); - ProxyAgentCtor.mockClear(); + undiciMocks.fetch.mockReset(); + undiciMocks.agentCtor.mockClear(); + undiciMocks.envHttpProxyAgentCtor.mockClear(); + undiciMocks.proxyAgentCtor.mockClear(); vi.unstubAllEnvs(); }); @@ -50,7 +38,7 @@ describe("fetchRemoteMedia telegram network policy", () => { const lookupFn = vi.fn(async () => [ { address: "149.154.167.220", family: 4 }, ]) as unknown as LookupFn; - undiciFetch.mockResolvedValueOnce( + undiciMocks.fetch.mockResolvedValueOnce( new Response(new Uint8Array([0xff, 0xd8, 0xff, 0x00]), { status: 200, headers: { "content-type": "image/jpeg" }, @@ -76,7 +64,7 @@ describe("fetchRemoteMedia telegram network policy", () => { }, }); - const init = undiciFetch.mock.calls[0]?.[1] as + const init = undiciMocks.fetch.mock.calls[0]?.[1] as | (RequestInit & { dispatcher?: { options?: { @@ -100,7 +88,7 @@ describe("fetchRemoteMedia telegram network policy", () => { const lookupFn = vi.fn(async () => [ { address: "149.154.167.220", family: 4 }, ]) as unknown 
as LookupFn; - undiciFetch.mockResolvedValueOnce( + undiciMocks.fetch.mockResolvedValueOnce( new Response(new Uint8Array([0x25, 0x50, 0x44, 0x46]), { status: 200, headers: { "content-type": "application/pdf" }, @@ -126,7 +114,7 @@ describe("fetchRemoteMedia telegram network policy", () => { }, }); - const init = undiciFetch.mock.calls[0]?.[1] as + const init = undiciMocks.fetch.mock.calls[0]?.[1] as | (RequestInit & { dispatcher?: { options?: { @@ -137,6 +125,6 @@ describe("fetchRemoteMedia telegram network policy", () => { | undefined; expect(init?.dispatcher?.options?.uri).toBe("http://127.0.0.1:7890"); - expect(ProxyAgentCtor).toHaveBeenCalled(); + expect(undiciMocks.proxyAgentCtor).toHaveBeenCalled(); }); }); diff --git a/src/memory/embeddings-gemini.test.ts b/src/memory/embeddings-gemini.test.ts index f97cc6cb142..8d05a43d042 100644 --- a/src/memory/embeddings-gemini.test.ts +++ b/src/memory/embeddings-gemini.test.ts @@ -58,6 +58,31 @@ function mockResolvedProviderKey(apiKey = "test-key") { }); } +type GeminiFetchMock = + | ReturnType + | ReturnType; + +async function createProviderWithFetch( + fetchMock: GeminiFetchMock, + options: Partial[0]> & { model: string }, +) { + vi.stubGlobal("fetch", fetchMock); + mockResolvedProviderKey(); + const { provider } = await createGeminiEmbeddingProvider({ + config: {} as never, + provider: "gemini", + fallback: "none", + ...options, + }); + return provider; +} + +function expectNormalizedThreeFourVector(embedding: number[]) { + expect(embedding[0]).toBeCloseTo(0.6, 5); + expect(embedding[1]).toBeCloseTo(0.8, 5); + expect(magnitude(embedding)).toBeCloseTo(1, 5); +} + describe("buildGeminiTextEmbeddingRequest", () => { it("builds a text embedding request with optional model and dimensions", () => { expect( @@ -160,14 +185,8 @@ describe("resolveGeminiOutputDimensionality", () => { describe("gemini-embedding-001 provider (backward compat)", () => { it("does NOT include outputDimensionality in embedQuery", async () => { const 
fetchMock = createGeminiFetchMock(); - vi.stubGlobal("fetch", fetchMock); - mockResolvedProviderKey(); - - const { provider } = await createGeminiEmbeddingProvider({ - config: {} as never, - provider: "gemini", + const provider = await createProviderWithFetch(fetchMock, { model: "gemini-embedding-001", - fallback: "none", }); await provider.embedQuery("test query"); @@ -180,14 +199,8 @@ describe("gemini-embedding-001 provider (backward compat)", () => { it("does NOT include outputDimensionality in embedBatch", async () => { const fetchMock = createGeminiBatchFetchMock(2); - vi.stubGlobal("fetch", fetchMock); - mockResolvedProviderKey(); - - const { provider } = await createGeminiEmbeddingProvider({ - config: {} as never, - provider: "gemini", + const provider = await createProviderWithFetch(fetchMock, { model: "gemini-embedding-001", - fallback: "none", }); await provider.embedBatch(["text1", "text2"]); @@ -202,14 +215,8 @@ describe("gemini-embedding-001 provider (backward compat)", () => { describe("gemini-embedding-2-preview provider", () => { it("includes outputDimensionality in embedQuery request", async () => { const fetchMock = createGeminiFetchMock(); - vi.stubGlobal("fetch", fetchMock); - mockResolvedProviderKey(); - - const { provider } = await createGeminiEmbeddingProvider({ - config: {} as never, - provider: "gemini", + const provider = await createProviderWithFetch(fetchMock, { model: "gemini-embedding-2-preview", - fallback: "none", }); await provider.embedQuery("test query"); @@ -222,33 +229,19 @@ describe("gemini-embedding-2-preview provider", () => { it("normalizes embedQuery response vectors", async () => { const fetchMock = createGeminiFetchMock([3, 4]); - vi.stubGlobal("fetch", fetchMock); - mockResolvedProviderKey(); - - const { provider } = await createGeminiEmbeddingProvider({ - config: {} as never, - provider: "gemini", + const provider = await createProviderWithFetch(fetchMock, { model: "gemini-embedding-2-preview", - fallback: "none", }); 
const embedding = await provider.embedQuery("test query"); - expect(embedding[0]).toBeCloseTo(0.6, 5); - expect(embedding[1]).toBeCloseTo(0.8, 5); - expect(magnitude(embedding)).toBeCloseTo(1, 5); + expectNormalizedThreeFourVector(embedding); }); it("includes outputDimensionality in embedBatch request", async () => { const fetchMock = createGeminiBatchFetchMock(2); - vi.stubGlobal("fetch", fetchMock); - mockResolvedProviderKey(); - - const { provider } = await createGeminiEmbeddingProvider({ - config: {} as never, - provider: "gemini", + const provider = await createProviderWithFetch(fetchMock, { model: "gemini-embedding-2-preview", - fallback: "none", }); await provider.embedBatch(["text1", "text2"]); @@ -272,36 +265,22 @@ describe("gemini-embedding-2-preview provider", () => { it("normalizes embedBatch response vectors", async () => { const fetchMock = createGeminiBatchFetchMock(2, [3, 4]); - vi.stubGlobal("fetch", fetchMock); - mockResolvedProviderKey(); - - const { provider } = await createGeminiEmbeddingProvider({ - config: {} as never, - provider: "gemini", + const provider = await createProviderWithFetch(fetchMock, { model: "gemini-embedding-2-preview", - fallback: "none", }); const embeddings = await provider.embedBatch(["text1", "text2"]); expect(embeddings).toHaveLength(2); for (const embedding of embeddings) { - expect(embedding[0]).toBeCloseTo(0.6, 5); - expect(embedding[1]).toBeCloseTo(0.8, 5); - expect(magnitude(embedding)).toBeCloseTo(1, 5); + expectNormalizedThreeFourVector(embedding); } }); it("respects custom outputDimensionality", async () => { const fetchMock = createGeminiFetchMock(); - vi.stubGlobal("fetch", fetchMock); - mockResolvedProviderKey(); - - const { provider } = await createGeminiEmbeddingProvider({ - config: {} as never, - provider: "gemini", + const provider = await createProviderWithFetch(fetchMock, { model: "gemini-embedding-2-preview", - fallback: "none", outputDimensionality: 768, }); @@ -313,14 +292,8 @@ 
describe("gemini-embedding-2-preview provider", () => { it("sanitizes and normalizes embedQuery responses", async () => { const fetchMock = createGeminiFetchMock([3, 4, Number.NaN]); - vi.stubGlobal("fetch", fetchMock); - mockResolvedProviderKey(); - - const { provider } = await createGeminiEmbeddingProvider({ - config: {} as never, - provider: "gemini", + const provider = await createProviderWithFetch(fetchMock, { model: "gemini-embedding-2-preview", - fallback: "none", }); await expect(provider.embedQuery("test")).resolves.toEqual([0.6, 0.8, 0]); @@ -328,14 +301,8 @@ describe("gemini-embedding-2-preview provider", () => { it("uses custom outputDimensionality for each embedBatch request", async () => { const fetchMock = createGeminiBatchFetchMock(2); - vi.stubGlobal("fetch", fetchMock); - mockResolvedProviderKey(); - - const { provider } = await createGeminiEmbeddingProvider({ - config: {} as never, - provider: "gemini", + const provider = await createProviderWithFetch(fetchMock, { model: "gemini-embedding-2-preview", - fallback: "none", outputDimensionality: 768, }); @@ -350,14 +317,8 @@ describe("gemini-embedding-2-preview provider", () => { it("sanitizes and normalizes structured batch responses", async () => { const fetchMock = createGeminiBatchFetchMock(1, [0, Number.POSITIVE_INFINITY, 5]); - vi.stubGlobal("fetch", fetchMock); - mockResolvedProviderKey(); - - const { provider } = await createGeminiEmbeddingProvider({ - config: {} as never, - provider: "gemini", + const provider = await createProviderWithFetch(fetchMock, { model: "gemini-embedding-2-preview", - fallback: "none", }); await expect( @@ -375,14 +336,8 @@ describe("gemini-embedding-2-preview provider", () => { it("supports multimodal embedBatchInputs requests", async () => { const fetchMock = createGeminiBatchFetchMock(2); - vi.stubGlobal("fetch", fetchMock); - mockResolvedProviderKey(); - - const { provider } = await createGeminiEmbeddingProvider({ - config: {} as never, - provider: "gemini", + 
const provider = await createProviderWithFetch(fetchMock, { model: "gemini-embedding-2-preview", - fallback: "none", }); expect(provider.embedBatchInputs).toBeDefined(); @@ -451,14 +406,8 @@ describe("gemini-embedding-2-preview provider", () => { Number.POSITIVE_INFINITY, Number.NEGATIVE_INFINITY, ]); - vi.stubGlobal("fetch", fetchMock); - mockResolvedProviderKey(); - - const { provider } = await createGeminiEmbeddingProvider({ - config: {} as never, - provider: "gemini", + const provider = await createProviderWithFetch(fetchMock, { model: "gemini-embedding-2-preview", - fallback: "none", }); const embedding = await provider.embedQuery("test"); @@ -468,14 +417,8 @@ describe("gemini-embedding-2-preview provider", () => { it("uses correct endpoint URL", async () => { const fetchMock = createGeminiFetchMock(); - vi.stubGlobal("fetch", fetchMock); - mockResolvedProviderKey(); - - const { provider } = await createGeminiEmbeddingProvider({ - config: {} as never, - provider: "gemini", + const provider = await createProviderWithFetch(fetchMock, { model: "gemini-embedding-2-preview", - fallback: "none", }); await provider.embedQuery("test"); @@ -488,14 +431,8 @@ describe("gemini-embedding-2-preview provider", () => { it("allows taskType override via options", async () => { const fetchMock = createGeminiFetchMock(); - vi.stubGlobal("fetch", fetchMock); - mockResolvedProviderKey(); - - const { provider } = await createGeminiEmbeddingProvider({ - config: {} as never, - provider: "gemini", + const provider = await createProviderWithFetch(fetchMock, { model: "gemini-embedding-2-preview", - fallback: "none", taskType: "SEMANTIC_SIMILARITY", }); diff --git a/src/memory/embeddings-voyage.test.ts b/src/memory/embeddings-voyage.test.ts index 2f4bedc87c3..28314017a6f 100644 --- a/src/memory/embeddings-voyage.test.ts +++ b/src/memory/embeddings-voyage.test.ts @@ -1,8 +1,8 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import * as authModule from 
"../agents/model-auth.js"; -import * as ssrf from "../infra/net/ssrf.js"; import { type FetchMock, withFetchPreconnect } from "../test-utils/fetch-mock.js"; import { createVoyageEmbeddingProvider, normalizeVoyageModel } from "./embeddings-voyage.js"; +import { mockPublicPinnedHostname } from "./test-helpers/ssrf.js"; vi.mock("../agents/model-auth.js", async () => { const { createModelAuthMockModule } = await import("../test-utils/model-auth-mock.js"); @@ -28,18 +28,6 @@ function mockVoyageApiKey() { }); } -function mockPublicPinnedHostname() { - return vi.spyOn(ssrf, "resolvePinnedHostnameWithPolicy").mockImplementation(async (hostname) => { - const normalized = hostname.trim().toLowerCase().replace(/\.$/, ""); - const addresses = ["93.184.216.34"]; - return { - hostname: normalized, - addresses, - lookup: ssrf.createPinnedLookup({ hostname: normalized, addresses }), - }; - }); -} - async function createDefaultVoyageProvider( model: string, fetchMock: ReturnType, diff --git a/src/memory/embeddings.test.ts b/src/memory/embeddings.test.ts index 914abd0cbba..58683c53ca5 100644 --- a/src/memory/embeddings.test.ts +++ b/src/memory/embeddings.test.ts @@ -1,8 +1,8 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import * as authModule from "../agents/model-auth.js"; -import * as ssrf from "../infra/net/ssrf.js"; import { DEFAULT_GEMINI_EMBEDDING_MODEL } from "./embeddings-gemini.js"; import { createEmbeddingProvider, DEFAULT_LOCAL_MODEL } from "./embeddings.js"; +import { mockPublicPinnedHostname } from "./test-helpers/ssrf.js"; vi.mock("../agents/model-auth.js", async () => { const { createModelAuthMockModule } = await import("../test-utils/model-auth-mock.js"); @@ -33,18 +33,6 @@ function readFirstFetchRequest(fetchMock: { mock: { calls: unknown[][] } }) { return { url, init: init as RequestInit | undefined }; } -function mockPublicPinnedHostname() { - return vi.spyOn(ssrf, "resolvePinnedHostnameWithPolicy").mockImplementation(async (hostname) => { - 
const normalized = hostname.trim().toLowerCase().replace(/\.$/, ""); - const addresses = ["93.184.216.34"]; - return { - hostname: normalized, - addresses, - lookup: ssrf.createPinnedLookup({ hostname: normalized, addresses }), - }; - }); -} - afterEach(() => { vi.resetAllMocks(); vi.unstubAllGlobals(); diff --git a/src/memory/manager-embedding-ops.ts b/src/memory/manager-embedding-ops.ts index 49171d809cb..fe9b27acd32 100644 --- a/src/memory/manager-embedding-ops.ts +++ b/src/memory/manager-embedding-ops.ts @@ -548,12 +548,7 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { if (!this.isRetryableEmbeddingError(message) || attempt >= EMBEDDING_RETRY_MAX_ATTEMPTS) { throw err; } - const waitMs = Math.min( - EMBEDDING_RETRY_MAX_DELAY_MS, - Math.round(delayMs * (1 + Math.random() * 0.2)), - ); - log.warn(`memory embeddings rate limited; retrying in ${waitMs}ms`); - await new Promise((resolve) => setTimeout(resolve, waitMs)); + await this.waitForEmbeddingRetry(delayMs, "retrying"); delayMs *= 2; attempt += 1; } @@ -587,18 +582,22 @@ export abstract class MemoryManagerEmbeddingOps extends MemoryManagerSyncOps { if (!this.isRetryableEmbeddingError(message) || attempt >= EMBEDDING_RETRY_MAX_ATTEMPTS) { throw err; } - const waitMs = Math.min( - EMBEDDING_RETRY_MAX_DELAY_MS, - Math.round(delayMs * (1 + Math.random() * 0.2)), - ); - log.warn(`memory embeddings rate limited; retrying structured batch in ${waitMs}ms`); - await new Promise((resolve) => setTimeout(resolve, waitMs)); + await this.waitForEmbeddingRetry(delayMs, "retrying structured batch"); delayMs *= 2; attempt += 1; } } } + private async waitForEmbeddingRetry(delayMs: number, action: string): Promise { + const waitMs = Math.min( + EMBEDDING_RETRY_MAX_DELAY_MS, + Math.round(delayMs * (1 + Math.random() * 0.2)), + ); + log.warn(`memory embeddings rate limited; ${action} in ${waitMs}ms`); + await new Promise((resolve) => setTimeout(resolve, waitMs)); + } + private 
isRetryableEmbeddingError(message: string): boolean { return /(rate[_ ]limit|too many requests|429|resource has been exhausted|5\d\d|cloudflare|tokens per day)/i.test( message, diff --git a/src/memory/manager.embedding-batches.test.ts b/src/memory/manager.embedding-batches.test.ts index 1d81744f280..e2af1ed97f2 100644 --- a/src/memory/manager.embedding-batches.test.ts +++ b/src/memory/manager.embedding-batches.test.ts @@ -28,6 +28,17 @@ const fx = installEmbeddingManagerFixture({ const { embedBatch } = fx; describe("memory embedding batches", () => { + async function expectSyncWithFastTimeouts(manager: { + sync: (params: { reason: string }) => Promise; + }) { + const restoreFastTimeouts = useFastShortTimeouts(); + try { + await manager.sync({ reason: "test" }); + } finally { + restoreFastTimeouts(); + } + } + it("splits large files across multiple embedding batches", async () => { const memoryDir = fx.getMemoryDir(); const managerLarge = fx.getManagerLarge(); @@ -93,12 +104,7 @@ describe("memory embedding batches", () => { return texts.map(() => [0, 1, 0]); }); - const restoreFastTimeouts = useFastShortTimeouts(); - try { - await managerSmall.sync({ reason: "test" }); - } finally { - restoreFastTimeouts(); - } + await expectSyncWithFastTimeouts(managerSmall); expect(calls).toBe(3); }, 10000); @@ -119,12 +125,7 @@ describe("memory embedding batches", () => { return texts.map(() => [0, 1, 0]); }); - const restoreFastTimeouts = useFastShortTimeouts(); - try { - await managerSmall.sync({ reason: "test" }); - } finally { - restoreFastTimeouts(); - } + await expectSyncWithFastTimeouts(managerSmall); expect(calls).toBe(2); }, 10000); diff --git a/src/memory/manager.get-concurrency.test.ts b/src/memory/manager.get-concurrency.test.ts index 67b10768fc3..515a9d8226d 100644 --- a/src/memory/manager.get-concurrency.test.ts +++ b/src/memory/manager.get-concurrency.test.ts @@ -49,9 +49,8 @@ describe("memory manager cache hydration", () => { await fs.rm(workspaceDir, { recursive: 
true, force: true }); }); - it("deduplicates concurrent manager creation for the same cache key", async () => { - const indexPath = path.join(workspaceDir, "index.sqlite"); - const cfg = { + function createMemoryConcurrencyConfig(indexPath: string): OpenClawConfig { + return { agents: { defaults: { workspace: workspaceDir, @@ -65,6 +64,11 @@ describe("memory manager cache hydration", () => { list: [{ id: "main", default: true }], }, } as OpenClawConfig; + } + + it("deduplicates concurrent manager creation for the same cache key", async () => { + const indexPath = path.join(workspaceDir, "index.sqlite"); + const cfg = createMemoryConcurrencyConfig(indexPath); const results = await Promise.all( Array.from( @@ -85,20 +89,7 @@ describe("memory manager cache hydration", () => { it("drains in-flight manager creation during global teardown", async () => { const indexPath = path.join(workspaceDir, "index.sqlite"); - const cfg = { - agents: { - defaults: { - workspace: workspaceDir, - memorySearch: { - provider: "openai", - model: "mock-embed", - store: { path: indexPath, vector: { enabled: false } }, - sync: { watch: false, onSessionStart: false, onSearch: false }, - }, - }, - list: [{ id: "main", default: true }], - }, - } as OpenClawConfig; + const cfg = createMemoryConcurrencyConfig(indexPath); hoisted.providerDelayMs = 100; diff --git a/src/memory/manager.watcher-config.test.ts b/src/memory/manager.watcher-config.test.ts index 43682183676..b10cf84c71f 100644 --- a/src/memory/manager.watcher-config.test.ts +++ b/src/memory/manager.watcher-config.test.ts @@ -3,6 +3,7 @@ import os from "node:os"; import path from "node:path"; import { afterEach, describe, expect, it, vi } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; +import type { MemorySearchConfig } from "../config/types.tools.js"; import { getMemorySearchManager, type MemoryIndexManager } from "./index.js"; const { watchMock } = vi.hoisted(() => ({ @@ -51,36 +52,49 @@ describe("memory 
watcher config", () => { } }); - it("watches markdown globs and ignores dependency directories", async () => { + async function setupWatcherWorkspace(seedFile: { name: string; contents: string }) { workspaceDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-memory-watch-")); extraDir = path.join(workspaceDir, "extra"); await fs.mkdir(path.join(workspaceDir, "memory"), { recursive: true }); await fs.mkdir(extraDir, { recursive: true }); - await fs.writeFile(path.join(extraDir, "notes.md"), "hello"); + await fs.writeFile(path.join(extraDir, seedFile.name), seedFile.contents); + } - const cfg = { + function createWatcherConfig(overrides?: Partial): OpenClawConfig { + const defaults: NonNullable["defaults"]> = { + workspace: workspaceDir, + memorySearch: { + provider: "openai", + model: "mock-embed", + store: { path: path.join(workspaceDir, "index.sqlite"), vector: { enabled: false } }, + sync: { watch: true, watchDebounceMs: 25, onSessionStart: false, onSearch: false }, + query: { minScore: 0, hybrid: { enabled: false } }, + extraPaths: [extraDir], + ...overrides, + }, + }; + return { agents: { - defaults: { - workspace: workspaceDir, - memorySearch: { - provider: "openai", - model: "mock-embed", - store: { path: path.join(workspaceDir, "index.sqlite"), vector: { enabled: false } }, - sync: { watch: true, watchDebounceMs: 25, onSessionStart: false, onSearch: false }, - query: { minScore: 0, hybrid: { enabled: false } }, - extraPaths: [extraDir], - }, - }, + defaults, list: [{ id: "main", default: true }], }, } as OpenClawConfig; + } + async function expectWatcherManager(cfg: OpenClawConfig) { const result = await getMemorySearchManager({ cfg, agentId: "main" }); expect(result.manager).not.toBeNull(); if (!result.manager) { throw new Error("manager missing"); } manager = result.manager as unknown as MemoryIndexManager; + } + + it("watches markdown globs and ignores dependency directories", async () => { + await setupWatcherWorkspace({ name: "notes.md", contents: 
"hello" }); + const cfg = createWatcherConfig(); + + await expectWatcherManager(cfg); expect(watchMock).toHaveBeenCalledTimes(1); const [watchedPaths, options] = watchMock.mock.calls[0] as unknown as [ @@ -108,37 +122,15 @@ describe("memory watcher config", () => { }); it("watches multimodal extensions with case-insensitive globs", async () => { - workspaceDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-memory-watch-")); - extraDir = path.join(workspaceDir, "extra"); - await fs.mkdir(path.join(workspaceDir, "memory"), { recursive: true }); - await fs.mkdir(extraDir, { recursive: true }); - await fs.writeFile(path.join(extraDir, "PHOTO.PNG"), "png"); + await setupWatcherWorkspace({ name: "PHOTO.PNG", contents: "png" }); + const cfg = createWatcherConfig({ + provider: "gemini", + model: "gemini-embedding-2-preview", + fallback: "none", + multimodal: { enabled: true, modalities: ["image", "audio"] }, + }); - const cfg = { - agents: { - defaults: { - workspace: workspaceDir, - memorySearch: { - provider: "gemini", - model: "gemini-embedding-2-preview", - fallback: "none", - store: { path: path.join(workspaceDir, "index.sqlite"), vector: { enabled: false } }, - sync: { watch: true, watchDebounceMs: 25, onSessionStart: false, onSearch: false }, - query: { minScore: 0, hybrid: { enabled: false } }, - extraPaths: [extraDir], - multimodal: { enabled: true, modalities: ["image", "audio"] }, - }, - }, - list: [{ id: "main", default: true }], - }, - } as OpenClawConfig; - - const result = await getMemorySearchManager({ cfg, agentId: "main" }); - expect(result.manager).not.toBeNull(); - if (!result.manager) { - throw new Error("manager missing"); - } - manager = result.manager as unknown as MemoryIndexManager; + await expectWatcherManager(cfg); expect(watchMock).toHaveBeenCalledTimes(1); const [watchedPaths] = watchMock.mock.calls[0] as unknown as [ diff --git a/src/memory/test-helpers/ssrf.ts b/src/memory/test-helpers/ssrf.ts new file mode 100644 index 
00000000000..c90ef0c4502 --- /dev/null +++ b/src/memory/test-helpers/ssrf.ts @@ -0,0 +1,14 @@ +import { vi } from "vitest"; +import * as ssrf from "../../infra/net/ssrf.js"; + +export function mockPublicPinnedHostname() { + return vi.spyOn(ssrf, "resolvePinnedHostnameWithPolicy").mockImplementation(async (hostname) => { + const normalized = hostname.trim().toLowerCase().replace(/\.$/, ""); + const addresses = ["93.184.216.34"]; + return { + hostname: normalized, + addresses, + lookup: ssrf.createPinnedLookup({ hostname: normalized, addresses }), + }; + }); +} diff --git a/src/node-host/invoke-system-run-plan.test.ts b/src/node-host/invoke-system-run-plan.test.ts index 442d2cad96b..29cec3074aa 100644 --- a/src/node-host/invoke-system-run-plan.test.ts +++ b/src/node-host/invoke-system-run-plan.test.ts @@ -43,6 +43,14 @@ type RuntimeFixture = { binNames?: string[]; }; +type UnsafeRuntimeInvocationCase = { + name: string; + binName: string; + tmpPrefix: string; + command: string[]; + setup?: (tmp: string) => void; +}; + function createScriptOperandFixture(tmp: string, fixture?: RuntimeFixture): ScriptOperandFixture { if (fixture) { return { @@ -68,20 +76,36 @@ function createScriptOperandFixture(tmp: string, fixture?: RuntimeFixture): Scri }; } -function withFakeRuntimeBin(params: { binName: string; run: () => T }): T { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), `openclaw-${params.binName}-bin-`)); - const binDir = path.join(tmp, "bin"); - fs.mkdirSync(binDir, { recursive: true }); +function writeFakeRuntimeBin(binDir: string, binName: string) { const runtimePath = - process.platform === "win32" - ? path.join(binDir, `${params.binName}.cmd`) - : path.join(binDir, params.binName); + process.platform === "win32" ? path.join(binDir, `${binName}.cmd`) : path.join(binDir, binName); const runtimeBody = process.platform === "win32" ? 
"@echo off\r\nexit /b 0\r\n" : "#!/bin/sh\nexit 0\n"; fs.writeFileSync(runtimePath, runtimeBody, { mode: 0o755 }); if (process.platform !== "win32") { fs.chmodSync(runtimePath, 0o755); } +} + +function withFakeRuntimeBin(params: { binName: string; run: () => T }): T { + return withFakeRuntimeBins({ + binNames: [params.binName], + tmpPrefix: `openclaw-${params.binName}-bin-`, + run: params.run, + }); +} + +function withFakeRuntimeBins(params: { + binNames: string[]; + tmpPrefix?: string; + run: () => T; +}): T { + const tmp = fs.mkdtempSync(path.join(os.tmpdir(), params.tmpPrefix ?? "openclaw-runtime-bins-")); + const binDir = path.join(tmp, "bin"); + fs.mkdirSync(binDir, { recursive: true }); + for (const binName of params.binNames) { + writeFakeRuntimeBin(binDir, binName); + } const oldPath = process.env.PATH; process.env.PATH = `${binDir}${path.delimiter}${oldPath ?? ""}`; try { @@ -96,36 +120,143 @@ function withFakeRuntimeBin(params: { binName: string; run: () => T }): T { } } -function withFakeRuntimeBins(params: { binNames: string[]; run: () => T }): T { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-runtime-bins-")); - const binDir = path.join(tmp, "bin"); - fs.mkdirSync(binDir, { recursive: true }); - for (const binName of params.binNames) { - const runtimePath = - process.platform === "win32" - ? path.join(binDir, `${binName}.cmd`) - : path.join(binDir, binName); - const runtimeBody = - process.platform === "win32" ? 
"@echo off\r\nexit /b 0\r\n" : "#!/bin/sh\nexit 0\n"; - fs.writeFileSync(runtimePath, runtimeBody, { mode: 0o755 }); - if (process.platform !== "win32") { - fs.chmodSync(runtimePath, 0o755); - } +function expectMutableFileOperandApprovalPlan(fixture: ScriptOperandFixture, cwd: string) { + const prepared = buildSystemRunApprovalPlan({ + command: fixture.command, + cwd, + }); + expect(prepared.ok).toBe(true); + if (!prepared.ok) { + throw new Error("unreachable"); } - const oldPath = process.env.PATH; - process.env.PATH = `${binDir}${path.delimiter}${oldPath ?? ""}`; + expect(prepared.plan.mutableFileOperand).toEqual({ + argvIndex: fixture.expectedArgvIndex, + path: fs.realpathSync(fixture.scriptPath), + sha256: expect.any(String), + }); +} + +function writeScriptOperandFixture(fixture: ScriptOperandFixture) { + fs.writeFileSync(fixture.scriptPath, fixture.initialBody); + if (process.platform !== "win32") { + fs.chmodSync(fixture.scriptPath, 0o755); + } +} + +function withScriptOperandPlanFixture( + params: { + tmpPrefix: string; + fixture?: RuntimeFixture; + afterWrite?: (fixture: ScriptOperandFixture, tmp: string) => void; + }, + run: (fixture: ScriptOperandFixture, tmp: string) => T, +) { + const tmp = fs.mkdtempSync(path.join(os.tmpdir(), params.tmpPrefix)); + const fixture = createScriptOperandFixture(tmp, params.fixture); + writeScriptOperandFixture(fixture); + params.afterWrite?.(fixture, tmp); try { - return params.run(); + return run(fixture, tmp); } finally { - if (oldPath === undefined) { - delete process.env.PATH; - } else { - process.env.PATH = oldPath; - } fs.rmSync(tmp, { recursive: true, force: true }); } } +const DENIED_RUNTIME_APPROVAL = { + ok: false, + message: "SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command", +} as const; + +function expectRuntimeApprovalDenied(command: string[], cwd: string) { + const prepared = buildSystemRunApprovalPlan({ command, cwd }); + expect(prepared).toEqual(DENIED_RUNTIME_APPROVAL); +} 
+ +const unsafeRuntimeInvocationCases: UnsafeRuntimeInvocationCase[] = [ + { + name: "rejects bun package script names that do not bind a concrete file", + binName: "bun", + tmpPrefix: "openclaw-bun-package-script-", + command: ["bun", "run", "dev"], + }, + { + name: "rejects deno eval invocations that do not bind a concrete file", + binName: "deno", + tmpPrefix: "openclaw-deno-eval-", + command: ["deno", "eval", "console.log('SAFE')"], + }, + { + name: "rejects tsx eval invocations that do not bind a concrete file", + binName: "tsx", + tmpPrefix: "openclaw-tsx-eval-", + command: ["tsx", "--eval", "console.log('SAFE')"], + }, + { + name: "rejects node inline import operands that cannot be bound to one stable file", + binName: "node", + tmpPrefix: "openclaw-node-import-inline-", + command: ["node", "--import=./preload.mjs", "./main.mjs"], + setup: (tmp) => { + fs.writeFileSync(path.join(tmp, "main.mjs"), 'console.log("SAFE")\n'); + fs.writeFileSync(path.join(tmp, "preload.mjs"), 'console.log("SAFE")\n'); + }, + }, + { + name: "rejects ruby require preloads that approval cannot bind completely", + binName: "ruby", + tmpPrefix: "openclaw-ruby-require-", + command: ["ruby", "-r", "attacker", "./safe.rb"], + setup: (tmp) => { + fs.writeFileSync(path.join(tmp, "safe.rb"), 'puts "SAFE"\n'); + }, + }, + { + name: "rejects ruby load-path flags that can redirect module resolution after approval", + binName: "ruby", + tmpPrefix: "openclaw-ruby-load-path-", + command: ["ruby", "-I.", "./safe.rb"], + setup: (tmp) => { + fs.writeFileSync(path.join(tmp, "safe.rb"), 'puts "SAFE"\n'); + }, + }, + { + name: "rejects perl module preloads that approval cannot bind completely", + binName: "perl", + tmpPrefix: "openclaw-perl-module-preload-", + command: ["perl", "-MPreload", "./safe.pl"], + setup: (tmp) => { + fs.writeFileSync(path.join(tmp, "safe.pl"), 'print "SAFE\\n";\n'); + }, + }, + { + name: "rejects perl load-path flags that can redirect module resolution after approval", + 
binName: "perl", + tmpPrefix: "openclaw-perl-load-path-", + command: ["perl", "-Ilib", "./safe.pl"], + setup: (tmp) => { + fs.writeFileSync(path.join(tmp, "safe.pl"), 'print "SAFE\\n";\n'); + }, + }, + { + name: "rejects perl combined preload and load-path flags", + binName: "perl", + tmpPrefix: "openclaw-perl-preload-load-path-", + command: ["perl", "-Ilib", "-MPreload", "./safe.pl"], + setup: (tmp) => { + fs.writeFileSync(path.join(tmp, "safe.pl"), 'print "SAFE\\n";\n'); + }, + }, + { + name: "rejects shell payloads that hide mutable interpreter scripts", + binName: "node", + tmpPrefix: "openclaw-inline-shell-node-", + command: ["sh", "-lc", "node ./run.js"], + setup: (tmp) => { + fs.writeFileSync(path.join(tmp, "run.js"), 'console.log("SAFE")\n'); + }, + }, +]; + describe("hardenApprovedExecutionPaths", () => { const cases: HardeningCase[] = [ { @@ -432,29 +563,48 @@ describe("hardenApprovedExecutionPaths", () => { withFakeRuntimeBins({ binNames, run: () => { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-approval-script-plan-")); - const fixture = createScriptOperandFixture(tmp, runtimeCase); - fs.writeFileSync(fixture.scriptPath, fixture.initialBody); - const executablePath = fixture.command[0]; - if (executablePath?.endsWith("pnpm.js")) { - const shimPath = path.join(tmp, "pnpm.js"); - fs.writeFileSync(shimPath, "#!/usr/bin/env node\nconsole.log('shim')\n"); - fs.chmodSync(shimPath, 0o755); - } + withScriptOperandPlanFixture( + { + tmpPrefix: "openclaw-approval-script-plan-", + fixture: runtimeCase, + afterWrite: (fixture, tmp) => { + const executablePath = fixture.command[0]; + if (executablePath?.endsWith("pnpm.js")) { + const shimPath = path.join(tmp, "pnpm.js"); + fs.writeFileSync(shimPath, "#!/usr/bin/env node\nconsole.log('shim')\n"); + fs.chmodSync(shimPath, 0o755); + } + }, + }, + (fixture, tmp) => { + expectMutableFileOperandApprovalPlan(fixture, tmp); + }, + ); + }, + }); + }); + } + + it("captures mutable shell script operands in 
approval plans", () => { + withScriptOperandPlanFixture( + { + tmpPrefix: "openclaw-approval-script-plan-", + }, + (fixture, tmp) => { + expectMutableFileOperandApprovalPlan(fixture, tmp); + }, + ); + }); + + for (const testCase of unsafeRuntimeInvocationCases) { + it(testCase.name, () => { + withFakeRuntimeBin({ + binName: testCase.binName, + run: () => { + const tmp = fs.mkdtempSync(path.join(os.tmpdir(), testCase.tmpPrefix)); try { - const prepared = buildSystemRunApprovalPlan({ - command: fixture.command, - cwd: tmp, - }); - expect(prepared.ok).toBe(true); - if (!prepared.ok) { - throw new Error("unreachable"); - } - expect(prepared.plan.mutableFileOperand).toEqual({ - argvIndex: fixture.expectedArgvIndex, - path: fs.realpathSync(fixture.scriptPath), - sha256: expect.any(String), - }); + testCase.setup?.(tmp); + expectRuntimeApprovalDenied(testCase.command, tmp); } finally { fs.rmSync(tmp, { recursive: true, force: true }); } @@ -463,260 +613,6 @@ describe("hardenApprovedExecutionPaths", () => { }); } - it("captures mutable shell script operands in approval plans", () => { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-approval-script-plan-")); - const fixture = createScriptOperandFixture(tmp); - fs.writeFileSync(fixture.scriptPath, fixture.initialBody); - if (process.platform !== "win32") { - fs.chmodSync(fixture.scriptPath, 0o755); - } - try { - const prepared = buildSystemRunApprovalPlan({ - command: fixture.command, - cwd: tmp, - }); - expect(prepared.ok).toBe(true); - if (!prepared.ok) { - throw new Error("unreachable"); - } - expect(prepared.plan.mutableFileOperand).toEqual({ - argvIndex: fixture.expectedArgvIndex, - path: fs.realpathSync(fixture.scriptPath), - sha256: expect.any(String), - }); - } finally { - fs.rmSync(tmp, { recursive: true, force: true }); - } - }); - - it("rejects bun package script names that do not bind a concrete file", () => { - withFakeRuntimeBin({ - binName: "bun", - run: () => { - const tmp = 
fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-bun-package-script-")); - try { - const prepared = buildSystemRunApprovalPlan({ - command: ["bun", "run", "dev"], - cwd: tmp, - }); - expect(prepared).toEqual({ - ok: false, - message: - "SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command", - }); - } finally { - fs.rmSync(tmp, { recursive: true, force: true }); - } - }, - }); - }); - - it("rejects deno eval invocations that do not bind a concrete file", () => { - withFakeRuntimeBin({ - binName: "deno", - run: () => { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-deno-eval-")); - try { - const prepared = buildSystemRunApprovalPlan({ - command: ["deno", "eval", "console.log('SAFE')"], - cwd: tmp, - }); - expect(prepared).toEqual({ - ok: false, - message: - "SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command", - }); - } finally { - fs.rmSync(tmp, { recursive: true, force: true }); - } - }, - }); - }); - - it("rejects tsx eval invocations that do not bind a concrete file", () => { - withFakeRuntimeBin({ - binName: "tsx", - run: () => { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-tsx-eval-")); - try { - const prepared = buildSystemRunApprovalPlan({ - command: ["tsx", "--eval", "console.log('SAFE')"], - cwd: tmp, - }); - expect(prepared).toEqual({ - ok: false, - message: - "SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command", - }); - } finally { - fs.rmSync(tmp, { recursive: true, force: true }); - } - }, - }); - }); - - it("rejects node inline import operands that cannot be bound to one stable file", () => { - withFakeRuntimeBin({ - binName: "node", - run: () => { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-node-import-inline-")); - try { - fs.writeFileSync(path.join(tmp, "main.mjs"), 'console.log("SAFE")\n'); - fs.writeFileSync(path.join(tmp, "preload.mjs"), 'console.log("SAFE")\n'); - const prepared = buildSystemRunApprovalPlan({ - 
command: ["node", "--import=./preload.mjs", "./main.mjs"], - cwd: tmp, - }); - expect(prepared).toEqual({ - ok: false, - message: - "SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command", - }); - } finally { - fs.rmSync(tmp, { recursive: true, force: true }); - } - }, - }); - }); - - it("rejects ruby require preloads that approval cannot bind completely", () => { - withFakeRuntimeBin({ - binName: "ruby", - run: () => { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-ruby-require-")); - try { - fs.writeFileSync(path.join(tmp, "safe.rb"), 'puts "SAFE"\n'); - const prepared = buildSystemRunApprovalPlan({ - command: ["ruby", "-r", "attacker", "./safe.rb"], - cwd: tmp, - }); - expect(prepared).toEqual({ - ok: false, - message: - "SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command", - }); - } finally { - fs.rmSync(tmp, { recursive: true, force: true }); - } - }, - }); - }); - - it("rejects ruby load-path flags that can redirect module resolution after approval", () => { - withFakeRuntimeBin({ - binName: "ruby", - run: () => { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-ruby-load-path-")); - try { - fs.writeFileSync(path.join(tmp, "safe.rb"), 'puts "SAFE"\n'); - const prepared = buildSystemRunApprovalPlan({ - command: ["ruby", "-I.", "./safe.rb"], - cwd: tmp, - }); - expect(prepared).toEqual({ - ok: false, - message: - "SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command", - }); - } finally { - fs.rmSync(tmp, { recursive: true, force: true }); - } - }, - }); - }); - - it("rejects perl module preloads that approval cannot bind completely", () => { - withFakeRuntimeBin({ - binName: "perl", - run: () => { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-perl-module-preload-")); - try { - fs.writeFileSync(path.join(tmp, "safe.pl"), 'print "SAFE\\n";\n'); - const prepared = buildSystemRunApprovalPlan({ - command: ["perl", "-MPreload", "./safe.pl"], - 
cwd: tmp, - }); - expect(prepared).toEqual({ - ok: false, - message: - "SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command", - }); - } finally { - fs.rmSync(tmp, { recursive: true, force: true }); - } - }, - }); - }); - - it("rejects perl load-path flags that can redirect module resolution after approval", () => { - withFakeRuntimeBin({ - binName: "perl", - run: () => { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-perl-load-path-")); - try { - fs.writeFileSync(path.join(tmp, "safe.pl"), 'print "SAFE\\n";\n'); - const prepared = buildSystemRunApprovalPlan({ - command: ["perl", "-Ilib", "./safe.pl"], - cwd: tmp, - }); - expect(prepared).toEqual({ - ok: false, - message: - "SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command", - }); - } finally { - fs.rmSync(tmp, { recursive: true, force: true }); - } - }, - }); - }); - - it("rejects perl combined preload and load-path flags", () => { - withFakeRuntimeBin({ - binName: "perl", - run: () => { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-perl-preload-load-path-")); - try { - fs.writeFileSync(path.join(tmp, "safe.pl"), 'print "SAFE\\n";\n'); - const prepared = buildSystemRunApprovalPlan({ - command: ["perl", "-Ilib", "-MPreload", "./safe.pl"], - cwd: tmp, - }); - expect(prepared).toEqual({ - ok: false, - message: - "SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command", - }); - } finally { - fs.rmSync(tmp, { recursive: true, force: true }); - } - }, - }); - }); - - it("rejects shell payloads that hide mutable interpreter scripts", () => { - withFakeRuntimeBin({ - binName: "node", - run: () => { - const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-inline-shell-node-")); - try { - fs.writeFileSync(path.join(tmp, "run.js"), 'console.log("SAFE")\n'); - const prepared = buildSystemRunApprovalPlan({ - command: ["sh", "-lc", "node ./run.js"], - cwd: tmp, - }); - expect(prepared).toEqual({ - ok: false, - 
message: - "SYSTEM_RUN_DENIED: approval cannot safely bind this interpreter/runtime command", - }); - } finally { - fs.rmSync(tmp, { recursive: true, force: true }); - } - }, - }); - }); - it("captures the real shell script operand after value-taking shell flags", () => { const tmp = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-shell-option-value-")); try { diff --git a/src/plugin-sdk/slack.ts b/src/plugin-sdk/slack.ts index 18cf529ca45..c3aabde6fe2 100644 --- a/src/plugin-sdk/slack.ts +++ b/src/plugin-sdk/slack.ts @@ -8,6 +8,7 @@ export { resolveSlackAccount, resolveSlackReplyToMode, } from "../slack/accounts.js"; +export { isSlackInteractiveRepliesEnabled } from "../slack/interactive-replies.js"; export { inspectSlackAccount } from "../slack/account-inspect.js"; export { projectCredentialSnapshotFields, diff --git a/src/plugins/discovery.test.ts b/src/plugins/discovery.test.ts index 400094c1fef..3b10146d28f 100644 --- a/src/plugins/discovery.test.ts +++ b/src/plugins/discovery.test.ts @@ -1,32 +1,22 @@ -import { randomUUID } from "node:crypto"; import fs from "node:fs"; -import os from "node:os"; import path from "node:path"; import { afterAll, afterEach, describe, expect, it } from "vitest"; import { clearPluginDiscoveryCache, discoverOpenClawPlugins } from "./discovery.js"; +import { + cleanupTrackedTempDirs, + makeTrackedTempDir, + mkdirSafeDir, +} from "./test-helpers/fs-fixtures.js"; const tempDirs: string[] = []; const previousUmask = process.umask(0o022); -function chmodSafeDir(dir: string) { - if (process.platform === "win32") { - return; - } - fs.chmodSync(dir, 0o755); -} - -function mkdirSafe(dir: string) { - fs.mkdirSync(dir, { recursive: true }); - chmodSafeDir(dir); -} - function makeTempDir() { - const dir = path.join(os.tmpdir(), `openclaw-plugins-${randomUUID()}`); - mkdirSafe(dir); - tempDirs.push(dir); - return dir; + return makeTrackedTempDir("openclaw-plugins", tempDirs); } +const mkdirSafe = mkdirSafeDir; + function 
buildDiscoveryEnv(stateDir: string): NodeJS.ProcessEnv { return { OPENCLAW_STATE_DIR: stateDir, @@ -66,13 +56,7 @@ function expectEscapesPackageDiagnostic(diagnostics: Array<{ message: string }>) afterEach(() => { clearPluginDiscoveryCache(); - for (const dir of tempDirs.splice(0)) { - try { - fs.rmSync(dir, { recursive: true, force: true }); - } catch { - // ignore cleanup failures - } - } + cleanupTrackedTempDirs(tempDirs); }); afterAll(() => { diff --git a/src/plugins/loader.test.ts b/src/plugins/loader.test.ts index 031d75b31b7..20d3fb22287 100644 --- a/src/plugins/loader.test.ts +++ b/src/plugins/loader.test.ts @@ -1472,6 +1472,30 @@ describe("loadOpenClawPlugins", () => { ).toBe(true); }); + it("dedupes the open allowlist warning for repeated loads of the same plugin set", () => { + useNoBundledPlugins(); + clearPluginLoaderCache(); + const plugin = writePlugin({ + id: "warn-open-allow-once", + body: `module.exports = { id: "warn-open-allow-once", register() {} };`, + }); + const warnings: string[] = []; + const options = { + cache: false, + logger: createWarningLogger(warnings), + config: { + plugins: { + load: { paths: [plugin.file] }, + }, + }, + }; + + loadOpenClawPlugins(options); + loadOpenClawPlugins(options); + + expect(warnings.filter((msg) => msg.includes("plugins.allow is empty"))).toHaveLength(1); + }); + it("does not auto-load workspace-discovered plugins unless explicitly trusted", () => { useNoBundledPlugins(); const workspaceDir = makeTempDir(); diff --git a/src/plugins/loader.ts b/src/plugins/loader.ts index 40983b43347..75882a5105b 100644 --- a/src/plugins/loader.ts +++ b/src/plugins/loader.ts @@ -51,9 +51,11 @@ export type PluginLoadOptions = { const MAX_PLUGIN_REGISTRY_CACHE_ENTRIES = 32; const registryCache = new Map(); +const openAllowlistWarningCache = new Set(); export function clearPluginLoaderCache(): void { registryCache.clear(); + openAllowlistWarningCache.clear(); } const defaultLogger = () => createSubsystemLogger("plugins"); @@ 
-455,6 +457,7 @@ function warnWhenAllowlistIsOpen(params: { logger: PluginLogger; pluginsEnabled: boolean; allow: string[]; + warningCacheKey: string; discoverablePlugins: Array<{ id: string; source: string; origin: PluginRecord["origin"] }>; }) { if (!params.pluginsEnabled) { @@ -467,11 +470,15 @@ function warnWhenAllowlistIsOpen(params: { if (nonBundled.length === 0) { return; } + if (openAllowlistWarningCache.has(params.warningCacheKey)) { + return; + } const preview = nonBundled .slice(0, 6) .map((entry) => `${entry.id} (${entry.source})`) .join(", "); const extra = nonBundled.length > 6 ? ` (+${nonBundled.length - 6} more)` : ""; + openAllowlistWarningCache.add(params.warningCacheKey); params.logger.warn( `[plugins] plugins.allow is empty; discovered non-bundled plugins may auto-load: ${preview}${extra}. Set plugins.allow to explicit trusted ids.`, ); @@ -598,6 +605,7 @@ export function loadOpenClawPlugins(options: PluginLoadOptions = {}): PluginRegi logger, pluginsEnabled: normalized.enabled, allow: normalized.allow, + warningCacheKey: cacheKey, discoverablePlugins: manifestRegistry.plugins.map((plugin) => ({ id: plugin.id, source: plugin.source, diff --git a/src/plugins/manifest-registry.test.ts b/src/plugins/manifest-registry.test.ts index bbf65d14e41..a948344cba8 100644 --- a/src/plugins/manifest-registry.test.ts +++ b/src/plugins/manifest-registry.test.ts @@ -1,6 +1,4 @@ -import { randomUUID } from "node:crypto"; import fs from "node:fs"; -import os from "node:os"; import path from "node:path"; import { afterAll, afterEach, describe, expect, it } from "vitest"; import type { PluginCandidate } from "./discovery.js"; @@ -8,6 +6,7 @@ import { clearPluginManifestRegistryCache, loadPluginManifestRegistry, } from "./manifest-registry.js"; +import { cleanupTrackedTempDirs, makeTrackedTempDir } from "./test-helpers/fs-fixtures.js"; const tempDirs: string[] = []; const previousUmask = process.umask(0o022); @@ -25,10 +24,7 @@ function mkdirSafe(dir: string) { } 
function makeTempDir() { - const dir = path.join(os.tmpdir(), `openclaw-manifest-registry-${randomUUID()}`); - mkdirSafe(dir); - tempDirs.push(dir); - return dir; + return makeTrackedTempDir("openclaw-manifest-registry", tempDirs); } function writeManifest(dir: string, manifest: Record) { @@ -133,17 +129,7 @@ function expectUnsafeWorkspaceManifestRejected(params: { afterEach(() => { clearPluginManifestRegistryCache(); - while (tempDirs.length > 0) { - const dir = tempDirs.pop(); - if (!dir) { - break; - } - try { - fs.rmSync(dir, { recursive: true, force: true }); - } catch { - // ignore cleanup failures - } - } + cleanupTrackedTempDirs(tempDirs); }); afterAll(() => { diff --git a/src/plugins/source-display.test.ts b/src/plugins/source-display.test.ts index 3c85cca88b7..6d1b3da7719 100644 --- a/src/plugins/source-display.test.ts +++ b/src/plugins/source-display.test.ts @@ -3,83 +3,60 @@ import { describe, expect, it } from "vitest"; import { withEnv } from "../test-utils/env.js"; import { formatPluginSourceForTable, resolvePluginSourceRoots } from "./source-display.js"; +function createPluginSourceRoots() { + const stockRoot = path.resolve( + path.sep, + "opt", + "homebrew", + "lib", + "node_modules", + "openclaw", + "extensions", + ); + const globalRoot = path.resolve(path.sep, "Users", "x", ".openclaw", "extensions"); + const workspaceRoot = path.resolve(path.sep, "Users", "x", "ws", ".openclaw", "extensions"); + return { + stock: stockRoot, + global: globalRoot, + workspace: workspaceRoot, + }; +} + describe("formatPluginSourceForTable", () => { it("shortens bundled plugin sources under the stock root", () => { - const stockRoot = path.resolve( - path.sep, - "opt", - "homebrew", - "lib", - "node_modules", - "openclaw", - "extensions", - ); - const globalRoot = path.resolve(path.sep, "Users", "x", ".openclaw", "extensions"); - const workspaceRoot = path.resolve(path.sep, "Users", "x", "ws", ".openclaw", "extensions"); + const roots = createPluginSourceRoots(); 
const out = formatPluginSourceForTable( { origin: "bundled", - source: path.join(stockRoot, "bluebubbles", "index.ts"), - }, - { - stock: stockRoot, - global: globalRoot, - workspace: workspaceRoot, + source: path.join(roots.stock, "bluebubbles", "index.ts"), }, + roots, ); expect(out.value).toBe("stock:bluebubbles/index.ts"); expect(out.rootKey).toBe("stock"); }); it("shortens workspace plugin sources under the workspace root", () => { - const stockRoot = path.resolve( - path.sep, - "opt", - "homebrew", - "lib", - "node_modules", - "openclaw", - "extensions", - ); - const globalRoot = path.resolve(path.sep, "Users", "x", ".openclaw", "extensions"); - const workspaceRoot = path.resolve(path.sep, "Users", "x", "ws", ".openclaw", "extensions"); + const roots = createPluginSourceRoots(); const out = formatPluginSourceForTable( { origin: "workspace", - source: path.join(workspaceRoot, "matrix", "index.ts"), - }, - { - stock: stockRoot, - global: globalRoot, - workspace: workspaceRoot, + source: path.join(roots.workspace, "matrix", "index.ts"), }, + roots, ); expect(out.value).toBe("workspace:matrix/index.ts"); expect(out.rootKey).toBe("workspace"); }); it("shortens global plugin sources under the global root", () => { - const stockRoot = path.resolve( - path.sep, - "opt", - "homebrew", - "lib", - "node_modules", - "openclaw", - "extensions", - ); - const globalRoot = path.resolve(path.sep, "Users", "x", ".openclaw", "extensions"); - const workspaceRoot = path.resolve(path.sep, "Users", "x", "ws", ".openclaw", "extensions"); + const roots = createPluginSourceRoots(); const out = formatPluginSourceForTable( { origin: "global", - source: path.join(globalRoot, "zalo", "index.js"), - }, - { - stock: stockRoot, - global: globalRoot, - workspace: workspaceRoot, + source: path.join(roots.global, "zalo", "index.js"), }, + roots, ); expect(out.value).toBe("global:zalo/index.js"); expect(out.rootKey).toBe("global"); diff --git a/src/plugins/test-helpers/fs-fixtures.ts 
b/src/plugins/test-helpers/fs-fixtures.ts new file mode 100644 index 00000000000..ec6b88fa4e4 --- /dev/null +++ b/src/plugins/test-helpers/fs-fixtures.ts @@ -0,0 +1,33 @@ +import { randomUUID } from "node:crypto"; +import fs from "node:fs"; +import os from "node:os"; +import path from "node:path"; + +function chmodSafeDir(dir: string) { + if (process.platform === "win32") { + return; + } + fs.chmodSync(dir, 0o755); +} + +export function mkdirSafeDir(dir: string) { + fs.mkdirSync(dir, { recursive: true }); + chmodSafeDir(dir); +} + +export function makeTrackedTempDir(prefix: string, trackedDirs: string[]) { + const dir = path.join(os.tmpdir(), `${prefix}-${randomUUID()}`); + mkdirSafeDir(dir); + trackedDirs.push(dir); + return dir; +} + +export function cleanupTrackedTempDirs(trackedDirs: string[]) { + for (const dir of trackedDirs.splice(0)) { + try { + fs.rmSync(dir, { recursive: true, force: true }); + } catch { + // ignore cleanup failures + } + } +} diff --git a/src/plugins/wired-hooks-compaction.test.ts b/src/plugins/wired-hooks-compaction.test.ts index 5081922ec1d..1e3f0021e29 100644 --- a/src/plugins/wired-hooks-compaction.test.ts +++ b/src/plugins/wired-hooks-compaction.test.ts @@ -40,6 +40,28 @@ describe("compaction hook wiring", () => { vi.mocked(emitAgentEvent).mockClear(); }); + function createCompactionEndCtx(params: { + runId: string; + messages?: unknown[]; + compactionCount?: number; + withRetryHooks?: boolean; + }) { + return { + params: { runId: params.runId, session: { messages: params.messages ?? [] } }, + state: { compactionInFlight: true }, + log: { debug: vi.fn(), warn: vi.fn() }, + maybeResolveCompactionWait: vi.fn(), + incrementCompactionCount: vi.fn(), + getCompactionCount: () => params.compactionCount ?? 0, + ...(params.withRetryHooks + ? 
{ + noteCompactionRetry: vi.fn(), + resetForCompactionRetry: vi.fn(), + } + : {}), + }; + } + it("calls runBeforeCompaction in handleAutoCompactionStart", () => { hookMocks.runner.hasHooks.mockReturnValue(true); @@ -86,14 +108,11 @@ describe("compaction hook wiring", () => { it("calls runAfterCompaction when willRetry is false", () => { hookMocks.runner.hasHooks.mockReturnValue(true); - const ctx = { - params: { runId: "r2", session: { messages: [1, 2] } }, - state: { compactionInFlight: true }, - log: { debug: vi.fn(), warn: vi.fn() }, - maybeResolveCompactionWait: vi.fn(), - incrementCompactionCount: vi.fn(), - getCompactionCount: () => 1, - }; + const ctx = createCompactionEndCtx({ + runId: "r2", + messages: [1, 2], + compactionCount: 1, + }); handleAutoCompactionEnd( ctx as never, @@ -126,16 +145,11 @@ describe("compaction hook wiring", () => { it("does not call runAfterCompaction when willRetry is true but still increments counter", () => { hookMocks.runner.hasHooks.mockReturnValue(true); - const ctx = { - params: { runId: "r3", session: { messages: [] } }, - state: { compactionInFlight: true }, - log: { debug: vi.fn(), warn: vi.fn() }, - noteCompactionRetry: vi.fn(), - resetForCompactionRetry: vi.fn(), - maybeResolveCompactionWait: vi.fn(), - incrementCompactionCount: vi.fn(), - getCompactionCount: () => 1, - }; + const ctx = createCompactionEndCtx({ + runId: "r3", + compactionCount: 1, + withRetryHooks: true, + }); handleAutoCompactionEnd( ctx as never, @@ -160,14 +174,7 @@ describe("compaction hook wiring", () => { }); it("does not increment counter when compaction was aborted", () => { - const ctx = { - params: { runId: "r3b", session: { messages: [] } }, - state: { compactionInFlight: true }, - log: { debug: vi.fn(), warn: vi.fn() }, - maybeResolveCompactionWait: vi.fn(), - incrementCompactionCount: vi.fn(), - getCompactionCount: () => 0, - }; + const ctx = createCompactionEndCtx({ runId: "r3b" }); handleAutoCompactionEnd( ctx as never, @@ -183,14 +190,7 
@@ describe("compaction hook wiring", () => { }); it("does not increment counter when compaction has result but was aborted", () => { - const ctx = { - params: { runId: "r3b2", session: { messages: [] } }, - state: { compactionInFlight: true }, - log: { debug: vi.fn(), warn: vi.fn() }, - maybeResolveCompactionWait: vi.fn(), - incrementCompactionCount: vi.fn(), - getCompactionCount: () => 0, - }; + const ctx = createCompactionEndCtx({ runId: "r3b2" }); handleAutoCompactionEnd( ctx as never, @@ -206,14 +206,7 @@ describe("compaction hook wiring", () => { }); it("does not increment counter when result is undefined", () => { - const ctx = { - params: { runId: "r3c", session: { messages: [] } }, - state: { compactionInFlight: true }, - log: { debug: vi.fn(), warn: vi.fn() }, - maybeResolveCompactionWait: vi.fn(), - incrementCompactionCount: vi.fn(), - getCompactionCount: () => 0, - }; + const ctx = createCompactionEndCtx({ runId: "r3c" }); handleAutoCompactionEnd( ctx as never, diff --git a/src/process/kill-tree.ts b/src/process/kill-tree.ts index e3f83f63a0e..6f0d752e4c5 100644 --- a/src/process/kill-tree.ts +++ b/src/process/kill-tree.ts @@ -83,6 +83,7 @@ function runTaskkill(args: string[]): void { spawn("taskkill", args, { stdio: "ignore", detached: true, + windowsHide: true, }); } catch { // Ignore taskkill spawn failures diff --git a/src/scripts/ci-changed-scope.test.ts b/src/scripts/ci-changed-scope.test.ts index 358dbfc472c..682cfb8d9b3 100644 --- a/src/scripts/ci-changed-scope.test.ts +++ b/src/scripts/ci-changed-scope.test.ts @@ -124,6 +124,16 @@ describe("detectChangedScope", () => { }); }); + it("runs platform lanes when the CI workflow changes", () => { + expect(detectChangedScope([".github/workflows/ci.yml"])).toEqual({ + runNode: true, + runMacos: true, + runAndroid: true, + runWindows: true, + runSkillsPython: true, + }); + }); + it("treats base and head as literal git args", () => { const markerPath = path.join( os.tmpdir(), diff --git 
a/src/secrets/audit.test.ts b/src/secrets/audit.test.ts index d71c9a46cd9..b8a22cdcb43 100644 --- a/src/secrets/audit.test.ts +++ b/src/secrets/audit.test.ts @@ -113,6 +113,40 @@ async function seedAuditFixture(fixture: AuditFixture): Promise { describe("secrets audit", () => { let fixture: AuditFixture; + async function writeModelsProvider( + overrides: Partial<{ + apiKey: unknown; + headers: Record; + }> = {}, + ) { + await writeJsonFile(fixture.modelsPath, { + providers: { + openai: { + baseUrl: "https://api.openai.com/v1", + api: "openai-completions", + apiKey: OPENAI_API_KEY_MARKER, + models: [{ id: "gpt-5", name: "gpt-5" }], + ...overrides, + }, + }, + }); + } + + function expectModelsFinding( + report: Awaited>, + params: { code: string; jsonPath?: string; present?: boolean }, + ) { + expect( + hasFinding( + report, + (entry) => + entry.code === params.code && + entry.file === fixture.modelsPath && + (params.jsonPath === undefined || entry.jsonPath === params.jsonPath), + ), + ).toBe(params.present ?? 
true); + } + beforeEach(async () => { fixture = await createAuditFixture(); await seedAuditFixture(fixture); @@ -278,221 +312,116 @@ describe("secrets audit", () => { }); it("scans agent models.json files for plaintext provider apiKey values", async () => { - await writeJsonFile(fixture.modelsPath, { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: "sk-models-plaintext", // pragma: allowlist secret - models: [{ id: "gpt-5", name: "gpt-5" }], - }, - }, - }); + await writeModelsProvider({ apiKey: "sk-models-plaintext" }); // pragma: allowlist secret const report = await runSecretsAudit({ env: fixture.env }); - expect( - hasFinding( - report, - (entry) => - entry.code === "PLAINTEXT_FOUND" && - entry.file === fixture.modelsPath && - entry.jsonPath === "providers.openai.apiKey", - ), - ).toBe(true); + expectModelsFinding(report, { + code: "PLAINTEXT_FOUND", + jsonPath: "providers.openai.apiKey", + }); expect(report.filesScanned).toContain(fixture.modelsPath); }); it("scans agent models.json files for plaintext provider header values", async () => { - await writeJsonFile(fixture.modelsPath, { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: OPENAI_API_KEY_MARKER, - headers: { - Authorization: "Bearer sk-header-plaintext", // pragma: allowlist secret - }, - models: [{ id: "gpt-5", name: "gpt-5" }], - }, + await writeModelsProvider({ + headers: { + Authorization: "Bearer sk-header-plaintext", // pragma: allowlist secret }, }); const report = await runSecretsAudit({ env: fixture.env }); - expect( - hasFinding( - report, - (entry) => - entry.code === "PLAINTEXT_FOUND" && - entry.file === fixture.modelsPath && - entry.jsonPath === "providers.openai.headers.Authorization", - ), - ).toBe(true); + expectModelsFinding(report, { + code: "PLAINTEXT_FOUND", + jsonPath: "providers.openai.headers.Authorization", + }); }); it("does not flag non-sensitive routing headers in 
models.json", async () => { - await writeJsonFile(fixture.modelsPath, { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: OPENAI_API_KEY_MARKER, - headers: { - "X-Proxy-Region": "us-west", - }, - models: [{ id: "gpt-5", name: "gpt-5" }], - }, + await writeModelsProvider({ + headers: { + "X-Proxy-Region": "us-west", }, }); const report = await runSecretsAudit({ env: fixture.env }); - expect( - hasFinding( - report, - (entry) => - entry.code === "PLAINTEXT_FOUND" && - entry.file === fixture.modelsPath && - entry.jsonPath === "providers.openai.headers.X-Proxy-Region", - ), - ).toBe(false); + expectModelsFinding(report, { + code: "PLAINTEXT_FOUND", + jsonPath: "providers.openai.headers.X-Proxy-Region", + present: false, + }); }); it("does not flag models.json marker values as plaintext", async () => { - await writeJsonFile(fixture.modelsPath, { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: OPENAI_API_KEY_MARKER, - models: [{ id: "gpt-5", name: "gpt-5" }], - }, - }, - }); + await writeModelsProvider(); const report = await runSecretsAudit({ env: fixture.env }); - expect( - hasFinding( - report, - (entry) => - entry.code === "PLAINTEXT_FOUND" && - entry.file === fixture.modelsPath && - entry.jsonPath === "providers.openai.apiKey", - ), - ).toBe(false); + expectModelsFinding(report, { + code: "PLAINTEXT_FOUND", + jsonPath: "providers.openai.apiKey", + present: false, + }); }); it("flags arbitrary all-caps models.json apiKey values as plaintext", async () => { - await writeJsonFile(fixture.modelsPath, { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: "ALLCAPS_SAMPLE", // pragma: allowlist secret - models: [{ id: "gpt-5", name: "gpt-5" }], - }, - }, - }); + await writeModelsProvider({ apiKey: "ALLCAPS_SAMPLE" }); // pragma: allowlist secret const report = await runSecretsAudit({ env: fixture.env }); - 
expect( - hasFinding( - report, - (entry) => - entry.code === "PLAINTEXT_FOUND" && - entry.file === fixture.modelsPath && - entry.jsonPath === "providers.openai.apiKey", - ), - ).toBe(true); + expectModelsFinding(report, { + code: "PLAINTEXT_FOUND", + jsonPath: "providers.openai.apiKey", + }); }); it("does not flag models.json header marker values as plaintext", async () => { - await writeJsonFile(fixture.modelsPath, { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: OPENAI_API_KEY_MARKER, - headers: { - Authorization: "secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret - "x-managed-token": "secretref-managed", // pragma: allowlist secret - }, - models: [{ id: "gpt-5", name: "gpt-5" }], - }, + await writeModelsProvider({ + headers: { + Authorization: "secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret + "x-managed-token": "secretref-managed", // pragma: allowlist secret }, }); const report = await runSecretsAudit({ env: fixture.env }); - expect( - hasFinding( - report, - (entry) => - entry.code === "PLAINTEXT_FOUND" && - entry.file === fixture.modelsPath && - entry.jsonPath === "providers.openai.headers.Authorization", - ), - ).toBe(false); - expect( - hasFinding( - report, - (entry) => - entry.code === "PLAINTEXT_FOUND" && - entry.file === fixture.modelsPath && - entry.jsonPath === "providers.openai.headers.x-managed-token", - ), - ).toBe(false); + expectModelsFinding(report, { + code: "PLAINTEXT_FOUND", + jsonPath: "providers.openai.headers.Authorization", + present: false, + }); + expectModelsFinding(report, { + code: "PLAINTEXT_FOUND", + jsonPath: "providers.openai.headers.x-managed-token", + present: false, + }); }); it("reports unresolved models.json SecretRef objects in provider headers", async () => { - await writeJsonFile(fixture.modelsPath, { - providers: { - openai: { - baseUrl: "https://api.openai.com/v1", - api: "openai-completions", - apiKey: OPENAI_API_KEY_MARKER, - 
headers: { - Authorization: { - source: "env", - provider: "default", - id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret - }, - }, - models: [{ id: "gpt-5", name: "gpt-5" }], + await writeModelsProvider({ + headers: { + Authorization: { + source: "env", + provider: "default", + id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret }, }, }); const report = await runSecretsAudit({ env: fixture.env }); - expect( - hasFinding( - report, - (entry) => - entry.code === "REF_UNRESOLVED" && - entry.file === fixture.modelsPath && - entry.jsonPath === "providers.openai.headers.Authorization", - ), - ).toBe(true); + expectModelsFinding(report, { + code: "REF_UNRESOLVED", + jsonPath: "providers.openai.headers.Authorization", + }); }); it("reports malformed models.json as unresolved findings", async () => { await fs.writeFile(fixture.modelsPath, "{bad-json", "utf8"); const report = await runSecretsAudit({ env: fixture.env }); - expect( - hasFinding( - report, - (entry) => entry.code === "REF_UNRESOLVED" && entry.file === fixture.modelsPath, - ), - ).toBe(true); + expectModelsFinding(report, { code: "REF_UNRESOLVED" }); }); it("reports non-regular models.json files as unresolved findings", async () => { await fs.rm(fixture.modelsPath, { force: true }); await fs.mkdir(fixture.modelsPath, { recursive: true }); const report = await runSecretsAudit({ env: fixture.env }); - expect( - hasFinding( - report, - (entry) => entry.code === "REF_UNRESOLVED" && entry.file === fixture.modelsPath, - ), - ).toBe(true); + expectModelsFinding(report, { code: "REF_UNRESOLVED" }); }); it("reports oversized models.json as unresolved findings", async () => { @@ -509,12 +438,7 @@ describe("secrets audit", () => { }); const report = await runSecretsAudit({ env: fixture.env }); - expect( - hasFinding( - report, - (entry) => entry.code === "REF_UNRESOLVED" && entry.file === fixture.modelsPath, - ), - ).toBe(true); + expectModelsFinding(report, { code: "REF_UNRESOLVED" }); }); it("scans active 
agent-dir override models.json even when outside state dir", async () => { diff --git a/src/secrets/runtime-web-tools.test.ts b/src/secrets/runtime-web-tools.test.ts index b4484095188..57e3e955066 100644 --- a/src/secrets/runtime-web-tools.test.ts +++ b/src/secrets/runtime-web-tools.test.ts @@ -65,6 +65,24 @@ function readProviderKey(config: OpenClawConfig, provider: ProviderUnderTest): u return config.tools?.web?.search?.perplexity?.apiKey; } +function expectInactiveFirecrawlSecretRef(params: { + resolveSpy: ReturnType; + metadata: Awaited>["metadata"]; + context: Awaited>["context"]; +}) { + expect(params.resolveSpy).not.toHaveBeenCalled(); + expect(params.metadata.fetch.firecrawl.active).toBe(false); + expect(params.metadata.fetch.firecrawl.apiKeySource).toBe("secretRef"); + expect(params.context.warnings).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + code: "SECRETS_REF_IGNORED_INACTIVE_SURFACE", + path: "tools.web.fetch.firecrawl.apiKey", + }), + ]), + ); +} + describe("runtime web tools resolution", () => { afterEach(() => { vi.restoreAllMocks(); @@ -339,17 +357,7 @@ describe("runtime web tools resolution", () => { }), }); - expect(resolveSpy).not.toHaveBeenCalled(); - expect(metadata.fetch.firecrawl.active).toBe(false); - expect(metadata.fetch.firecrawl.apiKeySource).toBe("secretRef"); - expect(context.warnings).toEqual( - expect.arrayContaining([ - expect.objectContaining({ - code: "SECRETS_REF_IGNORED_INACTIVE_SURFACE", - path: "tools.web.fetch.firecrawl.apiKey", - }), - ]), - ); + expectInactiveFirecrawlSecretRef({ resolveSpy, metadata, context }); }); it("does not resolve Firecrawl SecretRef when Firecrawl is disabled", async () => { @@ -370,17 +378,7 @@ describe("runtime web tools resolution", () => { }), }); - expect(resolveSpy).not.toHaveBeenCalled(); - expect(metadata.fetch.firecrawl.active).toBe(false); - expect(metadata.fetch.firecrawl.apiKeySource).toBe("secretRef"); - expect(context.warnings).toEqual( - expect.arrayContaining([ 
- expect.objectContaining({ - code: "SECRETS_REF_IGNORED_INACTIVE_SURFACE", - path: "tools.web.fetch.firecrawl.apiKey", - }), - ]), - ); + expectInactiveFirecrawlSecretRef({ resolveSpy, metadata, context }); }); it("uses env fallback for unresolved Firecrawl SecretRef when active", async () => { diff --git a/src/secrets/runtime-web-tools.ts b/src/secrets/runtime-web-tools.ts index d888b36e8ab..883aac6bd02 100644 --- a/src/secrets/runtime-web-tools.ts +++ b/src/secrets/runtime-web-tools.ts @@ -471,39 +471,30 @@ export async function resolveRuntimeWebTools(params: { } } + const failUnresolvedSearchNoFallback = (unresolved: { path: string; reason: string }) => { + const diagnostic: RuntimeWebDiagnostic = { + code: "WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK", + message: unresolved.reason, + path: unresolved.path, + }; + diagnostics.push(diagnostic); + searchMetadata.diagnostics.push(diagnostic); + pushWarning(params.context, { + code: "WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK", + path: unresolved.path, + message: unresolved.reason, + }); + throw new Error(`[WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK] ${unresolved.reason}`); + }; + if (configuredProvider) { const unresolved = unresolvedWithoutFallback[0]; if (unresolved) { - const diagnostic: RuntimeWebDiagnostic = { - code: "WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK", - message: unresolved.reason, - path: unresolved.path, - }; - diagnostics.push(diagnostic); - searchMetadata.diagnostics.push(diagnostic); - pushWarning(params.context, { - code: "WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK", - path: unresolved.path, - message: unresolved.reason, - }); - throw new Error(`[WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK] ${unresolved.reason}`); + failUnresolvedSearchNoFallback(unresolved); } } else { if (!selectedProvider && unresolvedWithoutFallback.length > 0) { - const unresolved = unresolvedWithoutFallback[0]; - const diagnostic: RuntimeWebDiagnostic = { - code: "WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK", - message: unresolved.reason, - path: 
unresolved.path, - }; - diagnostics.push(diagnostic); - searchMetadata.diagnostics.push(diagnostic); - pushWarning(params.context, { - code: "WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK", - path: unresolved.path, - message: unresolved.reason, - }); - throw new Error(`[WEB_SEARCH_KEY_UNRESOLVED_NO_FALLBACK] ${unresolved.reason}`); + failUnresolvedSearchNoFallback(unresolvedWithoutFallback[0]); } if (selectedProvider) { diff --git a/src/security/external-content.test.ts b/src/security/external-content.test.ts index b943bdacf72..bdf8af0de46 100644 --- a/src/security/external-content.test.ts +++ b/src/security/external-content.test.ts @@ -236,6 +236,27 @@ describe("external-content security", () => { expect(result).not.toContain(endMarker); } }); + + it.each([ + ["U+200B zero width space", "\u200B"], + ["U+200C zero width non-joiner", "\u200C"], + ["U+200D zero width joiner", "\u200D"], + ["U+2060 word joiner", "\u2060"], + ["U+FEFF zero width no-break space", "\uFEFF"], + ["U+00AD soft hyphen", "\u00AD"], + ])("sanitizes boundary markers split by %s", (_name, ignorable) => { + const startMarker = `<<>>`; + const endMarker = `<<>>`; + const result = wrapWebContent( + `Before ${startMarker} middle ${endMarker} after`, + "web_search", + ); + + expect(result).toContain("[[MARKER_SANITIZED]]"); + expect(result).toContain("[[END_MARKER_SANITIZED]]"); + expect(result).not.toContain(startMarker); + expect(result).not.toContain(endMarker); + }); }); describe("buildSafeExternalPrompt", () => { diff --git a/src/security/external-content.ts b/src/security/external-content.ts index ff571871b5e..1c8a3dfb1b9 100644 --- a/src/security/external-content.ts +++ b/src/security/external-content.ts @@ -151,10 +151,18 @@ function foldMarkerChar(char: string): string { return char; } +const MARKER_IGNORABLE_CHAR_RE = /\u200B|\u200C|\u200D|\u2060|\uFEFF|\u00AD/g; + function foldMarkerText(input: string): string { - return input.replace( - 
/[\uFF21-\uFF3A\uFF41-\uFF5A\uFF1C\uFF1E\u2329\u232A\u3008\u3009\u2039\u203A\u27E8\u27E9\uFE64\uFE65\u00AB\u00BB\u300A\u300B\u27EA\u27EB\u27EC\u27ED\u27EE\u27EF\u276C\u276D\u276E\u276F\u02C2\u02C3]/g, - (char) => foldMarkerChar(char), + return ( + input + // Strip invisible format characters that can split marker tokens without changing + // how downstream models interpret the apparent boundary text. + .replace(MARKER_IGNORABLE_CHAR_RE, "") + .replace( + /[\uFF21-\uFF3A\uFF41-\uFF5A\uFF1C\uFF1E\u2329\u232A\u3008\u3009\u2039\u203A\u27E8\u27E9\uFE64\uFE65\u00AB\u00BB\u300A\u300B\u27EA\u27EB\u27EC\u27ED\u27EE\u27EF\u276C\u276D\u276E\u276F\u02C2\u02C3]/g, + (char) => foldMarkerChar(char), + ) ); } diff --git a/src/shared/assistant-identity-values.test.ts b/src/shared/assistant-identity-values.test.ts new file mode 100644 index 00000000000..f0e594cc7e7 --- /dev/null +++ b/src/shared/assistant-identity-values.test.ts @@ -0,0 +1,22 @@ +import { describe, expect, it } from "vitest"; +import { coerceIdentityValue } from "./assistant-identity-values.js"; + +describe("shared/assistant-identity-values", () => { + it("returns undefined for missing or blank values", () => { + expect(coerceIdentityValue(undefined, 10)).toBeUndefined(); + expect(coerceIdentityValue(" ", 10)).toBeUndefined(); + expect(coerceIdentityValue(42 as unknown as string, 10)).toBeUndefined(); + }); + + it("trims values and preserves strings within the limit", () => { + expect(coerceIdentityValue(" OpenClaw ", 20)).toBe("OpenClaw"); + }); + + it("truncates overlong trimmed values at the exact limit", () => { + expect(coerceIdentityValue(" OpenClaw Assistant ", 8)).toBe("OpenClaw"); + }); + + it("returns an empty string when truncating to a zero-length limit", () => { + expect(coerceIdentityValue(" OpenClaw ", 0)).toBe(""); + }); +}); diff --git a/src/shared/avatar-policy.test.ts b/src/shared/avatar-policy.test.ts index 81331a45b8d..cbc345767e7 100644 --- a/src/shared/avatar-policy.test.ts +++ 
b/src/shared/avatar-policy.test.ts @@ -1,24 +1,42 @@ import path from "node:path"; import { describe, expect, it } from "vitest"; import { + hasAvatarUriScheme, + isAvatarDataUrl, + isAvatarHttpUrl, + isAvatarImageDataUrl, isPathWithinRoot, isSupportedLocalAvatarExtension, + isWindowsAbsolutePath, isWorkspaceRelativeAvatarPath, looksLikeAvatarPath, resolveAvatarMime, } from "./avatar-policy.js"; describe("avatar policy", () => { + it("classifies avatar URI and path helpers directly", () => { + expect(isAvatarDataUrl("data:text/plain,hello")).toBe(true); + expect(isAvatarImageDataUrl("data:image/png;base64,AAAA")).toBe(true); + expect(isAvatarImageDataUrl("data:text/plain,hello")).toBe(false); + expect(isAvatarHttpUrl("https://example.com/avatar.png")).toBe(true); + expect(isAvatarHttpUrl("ftp://example.com/avatar.png")).toBe(false); + expect(hasAvatarUriScheme("slack://avatar")).toBe(true); + expect(isWindowsAbsolutePath("C:\\\\avatars\\\\openclaw.png")).toBe(true); + }); + it("accepts workspace-relative avatar paths and rejects URI schemes", () => { expect(isWorkspaceRelativeAvatarPath("avatars/openclaw.png")).toBe(true); expect(isWorkspaceRelativeAvatarPath("C:\\\\avatars\\\\openclaw.png")).toBe(true); expect(isWorkspaceRelativeAvatarPath("https://example.com/avatar.png")).toBe(false); expect(isWorkspaceRelativeAvatarPath("data:image/png;base64,AAAA")).toBe(false); expect(isWorkspaceRelativeAvatarPath("~/avatar.png")).toBe(false); + expect(isWorkspaceRelativeAvatarPath("slack://avatar")).toBe(false); + expect(isWorkspaceRelativeAvatarPath("")).toBe(false); }); it("checks path containment safely", () => { const root = path.resolve("/tmp/root"); + expect(isPathWithinRoot(root, root)).toBe(true); expect(isPathWithinRoot(root, path.resolve("/tmp/root/avatars/a.png"))).toBe(true); expect(isPathWithinRoot(root, path.resolve("/tmp/root/../outside.png"))).toBe(false); }); @@ -38,6 +56,7 @@ describe("avatar policy", () => { it("resolves mime type from extension", () => { 
expect(resolveAvatarMime("a.svg")).toBe("image/svg+xml"); expect(resolveAvatarMime("a.tiff")).toBe("image/tiff"); + expect(resolveAvatarMime("A.PNG")).toBe("image/png"); expect(resolveAvatarMime("a.bin")).toBe("application/octet-stream"); }); }); diff --git a/src/shared/chat-content.test.ts b/src/shared/chat-content.test.ts new file mode 100644 index 00000000000..0131865cef8 --- /dev/null +++ b/src/shared/chat-content.test.ts @@ -0,0 +1,64 @@ +import { describe, expect, it } from "vitest"; +import { extractTextFromChatContent } from "./chat-content.js"; + +describe("shared/chat-content", () => { + it("normalizes plain string content", () => { + expect(extractTextFromChatContent(" hello\nworld ")).toBe("hello world"); + }); + + it("extracts only text blocks from array content", () => { + expect( + extractTextFromChatContent([ + { type: "text", text: " hello " }, + { type: "image_url", image_url: "https://example.com" }, + { type: "text", text: "world" }, + { text: "ignored without type" }, + null, + ]), + ).toBe("hello world"); + }); + + it("applies sanitizers and custom join/normalization hooks", () => { + expect( + extractTextFromChatContent("Here [Tool Call: foo (ID: 1)] ok", { + sanitizeText: (text) => text.replace(/\[Tool Call:[^\]]+\]\s*/g, ""), + }), + ).toBe("Here ok"); + + expect( + extractTextFromChatContent( + [ + { type: "text", text: " hello " }, + { type: "text", text: "world " }, + ], + { + sanitizeText: (text) => text.trim(), + joinWith: "\n", + normalizeText: (text) => text.trim(), + }, + ), + ).toBe("hello\nworld"); + + expect( + extractTextFromChatContent( + [ + { type: "text", text: "keep" }, + { type: "text", text: "drop" }, + ], + { + sanitizeText: (text) => (text === "drop" ? 
" " : text), + }, + ), + ).toBe("keep"); + }); + + it("returns null for unsupported or empty content", () => { + expect(extractTextFromChatContent(123)).toBeNull(); + expect(extractTextFromChatContent([{ type: "text", text: " " }])).toBeNull(); + expect( + extractTextFromChatContent(" ", { + sanitizeText: () => "", + }), + ).toBeNull(); + }); +}); diff --git a/src/shared/chat-envelope.test.ts b/src/shared/chat-envelope.test.ts new file mode 100644 index 00000000000..0bd513c1b61 --- /dev/null +++ b/src/shared/chat-envelope.test.ts @@ -0,0 +1,28 @@ +import { describe, expect, it } from "vitest"; +import { stripEnvelope, stripMessageIdHints } from "./chat-envelope.js"; + +describe("shared/chat-envelope", () => { + it("strips recognized channel and timestamp envelope prefixes only", () => { + expect(stripEnvelope("[WhatsApp 2026-01-24 13:36] hello")).toBe("hello"); + expect(stripEnvelope("[Google Chat room] hello")).toBe("hello"); + expect(stripEnvelope("[2026-01-24T13:36Z] hello")).toBe("hello"); + expect(stripEnvelope("[2026-01-24 13:36] hello")).toBe("hello"); + expect(stripEnvelope("[Custom Sender] hello")).toBe("[Custom Sender] hello"); + }); + + it("keeps non-envelope headers and preserves unmatched text", () => { + expect(stripEnvelope("hello")).toBe("hello"); + expect(stripEnvelope("[note] hello")).toBe("[note] hello"); + expect(stripEnvelope("[2026/01/24 13:36] hello")).toBe("[2026/01/24 13:36] hello"); + }); + + it("removes standalone message id hint lines but keeps inline mentions", () => { + expect(stripMessageIdHints("hello\n[message_id: abc123]")).toBe("hello"); + expect(stripMessageIdHints("hello\n [message_id: abc123] \nworld")).toBe("hello\nworld"); + expect(stripMessageIdHints("[message_id: abc123]\nhello")).toBe("hello"); + expect(stripMessageIdHints("[message_id: abc123]")).toBe(""); + expect(stripMessageIdHints("I typed [message_id: abc123] inline")).toBe( + "I typed [message_id: abc123] inline", + ); + }); +}); diff --git 
a/src/shared/chat-message-content.test.ts b/src/shared/chat-message-content.test.ts new file mode 100644 index 00000000000..50e41f82642 --- /dev/null +++ b/src/shared/chat-message-content.test.ts @@ -0,0 +1,28 @@ +import { describe, expect, it } from "vitest"; +import { extractFirstTextBlock } from "./chat-message-content.js"; + +describe("shared/chat-message-content", () => { + it("extracts the first text block from array content", () => { + expect( + extractFirstTextBlock({ + content: [{ text: "hello" }, { text: "world" }], + }), + ).toBe("hello"); + }); + + it("preserves empty-string text in the first block", () => { + expect( + extractFirstTextBlock({ + content: [{ text: "" }, { text: "later" }], + }), + ).toBe(""); + }); + + it("returns undefined for missing, empty, or non-text content", () => { + expect(extractFirstTextBlock(null)).toBeUndefined(); + expect(extractFirstTextBlock({ content: [] })).toBeUndefined(); + expect(extractFirstTextBlock({ content: [{ type: "image" }] })).toBeUndefined(); + expect(extractFirstTextBlock({ content: ["hello"] })).toBeUndefined(); + expect(extractFirstTextBlock({ content: [{ text: 1 }, { text: "later" }] })).toBeUndefined(); + }); +}); diff --git a/src/shared/config-eval.test.ts b/src/shared/config-eval.test.ts index 2ef18d1bef6..48ddb9e3298 100644 --- a/src/shared/config-eval.test.ts +++ b/src/shared/config-eval.test.ts @@ -1,5 +1,98 @@ import { describe, expect, it } from "vitest"; -import { evaluateRuntimeEligibility } from "./config-eval.js"; +import { + evaluateRuntimeEligibility, + evaluateRuntimeRequires, + isConfigPathTruthyWithDefaults, + isTruthy, + resolveConfigPath, +} from "./config-eval.js"; + +describe("config-eval helpers", () => { + it("normalizes truthy values across primitive types", () => { + expect(isTruthy(undefined)).toBe(false); + expect(isTruthy(null)).toBe(false); + expect(isTruthy(false)).toBe(false); + expect(isTruthy(true)).toBe(true); + expect(isTruthy(0)).toBe(false); + 
expect(isTruthy(1)).toBe(true); + expect(isTruthy(" ")).toBe(false); + expect(isTruthy(" ok ")).toBe(true); + expect(isTruthy({})).toBe(true); + }); + + it("resolves nested config paths and missing branches safely", () => { + const config = { + browser: { + enabled: true, + nested: { + count: 1, + }, + }, + }; + + expect(resolveConfigPath(config, "browser.enabled")).toBe(true); + expect(resolveConfigPath(config, ".browser..nested.count.")).toBe(1); + expect(resolveConfigPath(config, "browser.missing.value")).toBeUndefined(); + expect(resolveConfigPath("not-an-object", "browser.enabled")).toBeUndefined(); + }); + + it("uses defaults only when config paths are unresolved", () => { + const config = { + browser: { + enabled: false, + }, + }; + + expect( + isConfigPathTruthyWithDefaults(config, "browser.enabled", { "browser.enabled": true }), + ).toBe(false); + expect( + isConfigPathTruthyWithDefaults(config, "browser.missing", { "browser.missing": true }), + ).toBe(true); + expect(isConfigPathTruthyWithDefaults(config, "browser.other", {})).toBe(false); + }); +}); + +describe("evaluateRuntimeRequires", () => { + it("accepts remote bins and remote any-bin matches", () => { + const result = evaluateRuntimeRequires({ + requires: { + bins: ["node"], + anyBins: ["bun", "deno"], + env: ["OPENAI_API_KEY"], + config: ["browser.enabled"], + }, + hasBin: () => false, + hasRemoteBin: (bin) => bin === "node", + hasAnyRemoteBin: (bins) => bins.includes("deno"), + hasEnv: (name) => name === "OPENAI_API_KEY", + isConfigPathTruthy: (path) => path === "browser.enabled", + }); + + expect(result).toBe(true); + }); + + it("rejects when any required runtime check is still unsatisfied", () => { + expect( + evaluateRuntimeRequires({ + requires: { bins: ["node"] }, + hasBin: () => false, + hasEnv: () => true, + isConfigPathTruthy: () => true, + }), + ).toBe(false); + + expect( + evaluateRuntimeRequires({ + requires: { anyBins: ["bun", "node"] }, + hasBin: () => false, + hasAnyRemoteBin: () => 
false, + hasEnv: () => true, + isConfigPathTruthy: () => true, + }), + ).toBe(false); + }); +}); describe("evaluateRuntimeEligibility", () => { it("rejects entries when required OS does not match local or remote", () => { diff --git a/src/shared/device-auth-store.test.ts b/src/shared/device-auth-store.test.ts new file mode 100644 index 00000000000..be070ee79cd --- /dev/null +++ b/src/shared/device-auth-store.test.ts @@ -0,0 +1,206 @@ +import { describe, expect, it, vi } from "vitest"; +import { + clearDeviceAuthTokenFromStore, + loadDeviceAuthTokenFromStore, + storeDeviceAuthTokenInStore, + type DeviceAuthStoreAdapter, +} from "./device-auth-store.js"; + +function createAdapter(initialStore: ReturnType = null) { + let store = initialStore; + const writes: unknown[] = []; + const adapter: DeviceAuthStoreAdapter = { + readStore: () => store, + writeStore: (next) => { + store = next; + writes.push(next); + }, + }; + return { adapter, writes, readStore: () => store }; +} + +describe("device-auth-store", () => { + it("loads only matching device ids and normalized roles", () => { + const { adapter } = createAdapter({ + version: 1, + deviceId: "device-1", + tokens: { + operator: { + token: "secret", + role: "operator", + scopes: ["operator.read"], + updatedAtMs: 1, + }, + }, + }); + + expect( + loadDeviceAuthTokenFromStore({ + adapter, + deviceId: "device-1", + role: " operator ", + }), + ).toMatchObject({ token: "secret" }); + expect( + loadDeviceAuthTokenFromStore({ + adapter, + deviceId: "device-2", + role: "operator", + }), + ).toBeNull(); + }); + + it("stores normalized roles and deduped sorted scopes while preserving same-device tokens", () => { + vi.spyOn(Date, "now").mockReturnValue(1234); + const { adapter, writes, readStore } = createAdapter({ + version: 1, + deviceId: "device-1", + tokens: { + node: { + token: "node-token", + role: "node", + scopes: ["node.invoke"], + updatedAtMs: 10, + }, + }, + }); + + const entry = storeDeviceAuthTokenInStore({ + adapter, + 
deviceId: "device-1", + role: " operator ", + token: "operator-token", + scopes: [" operator.write ", "operator.read", "operator.read", ""], + }); + + expect(entry).toEqual({ + token: "operator-token", + role: "operator", + scopes: ["operator.read", "operator.write"], + updatedAtMs: 1234, + }); + expect(writes).toHaveLength(1); + expect(readStore()).toEqual({ + version: 1, + deviceId: "device-1", + tokens: { + node: { + token: "node-token", + role: "node", + scopes: ["node.invoke"], + updatedAtMs: 10, + }, + operator: entry, + }, + }); + }); + + it("replaces stale stores from other devices instead of merging them", () => { + const { adapter, readStore } = createAdapter({ + version: 1, + deviceId: "device-2", + tokens: { + operator: { + token: "old-token", + role: "operator", + scopes: [], + updatedAtMs: 1, + }, + }, + }); + + storeDeviceAuthTokenInStore({ + adapter, + deviceId: "device-1", + role: "node", + token: "node-token", + }); + + expect(readStore()).toEqual({ + version: 1, + deviceId: "device-1", + tokens: { + node: { + token: "node-token", + role: "node", + scopes: [], + updatedAtMs: expect.any(Number), + }, + }, + }); + }); + + it("avoids writes when clearing missing roles or mismatched devices", () => { + const missingRole = createAdapter({ + version: 1, + deviceId: "device-1", + tokens: {}, + }); + clearDeviceAuthTokenFromStore({ + adapter: missingRole.adapter, + deviceId: "device-1", + role: "operator", + }); + expect(missingRole.writes).toHaveLength(0); + + const otherDevice = createAdapter({ + version: 1, + deviceId: "device-2", + tokens: { + operator: { + token: "secret", + role: "operator", + scopes: [], + updatedAtMs: 1, + }, + }, + }); + clearDeviceAuthTokenFromStore({ + adapter: otherDevice.adapter, + deviceId: "device-1", + role: "operator", + }); + expect(otherDevice.writes).toHaveLength(0); + }); + + it("removes normalized roles when clearing stored tokens", () => { + const { adapter, writes, readStore } = createAdapter({ + version: 1, + 
deviceId: "device-1", + tokens: { + operator: { + token: "secret", + role: "operator", + scopes: ["operator.read"], + updatedAtMs: 1, + }, + node: { + token: "node-token", + role: "node", + scopes: [], + updatedAtMs: 2, + }, + }, + }); + + clearDeviceAuthTokenFromStore({ + adapter, + deviceId: "device-1", + role: " operator ", + }); + + expect(writes).toHaveLength(1); + expect(readStore()).toEqual({ + version: 1, + deviceId: "device-1", + tokens: { + node: { + token: "node-token", + role: "node", + scopes: [], + updatedAtMs: 2, + }, + }, + }); + }); +}); diff --git a/src/shared/device-auth.test.ts b/src/shared/device-auth.test.ts new file mode 100644 index 00000000000..a3bc6fa3956 --- /dev/null +++ b/src/shared/device-auth.test.ts @@ -0,0 +1,23 @@ +import { describe, expect, it } from "vitest"; +import { normalizeDeviceAuthRole, normalizeDeviceAuthScopes } from "./device-auth.js"; + +describe("shared/device-auth", () => { + it("trims device auth roles without further rewriting", () => { + expect(normalizeDeviceAuthRole(" operator ")).toBe("operator"); + expect(normalizeDeviceAuthRole("")).toBe(""); + expect(normalizeDeviceAuthRole(" NODE.Admin ")).toBe("NODE.Admin"); + }); + + it("dedupes, trims, sorts, and filters auth scopes", () => { + expect( + normalizeDeviceAuthScopes([" node.invoke ", "operator.read", "", "node.invoke", "a.scope"]), + ).toEqual(["a.scope", "node.invoke", "operator.read"]); + expect(normalizeDeviceAuthScopes(undefined)).toEqual([]); + expect(normalizeDeviceAuthScopes([" ", "\t", "\n"])).toEqual([]); + expect(normalizeDeviceAuthScopes(["z.scope", "A.scope", "m.scope"])).toEqual([ + "A.scope", + "m.scope", + "z.scope", + ]); + }); +}); diff --git a/src/shared/entry-metadata.test.ts b/src/shared/entry-metadata.test.ts new file mode 100644 index 00000000000..cf94453a62e --- /dev/null +++ b/src/shared/entry-metadata.test.ts @@ -0,0 +1,49 @@ +import { describe, expect, it } from "vitest"; +import { resolveEmojiAndHomepage } from 
"./entry-metadata.js"; + +describe("shared/entry-metadata", () => { + it("prefers metadata emoji and homepage when present", () => { + expect( + resolveEmojiAndHomepage({ + metadata: { emoji: "🦀", homepage: " https://openclaw.ai " }, + frontmatter: { emoji: "🙂", homepage: "https://example.com" }, + }), + ).toEqual({ + emoji: "🦀", + homepage: "https://openclaw.ai", + }); + }); + + it("keeps metadata precedence even when metadata values are blank", () => { + expect( + resolveEmojiAndHomepage({ + metadata: { emoji: "", homepage: " " }, + frontmatter: { emoji: "🙂", homepage: "https://example.com" }, + }), + ).toEqual({}); + }); + + it("falls back through frontmatter homepage aliases and drops blanks", () => { + expect( + resolveEmojiAndHomepage({ + frontmatter: { emoji: "🙂", website: " https://docs.openclaw.ai " }, + }), + ).toEqual({ + emoji: "🙂", + homepage: "https://docs.openclaw.ai", + }); + expect( + resolveEmojiAndHomepage({ + metadata: { homepage: " " }, + frontmatter: { url: " " }, + }), + ).toEqual({}); + expect( + resolveEmojiAndHomepage({ + frontmatter: { url: " https://openclaw.ai/install " }, + }), + ).toEqual({ + homepage: "https://openclaw.ai/install", + }); + }); +}); diff --git a/src/shared/entry-status.test.ts b/src/shared/entry-status.test.ts new file mode 100644 index 00000000000..88913913011 --- /dev/null +++ b/src/shared/entry-status.test.ts @@ -0,0 +1,132 @@ +import { afterEach, describe, expect, it } from "vitest"; +import { + evaluateEntryMetadataRequirements, + evaluateEntryMetadataRequirementsForCurrentPlatform, + evaluateEntryRequirementsForCurrentPlatform, +} from "./entry-status.js"; + +const originalPlatformDescriptor = Object.getOwnPropertyDescriptor(process, "platform"); + +function setPlatform(platform: NodeJS.Platform): void { + Object.defineProperty(process, "platform", { + value: platform, + configurable: true, + }); +} + +afterEach(() => { + if (originalPlatformDescriptor) { + Object.defineProperty(process, "platform", 
originalPlatformDescriptor); + } +}); + +describe("shared/entry-status", () => { + it("combines metadata presentation fields with evaluated requirements", () => { + const result = evaluateEntryMetadataRequirements({ + always: false, + metadata: { + emoji: "🦀", + homepage: "https://openclaw.ai", + requires: { + bins: ["bun"], + anyBins: ["ffmpeg", "sox"], + env: ["OPENCLAW_TOKEN"], + config: ["gateway.bind"], + }, + os: ["darwin"], + }, + frontmatter: { + emoji: "🙂", + homepage: "https://docs.openclaw.ai", + }, + hasLocalBin: (bin) => bin === "bun", + localPlatform: "linux", + remote: { + hasAnyBin: (bins) => bins.includes("sox"), + }, + isEnvSatisfied: () => false, + isConfigSatisfied: (path) => path === "gateway.bind", + }); + + expect(result).toEqual({ + emoji: "🦀", + homepage: "https://openclaw.ai", + required: { + bins: ["bun"], + anyBins: ["ffmpeg", "sox"], + env: ["OPENCLAW_TOKEN"], + config: ["gateway.bind"], + os: ["darwin"], + }, + missing: { + bins: [], + anyBins: [], + env: ["OPENCLAW_TOKEN"], + config: [], + os: ["darwin"], + }, + requirementsSatisfied: false, + configChecks: [{ path: "gateway.bind", satisfied: true }], + }); + }); + + it("uses process.platform in the current-platform wrapper", () => { + setPlatform("darwin"); + + const result = evaluateEntryMetadataRequirementsForCurrentPlatform({ + always: false, + metadata: { + os: ["darwin"], + }, + hasLocalBin: () => false, + isEnvSatisfied: () => true, + isConfigSatisfied: () => true, + }); + + expect(result.requirementsSatisfied).toBe(true); + expect(result.missing.os).toEqual([]); + }); + + it("pulls metadata and frontmatter from entry objects in the entry wrapper", () => { + setPlatform("linux"); + + const result = evaluateEntryRequirementsForCurrentPlatform({ + always: true, + entry: { + metadata: { + requires: { + bins: ["missing-bin"], + }, + }, + frontmatter: { + website: " https://docs.openclaw.ai ", + emoji: "🙂", + }, + }, + hasLocalBin: () => false, + isEnvSatisfied: () => false, + 
isConfigSatisfied: () => false, + }); + + expect(result).toEqual({ + emoji: "🙂", + homepage: "https://docs.openclaw.ai", + required: { + bins: ["missing-bin"], + anyBins: [], + env: [], + config: [], + os: [], + }, + missing: { + bins: [], + anyBins: [], + env: [], + config: [], + os: [], + }, + requirementsSatisfied: true, + configChecks: [], + }); + }); +}); diff --git a/src/shared/frontmatter.test.ts b/src/shared/frontmatter.test.ts new file mode 100644 index 00000000000..94cd4acabef --- /dev/null +++ b/src/shared/frontmatter.test.ts @@ -0,0 +1,142 @@ +import { describe, expect, it, test } from "vitest"; +import { + applyOpenClawManifestInstallCommonFields, + getFrontmatterString, + normalizeStringList, + parseFrontmatterBool, + parseOpenClawManifestInstallBase, + resolveOpenClawManifestBlock, + resolveOpenClawManifestInstall, + resolveOpenClawManifestOs, + resolveOpenClawManifestRequires, +} from "./frontmatter.js"; + +describe("shared/frontmatter", () => { + test("normalizeStringList handles strings, arrays, and non-list values", () => { + expect(normalizeStringList("a, b,,c")).toEqual(["a", "b", "c"]); + expect(normalizeStringList([" a ", "", "b", 42])).toEqual(["a", "b", "42"]); + expect(normalizeStringList(null)).toEqual([]); + }); + + test("getFrontmatterString extracts strings only", () => { + expect(getFrontmatterString({ a: "b" }, "a")).toBe("b"); + expect(getFrontmatterString({ a: 1 }, "a")).toBeUndefined(); + }); + + test("parseFrontmatterBool respects explicit values and fallback", () => { + expect(parseFrontmatterBool("true", false)).toBe(true); + expect(parseFrontmatterBool("false", true)).toBe(false); + expect(parseFrontmatterBool(undefined, true)).toBe(true); + }); + + test("resolveOpenClawManifestBlock reads current manifest keys and custom metadata fields", () => { + expect( + resolveOpenClawManifestBlock({ + frontmatter: { + metadata: "{ openclaw: { foo: 1, bar: 'baz' } }", + }, + }), + ).toEqual({ foo: 1, bar: "baz" }); + + expect( + 
resolveOpenClawManifestBlock({ + frontmatter: { + pluginMeta: "{ openclaw: { foo: 2 } }", + }, + key: "pluginMeta", + }), + ).toEqual({ foo: 2 }); + }); + + test("resolveOpenClawManifestBlock returns undefined for invalid input", () => { + expect(resolveOpenClawManifestBlock({ frontmatter: {} })).toBeUndefined(); + expect( + resolveOpenClawManifestBlock({ frontmatter: { metadata: "not-json5" } }), + ).toBeUndefined(); + expect( + resolveOpenClawManifestBlock({ frontmatter: { metadata: "{ nope: { a: 1 } }" } }), + ).toBeUndefined(); + }); + + it("normalizes manifest requirement and os lists", () => { + expect( + resolveOpenClawManifestRequires({ + requires: { + bins: "bun, node", + anyBins: [" ffmpeg ", ""], + env: ["OPENCLAW_TOKEN", " OPENCLAW_URL "], + config: null, + }, + }), + ).toEqual({ + bins: ["bun", "node"], + anyBins: ["ffmpeg"], + env: ["OPENCLAW_TOKEN", "OPENCLAW_URL"], + config: [], + }); + expect(resolveOpenClawManifestRequires({})).toBeUndefined(); + expect(resolveOpenClawManifestOs({ os: [" darwin ", "linux", ""] })).toEqual([ + "darwin", + "linux", + ]); + }); + + it("parses and applies install common fields", () => { + const parsed = parseOpenClawManifestInstallBase( + { + type: " Brew ", + id: "brew.git", + label: "Git", + bins: [" git ", "git"], + }, + ["brew", "npm"], + ); + + expect(parsed).toEqual({ + raw: { + type: " Brew ", + id: "brew.git", + label: "Git", + bins: [" git ", "git"], + }, + kind: "brew", + id: "brew.git", + label: "Git", + bins: ["git", "git"], + }); + expect(parseOpenClawManifestInstallBase({ kind: "bad" }, ["brew"])).toBeUndefined(); + expect( + applyOpenClawManifestInstallCommonFields<{ + extra: boolean; + id?: string; + label?: string; + bins?: string[]; + }>({ extra: true }, parsed!), + ).toEqual({ + extra: true, + id: "brew.git", + label: "Git", + bins: ["git", "git"], + }); + }); + + it("maps install entries through the parser and filters rejected specs", () => { + expect( + resolveOpenClawManifestInstall( + { + 
install: [{ id: "keep" }, { id: "drop" }, "bad"], + }, + (entry) => { + if ( + typeof entry === "object" && + entry !== null && + (entry as { id?: string }).id === "keep" + ) { + return { id: "keep" }; + } + return undefined; + }, + ), + ).toEqual([{ id: "keep" }]); + }); +}); diff --git a/src/shared/gateway-bind-url.test.ts b/src/shared/gateway-bind-url.test.ts new file mode 100644 index 00000000000..23dd855c4e6 --- /dev/null +++ b/src/shared/gateway-bind-url.test.ts @@ -0,0 +1,94 @@ +import { describe, expect, it, vi } from "vitest"; +import { resolveGatewayBindUrl } from "./gateway-bind-url.js"; + +describe("shared/gateway-bind-url", () => { + it("returns null for loopback/default binds", () => { + expect( + resolveGatewayBindUrl({ + scheme: "ws", + port: 18789, + pickTailnetHost: () => "100.64.0.1", + pickLanHost: () => "192.168.1.2", + }), + ).toBeNull(); + }); + + it("resolves custom binds only when custom host is present after trimming", () => { + expect( + resolveGatewayBindUrl({ + bind: "custom", + customBindHost: " gateway.local ", + scheme: "wss", + port: 443, + pickTailnetHost: vi.fn(), + pickLanHost: vi.fn(), + }), + ).toEqual({ + url: "wss://gateway.local:443", + source: "gateway.bind=custom", + }); + + expect( + resolveGatewayBindUrl({ + bind: "custom", + customBindHost: " ", + scheme: "ws", + port: 18789, + pickTailnetHost: vi.fn(), + pickLanHost: vi.fn(), + }), + ).toEqual({ + error: "gateway.bind=custom requires gateway.customBindHost.", + }); + }); + + it("resolves tailnet and lan binds or returns clear errors", () => { + expect( + resolveGatewayBindUrl({ + bind: "tailnet", + scheme: "ws", + port: 18789, + pickTailnetHost: () => "100.64.0.1", + pickLanHost: vi.fn(), + }), + ).toEqual({ + url: "ws://100.64.0.1:18789", + source: "gateway.bind=tailnet", + }); + expect( + resolveGatewayBindUrl({ + bind: "tailnet", + scheme: "ws", + port: 18789, + pickTailnetHost: () => null, + pickLanHost: vi.fn(), + }), + ).toEqual({ + error: "gateway.bind=tailnet 
set, but no tailnet IP was found.", + }); + + expect( + resolveGatewayBindUrl({ + bind: "lan", + scheme: "wss", + port: 8443, + pickTailnetHost: vi.fn(), + pickLanHost: () => "192.168.1.2", + }), + ).toEqual({ + url: "wss://192.168.1.2:8443", + source: "gateway.bind=lan", + }); + expect( + resolveGatewayBindUrl({ + bind: "lan", + scheme: "ws", + port: 18789, + pickTailnetHost: vi.fn(), + pickLanHost: () => null, + }), + ).toEqual({ + error: "gateway.bind=lan set, but no private LAN IP was found.", + }); + }); +}); diff --git a/src/shared/global-singleton.test.ts b/src/shared/global-singleton.test.ts index 0f0a29c506c..3d537f5cc4b 100644 --- a/src/shared/global-singleton.test.ts +++ b/src/shared/global-singleton.test.ts @@ -27,6 +27,15 @@ describe("resolveGlobalSingleton", () => { expect(resolveGlobalSingleton(TEST_KEY, create)).toBeUndefined(); expect(create).toHaveBeenCalledTimes(1); }); + + it("reuses a prepopulated global value without calling the factory", () => { + const existing = { value: 7 }; + const create = vi.fn(() => ({ value: 1 })); + (globalThis as Record)[TEST_KEY] = existing; + + expect(resolveGlobalSingleton(TEST_KEY, create)).toBe(existing); + expect(create).not.toHaveBeenCalled(); + }); }); describe("resolveGlobalMap", () => { @@ -36,4 +45,11 @@ describe("resolveGlobalMap", () => { expect(first).toBe(second); }); + + it("preserves existing map contents across repeated resolution", () => { + const map = resolveGlobalMap(TEST_MAP_KEY); + map.set("a", 1); + + expect(resolveGlobalMap(TEST_MAP_KEY).get("a")).toBe(1); + }); }); diff --git a/src/shared/model-param-b.test.ts b/src/shared/model-param-b.test.ts new file mode 100644 index 00000000000..7fb9a7b82d4 --- /dev/null +++ b/src/shared/model-param-b.test.ts @@ -0,0 +1,17 @@ +import { describe, expect, it } from "vitest"; +import { inferParamBFromIdOrName } from "./model-param-b.js"; + +describe("shared/model-param-b", () => { + it("extracts the largest valid b-sized parameter token", () => { + 
expect(inferParamBFromIdOrName("llama-8b mixtral-22b")).toBe(22); + expect(inferParamBFromIdOrName("Qwen 0.5B Instruct")).toBe(0.5); + expect(inferParamBFromIdOrName("prefix M7B and q4_32b")).toBe(32); + }); + + it("ignores malformed, zero, and non-delimited matches", () => { + expect(inferParamBFromIdOrName("abc70beta 0b x70b2")).toBeNull(); + expect(inferParamBFromIdOrName("model 0b")).toBeNull(); + expect(inferParamBFromIdOrName("model b5")).toBeNull(); + expect(inferParamBFromIdOrName("foo70bbar")).toBeNull(); + }); +}); diff --git a/src/shared/net/ip.test.ts b/src/shared/net/ip.test.ts index f89fb03f7ef..2ed2558214a 100644 --- a/src/shared/net/ip.test.ts +++ b/src/shared/net/ip.test.ts @@ -2,11 +2,16 @@ import { describe, expect, it } from "vitest"; import { blockedIpv6MulticastLiterals } from "./ip-test-fixtures.js"; import { extractEmbeddedIpv4FromIpv6, + isBlockedSpecialUseIpv4Address, isCanonicalDottedDecimalIPv4, + isCarrierGradeNatIpv4Address, isIpInCidr, isIpv6Address, isLegacyIpv4Literal, + isLoopbackIpAddress, isPrivateOrLoopbackIpAddress, + isRfc1918Ipv4Address, + normalizeIpAddress, parseCanonicalIpAddress, } from "./ip.js"; @@ -53,4 +58,35 @@ describe("shared ip helpers", () => { } expect(isPrivateOrLoopbackIpAddress("2001:4860:4860::8888")).toBe(false); }); + + it("normalizes canonical IP strings and loopback detection", () => { + expect(normalizeIpAddress("[::FFFF:127.0.0.1]")).toBe("127.0.0.1"); + expect(normalizeIpAddress(" [2001:DB8::1] ")).toBe("2001:db8::1"); + expect(isLoopbackIpAddress("::ffff:127.0.0.1")).toBe(true); + expect(isLoopbackIpAddress("198.18.0.1")).toBe(false); + }); + + it("classifies RFC1918 and carrier-grade-nat IPv4 ranges", () => { + expect(isRfc1918Ipv4Address("10.42.0.59")).toBe(true); + expect(isRfc1918Ipv4Address("100.64.0.1")).toBe(false); + expect(isCarrierGradeNatIpv4Address("100.64.0.1")).toBe(true); + expect(isCarrierGradeNatIpv4Address("10.42.0.59")).toBe(false); + }); + + it("blocks special-use IPv4 ranges 
while allowing optional RFC2544 benchmark addresses", () => { + const loopback = parseCanonicalIpAddress("127.0.0.1"); + const benchmark = parseCanonicalIpAddress("198.18.0.1"); + + expect(loopback?.kind()).toBe("ipv4"); + expect(benchmark?.kind()).toBe("ipv4"); + if (!loopback || loopback.kind() !== "ipv4" || !benchmark || benchmark.kind() !== "ipv4") { + throw new Error("expected ipv4 fixtures"); + } + + expect(isBlockedSpecialUseIpv4Address(loopback)).toBe(true); + expect(isBlockedSpecialUseIpv4Address(benchmark)).toBe(true); + expect(isBlockedSpecialUseIpv4Address(benchmark, { allowRfc2544BenchmarkRange: true })).toBe( + false, + ); + }); }); diff --git a/src/shared/net/ipv4.test.ts b/src/shared/net/ipv4.test.ts new file mode 100644 index 00000000000..21ff99b982b --- /dev/null +++ b/src/shared/net/ipv4.test.ts @@ -0,0 +1,31 @@ +import { describe, expect, it } from "vitest"; +import { validateDottedDecimalIPv4Input, validateIPv4AddressInput } from "./ipv4.js"; + +describe("shared/net/ipv4", () => { + it("requires a value for custom bind mode", () => { + expect(validateDottedDecimalIPv4Input(undefined)).toBe( + "IP address is required for custom bind mode", + ); + expect(validateDottedDecimalIPv4Input("")).toBe("IP address is required for custom bind mode"); + expect(validateDottedDecimalIPv4Input(" ")).toBe( + "Invalid IPv4 address (e.g., 192.168.1.100)", + ); + }); + + it("accepts canonical dotted-decimal ipv4 only", () => { + expect(validateDottedDecimalIPv4Input("192.168.1.100")).toBeUndefined(); + expect(validateDottedDecimalIPv4Input(" 192.168.1.100 ")).toBeUndefined(); + expect(validateDottedDecimalIPv4Input("0177.0.0.1")).toBe( + "Invalid IPv4 address (e.g., 192.168.1.100)", + ); + expect(validateDottedDecimalIPv4Input("[192.168.1.100]")).toBeUndefined(); + expect(validateDottedDecimalIPv4Input("example.com")).toBe( + "Invalid IPv4 address (e.g., 192.168.1.100)", + ); + }); + + it("keeps the backward-compatible alias wired to the same validation", () => { 
+ expect(validateIPv4AddressInput("192.168.1.100")).toBeUndefined(); + expect(validateIPv4AddressInput("bad-ip")).toBe("Invalid IPv4 address (e.g., 192.168.1.100)"); + }); +}); diff --git a/src/shared/node-list-parse.test.ts b/src/shared/node-list-parse.test.ts index 379f4395054..9437e31118a 100644 --- a/src/shared/node-list-parse.test.ts +++ b/src/shared/node-list-parse.test.ts @@ -6,6 +6,7 @@ describe("shared/node-list-parse", () => { expect(parseNodeList({ nodes: [{ nodeId: "node-1" }] })).toEqual([{ nodeId: "node-1" }]); expect(parseNodeList({ nodes: "nope" })).toEqual([]); expect(parseNodeList(null)).toEqual([]); + expect(parseNodeList(["not-an-object"])).toEqual([]); }); it("parses node.pair.list payloads", () => { @@ -20,5 +21,6 @@ describe("shared/node-list-parse", () => { }); expect(parsePairingList({ pending: 1, paired: "x" })).toEqual({ pending: [], paired: [] }); expect(parsePairingList(undefined)).toEqual({ pending: [], paired: [] }); + expect(parsePairingList(["not-an-object"])).toEqual({ pending: [], paired: [] }); }); }); diff --git a/src/shared/node-match.test.ts b/src/shared/node-match.test.ts new file mode 100644 index 00000000000..2ddc3663d3f --- /dev/null +++ b/src/shared/node-match.test.ts @@ -0,0 +1,59 @@ +import { describe, expect, it } from "vitest"; +import { normalizeNodeKey, resolveNodeIdFromCandidates, resolveNodeMatches } from "./node-match.js"; + +describe("shared/node-match", () => { + it("normalizes node keys by lowercasing and collapsing separators", () => { + expect(normalizeNodeKey(" Mac Studio! 
")).toBe("mac-studio"); + expect(normalizeNodeKey("---PI__Node---")).toBe("pi-node"); + }); + + it("matches candidates by node id, remote ip, normalized name, and long prefix", () => { + const nodes = [ + { nodeId: "mac-abcdef", displayName: "Mac Studio", remoteIp: "100.0.0.1" }, + { nodeId: "pi-456789", displayName: "Raspberry Pi", remoteIp: "100.0.0.2" }, + ]; + + expect(resolveNodeMatches(nodes, "mac-abcdef")).toEqual([nodes[0]]); + expect(resolveNodeMatches(nodes, "100.0.0.2")).toEqual([nodes[1]]); + expect(resolveNodeMatches(nodes, "mac studio")).toEqual([nodes[0]]); + expect(resolveNodeMatches(nodes, "pi-456")).toEqual([nodes[1]]); + expect(resolveNodeMatches(nodes, "pi")).toEqual([]); + expect(resolveNodeMatches(nodes, " ")).toEqual([]); + }); + + it("resolves unique matches and prefers a unique connected node", () => { + expect( + resolveNodeIdFromCandidates( + [ + { nodeId: "ios-old", displayName: "iPhone", connected: false }, + { nodeId: "ios-live", displayName: "iPhone", connected: true }, + ], + "iphone", + ), + ).toBe("ios-live"); + }); + + it("throws clear unknown and ambiguous node errors", () => { + expect(() => + resolveNodeIdFromCandidates( + [ + { nodeId: "mac-123", displayName: "Mac Studio", remoteIp: "100.0.0.1" }, + { nodeId: "pi-456" }, + ], + "nope", + ), + ).toThrow(/unknown node: nope.*known: Mac Studio, pi-456/); + + expect(() => + resolveNodeIdFromCandidates( + [ + { nodeId: "ios-a", displayName: "iPhone", connected: true }, + { nodeId: "ios-b", displayName: "iPhone", connected: true }, + ], + "iphone", + ), + ).toThrow(/ambiguous node: iphone.*matches: iPhone, iPhone/); + + expect(() => resolveNodeIdFromCandidates([], "")).toThrow(/node required/); + }); +}); diff --git a/src/shared/node-resolve.test.ts b/src/shared/node-resolve.test.ts new file mode 100644 index 00000000000..2020073a910 --- /dev/null +++ b/src/shared/node-resolve.test.ts @@ -0,0 +1,54 @@ +import { describe, expect, it } from "vitest"; +import { resolveNodeFromNodeList, 
resolveNodeIdFromNodeList } from "./node-resolve.js"; + +describe("shared/node-resolve", () => { + const nodes = [ + { nodeId: "mac-123", displayName: "Mac Studio", connected: true }, + { nodeId: "pi-456", displayName: "Raspberry Pi", connected: false }, + ]; + + it("resolves node ids through candidate matching", () => { + expect(resolveNodeIdFromNodeList(nodes, "Mac Studio")).toBe("mac-123"); + }); + + it("supports optional default-node selection when query is blank", () => { + expect( + resolveNodeIdFromNodeList(nodes, " ", { + allowDefault: true, + pickDefaultNode: (entries) => entries.find((entry) => entry.connected) ?? null, + }), + ).toBe("mac-123"); + }); + + it("passes the original node list to the default picker", () => { + expect( + resolveNodeIdFromNodeList(nodes, "", { + allowDefault: true, + pickDefaultNode: (entries) => { + expect(entries).toBe(nodes); + return entries[1] ?? null; + }, + }), + ).toBe("pi-456"); + }); + + it("still throws when default selection is disabled or returns null", () => { + expect(() => resolveNodeIdFromNodeList(nodes, " ")).toThrow(/node required/); + expect(() => + resolveNodeIdFromNodeList(nodes, "", { + allowDefault: true, + pickDefaultNode: () => null, + }), + ).toThrow(/node required/); + }); + + it("returns the full node object and falls back to a synthetic entry when needed", () => { + expect(resolveNodeFromNodeList(nodes, "pi-456")).toEqual(nodes[1]); + expect( + resolveNodeFromNodeList([], "", { + allowDefault: true, + pickDefaultNode: () => ({ nodeId: "synthetic-1" }), + }), + ).toEqual({ nodeId: "synthetic-1" }); + }); +}); diff --git a/src/shared/operator-scope-compat.test.ts b/src/shared/operator-scope-compat.test.ts index 11810673681..e48a17ad398 100644 --- a/src/shared/operator-scope-compat.test.ts +++ b/src/shared/operator-scope-compat.test.ts @@ -86,4 +86,31 @@ describe("roleScopesAllow", () => { }), ).toBe(false); }); + + it("normalizes blank and duplicate scopes before evaluating", () => { + expect( + 
roleScopesAllow({ + role: " operator ", + requestedScopes: [" operator.read ", "operator.read", " "], + allowedScopes: [" operator.write ", "operator.write", ""], + }), + ).toBe(true); + }); + + it("rejects unsatisfied operator write scopes and empty allowed scopes", () => { + expect( + roleScopesAllow({ + role: "operator", + requestedScopes: ["operator.write"], + allowedScopes: ["operator.read"], + }), + ).toBe(false); + expect( + roleScopesAllow({ + role: "operator", + requestedScopes: ["operator.read"], + allowedScopes: [" "], + }), + ).toBe(false); + }); }); diff --git a/src/shared/pid-alive.test.ts b/src/shared/pid-alive.test.ts index c0d714fb21a..88066f1a794 100644 --- a/src/shared/pid-alive.test.ts +++ b/src/shared/pid-alive.test.ts @@ -59,6 +59,21 @@ describe("isPidAlive", () => { expect(freshIsPidAlive(zombiePid)).toBe(false); }); }); + + it("treats unreadable linux proc status as non-zombie when kill succeeds", async () => { + const readFileSyncSpy = vi.spyOn(fsSync, "readFileSync").mockImplementation(() => { + throw new Error("no proc status"); + }); + const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true); + + await withLinuxProcessPlatform(async () => { + const { isPidAlive: freshIsPidAlive } = await import("./pid-alive.js"); + expect(freshIsPidAlive(42)).toBe(true); + }); + + expect(readFileSyncSpy).toHaveBeenCalledWith("/proc/42/status", "utf8"); + expect(killSpy).toHaveBeenCalledWith(42, 0); + }); }); describe("getProcessStartTime", () => { @@ -114,4 +129,19 @@ describe("getProcessStartTime", () => { expect(fresh(42)).toBe(55555); }); }); + + it("returns null for negative or non-integer start times", async () => { + const fakeStatPrefix = "42 (node) S 1 42 42 0 -1 4194304 12345 0 0 0 100 50 0 0 20 0 8 0 "; + const fakeStatSuffix = + " 123456789 5000 18446744073709551615 0 0 0 0 0 0 0 0 0 0 0 0 17 0 0 0 0 0 0"; + mockProcReads({ + "/proc/42/stat": `${fakeStatPrefix}-1${fakeStatSuffix}`, + "/proc/43/stat": 
`${fakeStatPrefix}1.5${fakeStatSuffix}`, + }); + await withLinuxProcessPlatform(async () => { + const { getProcessStartTime: fresh } = await import("./pid-alive.js"); + expect(fresh(42)).toBeNull(); + expect(fresh(43)).toBeNull(); + }); + }); }); diff --git a/src/shared/process-scoped-map.test.ts b/src/shared/process-scoped-map.test.ts new file mode 100644 index 00000000000..d4a0ff17a59 --- /dev/null +++ b/src/shared/process-scoped-map.test.ts @@ -0,0 +1,39 @@ +import { afterEach, describe, expect, it } from "vitest"; +import { resolveProcessScopedMap } from "./process-scoped-map.js"; + +const MAP_KEY = Symbol("process-scoped-map:test"); +const OTHER_MAP_KEY = Symbol("process-scoped-map:other"); + +afterEach(() => { + delete (process as unknown as Record)[MAP_KEY]; + delete (process as unknown as Record)[OTHER_MAP_KEY]; +}); + +describe("shared/process-scoped-map", () => { + it("reuses the same map for the same symbol", () => { + const first = resolveProcessScopedMap(MAP_KEY); + first.set("a", 1); + + const second = resolveProcessScopedMap(MAP_KEY); + + expect(second).toBe(first); + expect(second.get("a")).toBe(1); + }); + + it("keeps distinct maps for distinct symbols", () => { + const first = resolveProcessScopedMap(MAP_KEY); + const second = resolveProcessScopedMap(OTHER_MAP_KEY); + + expect(second).not.toBe(first); + }); + + it("reuses a prepopulated process map without replacing it", () => { + const existing = new Map([["a", 1]]); + (process as unknown as Record)[MAP_KEY] = existing; + + const resolved = resolveProcessScopedMap(MAP_KEY); + + expect(resolved).toBe(existing); + expect(resolved.get("a")).toBe(1); + }); +}); diff --git a/src/shared/requirements.test.ts b/src/shared/requirements.test.ts index 06d48ec2e58..0a05a0eb85c 100644 --- a/src/shared/requirements.test.ts +++ b/src/shared/requirements.test.ts @@ -1,7 +1,9 @@ import { describe, expect, it } from "vitest"; import { buildConfigChecks, + evaluateRequirements, evaluateRequirementsFromMetadata, + 
evaluateRequirementsFromMetadataWithRemote, resolveMissingAnyBins, resolveMissingBins, resolveMissingEnv, @@ -79,4 +81,87 @@ describe("requirements helpers", () => { expect(res.missing.os).toEqual(["darwin"]); expect(res.eligible).toBe(false); }); + + it("evaluateRequirements reports config checks and all missing categories directly", () => { + const res = evaluateRequirements({ + always: false, + required: { + bins: ["node"], + anyBins: ["bun", "deno"], + env: ["OPENAI_API_KEY"], + config: ["browser.enabled", "gateway.enabled"], + os: ["darwin"], + }, + hasLocalBin: () => false, + hasRemoteBin: (bin) => bin === "node", + hasRemoteAnyBin: () => false, + localPlatform: "linux", + remotePlatforms: ["windows"], + isEnvSatisfied: () => false, + isConfigSatisfied: (path) => path === "gateway.enabled", + }); + + expect(res.missing).toEqual({ + bins: [], + anyBins: ["bun", "deno"], + env: ["OPENAI_API_KEY"], + config: ["browser.enabled"], + os: ["darwin"], + }); + expect(res.configChecks).toEqual([ + { path: "browser.enabled", satisfied: false }, + { path: "gateway.enabled", satisfied: true }, + ]); + expect(res.eligible).toBe(false); + }); + + it("clears missing requirements when always is true but preserves config checks", () => { + const res = evaluateRequirements({ + always: true, + required: { + bins: ["node"], + anyBins: ["bun"], + env: ["OPENAI_API_KEY"], + config: ["browser.enabled"], + os: ["darwin"], + }, + hasLocalBin: () => false, + localPlatform: "linux", + isEnvSatisfied: () => false, + isConfigSatisfied: () => false, + }); + + expect(res.missing).toEqual({ bins: [], anyBins: [], env: [], config: [], os: [] }); + expect(res.configChecks).toEqual([{ path: "browser.enabled", satisfied: false }]); + expect(res.eligible).toBe(true); + }); + + it("evaluateRequirementsFromMetadataWithRemote wires remote predicates and platforms through", () => { + const res = evaluateRequirementsFromMetadataWithRemote({ + always: false, + metadata: { + requires: { bins: ["node"], 
anyBins: ["bun"], env: ["OPENAI_API_KEY"] }, + os: ["darwin"], + }, + remote: { + hasBin: (bin) => bin === "node", + hasAnyBin: (bins) => bins.includes("bun"), + platforms: ["darwin"], + }, + hasLocalBin: () => false, + localPlatform: "linux", + isEnvSatisfied: (name) => name === "OPENAI_API_KEY", + isConfigSatisfied: () => true, + }); + + expect(res.required).toEqual({ + bins: ["node"], + anyBins: ["bun"], + env: ["OPENAI_API_KEY"], + config: [], + os: ["darwin"], + }); + expect(res.missing).toEqual({ bins: [], anyBins: [], env: [], config: [], os: [] }); + expect(res.eligible).toBe(true); + }); }); diff --git a/src/shared/shared-misc.test.ts b/src/shared/shared-misc.test.ts deleted file mode 100644 index 8a729109513..00000000000 --- a/src/shared/shared-misc.test.ts +++ /dev/null @@ -1,151 +0,0 @@ -import { describe, expect, it, test } from "vitest"; -import { extractTextFromChatContent } from "./chat-content.js"; -import { - getFrontmatterString, - normalizeStringList, - parseFrontmatterBool, - resolveOpenClawManifestBlock, -} from "./frontmatter.js"; -import { resolveNodeIdFromCandidates } from "./node-match.js"; - -describe("extractTextFromChatContent", () => { - it("normalizes string content", () => { - expect(extractTextFromChatContent(" hello\nworld ")).toBe("hello world"); - }); - - it("extracts text blocks from array content", () => { - expect( - extractTextFromChatContent([ - { type: "text", text: " hello " }, - { type: "image_url", image_url: "https://example.com" }, - { type: "text", text: "world" }, - ]), - ).toBe("hello world"); - }); - - it("applies sanitizer when provided", () => { - expect( - extractTextFromChatContent("Here [Tool Call: foo (ID: 1)] ok", { - sanitizeText: (text) => text.replace(/\[Tool Call:[^\]]+\]\s*/g, ""), - }), - ).toBe("Here ok"); - }); - - it("supports custom join and normalization", () => { - expect( - extractTextFromChatContent( - [ - { type: "text", text: " hello " }, - { type: "text", text: "world " }, - ], - { - 
sanitizeText: (text) => text.trim(), - joinWith: "\n", - normalizeText: (text) => text.trim(), - }, - ), - ).toBe("hello\nworld"); - }); -}); - -describe("shared/frontmatter", () => { - test("normalizeStringList handles strings and arrays", () => { - expect(normalizeStringList("a, b,,c")).toEqual(["a", "b", "c"]); - expect(normalizeStringList([" a ", "", "b"])).toEqual(["a", "b"]); - expect(normalizeStringList(null)).toEqual([]); - }); - - test("getFrontmatterString extracts strings only", () => { - expect(getFrontmatterString({ a: "b" }, "a")).toBe("b"); - expect(getFrontmatterString({ a: 1 }, "a")).toBeUndefined(); - }); - - test("parseFrontmatterBool respects fallback", () => { - expect(parseFrontmatterBool("true", false)).toBe(true); - expect(parseFrontmatterBool("false", true)).toBe(false); - expect(parseFrontmatterBool(undefined, true)).toBe(true); - }); - - test("resolveOpenClawManifestBlock parses JSON5 metadata and picks openclaw block", () => { - const frontmatter = { - metadata: "{ openclaw: { foo: 1, bar: 'baz' } }", - }; - expect(resolveOpenClawManifestBlock({ frontmatter })).toEqual({ foo: 1, bar: "baz" }); - }); - - test("resolveOpenClawManifestBlock returns undefined for invalid input", () => { - expect(resolveOpenClawManifestBlock({ frontmatter: {} })).toBeUndefined(); - expect( - resolveOpenClawManifestBlock({ frontmatter: { metadata: "not-json5" } }), - ).toBeUndefined(); - expect( - resolveOpenClawManifestBlock({ frontmatter: { metadata: "{ nope: { a: 1 } }" } }), - ).toBeUndefined(); - }); -}); - -describe("resolveNodeIdFromCandidates", () => { - it("matches nodeId", () => { - expect( - resolveNodeIdFromCandidates( - [ - { nodeId: "mac-123", displayName: "Mac Studio", remoteIp: "100.0.0.1" }, - { nodeId: "pi-456", displayName: "Raspberry Pi", remoteIp: "100.0.0.2" }, - ], - "pi-456", - ), - ).toBe("pi-456"); - }); - - it("matches displayName using normalization", () => { - expect( - resolveNodeIdFromCandidates([{ nodeId: "mac-123", displayName: 
"Mac Studio" }], "mac studio"), - ).toBe("mac-123"); - }); - - it("matches nodeId prefix (>=6 chars)", () => { - expect(resolveNodeIdFromCandidates([{ nodeId: "mac-abcdef" }], "mac-ab")).toBe("mac-abcdef"); - }); - - it("throws unknown node with known list", () => { - expect(() => - resolveNodeIdFromCandidates( - [ - { nodeId: "mac-123", displayName: "Mac Studio", remoteIp: "100.0.0.1" }, - { nodeId: "pi-456" }, - ], - "nope", - ), - ).toThrow(/unknown node: nope.*known: /); - }); - - it("throws ambiguous node with matches list", () => { - expect(() => - resolveNodeIdFromCandidates([{ nodeId: "mac-abcdef" }, { nodeId: "mac-abc999" }], "mac-abc"), - ).toThrow(/ambiguous node: mac-abc.*matches:/); - }); - - it("prefers a unique connected node when names are duplicated", () => { - expect( - resolveNodeIdFromCandidates( - [ - { nodeId: "ios-old", displayName: "iPhone", connected: false }, - { nodeId: "ios-live", displayName: "iPhone", connected: true }, - ], - "iphone", - ), - ).toBe("ios-live"); - }); - - it("stays ambiguous when multiple connected nodes match", () => { - expect(() => - resolveNodeIdFromCandidates( - [ - { nodeId: "ios-a", displayName: "iPhone", connected: true }, - { nodeId: "ios-b", displayName: "iPhone", connected: true }, - ], - "iphone", - ), - ).toThrow(/ambiguous node: iphone.*matches:/); - }); -}); diff --git a/src/shared/string-normalization.test.ts b/src/shared/string-normalization.test.ts index ca92a8ae89c..e0f8c8ae900 100644 --- a/src/shared/string-normalization.test.ts +++ b/src/shared/string-normalization.test.ts @@ -29,10 +29,20 @@ describe("shared/string-normalization", () => { expect(normalizeHyphenSlug(null)).toBe(""); }); + it("collapses repeated separators and trims leading/trailing punctuation", () => { + expect(normalizeHyphenSlug(" ...Hello / World--- ")).toBe("hello-world"); + expect(normalizeHyphenSlug(" ###Team@@@Room### ")).toBe("###team@@@room###"); + }); + it("normalizes @/# prefixed slugs used by channel allowlists", () 
=> { expect(normalizeAtHashSlug(" #My_Channel + Alerts ")).toBe("my-channel-alerts"); expect(normalizeAtHashSlug("@@Room___Name")).toBe("room-name"); expect(normalizeAtHashSlug(undefined)).toBe(""); expect(normalizeAtHashSlug(null)).toBe(""); }); + + it("strips repeated prefixes and collapses separator-only results", () => { + expect(normalizeAtHashSlug("###__Room Name__")).toBe("room-name"); + expect(normalizeAtHashSlug("@@@___")).toBe(""); + }); }); diff --git a/src/shared/string-sample.test.ts b/src/shared/string-sample.test.ts index 4cff7957fe0..7ced1e7407a 100644 --- a/src/shared/string-sample.test.ts +++ b/src/shared/string-sample.test.ts @@ -4,6 +4,7 @@ import { summarizeStringEntries } from "./string-sample.js"; describe("summarizeStringEntries", () => { it("returns emptyText for empty lists", () => { expect(summarizeStringEntries({ entries: [], emptyText: "any" })).toBe("any"); + expect(summarizeStringEntries({ entries: null })).toBe(""); }); it("joins short lists without a suffix", () => { @@ -18,4 +19,27 @@ describe("summarizeStringEntries", () => { }), ).toBe("a, b, c, d (+1)"); }); + + it("uses a floored limit and clamps non-positive values to one entry", () => { + expect( + summarizeStringEntries({ + entries: ["a", "b", "c"], + limit: 2.8, + }), + ).toBe("a, b (+1)"); + expect( + summarizeStringEntries({ + entries: ["a", "b", "c"], + limit: 0, + }), + ).toBe("a (+2)"); + }); + + it("uses the default limit when none is provided", () => { + expect( + summarizeStringEntries({ + entries: ["a", "b", "c", "d", "e", "f", "g"], + }), + ).toBe("a, b, c, d, e, f (+1)"); + }); }); diff --git a/src/shared/subagents-format.test.ts b/src/shared/subagents-format.test.ts new file mode 100644 index 00000000000..34d1f9a8d5d --- /dev/null +++ b/src/shared/subagents-format.test.ts @@ -0,0 +1,58 @@ +import { describe, expect, it } from "vitest"; +import { + formatDurationCompact, + formatTokenShort, + formatTokenUsageDisplay, + resolveIoTokens, + resolveTotalTokens, + 
truncateLine, +} from "./subagents-format.js"; + +describe("shared/subagents-format", () => { + it("formats compact durations across minute, hour, and day buckets", () => { + expect(formatDurationCompact()).toBe("n/a"); + expect(formatDurationCompact(30_000)).toBe("1m"); + expect(formatDurationCompact(61 * 60_000)).toBe("1h1m"); + expect(formatDurationCompact(25 * 60 * 60_000)).toBe("1d1h"); + }); + + it("formats token counts with integer, kilo, and million branches", () => { + expect(formatTokenShort()).toBeUndefined(); + expect(formatTokenShort(999.9)).toBe("999"); + expect(formatTokenShort(1_500)).toBe("1.5k"); + expect(formatTokenShort(15_400)).toBe("15k"); + expect(formatTokenShort(1_250_000)).toBe("1.3m"); + }); + + it("truncates lines only when needed", () => { + expect(truncateLine("short", 10)).toBe("short"); + expect(truncateLine("trim me ", 7)).toBe("trim me..."); + }); + + it("resolves token totals and io breakdowns from valid numeric fields only", () => { + expect(resolveTotalTokens()).toBeUndefined(); + expect(resolveTotalTokens({ totalTokens: 42 })).toBe(42); + expect(resolveTotalTokens({ inputTokens: 10, outputTokens: 5 })).toBe(15); + expect(resolveTotalTokens({ inputTokens: Number.NaN, outputTokens: 5 })).toBeUndefined(); + + expect(resolveIoTokens({ inputTokens: 10, outputTokens: 5 })).toEqual({ + input: 10, + output: 5, + total: 15, + }); + expect(resolveIoTokens({ inputTokens: Number.NaN, outputTokens: 0 })).toBeUndefined(); + }); + + it("formats io and prompt-cache usage displays with fallback branches", () => { + expect( + formatTokenUsageDisplay({ + inputTokens: 1_200, + outputTokens: 300, + totalTokens: 2_100, + }), + ).toBe("tokens 1.5k (in 1.2k / out 300), prompt/cache 2.1k"); + + expect(formatTokenUsageDisplay({ totalTokens: 500 })).toBe("tokens 500 prompt/cache"); + expect(formatTokenUsageDisplay({ inputTokens: 0, outputTokens: 0, totalTokens: 0 })).toBe(""); + }); +}); diff --git a/src/shared/tailscale-status.test.ts 
b/src/shared/tailscale-status.test.ts new file mode 100644 index 00000000000..5826e4b00b3 --- /dev/null +++ b/src/shared/tailscale-status.test.ts @@ -0,0 +1,49 @@ +import { describe, expect, it, vi } from "vitest"; +import { resolveTailnetHostWithRunner } from "./tailscale-status.js"; + +describe("shared/tailscale-status", () => { + it("returns null when no runner is provided", async () => { + await expect(resolveTailnetHostWithRunner()).resolves.toBeNull(); + }); + + it("prefers DNS names and trims trailing dots from status json", async () => { + const run = vi.fn().mockResolvedValue({ + code: 0, + stdout: 'noise\n{"Self":{"DNSName":"mac.tail123.ts.net.","TailscaleIPs":["100.64.0.8"]}}', + }); + + await expect(resolveTailnetHostWithRunner(run)).resolves.toBe("mac.tail123.ts.net"); + expect(run).toHaveBeenCalledWith(["tailscale", "status", "--json"], { timeoutMs: 5000 }); + }); + + it("falls back across command candidates and then to the first tailscale ip", async () => { + const run = vi.fn().mockRejectedValueOnce(new Error("missing binary")).mockResolvedValueOnce({ + code: 0, + stdout: '{"Self":{"TailscaleIPs":["100.64.0.9","fd7a::1"]}}', + }); + + await expect(resolveTailnetHostWithRunner(run)).resolves.toBe("100.64.0.9"); + expect(run).toHaveBeenNthCalledWith( + 2, + ["/Applications/Tailscale.app/Contents/MacOS/Tailscale", "status", "--json"], + { + timeoutMs: 5000, + }, + ); + }); + + it("returns null for non-zero exits, blank output, or invalid json", async () => { + const run = vi + .fn() + .mockResolvedValueOnce({ code: 1, stdout: "boom" }) + .mockResolvedValueOnce({ code: 0, stdout: " " }); + + await expect(resolveTailnetHostWithRunner(run)).resolves.toBeNull(); + + const invalid = vi.fn().mockResolvedValue({ + code: 0, + stdout: "not-json", + }); + await expect(resolveTailnetHostWithRunner(invalid)).resolves.toBeNull(); + }); +}); diff --git a/src/shared/text-chunking.test.ts b/src/shared/text-chunking.test.ts new file mode 100644 index 
00000000000..83b0de77ae5 --- /dev/null +++ b/src/shared/text-chunking.test.ts @@ -0,0 +1,40 @@ +import { describe, expect, it } from "vitest"; +import { chunkTextByBreakResolver } from "./text-chunking.js"; + +describe("shared/text-chunking", () => { + it("returns empty for blank input and the full text when under limit", () => { + expect(chunkTextByBreakResolver("", 10, () => 5)).toEqual([]); + expect(chunkTextByBreakResolver("hello", 10, () => 2)).toEqual(["hello"]); + expect(chunkTextByBreakResolver("hello", 0, () => 2)).toEqual(["hello"]); + }); + + it("splits at resolver-provided breakpoints and trims separator boundaries", () => { + expect( + chunkTextByBreakResolver("alpha beta gamma", 10, (window) => window.lastIndexOf(" ")), + ).toEqual(["alpha", "beta gamma"]); + expect(chunkTextByBreakResolver("abcd efgh", 4, () => 4)).toEqual(["abcd", "efgh"]); + }); + + it("falls back to hard limits for invalid break indexes", () => { + expect(chunkTextByBreakResolver("abcdefghij", 4, () => Number.NaN)).toEqual([ + "abcd", + "efgh", + "ij", + ]); + expect(chunkTextByBreakResolver("abcdefghij", 4, () => 99)).toEqual(["abcd", "efgh", "ij"]); + expect(chunkTextByBreakResolver("abcdefghij", 4, () => 0)).toEqual(["abcd", "efgh", "ij"]); + }); + + it("skips empty chunks created by whitespace-only segments", () => { + expect( + chunkTextByBreakResolver("word next", 5, (window) => window.lastIndexOf(" ")), + ).toEqual(["word", "next"]); + }); + + it("trims trailing whitespace from emitted chunks before continuing", () => { + expect(chunkTextByBreakResolver("abc def", 6, (window) => window.lastIndexOf(" "))).toEqual([ + "abc", + "def", + ]); + }); +}); diff --git a/src/shared/text/assistant-visible-text.test.ts b/src/shared/text/assistant-visible-text.test.ts index 234d37b96da..1962a86e371 100644 --- a/src/shared/text/assistant-visible-text.test.ts +++ b/src/shared/text/assistant-visible-text.test.ts @@ -42,8 +42,40 @@ describe("stripAssistantInternalScaffolding", () => { 
expect(stripAssistantInternalScaffolding(input)).toBe(input); }); + it("keeps relevant-memories tags inside inline code", () => { + const input = "Use `example` literally."; + expect(stripAssistantInternalScaffolding(input)).toBe(input); + }); + it("hides unfinished relevant-memories blocks", () => { const input = ["Hello", "", "internal-only"].join("\n"); expect(stripAssistantInternalScaffolding(input)).toBe("Hello\n"); }); + + it("trims leading whitespace after stripping scaffolding", () => { + const input = [ + "", + "secret", + "", + " ", + "", + "internal note", + "", + " Visible", + ].join("\n"); + expect(stripAssistantInternalScaffolding(input)).toBe("Visible"); + }); + + it("preserves unfinished reasoning text while still stripping memory blocks", () => { + const input = [ + "Before", + "", + "secret", + "", + "internal note", + "", + "After", + ].join("\n"); + expect(stripAssistantInternalScaffolding(input)).toBe("Before\n\nsecret\n\nAfter"); + }); }); diff --git a/src/shared/text/code-regions.test.ts b/src/shared/text/code-regions.test.ts new file mode 100644 index 00000000000..05934383bd2 --- /dev/null +++ b/src/shared/text/code-regions.test.ts @@ -0,0 +1,39 @@ +import { describe, expect, it } from "vitest"; +import { findCodeRegions, isInsideCode } from "./code-regions.js"; + +describe("shared/text/code-regions", () => { + it("finds fenced and inline code regions without double-counting inline code inside fences", () => { + const text = [ + "before `inline` after", + "```ts", + "const a = `inside fence`;", + "```", + "tail", + ].join("\n"); + + const regions = findCodeRegions(text); + + expect(regions).toHaveLength(2); + expect(text.slice(regions[0].start, regions[0].end)).toBe("`inline`"); + expect(text.slice(regions[1].start, regions[1].end)).toContain("```ts"); + }); + + it("accepts alternate fence markers and unterminated trailing fences", () => { + const text = "~~~js\nconsole.log(1)\n~~~\nplain\n```\nunterminated"; + const regions = 
findCodeRegions(text); + + expect(regions).toHaveLength(2); + expect(text.slice(regions[0].start, regions[0].end)).toContain("~~~js"); + expect(text.slice(regions[1].start, regions[1].end)).toBe("```\nunterminated"); + }); + + it("reports whether positions are inside discovered regions", () => { + const text = "plain `code` done"; + const regions = findCodeRegions(text); + const codeStart = text.indexOf("code"); + const plainStart = text.indexOf("plain"); + + expect(isInsideCode(codeStart, regions)).toBe(true); + expect(isInsideCode(plainStart, regions)).toBe(false); + }); +}); diff --git a/src/shared/text/join-segments.test.ts b/src/shared/text/join-segments.test.ts index 279516e4269..8da5c4644a7 100644 --- a/src/shared/text/join-segments.test.ts +++ b/src/shared/text/join-segments.test.ts @@ -9,6 +9,12 @@ describe("concatOptionalTextSegments", () => { it("keeps explicit empty-string right value", () => { expect(concatOptionalTextSegments({ left: "A", right: "" })).toBe(""); }); + + it("falls back to whichever side is present and honors custom separators", () => { + expect(concatOptionalTextSegments({ left: "A" })).toBe("A"); + expect(concatOptionalTextSegments({ right: "B" })).toBe("B"); + expect(concatOptionalTextSegments({ left: "A", right: "B", separator: " | " })).toBe("A | B"); + }); }); describe("joinPresentTextSegments", () => { @@ -23,4 +29,11 @@ describe("joinPresentTextSegments", () => { it("trims segments when requested", () => { expect(joinPresentTextSegments([" A ", " B "], { trim: true })).toBe("A\n\nB"); }); + + it("keeps whitespace-only segments unless trim is enabled and supports custom separators", () => { + expect(joinPresentTextSegments(["A", " ", "B"], { separator: " | " })).toBe("A | | B"); + expect(joinPresentTextSegments(["A", " ", "B"], { trim: true, separator: " | " })).toBe( + "A | B", + ); + }); }); diff --git a/src/shared/usage-aggregates.test.ts b/src/shared/usage-aggregates.test.ts new file mode 100644 index 00000000000..e5ba960ad95 
--- /dev/null +++ b/src/shared/usage-aggregates.test.ts @@ -0,0 +1,117 @@ +import { describe, expect, it } from "vitest"; +import { + buildUsageAggregateTail, + mergeUsageDailyLatency, + mergeUsageLatency, +} from "./usage-aggregates.js"; + +describe("shared/usage-aggregates", () => { + it("merges latency totals and ignores empty inputs", () => { + const totals = { + count: 1, + sum: 100, + min: 100, + max: 100, + p95Max: 100, + }; + + mergeUsageLatency(totals, undefined); + mergeUsageLatency(totals, { + count: 2, + avgMs: 50, + minMs: 20, + maxMs: 90, + p95Ms: 80, + }); + + expect(totals).toEqual({ + count: 3, + sum: 200, + min: 20, + max: 100, + p95Max: 100, + }); + }); + + it("merges daily latency by date and computes aggregate tail sorting", () => { + const dailyLatencyMap = new Map< + string, + { + date: string; + count: number; + sum: number; + min: number; + max: number; + p95Max: number; + } + >(); + + mergeUsageDailyLatency(dailyLatencyMap, [ + { date: "2026-03-12", count: 2, avgMs: 50, minMs: 20, maxMs: 90, p95Ms: 80 }, + { date: "2026-03-12", count: 1, avgMs: 120, minMs: 120, maxMs: 120, p95Ms: 120 }, + { date: "2026-03-11", count: 1, avgMs: 30, minMs: 30, maxMs: 30, p95Ms: 30 }, + ]); + + const tail = buildUsageAggregateTail({ + byChannelMap: new Map([ + ["discord", { totalCost: 4 }], + ["telegram", { totalCost: 8 }], + ]), + latencyTotals: { + count: 3, + sum: 200, + min: 20, + max: 120, + p95Max: 120, + }, + dailyLatencyMap, + modelDailyMap: new Map([ + ["b", { date: "2026-03-12", cost: 1 }], + ["a", { date: "2026-03-12", cost: 2 }], + ["c", { date: "2026-03-11", cost: 9 }], + ]), + dailyMap: new Map([ + ["b", { date: "2026-03-12" }], + ["a", { date: "2026-03-11" }], + ]), + }); + + expect(tail.byChannel.map((entry) => entry.channel)).toEqual(["telegram", "discord"]); + expect(tail.latency).toEqual({ + count: 3, + avgMs: 200 / 3, + minMs: 20, + maxMs: 120, + p95Ms: 120, + }); + expect(tail.dailyLatency).toEqual([ + { date: "2026-03-11", count: 1, 
avgMs: 30, minMs: 30, maxMs: 30, p95Ms: 30 }, + { date: "2026-03-12", count: 3, avgMs: 220 / 3, minMs: 20, maxMs: 120, p95Ms: 120 }, + ]); + expect(tail.modelDaily).toEqual([ + { date: "2026-03-11", cost: 9 }, + { date: "2026-03-12", cost: 2 }, + { date: "2026-03-12", cost: 1 }, + ]); + expect(tail.daily).toEqual([{ date: "2026-03-11" }, { date: "2026-03-12" }]); + }); + + it("omits latency when no requests were counted", () => { + const tail = buildUsageAggregateTail({ + byChannelMap: new Map(), + latencyTotals: { + count: 0, + sum: 0, + min: Number.POSITIVE_INFINITY, + max: 0, + p95Max: 0, + }, + dailyLatencyMap: new Map(), + modelDailyMap: new Map(), + dailyMap: new Map(), + }); + + expect(tail.latency).toBeUndefined(); + expect(tail.dailyLatency).toEqual([]); + }); +}); diff --git a/src/slack/interactive-replies.test.ts b/src/slack/interactive-replies.test.ts new file mode 100644 index 00000000000..5222a4fc873 --- /dev/null +++ b/src/slack/interactive-replies.test.ts @@ -0,0 +1,38 @@ +import { describe, expect, it } from "vitest"; +import type { OpenClawConfig } from "../config/config.js"; +import { isSlackInteractiveRepliesEnabled } from "./interactive-replies.js"; + +describe("isSlackInteractiveRepliesEnabled", () => { + it("fails closed when accountId is unknown and multiple accounts exist", () => { + const cfg = { + channels: { + slack: { + accounts: { + one: { + capabilities: { interactiveReplies: true }, + }, + two: {}, + }, + }, + }, + } as OpenClawConfig; + + expect(isSlackInteractiveRepliesEnabled({ cfg, accountId: undefined })).toBe(false); + }); + + it("uses the only configured account when accountId is unknown", () => { + const cfg = { + channels: { + slack: { + accounts: { + only: { + capabilities: { interactiveReplies: true }, + }, + }, + }, + }, + } as OpenClawConfig; + + expect(isSlackInteractiveRepliesEnabled({ cfg, accountId: undefined })).toBe(true); + }); +}); diff --git a/src/slack/interactive-replies.ts b/src/slack/interactive-replies.ts 
new file mode 100644 index 00000000000..399c186cfdc --- /dev/null +++ b/src/slack/interactive-replies.ts @@ -0,0 +1,36 @@ +import type { OpenClawConfig } from "../config/config.js"; +import { listSlackAccountIds, resolveSlackAccount } from "./accounts.js"; + +function resolveInteractiveRepliesFromCapabilities(capabilities: unknown): boolean { + if (!capabilities) { + return false; + } + if (Array.isArray(capabilities)) { + return capabilities.some( + (entry) => String(entry).trim().toLowerCase() === "interactivereplies", + ); + } + if (typeof capabilities === "object") { + return (capabilities as { interactiveReplies?: unknown }).interactiveReplies === true; + } + return false; +} + +export function isSlackInteractiveRepliesEnabled(params: { + cfg: OpenClawConfig; + accountId?: string | null; +}): boolean { + if (params.accountId) { + const account = resolveSlackAccount({ cfg: params.cfg, accountId: params.accountId }); + return resolveInteractiveRepliesFromCapabilities(account.config.capabilities); + } + const accountIds = listSlackAccountIds(params.cfg); + if (accountIds.length === 0) { + return resolveInteractiveRepliesFromCapabilities(params.cfg.channels?.slack?.capabilities); + } + if (accountIds.length > 1) { + return false; + } + const account = resolveSlackAccount({ cfg: params.cfg, accountId: accountIds[0] }); + return resolveInteractiveRepliesFromCapabilities(account.config.capabilities); +} diff --git a/src/slack/probe.test.ts b/src/slack/probe.test.ts new file mode 100644 index 00000000000..501d808d492 --- /dev/null +++ b/src/slack/probe.test.ts @@ -0,0 +1,64 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; + +const authTestMock = vi.hoisted(() => vi.fn()); +const createSlackWebClientMock = vi.hoisted(() => vi.fn()); +const withTimeoutMock = vi.hoisted(() => vi.fn()); + +vi.mock("./client.js", () => ({ + createSlackWebClient: createSlackWebClientMock, +})); + +vi.mock("../utils/with-timeout.js", () => ({ + withTimeout: withTimeoutMock, 
+})); + +const { probeSlack } = await import("./probe.js"); + +describe("probeSlack", () => { + beforeEach(() => { + authTestMock.mockReset(); + createSlackWebClientMock.mockReset(); + withTimeoutMock.mockReset(); + + createSlackWebClientMock.mockReturnValue({ + auth: { + test: authTestMock, + }, + }); + withTimeoutMock.mockImplementation(async (promise: Promise) => await promise); + }); + + it("maps Slack auth metadata on success", async () => { + vi.spyOn(Date, "now").mockReturnValueOnce(100).mockReturnValueOnce(145); + authTestMock.mockResolvedValue({ + ok: true, + user_id: "U123", + user: "openclaw-bot", + team_id: "T123", + team: "OpenClaw", + }); + + await expect(probeSlack("xoxb-test", 2500)).resolves.toEqual({ + ok: true, + status: 200, + elapsedMs: 45, + bot: { id: "U123", name: "openclaw-bot" }, + team: { id: "T123", name: "OpenClaw" }, + }); + expect(createSlackWebClientMock).toHaveBeenCalledWith("xoxb-test"); + expect(withTimeoutMock).toHaveBeenCalledWith(expect.any(Promise), 2500); + }); + + it("keeps optional auth metadata fields undefined when Slack omits them", async () => { + vi.spyOn(Date, "now").mockReturnValueOnce(200).mockReturnValueOnce(235); + authTestMock.mockResolvedValue({ ok: true }); + + const result = await probeSlack("xoxb-test"); + + expect(result.ok).toBe(true); + expect(result.status).toBe(200); + expect(result.elapsedMs).toBe(35); + expect(result.bot).toStrictEqual({ id: undefined, name: undefined }); + expect(result.team).toStrictEqual({ id: undefined, name: undefined }); + }); +}); diff --git a/src/slack/probe.ts b/src/slack/probe.ts index 22857ca2bc6..165c5af636b 100644 --- a/src/slack/probe.ts +++ b/src/slack/probe.ts @@ -26,8 +26,8 @@ export async function probeSlack(token: string, timeoutMs = 2500): Promise { return callArgs?.updateLastRoute; } + function buildNamedAccountDmMessage(messageId = 1) { + return { + message_id: messageId, + chat: { id: 814912386, type: "private" as const }, + date: 1700000000 + messageId - 1, + 
text: "hello", + from: { id: 814912386, first_name: "Alice" }, + }; + } + + async function buildNamedAccountDmContext(accountId = "atlas", messageId = 1) { + setRuntimeConfigSnapshot(baseCfg); + return await buildTelegramMessageContextForTest({ + cfg: baseCfg, + accountId, + message: buildNamedAccountDmMessage(messageId), + }); + } + it("allows DM through for a named account with no explicit binding", async () => { setRuntimeConfigSnapshot(baseCfg); @@ -47,67 +66,21 @@ describe("buildTelegramMessageContext named-account DM fallback", () => { }); it("uses a per-account session key for named-account DMs", async () => { - setRuntimeConfigSnapshot(baseCfg); - - const ctx = await buildTelegramMessageContextForTest({ - cfg: baseCfg, - accountId: "atlas", - message: { - message_id: 1, - chat: { id: 814912386, type: "private" }, - date: 1700000000, - text: "hello", - from: { id: 814912386, first_name: "Alice" }, - }, - }); + const ctx = await buildNamedAccountDmContext(); expect(ctx?.ctxPayload?.SessionKey).toBe("agent:main:telegram:atlas:direct:814912386"); }); it("keeps named-account fallback lastRoute on the isolated DM session", async () => { - setRuntimeConfigSnapshot(baseCfg); - - const ctx = await buildTelegramMessageContextForTest({ - cfg: baseCfg, - accountId: "atlas", - message: { - message_id: 1, - chat: { id: 814912386, type: "private" }, - date: 1700000000, - text: "hello", - from: { id: 814912386, first_name: "Alice" }, - }, - }); + const ctx = await buildNamedAccountDmContext(); expect(ctx?.ctxPayload?.SessionKey).toBe("agent:main:telegram:atlas:direct:814912386"); expect(getLastUpdateLastRoute()?.sessionKey).toBe("agent:main:telegram:atlas:direct:814912386"); }); it("isolates sessions between named accounts that share the default agent", async () => { - setRuntimeConfigSnapshot(baseCfg); - - const atlas = await buildTelegramMessageContextForTest({ - cfg: baseCfg, - accountId: "atlas", - message: { - message_id: 1, - chat: { id: 814912386, type: "private" }, 
- date: 1700000000, - text: "hello", - from: { id: 814912386, first_name: "Alice" }, - }, - }); - const skynet = await buildTelegramMessageContextForTest({ - cfg: baseCfg, - accountId: "skynet", - message: { - message_id: 2, - chat: { id: 814912386, type: "private" }, - date: 1700000001, - text: "hello", - from: { id: 814912386, first_name: "Alice" }, - }, - }); + const atlas = await buildNamedAccountDmContext("atlas", 1); + const skynet = await buildNamedAccountDmContext("skynet", 2); expect(atlas?.ctxPayload?.SessionKey).toBe("agent:main:telegram:atlas:direct:814912386"); expect(skynet?.ctxPayload?.SessionKey).toBe("agent:main:telegram:skynet:direct:814912386"); diff --git a/src/telegram/bot-message.test.ts b/src/telegram/bot-message.test.ts index 4a745cbbe47..14f3ea37594 100644 --- a/src/telegram/bot-message.test.ts +++ b/src/telegram/bot-message.test.ts @@ -57,6 +57,21 @@ describe("telegram bot message processor", () => { ); } + function createDispatchFailureHarness( + context: Record, + sendMessage: ReturnType, + ) { + const runtimeError = vi.fn(); + buildTelegramMessageContext.mockResolvedValue(context); + dispatchTelegramMessage.mockRejectedValue(new Error("dispatch exploded")); + const processMessage = createTelegramMessageProcessor({ + ...baseDeps, + bot: { api: { sendMessage } }, + runtime: { error: runtimeError }, + } as unknown as Parameters[0]); + return { processMessage, runtimeError }; + } + it("dispatches when context is available", async () => { buildTelegramMessageContext.mockResolvedValue({ route: { sessionKey: "agent:main:main" } }); @@ -75,19 +90,14 @@ describe("telegram bot message processor", () => { it("sends user-visible fallback when dispatch throws", async () => { const sendMessage = vi.fn().mockResolvedValue(undefined); - const runtimeError = vi.fn(); - buildTelegramMessageContext.mockResolvedValue({ - chatId: 123, - threadSpec: { id: 456 }, - route: { sessionKey: "agent:main:main" }, - }); - dispatchTelegramMessage.mockRejectedValue(new 
Error("dispatch exploded")); - - const processMessage = createTelegramMessageProcessor({ - ...baseDeps, - bot: { api: { sendMessage } }, - runtime: { error: runtimeError }, - } as unknown as Parameters[0]); + const { processMessage, runtimeError } = createDispatchFailureHarness( + { + chatId: 123, + threadSpec: { id: 456 }, + route: { sessionKey: "agent:main:main" }, + }, + sendMessage, + ); await expect(processSampleMessage(processMessage)).resolves.toBeUndefined(); expect(sendMessage).toHaveBeenCalledWith( @@ -100,18 +110,13 @@ describe("telegram bot message processor", () => { it("swallows fallback delivery failures after dispatch throws", async () => { const sendMessage = vi.fn().mockRejectedValue(new Error("blocked by user")); - const runtimeError = vi.fn(); - buildTelegramMessageContext.mockResolvedValue({ - chatId: 123, - route: { sessionKey: "agent:main:main" }, - }); - dispatchTelegramMessage.mockRejectedValue(new Error("dispatch exploded")); - - const processMessage = createTelegramMessageProcessor({ - ...baseDeps, - bot: { api: { sendMessage } }, - runtime: { error: runtimeError }, - } as unknown as Parameters[0]); + const { processMessage, runtimeError } = createDispatchFailureHarness( + { + chatId: 123, + route: { sessionKey: "agent:main:main" }, + }, + sendMessage, + ); await expect(processSampleMessage(processMessage)).resolves.toBeUndefined(); expect(sendMessage).toHaveBeenCalledWith( diff --git a/src/telegram/bot-native-commands.group-auth.test.ts b/src/telegram/bot-native-commands.group-auth.test.ts index 77d73497c26..cca25aedc2c 100644 --- a/src/telegram/bot-native-commands.group-auth.test.ts +++ b/src/telegram/bot-native-commands.group-auth.test.ts @@ -1,26 +1,12 @@ -import { describe, expect, it, vi } from "vitest"; +import { describe, expect, it } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; import type { ChannelGroupPolicy } from "../config/group-policy.js"; import type { TelegramAccountConfig } from 
"../config/types.js"; -import type { RuntimeEnv } from "../runtime.js"; -import { registerTelegramNativeCommands } from "./bot-native-commands.js"; - -const getPluginCommandSpecs = vi.hoisted(() => vi.fn(() => [])); -const matchPluginCommand = vi.hoisted(() => vi.fn(() => null)); -const executePluginCommand = vi.hoisted(() => vi.fn(async () => ({ text: "ok" }))); - -vi.mock("../plugins/commands.js", () => ({ - getPluginCommandSpecs, - matchPluginCommand, - executePluginCommand, -})); - -const deliverReplies = vi.hoisted(() => vi.fn(async () => {})); -vi.mock("./bot/delivery.js", () => ({ deliverReplies })); - -vi.mock("../pairing/pairing-store.js", () => ({ - readChannelAllowFromStore: vi.fn(async () => []), -})); +import { + createNativeCommandsHarness, + createTelegramGroupCommandContext, + findNotAuthorizedCalls, +} from "./bot-native-commands.test-helpers.js"; describe("native command auth in groups", () => { function setup(params: { @@ -32,32 +18,12 @@ describe("native command auth in groups", () => { groupConfig?: Record; resolveGroupPolicy?: () => ChannelGroupPolicy; }) { - const handlers: Record Promise> = {}; - const sendMessage = vi.fn().mockResolvedValue(undefined); - const bot = { - api: { - setMyCommands: vi.fn().mockResolvedValue(undefined), - sendMessage, - }, - command: (name: string, handler: (ctx: unknown) => Promise) => { - handlers[name] = handler; - }, - } as const; - - registerTelegramNativeCommands({ - bot: bot as unknown as Parameters[0]["bot"], + return createNativeCommandsHarness({ cfg: params.cfg ?? ({} as OpenClawConfig), - runtime: {} as unknown as RuntimeEnv, - accountId: "default", telegramCfg: params.telegramCfg ?? ({} as TelegramAccountConfig), allowFrom: params.allowFrom ?? [], groupAllowFrom: params.groupAllowFrom ?? [], - replyToMode: "off", - textLimit: 4000, useAccessGroups: params.useAccessGroups ?? 
false, - nativeEnabled: true, - nativeSkillsEnabled: false, - nativeDisabledExplicit: false, resolveGroupPolicy: params.resolveGroupPolicy ?? (() => @@ -65,15 +31,8 @@ describe("native command auth in groups", () => { allowlistEnabled: false, allowed: true, }) as ChannelGroupPolicy), - resolveTelegramGroupConfig: () => ({ - groupConfig: params.groupConfig as undefined, - topicConfig: undefined, - }), - shouldSkipUpdate: () => false, - opts: { token: "token" }, + groupConfig: params.groupConfig, }); - - return { handlers, sendMessage }; } it("authorizes native commands in groups when sender is in groupAllowFrom", async () => { @@ -83,23 +42,11 @@ describe("native command auth in groups", () => { // no allowFrom — sender is NOT in DM allowlist }); - const ctx = { - message: { - chat: { id: -100999, type: "supergroup", is_forum: true }, - from: { id: 12345, username: "testuser" }, - message_thread_id: 42, - message_id: 1, - date: 1700000000, - }, - match: "", - }; + const ctx = createTelegramGroupCommandContext(); await handlers.status?.(ctx); - // should NOT send "not authorized" rejection - const notAuthCalls = sendMessage.mock.calls.filter( - (call) => typeof call[1] === "string" && call[1].includes("not authorized"), - ); + const notAuthCalls = findNotAuthorizedCalls(sendMessage); expect(notAuthCalls).toHaveLength(0); }); @@ -117,22 +64,11 @@ describe("native command auth in groups", () => { useAccessGroups: true, }); - const ctx = { - message: { - chat: { id: -100999, type: "supergroup", is_forum: true }, - from: { id: 12345, username: "testuser" }, - message_thread_id: 42, - message_id: 1, - date: 1700000000, - }, - match: "", - }; + const ctx = createTelegramGroupCommandContext(); await handlers.status?.(ctx); - const notAuthCalls = sendMessage.mock.calls.filter( - (call) => typeof call[1] === "string" && call[1].includes("not authorized"), - ); + const notAuthCalls = findNotAuthorizedCalls(sendMessage); expect(notAuthCalls).toHaveLength(0); }); @@ -149,16 
+85,7 @@ describe("native command auth in groups", () => { useAccessGroups: true, }); - const ctx = { - message: { - chat: { id: -100999, type: "supergroup", is_forum: true }, - from: { id: 12345, username: "testuser" }, - message_thread_id: 42, - message_id: 1, - date: 1700000000, - }, - match: "", - }; + const ctx = createTelegramGroupCommandContext(); await handlers.status?.(ctx); @@ -189,16 +116,7 @@ describe("native command auth in groups", () => { }) as ChannelGroupPolicy, }); - const ctx = { - message: { - chat: { id: -100999, type: "supergroup", is_forum: true }, - from: { id: 12345, username: "testuser" }, - message_thread_id: 42, - message_id: 1, - date: 1700000000, - }, - match: "", - }; + const ctx = createTelegramGroupCommandContext(); await handlers.status?.(ctx); @@ -226,16 +144,7 @@ describe("native command auth in groups", () => { }) as ChannelGroupPolicy, }); - const ctx = { - message: { - chat: { id: -100999, type: "supergroup", is_forum: true }, - from: { id: 12345, username: "testuser" }, - message_thread_id: 42, - message_id: 1, - date: 1700000000, - }, - match: "", - }; + const ctx = createTelegramGroupCommandContext(); await handlers.status?.(ctx); @@ -253,22 +162,13 @@ describe("native command auth in groups", () => { useAccessGroups: true, }); - const ctx = { - message: { - chat: { id: -100999, type: "supergroup", is_forum: true }, - from: { id: 12345, username: "intruder" }, - message_thread_id: 42, - message_id: 1, - date: 1700000000, - }, - match: "", - }; + const ctx = createTelegramGroupCommandContext({ + username: "intruder", + }); await handlers.status?.(ctx); - const notAuthCalls = sendMessage.mock.calls.filter( - (call) => typeof call[1] === "string" && call[1].includes("not authorized"), - ); + const notAuthCalls = findNotAuthorizedCalls(sendMessage); expect(notAuthCalls.length).toBeGreaterThan(0); }); @@ -279,16 +179,9 @@ describe("native command auth in groups", () => { useAccessGroups: true, }); - const ctx = { - message: { - 
chat: { id: -100999, type: "supergroup", is_forum: true }, - from: { id: 12345, username: "intruder" }, - message_thread_id: 42, - message_id: 1, - date: 1700000000, - }, - match: "", - }; + const ctx = createTelegramGroupCommandContext({ + username: "intruder", + }); await handlers.status?.(ctx); diff --git a/src/telegram/bot-native-commands.plugin-auth.test.ts b/src/telegram/bot-native-commands.plugin-auth.test.ts index f6f6d16c2fc..d611250bdeb 100644 --- a/src/telegram/bot-native-commands.plugin-auth.test.ts +++ b/src/telegram/bot-native-commands.plugin-auth.test.ts @@ -1,73 +1,47 @@ import { describe, expect, it, vi } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; -import type { ChannelGroupPolicy } from "../config/group-policy.js"; import type { TelegramAccountConfig } from "../config/types.js"; -import type { RuntimeEnv } from "../runtime.js"; -import { registerTelegramNativeCommands } from "./bot-native-commands.js"; - -const getPluginCommandSpecs = vi.hoisted(() => vi.fn()); -const matchPluginCommand = vi.hoisted(() => vi.fn()); -const executePluginCommand = vi.hoisted(() => vi.fn()); - -vi.mock("../plugins/commands.js", () => ({ +import { + createNativeCommandsHarness, + deliverReplies, + executePluginCommand, getPluginCommandSpecs, matchPluginCommand, - executePluginCommand, -})); +} from "./bot-native-commands.test-helpers.js"; -const deliverReplies = vi.hoisted(() => vi.fn(async () => {})); -vi.mock("./bot/delivery.js", () => ({ deliverReplies })); +type GetPluginCommandSpecsMock = { + mockReturnValue: ( + value: ReturnType, + ) => unknown; +}; +type MatchPluginCommandMock = { + mockReturnValue: ( + value: ReturnType, + ) => unknown; +}; +type ExecutePluginCommandMock = { + mockResolvedValue: ( + value: Awaited>, + ) => unknown; +}; -vi.mock("../pairing/pairing-store.js", () => ({ - readChannelAllowFromStore: vi.fn(async () => []), -})); +const getPluginCommandSpecsMock = getPluginCommandSpecs as unknown as 
GetPluginCommandSpecsMock; +const matchPluginCommandMock = matchPluginCommand as unknown as MatchPluginCommandMock; +const executePluginCommandMock = executePluginCommand as unknown as ExecutePluginCommandMock; describe("registerTelegramNativeCommands (plugin auth)", () => { it("does not register plugin commands in menu when native=false but keeps handlers available", () => { const specs = Array.from({ length: 101 }, (_, i) => ({ name: `cmd_${i}`, description: `Command ${i}`, + acceptsArgs: false, })); - getPluginCommandSpecs.mockReturnValue(specs); + getPluginCommandSpecsMock.mockReturnValue(specs); - const handlers: Record Promise> = {}; - const setMyCommands = vi.fn().mockResolvedValue(undefined); - const log = vi.fn(); - const bot = { - api: { - setMyCommands, - sendMessage: vi.fn(), - }, - command: (name: string, handler: (ctx: unknown) => Promise) => { - handlers[name] = handler; - }, - } as const; - - registerTelegramNativeCommands({ - bot: bot as unknown as Parameters[0]["bot"], + const { handlers, setMyCommands, log } = createNativeCommandsHarness({ cfg: {} as OpenClawConfig, - runtime: { log } as unknown as RuntimeEnv, - accountId: "default", telegramCfg: {} as TelegramAccountConfig, - allowFrom: [], - groupAllowFrom: [], - replyToMode: "off", - textLimit: 4000, - useAccessGroups: false, nativeEnabled: false, - nativeSkillsEnabled: false, - nativeDisabledExplicit: false, - resolveGroupPolicy: () => - ({ - allowlistEnabled: false, - allowed: true, - }) as ChannelGroupPolicy, - resolveTelegramGroupConfig: () => ({ - groupConfig: undefined, - topicConfig: undefined, - }), - shouldSkipUpdate: () => false, - opts: { token: "token" }, }); expect(setMyCommands).not.toHaveBeenCalled(); @@ -79,54 +53,22 @@ describe("registerTelegramNativeCommands (plugin auth)", () => { const command = { name: "plugin", description: "Plugin command", + pluginId: "test-plugin", requireAuth: false, handler: vi.fn(), } as const; - getPluginCommandSpecs.mockReturnValue([{ name: 
"plugin", description: "Plugin command" }]); - matchPluginCommand.mockReturnValue({ command, args: undefined }); - executePluginCommand.mockResolvedValue({ text: "ok" }); + getPluginCommandSpecsMock.mockReturnValue([ + { name: "plugin", description: "Plugin command", acceptsArgs: false }, + ]); + matchPluginCommandMock.mockReturnValue({ command, args: undefined }); + executePluginCommandMock.mockResolvedValue({ text: "ok" }); - const handlers: Record Promise> = {}; - const bot = { - api: { - setMyCommands: vi.fn().mockResolvedValue(undefined), - sendMessage: vi.fn(), - }, - command: (name: string, handler: (ctx: unknown) => Promise) => { - handlers[name] = handler; - }, - } as const; - - const cfg = {} as OpenClawConfig; - const telegramCfg = {} as TelegramAccountConfig; - const resolveGroupPolicy = () => - ({ - allowlistEnabled: false, - allowed: true, - }) as ChannelGroupPolicy; - - registerTelegramNativeCommands({ - bot: bot as unknown as Parameters[0]["bot"], - cfg, - runtime: {} as unknown as RuntimeEnv, - accountId: "default", - telegramCfg, + const { handlers, bot } = createNativeCommandsHarness({ + cfg: {} as OpenClawConfig, + telegramCfg: {} as TelegramAccountConfig, allowFrom: ["999"], - groupAllowFrom: [], - replyToMode: "off", - textLimit: 4000, - useAccessGroups: false, nativeEnabled: false, - nativeSkillsEnabled: false, - nativeDisabledExplicit: false, - resolveGroupPolicy, - resolveTelegramGroupConfig: () => ({ - groupConfig: undefined, - topicConfig: undefined, - }), - shouldSkipUpdate: () => false, - opts: { token: "token" }, }); const ctx = { diff --git a/src/telegram/bot-native-commands.session-meta.test.ts b/src/telegram/bot-native-commands.session-meta.test.ts index 1d1b7df5fc2..43b5bb4133f 100644 --- a/src/telegram/bot-native-commands.session-meta.test.ts +++ b/src/telegram/bot-native-commands.session-meta.test.ts @@ -4,7 +4,8 @@ import { registerTelegramNativeCommands, type RegisterTelegramHandlerParams, } from "./bot-native-commands.js"; 
-import { createNativeCommandTestParams } from "./bot-native-commands.test-helpers.js"; + +type RegisterTelegramNativeCommandsParams = Parameters[0]; // All mocks scoped to this file only — does not affect bot-native-commands.test.ts @@ -108,6 +109,48 @@ function createDeferred() { return { promise, resolve }; } +function createNativeCommandTestParams( + params: Partial = {}, +): RegisterTelegramNativeCommandsParams { + const log = vi.fn(); + return { + bot: + params.bot ?? + ({ + api: { + setMyCommands: vi.fn().mockResolvedValue(undefined), + sendMessage: vi.fn().mockResolvedValue(undefined), + }, + command: vi.fn(), + } as unknown as RegisterTelegramNativeCommandsParams["bot"]), + cfg: params.cfg ?? ({} as OpenClawConfig), + runtime: + params.runtime ?? ({ log } as unknown as RegisterTelegramNativeCommandsParams["runtime"]), + accountId: params.accountId ?? "default", + telegramCfg: params.telegramCfg ?? ({} as RegisterTelegramNativeCommandsParams["telegramCfg"]), + allowFrom: params.allowFrom ?? [], + groupAllowFrom: params.groupAllowFrom ?? [], + replyToMode: params.replyToMode ?? "off", + textLimit: params.textLimit ?? 4000, + useAccessGroups: params.useAccessGroups ?? false, + nativeEnabled: params.nativeEnabled ?? true, + nativeSkillsEnabled: params.nativeSkillsEnabled ?? false, + nativeDisabledExplicit: params.nativeDisabledExplicit ?? false, + resolveGroupPolicy: + params.resolveGroupPolicy ?? + (() => + ({ + allowlistEnabled: false, + allowed: true, + }) as ReturnType), + resolveTelegramGroupConfig: + params.resolveTelegramGroupConfig ?? + (() => ({ groupConfig: undefined, topicConfig: undefined })), + shouldSkipUpdate: params.shouldSkipUpdate ?? (() => false), + opts: params.opts ?? 
{ token: "token" }, + }; +} + type TelegramCommandHandler = (ctx: unknown) => Promise; function buildStatusCommandContext() { diff --git a/src/telegram/bot-native-commands.skills-allowlist.test.ts b/src/telegram/bot-native-commands.skills-allowlist.test.ts index 9c5fce1295c..40a428064e1 100644 --- a/src/telegram/bot-native-commands.skills-allowlist.test.ts +++ b/src/telegram/bot-native-commands.skills-allowlist.test.ts @@ -6,7 +6,6 @@ import { writeSkill } from "../agents/skills.e2e-test-helpers.js"; import type { OpenClawConfig } from "../config/config.js"; import type { TelegramAccountConfig } from "../config/types.js"; import { registerTelegramNativeCommands } from "./bot-native-commands.js"; -import { createNativeCommandTestParams } from "./bot-native-commands.test-helpers.js"; const pluginCommandMocks = vi.hoisted(() => ({ getPluginCommandSpecs: vi.fn(() => []), @@ -77,18 +76,40 @@ describe("registerTelegramNativeCommands skill allowlist integration", () => { }; registerTelegramNativeCommands({ - ...createNativeCommandTestParams({ - bot: { - api: { - setMyCommands, - sendMessage: vi.fn().mockResolvedValue(undefined), - }, - command: vi.fn(), - } as unknown as Parameters[0]["bot"], - cfg, - accountId: "bot-a", - telegramCfg: {} as TelegramAccountConfig, + bot: { + api: { + setMyCommands, + sendMessage: vi.fn().mockResolvedValue(undefined), + }, + command: vi.fn(), + } as unknown as Parameters[0]["bot"], + cfg, + runtime: { log: vi.fn() } as unknown as Parameters< + typeof registerTelegramNativeCommands + >[0]["runtime"], + accountId: "bot-a", + telegramCfg: {} as TelegramAccountConfig, + allowFrom: [], + groupAllowFrom: [], + replyToMode: "off", + textLimit: 4000, + useAccessGroups: false, + nativeEnabled: true, + nativeSkillsEnabled: true, + nativeDisabledExplicit: false, + resolveGroupPolicy: () => + ({ + allowlistEnabled: false, + allowed: true, + }) as ReturnType< + Parameters[0]["resolveGroupPolicy"] + >, + resolveTelegramGroupConfig: () => ({ + 
groupConfig: undefined, + topicConfig: undefined, }), + shouldSkipUpdate: () => false, + opts: { token: "token" }, }); await vi.waitFor(() => { diff --git a/src/telegram/bot-native-commands.test-helpers.ts b/src/telegram/bot-native-commands.test-helpers.ts index b79d61d48a3..02f1028becf 100644 --- a/src/telegram/bot-native-commands.test-helpers.ts +++ b/src/telegram/bot-native-commands.test-helpers.ts @@ -1,49 +1,160 @@ +import { vi } from "vitest"; import type { OpenClawConfig } from "../config/config.js"; +import type { ChannelGroupPolicy } from "../config/group-policy.js"; import type { TelegramAccountConfig } from "../config/types.js"; import type { RuntimeEnv } from "../runtime.js"; -import type { registerTelegramNativeCommands } from "./bot-native-commands.js"; +import { registerTelegramNativeCommands } from "./bot-native-commands.js"; -type RegisterTelegramNativeCommandParams = Parameters[0]; +type RegisterTelegramNativeCommandsParams = Parameters[0]; +type GetPluginCommandSpecsFn = typeof import("../plugins/commands.js").getPluginCommandSpecs; +type MatchPluginCommandFn = typeof import("../plugins/commands.js").matchPluginCommand; +type ExecutePluginCommandFn = typeof import("../plugins/commands.js").executePluginCommand; -export function createNativeCommandTestParams(params: { - bot: RegisterTelegramNativeCommandParams["bot"]; - cfg?: OpenClawConfig; - runtime?: RuntimeEnv; - accountId?: string; - telegramCfg?: TelegramAccountConfig; - allowFrom?: string[]; - groupAllowFrom?: string[]; - replyToMode?: RegisterTelegramNativeCommandParams["replyToMode"]; - textLimit?: number; - useAccessGroups?: boolean; - nativeEnabled?: boolean; - nativeSkillsEnabled?: boolean; - nativeDisabledExplicit?: boolean; - resolveTelegramGroupConfig?: RegisterTelegramNativeCommandParams["resolveTelegramGroupConfig"]; - opts?: RegisterTelegramNativeCommandParams["opts"]; -}): RegisterTelegramNativeCommandParams { +const pluginCommandMocks = vi.hoisted(() => ({ + 
getPluginCommandSpecs: vi.fn(() => []), + matchPluginCommand: vi.fn(() => null), + executePluginCommand: vi.fn(async () => ({ text: "ok" })), +})); +export const getPluginCommandSpecs = pluginCommandMocks.getPluginCommandSpecs; +export const matchPluginCommand = pluginCommandMocks.matchPluginCommand; +export const executePluginCommand = pluginCommandMocks.executePluginCommand; + +vi.mock("../plugins/commands.js", () => ({ + getPluginCommandSpecs: pluginCommandMocks.getPluginCommandSpecs, + matchPluginCommand: pluginCommandMocks.matchPluginCommand, + executePluginCommand: pluginCommandMocks.executePluginCommand, +})); + +const deliveryMocks = vi.hoisted(() => ({ + deliverReplies: vi.fn(async () => {}), +})); +export const deliverReplies = deliveryMocks.deliverReplies; +vi.mock("./bot/delivery.js", () => ({ deliverReplies: deliveryMocks.deliverReplies })); +vi.mock("../pairing/pairing-store.js", () => ({ + readChannelAllowFromStore: vi.fn(async () => []), +})); + +export function createNativeCommandTestParams( + params: Partial = {}, +): RegisterTelegramNativeCommandsParams { + const log = vi.fn(); return { - bot: params.bot, - cfg: params.cfg ?? {}, - runtime: params.runtime ?? ({} as RuntimeEnv), + bot: + params.bot ?? + ({ + api: { + setMyCommands: vi.fn().mockResolvedValue(undefined), + sendMessage: vi.fn().mockResolvedValue(undefined), + }, + command: vi.fn(), + } as unknown as RegisterTelegramNativeCommandsParams["bot"]), + cfg: params.cfg ?? ({} as OpenClawConfig), + runtime: + params.runtime ?? ({ log } as unknown as RegisterTelegramNativeCommandsParams["runtime"]), accountId: params.accountId ?? "default", - telegramCfg: params.telegramCfg ?? ({} as TelegramAccountConfig), + telegramCfg: params.telegramCfg ?? ({} as RegisterTelegramNativeCommandsParams["telegramCfg"]), allowFrom: params.allowFrom ?? [], groupAllowFrom: params.groupAllowFrom ?? [], replyToMode: params.replyToMode ?? "off", - textLimit: params.textLimit ?? 
4096, + textLimit: params.textLimit ?? 4000, useAccessGroups: params.useAccessGroups ?? false, nativeEnabled: params.nativeEnabled ?? true, - nativeSkillsEnabled: params.nativeSkillsEnabled ?? true, + nativeSkillsEnabled: params.nativeSkillsEnabled ?? false, nativeDisabledExplicit: params.nativeDisabledExplicit ?? false, - resolveGroupPolicy: () => ({ allowlistEnabled: false, allowed: true }), + resolveGroupPolicy: + params.resolveGroupPolicy ?? + (() => + ({ + allowlistEnabled: false, + allowed: true, + }) as ReturnType), resolveTelegramGroupConfig: params.resolveTelegramGroupConfig ?? - (() => ({ - groupConfig: undefined, - topicConfig: undefined, - })), - shouldSkipUpdate: () => false, + (() => ({ groupConfig: undefined, topicConfig: undefined })), + shouldSkipUpdate: params.shouldSkipUpdate ?? (() => false), opts: params.opts ?? { token: "token" }, }; } + +export function createNativeCommandsHarness(params?: { + cfg?: OpenClawConfig; + runtime?: RuntimeEnv; + telegramCfg?: TelegramAccountConfig; + allowFrom?: string[]; + groupAllowFrom?: string[]; + useAccessGroups?: boolean; + nativeEnabled?: boolean; + groupConfig?: Record; + resolveGroupPolicy?: () => ChannelGroupPolicy; +}) { + const handlers: Record Promise> = {}; + const sendMessage = vi.fn().mockResolvedValue(undefined); + const setMyCommands = vi.fn().mockResolvedValue(undefined); + const log = vi.fn(); + const bot = { + api: { + setMyCommands, + sendMessage, + }, + command: (name: string, handler: (ctx: unknown) => Promise) => { + handlers[name] = handler; + }, + } as const; + + registerTelegramNativeCommands({ + bot: bot as unknown as Parameters[0]["bot"], + cfg: params?.cfg ?? ({} as OpenClawConfig), + runtime: params?.runtime ?? ({ log } as unknown as RuntimeEnv), + accountId: "default", + telegramCfg: params?.telegramCfg ?? ({} as TelegramAccountConfig), + allowFrom: params?.allowFrom ?? [], + groupAllowFrom: params?.groupAllowFrom ?? 
[], + replyToMode: "off", + textLimit: 4000, + useAccessGroups: params?.useAccessGroups ?? false, + nativeEnabled: params?.nativeEnabled ?? true, + nativeSkillsEnabled: false, + nativeDisabledExplicit: false, + resolveGroupPolicy: + params?.resolveGroupPolicy ?? + (() => + ({ + allowlistEnabled: false, + allowed: true, + }) as ChannelGroupPolicy), + resolveTelegramGroupConfig: () => ({ + groupConfig: params?.groupConfig as undefined, + topicConfig: undefined, + }), + shouldSkipUpdate: () => false, + opts: { token: "token" }, + }); + + return { handlers, sendMessage, setMyCommands, log, bot }; +} + +export function createTelegramGroupCommandContext(params?: { + senderId?: number; + username?: string; + threadId?: number; +}) { + return { + message: { + chat: { id: -100999, type: "supergroup", is_forum: true }, + from: { + id: params?.senderId ?? 12345, + username: params?.username ?? "testuser", + }, + message_thread_id: params?.threadId ?? 42, + message_id: 1, + date: 1700000000, + }, + match: "", + }; +} + +export function findNotAuthorizedCalls(sendMessage: ReturnType) { + return sendMessage.mock.calls.filter( + (call) => typeof call[1] === "string" && call[1].includes("not authorized"), + ); +} diff --git a/src/telegram/bot-native-commands.test.ts b/src/telegram/bot-native-commands.test.ts index eea0937ad0e..a208649c62b 100644 --- a/src/telegram/bot-native-commands.test.ts +++ b/src/telegram/bot-native-commands.test.ts @@ -6,7 +6,6 @@ import { TELEGRAM_COMMAND_NAME_PATTERN } from "../config/telegram-custom-command import type { TelegramAccountConfig } from "../config/types.js"; import type { RuntimeEnv } from "../runtime.js"; import { registerTelegramNativeCommands } from "./bot-native-commands.js"; -import { createNativeCommandTestParams } from "./bot-native-commands.test-helpers.js"; const { listSkillCommandsForAgents } = vi.hoisted(() => ({ listSkillCommandsForAgents: vi.fn(() => []), @@ -65,7 +64,7 @@ describe("registerTelegramNativeCommands", () => { }); 
const buildParams = (cfg: OpenClawConfig, accountId = "default") => - createNativeCommandTestParams({ + ({ bot: { api: { setMyCommands: vi.fn().mockResolvedValue(undefined), @@ -77,7 +76,28 @@ describe("registerTelegramNativeCommands", () => { runtime: {} as RuntimeEnv, accountId, telegramCfg: {} as TelegramAccountConfig, - }); + allowFrom: [], + groupAllowFrom: [], + replyToMode: "off", + textLimit: 4000, + useAccessGroups: false, + nativeEnabled: true, + nativeSkillsEnabled: true, + nativeDisabledExplicit: false, + resolveGroupPolicy: () => + ({ + allowlistEnabled: false, + allowed: true, + }) as ReturnType< + Parameters[0]["resolveGroupPolicy"] + >, + resolveTelegramGroupConfig: () => ({ + groupConfig: undefined, + topicConfig: undefined, + }), + shouldSkipUpdate: () => false, + opts: { token: "token" }, + }) satisfies Parameters[0]; it("scopes skill commands when account binding exists", () => { const cfg: OpenClawConfig = { diff --git a/src/telegram/bot.fetch-abort.test.ts b/src/telegram/bot.fetch-abort.test.ts index 0d9bd53643b..258215d4c6d 100644 --- a/src/telegram/bot.fetch-abort.test.ts +++ b/src/telegram/bot.fetch-abort.test.ts @@ -3,9 +3,22 @@ import { botCtorSpy } from "./bot.create-telegram-bot.test-harness.js"; import { createTelegramBot } from "./bot.js"; import { getTelegramNetworkErrorOrigin } from "./network-errors.js"; +function createWrappedTelegramClientFetch(proxyFetch: typeof fetch) { + const shutdown = new AbortController(); + botCtorSpy.mockClear(); + createTelegramBot({ + token: "tok", + fetchAbortSignal: shutdown.signal, + proxyFetch, + }); + const clientFetch = (botCtorSpy.mock.calls.at(-1)?.[1] as { client?: { fetch?: unknown } }) + ?.client?.fetch as (input: RequestInfo | URL, init?: RequestInit) => Promise; + expect(clientFetch).toBeTypeOf("function"); + return { clientFetch, shutdown }; +} + describe("createTelegramBot fetch abort", () => { it("aborts wrapped client fetch when fetchAbortSignal aborts", async () => { - const shutdown 
= new AbortController(); const fetchSpy = vi.fn( (_input: RequestInfo | URL, init?: RequestInit) => new Promise((resolve) => { @@ -13,15 +26,9 @@ describe("createTelegramBot fetch abort", () => { signal.addEventListener("abort", () => resolve(signal), { once: true }); }), ); - botCtorSpy.mockClear(); - createTelegramBot({ - token: "tok", - fetchAbortSignal: shutdown.signal, - proxyFetch: fetchSpy as unknown as typeof fetch, - }); - const clientFetch = (botCtorSpy.mock.calls.at(-1)?.[1] as { client?: { fetch?: unknown } }) - ?.client?.fetch as (input: RequestInfo | URL, init?: RequestInit) => Promise; - expect(clientFetch).toBeTypeOf("function"); + const { clientFetch, shutdown } = createWrappedTelegramClientFetch( + fetchSpy as unknown as typeof fetch, + ); const observedSignalPromise = clientFetch("https://example.test"); shutdown.abort(new Error("shutdown")); @@ -32,7 +39,6 @@ describe("createTelegramBot fetch abort", () => { }); it("tags wrapped Telegram fetch failures with the Bot API method", async () => { - const shutdown = new AbortController(); const fetchError = Object.assign(new TypeError("fetch failed"), { cause: Object.assign(new Error("connect timeout"), { code: "UND_ERR_CONNECT_TIMEOUT", @@ -41,15 +47,7 @@ describe("createTelegramBot fetch abort", () => { const fetchSpy = vi.fn(async () => { throw fetchError; }); - botCtorSpy.mockClear(); - createTelegramBot({ - token: "tok", - fetchAbortSignal: shutdown.signal, - proxyFetch: fetchSpy as unknown as typeof fetch, - }); - const clientFetch = (botCtorSpy.mock.calls.at(-1)?.[1] as { client?: { fetch?: unknown } }) - ?.client?.fetch as (input: RequestInfo | URL, init?: RequestInit) => Promise; - expect(clientFetch).toBeTypeOf("function"); + const { clientFetch } = createWrappedTelegramClientFetch(fetchSpy as unknown as typeof fetch); await expect(clientFetch("https://api.telegram.org/bot123456:ABC/getUpdates")).rejects.toBe( fetchError, @@ -61,7 +59,6 @@ describe("createTelegramBot fetch abort", () => { 
}); it("preserves the original fetch error when tagging cannot attach metadata", async () => { - const shutdown = new AbortController(); const frozenError = Object.freeze( Object.assign(new TypeError("fetch failed"), { cause: Object.assign(new Error("connect timeout"), { @@ -72,15 +69,7 @@ describe("createTelegramBot fetch abort", () => { const fetchSpy = vi.fn(async () => { throw frozenError; }); - botCtorSpy.mockClear(); - createTelegramBot({ - token: "tok", - fetchAbortSignal: shutdown.signal, - proxyFetch: fetchSpy as unknown as typeof fetch, - }); - const clientFetch = (botCtorSpy.mock.calls.at(-1)?.[1] as { client?: { fetch?: unknown } }) - ?.client?.fetch as (input: RequestInfo | URL, init?: RequestInit) => Promise; - expect(clientFetch).toBeTypeOf("function"); + const { clientFetch } = createWrappedTelegramClientFetch(fetchSpy as unknown as typeof fetch); await expect(clientFetch("https://api.telegram.org/bot123456:ABC/getUpdates")).rejects.toBe( frozenError, diff --git a/src/telegram/exec-approvals-handler.ts b/src/telegram/exec-approvals-handler.ts index 65488928469..01e3b51bedd 100644 --- a/src/telegram/exec-approvals-handler.ts +++ b/src/telegram/exec-approvals-handler.ts @@ -1,5 +1,4 @@ import type { OpenClawConfig } from "../config/config.js"; -import { loadSessionStore, resolveStorePath } from "../config/sessions.js"; import { GatewayClient } from "../gateway/client.js"; import { createOperatorApprovalsGatewayClient } from "../gateway/operator-approvals-client.js"; import type { EventFrame } from "../gateway/protocol/index.js"; @@ -8,8 +7,8 @@ import { buildExecApprovalPendingReplyPayload, type ExecApprovalPendingReplyParams, } from "../infra/exec-approval-reply.js"; +import { resolveExecApprovalSessionTarget } from "../infra/exec-approval-session-target.js"; import type { ExecApprovalRequest, ExecApprovalResolved } from "../infra/exec-approvals.js"; -import { resolveSessionDeliveryTarget } from "../infra/outbound/targets.js"; import { 
createSubsystemLogger } from "../logging/subsystem.js"; import { normalizeAccountId, parseAgentSessionKey } from "../routing/session-key.js"; import type { RuntimeEnv } from "../runtime.js"; @@ -120,40 +119,14 @@ function resolveRequestSessionTarget(params: { cfg: OpenClawConfig; request: ExecApprovalRequest; }): { to: string; accountId?: string; threadId?: number; channel?: string } | null { - const sessionKey = params.request.request.sessionKey?.trim(); - if (!sessionKey) { - return null; - } - const parsed = parseAgentSessionKey(sessionKey); - const agentId = parsed?.agentId ?? params.request.request.agentId ?? "main"; - const storePath = resolveStorePath(params.cfg.session?.store, { agentId }); - const store = loadSessionStore(storePath); - const entry = store[sessionKey]; - if (!entry) { - return null; - } - const target = resolveSessionDeliveryTarget({ - entry, - requestedChannel: "last", + return resolveExecApprovalSessionTarget({ + cfg: params.cfg, + request: params.request, turnSourceChannel: params.request.request.turnSourceChannel ?? undefined, turnSourceTo: params.request.request.turnSourceTo ?? undefined, turnSourceAccountId: params.request.request.turnSourceAccountId ?? undefined, turnSourceThreadId: params.request.request.turnSourceThreadId ?? undefined, }); - if (!target.to) { - return null; - } - return { - channel: target.channel ?? undefined, - to: target.to, - accountId: target.accountId ?? undefined, - threadId: - typeof target.threadId === "number" - ? target.threadId - : typeof target.threadId === "string" - ? 
Number.parseInt(target.threadId, 10) - : undefined, - }; } function resolveTelegramSourceTarget(params: { diff --git a/src/telegram/fetch.test.ts b/src/telegram/fetch.test.ts index 4d6658e0327..73f46c9ed5a 100644 --- a/src/telegram/fetch.test.ts +++ b/src/telegram/fetch.test.ts @@ -88,6 +88,44 @@ function buildFetchFallbackError(code: string) { }); } +const STICKY_IPV4_FALLBACK_NETWORK = { + network: { + autoSelectFamily: true, + dnsResultOrder: "ipv4first" as const, + }, +}; + +async function runDefaultStickyIpv4FallbackProbe(code = "EHOSTUNREACH"): Promise { + undiciFetch + .mockRejectedValueOnce(buildFetchFallbackError(code)) + .mockResolvedValueOnce({ ok: true } as Response) + .mockResolvedValueOnce({ ok: true } as Response); + + const resolved = resolveTelegramFetchOrThrow(undefined, STICKY_IPV4_FALLBACK_NETWORK); + await resolved("https://api.telegram.org/botx/sendMessage"); + await resolved("https://api.telegram.org/botx/sendChatAction"); +} + +function expectPinnedIpv4ConnectDispatcher(args: { + pinnedCall: number; + firstCall?: number; + followupCall?: number; +}): void { + const pinnedDispatcher = getDispatcherFromUndiciCall(args.pinnedCall); + expect(pinnedDispatcher?.options?.connect).toEqual( + expect.objectContaining({ + family: 4, + autoSelectFamily: false, + }), + ); + if (args.firstCall) { + expect(getDispatcherFromUndiciCall(args.firstCall)).not.toBe(pinnedDispatcher); + } + if (args.followupCall) { + expect(getDispatcherFromUndiciCall(args.followupCall)).toBe(pinnedDispatcher); + } +} + afterEach(() => { undiciFetch.mockReset(); setGlobalDispatcher.mockReset(); @@ -307,9 +345,8 @@ describe("resolveTelegramFetch", () => { it("treats ALL_PROXY-only env as direct transport and arms sticky IPv4 fallback", async () => { vi.stubEnv("ALL_PROXY", "socks5://127.0.0.1:1080"); - const fetchError = buildFetchFallbackError("EHOSTUNREACH"); undiciFetch - .mockRejectedValueOnce(fetchError) + .mockRejectedValueOnce(buildFetchFallbackError("EHOSTUNREACH")) 
.mockResolvedValueOnce({ ok: true } as Response) .mockResolvedValueOnce({ ok: true } as Response); @@ -327,18 +364,11 @@ describe("resolveTelegramFetch", () => { expect(EnvHttpProxyAgentCtor).not.toHaveBeenCalled(); expect(AgentCtor).toHaveBeenCalledTimes(2); - const firstDispatcher = getDispatcherFromUndiciCall(1); - const secondDispatcher = getDispatcherFromUndiciCall(2); - const thirdDispatcher = getDispatcherFromUndiciCall(3); - - expect(firstDispatcher).not.toBe(secondDispatcher); - expect(secondDispatcher).toBe(thirdDispatcher); - expect(secondDispatcher?.options?.connect).toEqual( - expect.objectContaining({ - family: 4, - autoSelectFamily: false, - }), - ); + expectPinnedIpv4ConnectDispatcher({ + firstCall: 1, + pinnedCall: 2, + followupCall: 3, + }); expect(transport.pinnedDispatcherPolicy).toEqual( expect.objectContaining({ mode: "direct", @@ -351,134 +381,52 @@ describe("resolveTelegramFetch", () => { EnvHttpProxyAgentCtor.mockImplementationOnce(function ThrowingEnvProxyAgent() { throw new Error("invalid proxy config"); }); - const fetchError = buildFetchFallbackError("EHOSTUNREACH"); - undiciFetch - .mockRejectedValueOnce(fetchError) - .mockResolvedValueOnce({ ok: true } as Response) - .mockResolvedValueOnce({ ok: true } as Response); - - const resolved = resolveTelegramFetchOrThrow(undefined, { - network: { - autoSelectFamily: true, - dnsResultOrder: "ipv4first", - }, - }); - - await resolved("https://api.telegram.org/botx/sendMessage"); - await resolved("https://api.telegram.org/botx/sendChatAction"); + await runDefaultStickyIpv4FallbackProbe(); expect(undiciFetch).toHaveBeenCalledTimes(3); expect(EnvHttpProxyAgentCtor).toHaveBeenCalledTimes(1); expect(AgentCtor).toHaveBeenCalledTimes(2); - const firstDispatcher = getDispatcherFromUndiciCall(1); - const secondDispatcher = getDispatcherFromUndiciCall(2); - const thirdDispatcher = getDispatcherFromUndiciCall(3); - - expect(firstDispatcher).not.toBe(secondDispatcher); - 
expect(secondDispatcher).toBe(thirdDispatcher); - expect(secondDispatcher?.options?.connect).toEqual( - expect.objectContaining({ - family: 4, - autoSelectFamily: false, - }), - ); + expectPinnedIpv4ConnectDispatcher({ + firstCall: 1, + pinnedCall: 2, + followupCall: 3, + }); }); it("arms sticky IPv4 fallback when NO_PROXY bypasses telegram under env proxy", async () => { vi.stubEnv("HTTPS_PROXY", "http://127.0.0.1:7890"); vi.stubEnv("NO_PROXY", "api.telegram.org"); - const fetchError = buildFetchFallbackError("EHOSTUNREACH"); - undiciFetch - .mockRejectedValueOnce(fetchError) - .mockResolvedValueOnce({ ok: true } as Response) - .mockResolvedValueOnce({ ok: true } as Response); - - const resolved = resolveTelegramFetchOrThrow(undefined, { - network: { - autoSelectFamily: true, - dnsResultOrder: "ipv4first", - }, - }); - - await resolved("https://api.telegram.org/botx/sendMessage"); - await resolved("https://api.telegram.org/botx/sendChatAction"); + await runDefaultStickyIpv4FallbackProbe(); expect(undiciFetch).toHaveBeenCalledTimes(3); expect(EnvHttpProxyAgentCtor).toHaveBeenCalledTimes(2); expect(AgentCtor).not.toHaveBeenCalled(); - const firstDispatcher = getDispatcherFromUndiciCall(1); - const secondDispatcher = getDispatcherFromUndiciCall(2); - const thirdDispatcher = getDispatcherFromUndiciCall(3); - - expect(firstDispatcher).not.toBe(secondDispatcher); - expect(secondDispatcher).toBe(thirdDispatcher); - expect(secondDispatcher?.options?.connect).toEqual( - expect.objectContaining({ - family: 4, - autoSelectFamily: false, - }), - ); + expectPinnedIpv4ConnectDispatcher({ + firstCall: 1, + pinnedCall: 2, + followupCall: 3, + }); }); it("uses no_proxy over NO_PROXY when deciding env-proxy bypass", async () => { vi.stubEnv("HTTPS_PROXY", "http://127.0.0.1:7890"); vi.stubEnv("NO_PROXY", ""); vi.stubEnv("no_proxy", "api.telegram.org"); - const fetchError = buildFetchFallbackError("EHOSTUNREACH"); - undiciFetch - .mockRejectedValueOnce(fetchError) - 
.mockResolvedValueOnce({ ok: true } as Response) - .mockResolvedValueOnce({ ok: true } as Response); - - const resolved = resolveTelegramFetchOrThrow(undefined, { - network: { - autoSelectFamily: true, - dnsResultOrder: "ipv4first", - }, - }); - - await resolved("https://api.telegram.org/botx/sendMessage"); - await resolved("https://api.telegram.org/botx/sendChatAction"); + await runDefaultStickyIpv4FallbackProbe(); expect(EnvHttpProxyAgentCtor).toHaveBeenCalledTimes(2); - const secondDispatcher = getDispatcherFromUndiciCall(2); - expect(secondDispatcher?.options?.connect).toEqual( - expect.objectContaining({ - family: 4, - autoSelectFamily: false, - }), - ); + expectPinnedIpv4ConnectDispatcher({ pinnedCall: 2 }); }); it("matches whitespace and wildcard no_proxy entries like EnvHttpProxyAgent", async () => { vi.stubEnv("HTTPS_PROXY", "http://127.0.0.1:7890"); vi.stubEnv("no_proxy", "localhost *.telegram.org"); - const fetchError = buildFetchFallbackError("EHOSTUNREACH"); - undiciFetch - .mockRejectedValueOnce(fetchError) - .mockResolvedValueOnce({ ok: true } as Response) - .mockResolvedValueOnce({ ok: true } as Response); - - const resolved = resolveTelegramFetchOrThrow(undefined, { - network: { - autoSelectFamily: true, - dnsResultOrder: "ipv4first", - }, - }); - - await resolved("https://api.telegram.org/botx/sendMessage"); - await resolved("https://api.telegram.org/botx/sendChatAction"); + await runDefaultStickyIpv4FallbackProbe(); expect(EnvHttpProxyAgentCtor).toHaveBeenCalledTimes(2); - const secondDispatcher = getDispatcherFromUndiciCall(2); - expect(secondDispatcher?.options?.connect).toEqual( - expect.objectContaining({ - family: 4, - autoSelectFamily: false, - }), - ); + expectPinnedIpv4ConnectDispatcher({ pinnedCall: 2 }); }); it("fails closed when explicit proxy dispatcher initialization fails", async () => { diff --git a/src/telegram/lane-delivery.test.ts b/src/telegram/lane-delivery.test.ts index 3a165147d84..3ddad092d7a 100644 --- 
a/src/telegram/lane-delivery.test.ts +++ b/src/telegram/lane-delivery.test.ts @@ -84,6 +84,39 @@ function createHarness(params?: { }; } +async function deliverFinalAnswer(harness: ReturnType, text: string) { + return harness.deliverLaneText({ + laneName: "answer", + text, + payload: { text }, + infoKind: "final", + }); +} + +function seedArchivedAnswerPreview(harness: ReturnType) { + harness.archivedAnswerPreviews.push({ + messageId: 5555, + textSnapshot: "Partial streaming...", + deleteIfUnused: true, + }); +} + +async function expectFinalEditFallbackToSend(params: { + harness: ReturnType; + text: string; + expectedLogSnippet: string; +}) { + const result = await deliverFinalAnswer(params.harness, params.text); + expect(result).toBe("sent"); + expect(params.harness.editPreview).toHaveBeenCalledTimes(1); + expect(params.harness.sendPayload).toHaveBeenCalledWith( + expect.objectContaining({ text: params.text }), + ); + expect(params.harness.log).toHaveBeenCalledWith( + expect.stringContaining(params.expectedLogSnippet), + ); +} + describe("createLaneTextDeliverer", () => { it("finalizes text-only replies by editing an existing preview message", async () => { const harness = createHarness({ answerMessageId: 999 }); @@ -198,21 +231,11 @@ describe("createLaneTextDeliverer", () => { const harness = createHarness({ answerMessageId: 999 }); harness.editPreview.mockRejectedValue(new Error("400: Bad Request: message to edit not found")); - const result = await harness.deliverLaneText({ - laneName: "answer", + await expectFinalEditFallbackToSend({ + harness, text: "Hello final", - payload: { text: "Hello final" }, - infoKind: "final", + expectedLogSnippet: "edit target missing with no alternate preview; falling back", }); - - expect(result).toBe("sent"); - expect(harness.editPreview).toHaveBeenCalledTimes(1); - expect(harness.sendPayload).toHaveBeenCalledWith( - expect.objectContaining({ text: "Hello final" }), - ); - expect(harness.log).toHaveBeenCalledWith( - 
expect.stringContaining("edit target missing with no alternate preview; falling back"), - ); }); it("falls back to sendPayload when the final edit fails before reaching Telegram", async () => { @@ -451,19 +474,10 @@ describe("createLaneTextDeliverer", () => { it("falls back when an archived preview edit target is missing and no alternate preview exists", async () => { const harness = createHarness(); - harness.archivedAnswerPreviews.push({ - messageId: 5555, - textSnapshot: "Partial streaming...", - deleteIfUnused: true, - }); + seedArchivedAnswerPreview(harness); harness.editPreview.mockRejectedValue(new Error("400: Bad Request: message to edit not found")); - const result = await harness.deliverLaneText({ - laneName: "answer", - text: "Complete final answer", - payload: { text: "Complete final answer" }, - infoKind: "final", - }); + const result = await deliverFinalAnswer(harness, "Complete final answer"); expect(harness.editPreview).toHaveBeenCalledTimes(1); expect(harness.sendPayload).toHaveBeenCalledWith( @@ -475,19 +489,10 @@ describe("createLaneTextDeliverer", () => { it("keeps the active preview when an archived final edit target is missing", async () => { const harness = createHarness({ answerMessageId: 999 }); - harness.archivedAnswerPreviews.push({ - messageId: 5555, - textSnapshot: "Partial streaming...", - deleteIfUnused: true, - }); + seedArchivedAnswerPreview(harness); harness.editPreview.mockRejectedValue(new Error("400: Bad Request: message to edit not found")); - const result = await harness.deliverLaneText({ - laneName: "answer", - text: "Complete final answer", - payload: { text: "Complete final answer" }, - infoKind: "final", - }); + const result = await deliverFinalAnswer(harness, "Complete final answer"); expect(harness.editPreview).toHaveBeenCalledTimes(1); expect(harness.sendPayload).not.toHaveBeenCalled(); @@ -502,21 +507,11 @@ describe("createLaneTextDeliverer", () => { const err = Object.assign(new Error("403: Forbidden"), { error_code: 
403 }); harness.editPreview.mockRejectedValue(err); - const result = await harness.deliverLaneText({ - laneName: "answer", + await expectFinalEditFallbackToSend({ + harness, text: "Hello final", - payload: { text: "Hello final" }, - infoKind: "final", + expectedLogSnippet: "rejected by Telegram (client error); falling back", }); - - expect(result).toBe("sent"); - expect(harness.editPreview).toHaveBeenCalledTimes(1); - expect(harness.sendPayload).toHaveBeenCalledWith( - expect.objectContaining({ text: "Hello final" }), - ); - expect(harness.log).toHaveBeenCalledWith( - expect.stringContaining("rejected by Telegram (client error); falling back"), - ); }); it("retains preview on 502 with error_code during final (ambiguous server error)", async () => { diff --git a/src/telegram/monitor.test.ts b/src/telegram/monitor.test.ts index d7ebef73373..0b28734f835 100644 --- a/src/telegram/monitor.test.ts +++ b/src/telegram/monitor.test.ts @@ -129,6 +129,28 @@ function mockRunOnceAndAbort(abort: AbortController) { runSpy.mockImplementationOnce(() => makeAbortRunner(abort)); } +function mockRunOnceWithStalledPollingRunner(): { + stop: ReturnType void | Promise>>; +} { + let running = true; + let releaseTask: (() => void) | undefined; + const stop = vi.fn(async () => { + running = false; + releaseTask?.(); + }); + runSpy.mockImplementationOnce(() => + makeRunnerStub({ + task: () => + new Promise((resolve) => { + releaseTask = resolve; + }), + stop, + isRunning: () => running, + }), + ); + return { stop }; +} + function expectRecoverableRetryState(expectedRunCalls: number) { expect(computeBackoff).toHaveBeenCalled(); expect(sleepWithAbort).toHaveBeenCalled(); @@ -434,31 +456,8 @@ describe("monitorTelegramProvider (grammY)", () => { it("force-restarts polling when unhandled network rejection stalls runner", async () => { const abort = new AbortController(); - let running = true; - let releaseTask: (() => void) | undefined; - const stop = vi.fn(async () => { - running = false; - 
releaseTask?.(); - }); - - runSpy - .mockImplementationOnce(() => - makeRunnerStub({ - task: () => - new Promise((resolve) => { - releaseTask = resolve; - }), - stop, - isRunning: () => running, - }), - ) - .mockImplementationOnce(() => - makeRunnerStub({ - task: async () => { - abort.abort(); - }, - }), - ); + const { stop } = mockRunOnceWithStalledPollingRunner(); + mockRunOnceAndAbort(abort); const monitor = monitorTelegramProvider({ token: "tok", abortSignal: abort.signal }); await vi.waitFor(() => expect(runSpy).toHaveBeenCalledTimes(1)); @@ -474,31 +473,8 @@ describe("monitorTelegramProvider (grammY)", () => { it("aborts the active Telegram fetch when unhandled network rejection forces restart", async () => { const abort = new AbortController(); - let running = true; - let releaseTask: (() => void) | undefined; - const stop = vi.fn(async () => { - running = false; - releaseTask?.(); - }); - - runSpy - .mockImplementationOnce(() => - makeRunnerStub({ - task: () => - new Promise((resolve) => { - releaseTask = resolve; - }), - stop, - isRunning: () => running, - }), - ) - .mockImplementationOnce(() => - makeRunnerStub({ - task: async () => { - abort.abort(); - }, - }), - ); + const { stop } = mockRunOnceWithStalledPollingRunner(); + mockRunOnceAndAbort(abort); const monitor = monitorTelegramProvider({ token: "tok", abortSignal: abort.signal }); await vi.waitFor(() => expect(createTelegramBotCalls.length).toBeGreaterThanOrEqual(1)); @@ -515,23 +491,7 @@ describe("monitorTelegramProvider (grammY)", () => { it("ignores unrelated process-level network errors while telegram polling is active", async () => { const abort = new AbortController(); - let running = true; - let releaseTask: (() => void) | undefined; - const stop = vi.fn(async () => { - running = false; - releaseTask?.(); - }); - - runSpy.mockImplementationOnce(() => - makeRunnerStub({ - task: () => - new Promise((resolve) => { - releaseTask = resolve; - }), - stop, - isRunning: () => running, - }), - ); + 
const { stop } = mockRunOnceWithStalledPollingRunner(); const monitor = monitorTelegramProvider({ token: "tok", abortSignal: abort.signal }); await vi.waitFor(() => expect(runSpy).toHaveBeenCalledTimes(1)); @@ -600,31 +560,8 @@ describe("monitorTelegramProvider (grammY)", () => { it("force-restarts polling when getUpdates stalls (watchdog)", async () => { vi.useFakeTimers({ shouldAdvanceTime: true }); const abort = new AbortController(); - let running = true; - let releaseTask: (() => void) | undefined; - const stop = vi.fn(async () => { - running = false; - releaseTask?.(); - }); - - runSpy - .mockImplementationOnce(() => - makeRunnerStub({ - task: () => - new Promise((resolve) => { - releaseTask = resolve; - }), - stop, - isRunning: () => running, - }), - ) - .mockImplementationOnce(() => - makeRunnerStub({ - task: async () => { - abort.abort(); - }, - }), - ); + const { stop } = mockRunOnceWithStalledPollingRunner(); + mockRunOnceAndAbort(abort); const monitor = monitorTelegramProvider({ token: "tok", abortSignal: abort.signal }); await vi.waitFor(() => expect(runSpy).toHaveBeenCalledTimes(1)); diff --git a/src/telegram/network-config.test.ts b/src/telegram/network-config.test.ts index 7a7dd197c75..70de5f46826 100644 --- a/src/telegram/network-config.test.ts +++ b/src/telegram/network-config.test.ts @@ -1,4 +1,5 @@ import { afterEach, describe, expect, it, vi } from "vitest"; +import type { TelegramNetworkConfig } from "../config/types.telegram.js"; import { resetTelegramNetworkConfigStateForTests, resolveTelegramAutoSelectFamilyDecision, @@ -18,62 +19,57 @@ describe("resolveTelegramAutoSelectFamilyDecision", () => { resetTelegramNetworkConfigStateForTests(); }); - it("prefers env enable over env disable", () => { - const decision = resolveTelegramAutoSelectFamilyDecision({ + it.each([ + { + name: "prefers env enable over env disable", env: { OPENCLAW_TELEGRAM_ENABLE_AUTO_SELECT_FAMILY: "1", OPENCLAW_TELEGRAM_DISABLE_AUTO_SELECT_FAMILY: "1", }, - nodeMajor: 22, 
- }); - expect(decision).toEqual({ - value: true, - source: "env:OPENCLAW_TELEGRAM_ENABLE_AUTO_SELECT_FAMILY", - }); - }); - - it("uses env disable when set", () => { - const decision = resolveTelegramAutoSelectFamilyDecision({ + expected: { + value: true, + source: "env:OPENCLAW_TELEGRAM_ENABLE_AUTO_SELECT_FAMILY", + }, + }, + { + name: "uses env disable when set", env: { OPENCLAW_TELEGRAM_DISABLE_AUTO_SELECT_FAMILY: "1" }, - nodeMajor: 22, - }); - expect(decision).toEqual({ - value: false, - source: "env:OPENCLAW_TELEGRAM_DISABLE_AUTO_SELECT_FAMILY", - }); - }); - - it("prefers env enable over config", () => { - const decision = resolveTelegramAutoSelectFamilyDecision({ + expected: { + value: false, + source: "env:OPENCLAW_TELEGRAM_DISABLE_AUTO_SELECT_FAMILY", + }, + }, + { + name: "prefers env enable over config", env: { OPENCLAW_TELEGRAM_ENABLE_AUTO_SELECT_FAMILY: "1" }, network: { autoSelectFamily: false }, - nodeMajor: 22, - }); - expect(decision).toEqual({ - value: true, - source: "env:OPENCLAW_TELEGRAM_ENABLE_AUTO_SELECT_FAMILY", - }); - }); - - it("prefers env disable over config", () => { - const decision = resolveTelegramAutoSelectFamilyDecision({ + expected: { + value: true, + source: "env:OPENCLAW_TELEGRAM_ENABLE_AUTO_SELECT_FAMILY", + }, + }, + { + name: "prefers env disable over config", env: { OPENCLAW_TELEGRAM_DISABLE_AUTO_SELECT_FAMILY: "1" }, network: { autoSelectFamily: true }, - nodeMajor: 22, - }); - expect(decision).toEqual({ - value: false, - source: "env:OPENCLAW_TELEGRAM_DISABLE_AUTO_SELECT_FAMILY", - }); - }); - - it("uses config override when provided", () => { - const decision = resolveTelegramAutoSelectFamilyDecision({ + expected: { + value: false, + source: "env:OPENCLAW_TELEGRAM_DISABLE_AUTO_SELECT_FAMILY", + }, + }, + { + name: "uses config override when provided", env: {}, network: { autoSelectFamily: true }, + expected: { value: true, source: "config" }, + }, + ])("$name", ({ env, network, expected }) => { + const decision = 
resolveTelegramAutoSelectFamilyDecision({ + env, + network, nodeMajor: 22, }); - expect(decision).toEqual({ value: true, source: "config" }); + expect(decision).toEqual(expected); }); it("defaults to enable on Node 22", () => { @@ -87,41 +83,44 @@ describe("resolveTelegramAutoSelectFamilyDecision", () => { }); describe("WSL2 detection", () => { - it("disables autoSelectFamily on WSL2", () => { - vi.mocked(isWSL2Sync).mockReturnValue(true); - const decision = resolveTelegramAutoSelectFamilyDecision({ env: {}, nodeMajor: 22 }); - expect(decision).toEqual({ value: false, source: "default-wsl2" }); - }); - - it("respects config override on WSL2", () => { - vi.mocked(isWSL2Sync).mockReturnValue(true); - const decision = resolveTelegramAutoSelectFamilyDecision({ + it.each([ + { + name: "disables autoSelectFamily on WSL2", + env: {}, + expected: { value: false, source: "default-wsl2" }, + }, + { + name: "respects config override on WSL2", env: {}, network: { autoSelectFamily: true }, - nodeMajor: 22, - }); - expect(decision).toEqual({ value: true, source: "config" }); - }); - - it("respects env override on WSL2", () => { - vi.mocked(isWSL2Sync).mockReturnValue(true); - const decision = resolveTelegramAutoSelectFamilyDecision({ + expected: { value: true, source: "config" }, + }, + { + name: "respects env override on WSL2", env: { OPENCLAW_TELEGRAM_ENABLE_AUTO_SELECT_FAMILY: "1" }, + expected: { + value: true, + source: "env:OPENCLAW_TELEGRAM_ENABLE_AUTO_SELECT_FAMILY", + }, + }, + { + name: "uses Node 22 default when not on WSL2", + wsl2: false, + env: {}, + expected: { value: true, source: "default-node22" }, + }, + ])("$name", ({ env, network, expected, wsl2 = true }) => { + vi.mocked(isWSL2Sync).mockReturnValue(wsl2); + const decision = resolveTelegramAutoSelectFamilyDecision({ + env, + network, nodeMajor: 22, }); - expect(decision).toEqual({ - value: true, - source: "env:OPENCLAW_TELEGRAM_ENABLE_AUTO_SELECT_FAMILY", - }); - }); - - it("uses Node 22 default when not on 
WSL2", () => { - vi.mocked(isWSL2Sync).mockReturnValue(false); - const decision = resolveTelegramAutoSelectFamilyDecision({ env: {}, nodeMajor: 22 }); - expect(decision).toEqual({ value: true, source: "default-node22" }); + expect(decision).toEqual(expected); }); it("memoizes WSL2 detection across repeated defaults", () => { + vi.mocked(isWSL2Sync).mockReturnValue(true); vi.mocked(isWSL2Sync).mockClear(); vi.mocked(isWSL2Sync).mockReturnValue(false); resolveTelegramAutoSelectFamilyDecision({ env: {}, nodeMajor: 22 }); @@ -132,23 +131,66 @@ describe("resolveTelegramAutoSelectFamilyDecision", () => { }); describe("resolveTelegramDnsResultOrderDecision", () => { - it("uses env override when provided", () => { - const decision = resolveTelegramDnsResultOrderDecision({ + it.each([ + { + name: "uses env override when provided", env: { OPENCLAW_TELEGRAM_DNS_RESULT_ORDER: "verbatim" }, nodeMajor: 22, - }); - expect(decision).toEqual({ - value: "verbatim", - source: "env:OPENCLAW_TELEGRAM_DNS_RESULT_ORDER", - }); - }); - - it("uses config override when provided", () => { - const decision = resolveTelegramDnsResultOrderDecision({ + expected: { + value: "verbatim", + source: "env:OPENCLAW_TELEGRAM_DNS_RESULT_ORDER", + }, + }, + { + name: "normalizes trimmed env values", + env: { OPENCLAW_TELEGRAM_DNS_RESULT_ORDER: " IPV4FIRST " }, + nodeMajor: 20, + expected: { + value: "ipv4first", + source: "env:OPENCLAW_TELEGRAM_DNS_RESULT_ORDER", + }, + }, + { + name: "uses config override when provided", network: { dnsResultOrder: "ipv4first" }, nodeMajor: 20, + expected: { value: "ipv4first", source: "config" }, + }, + { + name: "normalizes trimmed config values", + network: { dnsResultOrder: " Verbatim " } as TelegramNetworkConfig & { + dnsResultOrder: string; + }, + nodeMajor: 20, + expected: { value: "verbatim", source: "config" }, + }, + { + name: "ignores invalid env values and falls back to config", + env: { OPENCLAW_TELEGRAM_DNS_RESULT_ORDER: "bogus" }, + network: { 
dnsResultOrder: "ipv4first" }, + nodeMajor: 20, + expected: { value: "ipv4first", source: "config" }, + }, + { + name: "ignores invalid env and config values before applying Node 22 default", + env: { OPENCLAW_TELEGRAM_DNS_RESULT_ORDER: "bogus" }, + network: { dnsResultOrder: "invalid" } as TelegramNetworkConfig & { dnsResultOrder: string }, + nodeMajor: 22, + expected: { value: "ipv4first", source: "default-node22" }, + }, + ] satisfies Array<{ + name: string; + env?: NodeJS.ProcessEnv; + network?: TelegramNetworkConfig | (TelegramNetworkConfig & { dnsResultOrder: string }); + nodeMajor: number; + expected: ReturnType; + }>)("$name", ({ env, network, nodeMajor, expected }) => { + const decision = resolveTelegramDnsResultOrderDecision({ + env, + network, + nodeMajor, }); - expect(decision).toEqual({ value: "ipv4first", source: "config" }); + expect(decision).toEqual(expected); }); it("defaults to ipv4first on Node 22", () => { diff --git a/src/telegram/network-errors.test.ts b/src/telegram/network-errors.test.ts index 56106a292b8..f8437aa2a2f 100644 --- a/src/telegram/network-errors.test.ts +++ b/src/telegram/network-errors.test.ts @@ -9,6 +9,11 @@ import { tagTelegramNetworkError, } from "./network-errors.js"; +const errorWithCode = (message: string, code: string) => + Object.assign(new Error(message), { code }); +const errorWithTelegramCode = (message: string, error_code: number) => + Object.assign(new Error(message), { error_code }); + describe("isRecoverableTelegramNetworkError", () => { it("tracks Telegram polling origin separately from generic network matching", () => { const slackDnsError = Object.assign( @@ -32,16 +37,12 @@ describe("isRecoverableTelegramNetworkError", () => { expect(isTelegramPollingNetworkError(slackDnsError)).toBe(true); }); - it("detects recoverable error codes", () => { - const err = Object.assign(new Error("timeout"), { code: "ETIMEDOUT" }); - expect(isRecoverableTelegramNetworkError(err)).toBe(true); - }); - - it("detects additional 
recoverable error codes", () => { - const aborted = Object.assign(new Error("aborted"), { code: "ECONNABORTED" }); - const network = Object.assign(new Error("network"), { code: "ERR_NETWORK" }); - expect(isRecoverableTelegramNetworkError(aborted)).toBe(true); - expect(isRecoverableTelegramNetworkError(network)).toBe(true); + it.each([ + ["ETIMEDOUT", "timeout"], + ["ECONNABORTED", "aborted"], + ["ERR_NETWORK", "network"], + ])("detects recoverable error code %s", (code, message) => { + expect(isRecoverableTelegramNetworkError(errorWithCode(message, code))).toBe(true); }); it("detects AbortError names", () => { @@ -69,6 +70,19 @@ describe("isRecoverableTelegramNetworkError", () => { expect(isRecoverableTelegramNetworkError(err, { context: "polling" })).toBe(true); }); + it("honors allowMessageMatch=false for broad snippet matches", () => { + expect( + isRecoverableTelegramNetworkError(new Error("Undici: socket failure"), { + allowMessageMatch: false, + }), + ).toBe(false); + expect( + isRecoverableTelegramNetworkError(new Error("TypeError: fetch failed"), { + allowMessageMatch: false, + }), + ).toBe(true); + }); + it("skips broad message matches for send context", () => { const networkRequestErr = new Error("Network request for 'sendMessage' failed!"); expect(isRecoverableTelegramNetworkError(networkRequestErr, { context: "send" })).toBe(false); @@ -97,6 +111,14 @@ describe("isRecoverableTelegramNetworkError", () => { expect(isRecoverableTelegramNetworkError(err)).toBe(true); }); + it("normalizes blank tagged origins to null and finds nested tags", () => { + const inner = new Error("inner"); + tagTelegramNetworkError(inner, { method: " ", url: " " }); + const outer = Object.assign(new Error("outer"), { cause: inner }); + expect(getTelegramNetworkErrorOrigin(outer)).toEqual({ method: null, url: null }); + expect(isTelegramPollingNetworkError(outer)).toBe(false); + }); + // Grammy HttpError tests (issue #3815) // Grammy wraps fetch errors in .error property, not 
.cause describe("Grammy HttpError", () => { @@ -138,49 +160,18 @@ describe("isRecoverableTelegramNetworkError", () => { }); describe("isSafeToRetrySendError", () => { - it("allows retry for ECONNREFUSED (pre-connect, message not sent)", () => { - const err = Object.assign(new Error("connect ECONNREFUSED"), { code: "ECONNREFUSED" }); - expect(isSafeToRetrySendError(err)).toBe(true); - }); - - it("allows retry for ENOTFOUND (DNS failure, message not sent)", () => { - const err = Object.assign(new Error("getaddrinfo ENOTFOUND"), { code: "ENOTFOUND" }); - expect(isSafeToRetrySendError(err)).toBe(true); - }); - - it("allows retry for EAI_AGAIN (transient DNS, message not sent)", () => { - const err = Object.assign(new Error("getaddrinfo EAI_AGAIN"), { code: "EAI_AGAIN" }); - expect(isSafeToRetrySendError(err)).toBe(true); - }); - - it("allows retry for ENETUNREACH (no route to host, message not sent)", () => { - const err = Object.assign(new Error("connect ENETUNREACH"), { code: "ENETUNREACH" }); - expect(isSafeToRetrySendError(err)).toBe(true); - }); - - it("allows retry for EHOSTUNREACH (host unreachable, message not sent)", () => { - const err = Object.assign(new Error("connect EHOSTUNREACH"), { code: "EHOSTUNREACH" }); - expect(isSafeToRetrySendError(err)).toBe(true); - }); - - it("does NOT allow retry for ECONNRESET (message may already be delivered)", () => { - const err = Object.assign(new Error("read ECONNRESET"), { code: "ECONNRESET" }); - expect(isSafeToRetrySendError(err)).toBe(false); - }); - - it("does NOT allow retry for ETIMEDOUT (message may already be delivered)", () => { - const err = Object.assign(new Error("connect ETIMEDOUT"), { code: "ETIMEDOUT" }); - expect(isSafeToRetrySendError(err)).toBe(false); - }); - - it("does NOT allow retry for EPIPE (connection broken mid-transfer, message may be delivered)", () => { - const err = Object.assign(new Error("write EPIPE"), { code: "EPIPE" }); - expect(isSafeToRetrySendError(err)).toBe(false); - }); - - 
it("does NOT allow retry for UND_ERR_CONNECT_TIMEOUT (ambiguous timing)", () => { - const err = Object.assign(new Error("connect timeout"), { code: "UND_ERR_CONNECT_TIMEOUT" }); - expect(isSafeToRetrySendError(err)).toBe(false); + it.each([ + ["ECONNREFUSED", "connect ECONNREFUSED", true], + ["ENOTFOUND", "getaddrinfo ENOTFOUND", true], + ["EAI_AGAIN", "getaddrinfo EAI_AGAIN", true], + ["ENETUNREACH", "connect ENETUNREACH", true], + ["EHOSTUNREACH", "connect EHOSTUNREACH", true], + ["ECONNRESET", "read ECONNRESET", false], + ["ETIMEDOUT", "connect ETIMEDOUT", false], + ["EPIPE", "write EPIPE", false], + ["UND_ERR_CONNECT_TIMEOUT", "connect timeout", false], + ])("returns %s => %s", (code, message, expected) => { + expect(isSafeToRetrySendError(errorWithCode(message, code))).toBe(expected); }); it("does NOT allow retry for non-network errors", () => { @@ -196,19 +187,12 @@ describe("isSafeToRetrySendError", () => { }); describe("isTelegramServerError", () => { - it("returns true for error_code 500", () => { - const err = Object.assign(new Error("Internal Server Error"), { error_code: 500 }); - expect(isTelegramServerError(err)).toBe(true); - }); - - it("returns true for error_code 502", () => { - const err = Object.assign(new Error("Bad Gateway"), { error_code: 502 }); - expect(isTelegramServerError(err)).toBe(true); - }); - - it("returns false for error_code 403", () => { - const err = Object.assign(new Error("Forbidden"), { error_code: 403 }); - expect(isTelegramServerError(err)).toBe(false); + it.each([ + ["Internal Server Error", 500, true], + ["Bad Gateway", 502, true], + ["Forbidden", 403, false], + ])("returns %s for error_code %s", (message, errorCode, expected) => { + expect(isTelegramServerError(errorWithTelegramCode(message, errorCode))).toBe(expected); }); it("returns false for plain Error", () => { @@ -217,19 +201,12 @@ describe("isTelegramServerError", () => { }); describe("isTelegramClientRejection", () => { - it("returns true for error_code 400", () 
=> { - const err = Object.assign(new Error("Bad Request"), { error_code: 400 }); - expect(isTelegramClientRejection(err)).toBe(true); - }); - - it("returns true for error_code 403", () => { - const err = Object.assign(new Error("Forbidden"), { error_code: 403 }); - expect(isTelegramClientRejection(err)).toBe(true); - }); - - it("returns false for error_code 502", () => { - const err = Object.assign(new Error("Bad Gateway"), { error_code: 502 }); - expect(isTelegramClientRejection(err)).toBe(false); + it.each([ + ["Bad Request", 400, true], + ["Forbidden", 403, true], + ["Bad Gateway", 502, false], + ])("returns %s for error_code %s", (message, errorCode, expected) => { + expect(isTelegramClientRejection(errorWithTelegramCode(message, errorCode))).toBe(expected); }); it("returns false for plain Error", () => { diff --git a/src/telegram/token.test.ts b/src/telegram/token.test.ts index f888ddbfc36..17e412cf584 100644 --- a/src/telegram/token.test.ts +++ b/src/telegram/token.test.ts @@ -7,50 +7,75 @@ import { withStateDirEnv } from "../test-helpers/state-dir-env.js"; import { resolveTelegramToken } from "./token.js"; import { readTelegramUpdateOffset, writeTelegramUpdateOffset } from "./update-offset-store.js"; -function withTempDir(): string { - return fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-telegram-token-")); -} - describe("resolveTelegramToken", () => { + const tempDirs: string[] = []; + + function createTempDir(): string { + const dir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-telegram-token-")); + tempDirs.push(dir); + return dir; + } + + function createTokenFile(fileName: string, contents = "file-token\n"): string { + const dir = createTempDir(); + const tokenFile = path.join(dir, fileName); + fs.writeFileSync(tokenFile, contents, "utf-8"); + return tokenFile; + } + afterEach(() => { vi.unstubAllEnvs(); + for (const dir of tempDirs.splice(0)) { + fs.rmSync(dir, { recursive: true, force: true }); + } }); - it("prefers config token over env", () => 
{ - vi.stubEnv("TELEGRAM_BOT_TOKEN", "env-token"); - const cfg = { - channels: { telegram: { botToken: "cfg-token" } }, - } as OpenClawConfig; - const res = resolveTelegramToken(cfg); - expect(res.token).toBe("cfg-token"); - expect(res.source).toBe("config"); - }); - - it("uses env token when config is missing", () => { - vi.stubEnv("TELEGRAM_BOT_TOKEN", "env-token"); - const cfg = { - channels: { telegram: {} }, - } as OpenClawConfig; - const res = resolveTelegramToken(cfg); - expect(res.token).toBe("env-token"); - expect(res.source).toBe("env"); - }); - - it("uses tokenFile when configured", () => { - vi.stubEnv("TELEGRAM_BOT_TOKEN", ""); - const dir = withTempDir(); - const tokenFile = path.join(dir, "token.txt"); - fs.writeFileSync(tokenFile, "file-token\n", "utf-8"); - const cfg = { channels: { telegram: { tokenFile } } } as OpenClawConfig; - const res = resolveTelegramToken(cfg); - expect(res.token).toBe("file-token"); - expect(res.source).toBe("tokenFile"); - fs.rmSync(dir, { recursive: true, force: true }); + it.each([ + { + name: "prefers config token over env", + envToken: "env-token", + cfg: { + channels: { telegram: { botToken: "cfg-token" } }, + } as OpenClawConfig, + expected: { token: "cfg-token", source: "config" }, + }, + { + name: "uses env token when config is missing", + envToken: "env-token", + cfg: { + channels: { telegram: {} }, + } as OpenClawConfig, + expected: { token: "env-token", source: "env" }, + }, + { + name: "uses tokenFile when configured", + envToken: "", + cfg: { + channels: { telegram: { tokenFile: "" } }, + } as OpenClawConfig, + resolveCfg: () => + ({ + channels: { telegram: { tokenFile: createTokenFile("token.txt") } }, + }) as OpenClawConfig, + expected: { token: "file-token", source: "tokenFile" }, + }, + { + name: "falls back to config token when no env or tokenFile", + envToken: "", + cfg: { + channels: { telegram: { botToken: "cfg-token" } }, + } as OpenClawConfig, + expected: { token: "cfg-token", source: "config" }, + 
}, + ])("$name", ({ envToken, cfg, resolveCfg, expected }) => { + vi.stubEnv("TELEGRAM_BOT_TOKEN", envToken); + const res = resolveTelegramToken(resolveCfg ? resolveCfg() : cfg); + expect(res).toEqual(expected); }); it.runIf(process.platform !== "win32")("rejects symlinked tokenFile paths", () => { vi.stubEnv("TELEGRAM_BOT_TOKEN", ""); - const dir = withTempDir(); + const dir = createTempDir(); const tokenFile = path.join(dir, "token.txt"); const tokenLink = path.join(dir, "token-link.txt"); fs.writeFileSync(tokenFile, "file-token\n", "utf-8"); @@ -60,22 +85,11 @@ describe("resolveTelegramToken", () => { const res = resolveTelegramToken(cfg); expect(res.token).toBe(""); expect(res.source).toBe("none"); - fs.rmSync(dir, { recursive: true, force: true }); - }); - - it("falls back to config token when no env or tokenFile", () => { - vi.stubEnv("TELEGRAM_BOT_TOKEN", ""); - const cfg = { - channels: { telegram: { botToken: "cfg-token" } }, - } as OpenClawConfig; - const res = resolveTelegramToken(cfg); - expect(res.token).toBe("cfg-token"); - expect(res.source).toBe("config"); }); it("does not fall back to config when tokenFile is missing", () => { vi.stubEnv("TELEGRAM_BOT_TOKEN", ""); - const dir = withTempDir(); + const dir = createTempDir(); const tokenFile = path.join(dir, "missing-token.txt"); const cfg = { channels: { telegram: { tokenFile, botToken: "cfg-token" } }, @@ -83,7 +97,6 @@ describe("resolveTelegramToken", () => { const res = resolveTelegramToken(cfg); expect(res.token).toBe(""); expect(res.source).toBe("none"); - fs.rmSync(dir, { recursive: true, force: true }); }); it("resolves per-account tokens when the config account key casing doesn't match routing normalization", () => { @@ -121,14 +134,31 @@ describe("resolveTelegramToken", () => { expect(res.source).toBe("config"); }); - it("falls back to top-level tokenFile for non-default accounts", () => { - const dir = withTempDir(); - const tokenFile = path.join(dir, "token.txt"); - 
fs.writeFileSync(tokenFile, "file-token\n", "utf-8"); + it("uses account-level tokenFile before top-level fallbacks", () => { const cfg = { channels: { telegram: { - tokenFile, + botToken: "top-level-token", + tokenFile: createTokenFile("top-level-token.txt", "top-level-file-token\n"), + accounts: { + work: { + tokenFile: createTokenFile("account-token.txt", "account-file-token\n"), + }, + }, + }, + }, + } as OpenClawConfig; + + const res = resolveTelegramToken(cfg, { accountId: "work" }); + expect(res.token).toBe("account-file-token"); + expect(res.source).toBe("tokenFile"); + }); + + it("falls back to top-level tokenFile for non-default accounts", () => { + const cfg = { + channels: { + telegram: { + tokenFile: createTokenFile("token.txt"), accounts: { work: {}, }, @@ -139,7 +169,23 @@ describe("resolveTelegramToken", () => { const res = resolveTelegramToken(cfg, { accountId: "work" }); expect(res.token).toBe("file-token"); expect(res.source).toBe("tokenFile"); - fs.rmSync(dir, { recursive: true, force: true }); + }); + + it("does not use env token for non-default accounts", () => { + vi.stubEnv("TELEGRAM_BOT_TOKEN", "env-token"); + const cfg = { + channels: { + telegram: { + accounts: { + work: {}, + }, + }, + }, + } as OpenClawConfig; + + const res = resolveTelegramToken(cfg, { accountId: "work" }); + expect(res.token).toBe(""); + expect(res.source).toBe("none"); }); it("throws when botToken is an unresolved SecretRef object", () => { diff --git a/src/test-helpers/http.ts b/src/test-helpers/http.ts new file mode 100644 index 00000000000..2aa6f21ba6c --- /dev/null +++ b/src/test-helpers/http.ts @@ -0,0 +1,20 @@ +export function jsonResponse(body: unknown, status = 200): Response { + return new Response(JSON.stringify(body), { + status, + headers: { "Content-Type": "application/json" }, + }); +} + +export function requestUrl(input: string | URL | Request): string { + if (typeof input === "string") { + return input; + } + if (input instanceof URL) { + return 
input.toString(); + } + return input.url; +} + +export function requestBodyText(body: BodyInit | null | undefined): string { + return typeof body === "string" ? body : "{}"; +} diff --git a/src/test-helpers/temp-dir.ts b/src/test-helpers/temp-dir.ts new file mode 100644 index 00000000000..b5a55dfe03d --- /dev/null +++ b/src/test-helpers/temp-dir.ts @@ -0,0 +1,23 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; + +export async function withTempDir( + options: { + prefix: string; + parentDir?: string; + subdir?: string; + }, + run: (dir: string) => Promise, +): Promise { + const base = await fs.mkdtemp(path.join(options.parentDir ?? os.tmpdir(), options.prefix)); + const dir = options.subdir ? path.join(base, options.subdir) : base; + if (options.subdir) { + await fs.mkdir(dir, { recursive: true }); + } + try { + return await run(dir); + } finally { + await fs.rm(base, { recursive: true, force: true }); + } +} diff --git a/src/test-utils/exec-assertions.ts b/src/test-utils/exec-assertions.ts index 58b77f9f730..6e9149725ef 100644 --- a/src/test-utils/exec-assertions.ts +++ b/src/test-utils/exec-assertions.ts @@ -28,14 +28,7 @@ export function expectSingleNpmInstallIgnoreScriptsCall(params: { throw new Error("expected npm install call"); } const [argv, opts] = first; - expect(argv).toEqual([ - "npm", - "install", - "--omit=dev", - "--omit=peer", - "--silent", - "--ignore-scripts", - ]); + expect(argv).toEqual(["npm", "install", "--omit=dev", "--silent", "--ignore-scripts"]); expect(opts?.cwd).toBeTruthy(); const cwd = String(opts?.cwd); const expectedTargetDir = params.expectedTargetDir; diff --git a/src/tts/tts.test.ts b/src/tts/tts.test.ts index eedc325fd4f..b326b4835e5 100644 --- a/src/tts/tts.test.ts +++ b/src/tts/tts.test.ts @@ -91,6 +91,22 @@ const mockAssistantMessage = (content: AssistantMessage["content"]): AssistantMe timestamp: Date.now(), }); +function createOpenAiTelephonyCfg(model: "tts-1" | 
"gpt-4o-mini-tts"): OpenClawConfig { + return { + messages: { + tts: { + provider: "openai", + openai: { + apiKey: "test-key", + model, + voice: "alloy", + instructions: "Speak warmly", + }, + }, + }, + }; +} + describe("tts", () => { beforeEach(() => { vi.clearAllMocks(); @@ -592,25 +608,14 @@ describe("tts", () => { } }; - it("omits instructions for unsupported speech models", async () => { - const cfg: OpenClawConfig = { - messages: { - tts: { - provider: "openai", - openai: { - apiKey: "test-key", - model: "tts-1", - voice: "alloy", - instructions: "Speak warmly", - }, - }, - }, - }; - + async function expectTelephonyInstructions( + model: "tts-1" | "gpt-4o-mini-tts", + expectedInstructions: string | undefined, + ) { await withMockedTelephonyFetch(async (fetchMock) => { const result = await tts.textToSpeechTelephony({ text: "Hello there, friendly caller.", - cfg, + cfg: createOpenAiTelephonyCfg(model), }); expect(result.success).toBe(true); @@ -618,38 +623,16 @@ describe("tts", () => { const [, init] = fetchMock.mock.calls[0] as [string, RequestInit]; expect(typeof init.body).toBe("string"); const body = JSON.parse(init.body as string) as Record; - expect(body.instructions).toBeUndefined(); + expect(body.instructions).toBe(expectedInstructions); }); + } + + it("omits instructions for unsupported speech models", async () => { + await expectTelephonyInstructions("tts-1", undefined); }); it("includes instructions for gpt-4o-mini-tts", async () => { - const cfg: OpenClawConfig = { - messages: { - tts: { - provider: "openai", - openai: { - apiKey: "test-key", - model: "gpt-4o-mini-tts", - voice: "alloy", - instructions: "Speak warmly", - }, - }, - }, - }; - - await withMockedTelephonyFetch(async (fetchMock) => { - const result = await tts.textToSpeechTelephony({ - text: "Hello there, friendly caller.", - cfg, - }); - - expect(result.success).toBe(true); - expect(fetchMock).toHaveBeenCalledTimes(1); - const [, init] = fetchMock.mock.calls[0] as [string, RequestInit]; 
- expect(typeof init.body).toBe("string"); - const body = JSON.parse(init.body as string) as Record; - expect(body.instructions).toBe("Speak warmly"); - }); + await expectTelephonyInstructions("gpt-4o-mini-tts", "Speak warmly"); }); }); diff --git a/ui/src/ui/views/chat.ts b/ui/src/ui/views/chat.ts index 36412b965a6..1d0b877d042 100644 --- a/ui/src/ui/views/chat.ts +++ b/ui/src/ui/views/chat.ts @@ -1169,7 +1169,7 @@ export function renderChat(props: ChatProps) { props.showNewMessages ? html`